Add 'apps/arv-web/' from commit 'f9732ad8460d013c2f28363655d0d1b91894dca5'
author Peter Amstutz <peter.amstutz@curoverse.com>
Fri, 16 Jan 2015 19:05:48 +0000 (14:05 -0500)
committer Peter Amstutz <peter.amstutz@curoverse.com>
Fri, 16 Jan 2015 19:05:48 +0000 (14:05 -0500)
git-subtree-dir: apps/arv-web
git-subtree-mainline: b97ac7f96234cbbb491bdbaade840ab50802f357
git-subtree-split: f9732ad8460d013c2f28363655d0d1b91894dca5

1286 files changed:
.gitignore [new file with mode: 0644]
COPYING [new file with mode: 0644]
LICENSE-2.0.txt [new file with mode: 0644]
README
agpl-3.0.txt [new file with mode: 0644]
apps/arv-web/Dockerfile [moved from Dockerfile with 100% similarity]
apps/arv-web/README [new file with mode: 0644]
apps/arv-web/apache2_foreground.sh [moved from apache2_foreground.sh with 100% similarity]
apps/arv-web/apache2_vhost [moved from apache2_vhost with 100% similarity]
apps/arv-web/arv-web.py [moved from arv-web.py with 100% similarity]
apps/arv-web/sample-cgi-app/public/.htaccess [moved from sample-cgi-app/public/.htaccess with 100% similarity]
apps/arv-web/sample-cgi-app/public/index.cgi [moved from sample-cgi-app/public/index.cgi with 100% similarity]
apps/arv-web/sample-cgi-app/tmp/.keepkeep [moved from sample-cgi-app/tmp/.keepkeep with 100% similarity]
apps/arv-web/sample-rack-app/config.ru [moved from sample-rack-app/config.ru with 100% similarity]
apps/arv-web/sample-rack-app/public/.keepkeep [moved from sample-rack-app/public/.keepkeep with 100% similarity]
apps/arv-web/sample-rack-app/tmp/.keepkeep [moved from sample-rack-app/tmp/.keepkeep with 100% similarity]
apps/arv-web/sample-static-page/public/index.html [moved from sample-static-page/public/index.html with 100% similarity]
apps/arv-web/sample-static-page/tmp/.keepkeep [moved from sample-static-page/tmp/.keepkeep with 100% similarity]
apps/arv-web/sample-wsgi-app/passenger_wsgi.py [moved from sample-wsgi-app/passenger_wsgi.py with 100% similarity]
apps/arv-web/sample-wsgi-app/public/.keepkeep [moved from sample-wsgi-app/public/.keepkeep with 100% similarity]
apps/arv-web/sample-wsgi-app/tmp/.keepkeep [moved from sample-wsgi-app/tmp/.keepkeep with 100% similarity]
apps/workbench/.gitignore [new file with mode: 0644]
apps/workbench/Gemfile [new file with mode: 0644]
apps/workbench/Gemfile.lock [new file with mode: 0644]
apps/workbench/README.textile [new file with mode: 0644]
apps/workbench/Rakefile [new file with mode: 0644]
apps/workbench/app/assets/images/dax.png [new file with mode: 0644]
apps/workbench/app/assets/images/rails.png [new file with mode: 0644]
apps/workbench/app/assets/images/spinner_32px.gif [new file with mode: 0644]
apps/workbench/app/assets/javascripts/angular_shim.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/application.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/arvados_client.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/bootstrap.js.coffee [new file with mode: 0644]
apps/workbench/app/assets/javascripts/collections.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/dates.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/editable.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/event_log.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/filterable.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/infinite_scroll.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/keep_disks.js.coffee [new file with mode: 0644]
apps/workbench/app/assets/javascripts/list.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/log_viewer.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/permission_toggle.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/pipeline_instances.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/report_issue.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/select_modal.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/selection.js.erb [new file with mode: 0644]
apps/workbench/app/assets/javascripts/sizing.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/tab_panes.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/upload_to_collection.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/user_agreements.js [new file with mode: 0644]
apps/workbench/app/assets/javascripts/users.js [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/api_client_authorizations.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/application.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/authorized_keys.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/badges.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/cards.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/collections.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/groups.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/humans.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/job_tasks.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/jobs.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/keep_disks.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/links.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/loading.css.scss.erb [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/log_viewer.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/logs.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/nodes.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/pipeline_instances.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/pipeline_templates.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/projects.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/repositories.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/sb-admin.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/scaffolds.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/select_modal.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/sessions.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/specimens.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/traits.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/user_agreements.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/users.css.scss [new file with mode: 0644]
apps/workbench/app/assets/stylesheets/virtual_machines.css.scss [new file with mode: 0644]
apps/workbench/app/controllers/actions_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/api_client_authorizations_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/application_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/authorized_keys_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/collections_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/groups_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/humans_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/job_tasks_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/jobs_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/keep_disks_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/keep_services_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/links_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/logs_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/nodes_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/pipeline_instances_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/pipeline_templates_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/projects_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/repositories_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/search_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/sessions_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/specimens_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/traits_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/user_agreements_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/users_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/virtual_machines_controller.rb [new file with mode: 0644]
apps/workbench/app/controllers/websocket_controller.rb [new file with mode: 0644]
apps/workbench/app/helpers/application_helper.rb [new file with mode: 0644]
apps/workbench/app/helpers/arvados_api_client_helper.rb [new file with mode: 0644]
apps/workbench/app/helpers/collections_helper.rb [new file with mode: 0644]
apps/workbench/app/helpers/jobs_helper.rb [new file with mode: 0644]
apps/workbench/app/helpers/pipeline_components_helper.rb [new file with mode: 0644]
apps/workbench/app/helpers/pipeline_instances_helper.rb [new file with mode: 0644]
apps/workbench/app/helpers/provenance_helper.rb [new file with mode: 0644]
apps/workbench/app/helpers/version_helper.rb [new file with mode: 0644]
apps/workbench/app/mailers/.gitkeep [new file with mode: 0644]
apps/workbench/app/mailers/issue_reporter.rb [new file with mode: 0644]
apps/workbench/app/models/.gitkeep [new file with mode: 0644]
apps/workbench/app/models/api_client_authorization.rb [new file with mode: 0644]
apps/workbench/app/models/arvados_api_client.rb [new file with mode: 0644]
apps/workbench/app/models/arvados_base.rb [new file with mode: 0644]
apps/workbench/app/models/arvados_resource_list.rb [new file with mode: 0644]
apps/workbench/app/models/authorized_key.rb [new file with mode: 0644]
apps/workbench/app/models/collection.rb [new file with mode: 0644]
apps/workbench/app/models/group.rb [new file with mode: 0644]
apps/workbench/app/models/human.rb [new file with mode: 0644]
apps/workbench/app/models/job.rb [new file with mode: 0644]
apps/workbench/app/models/job_task.rb [new file with mode: 0644]
apps/workbench/app/models/keep_disk.rb [new file with mode: 0644]
apps/workbench/app/models/keep_service.rb [new file with mode: 0644]
apps/workbench/app/models/link.rb [new file with mode: 0644]
apps/workbench/app/models/log.rb [new file with mode: 0644]
apps/workbench/app/models/node.rb [new file with mode: 0644]
apps/workbench/app/models/pipeline_instance.rb [new file with mode: 0644]
apps/workbench/app/models/pipeline_template.rb [new file with mode: 0644]
apps/workbench/app/models/repository.rb [new file with mode: 0644]
apps/workbench/app/models/specimen.rb [new file with mode: 0644]
apps/workbench/app/models/trait.rb [new file with mode: 0644]
apps/workbench/app/models/user.rb [new file with mode: 0644]
apps/workbench/app/models/user_agreement.rb [new file with mode: 0644]
apps/workbench/app/models/virtual_machine.rb [new file with mode: 0644]
apps/workbench/app/views/api_client_authorizations/_show_help.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/404.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/404.json.erb [new file with mode: 0644]
apps/workbench/app/views/application/_arvados_attr_value.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_arvados_object.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_arvados_object_attr.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_breadcrumb_page_name.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_breadcrumbs.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_choose.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_choose.js.erb [new file with mode: 0644]
apps/workbench/app/views/application/_content.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_content_layout.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_delete_object_button.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_index.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_job_progress.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_loading.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_loading_modal.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_name_and_description.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_paging.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_pipeline_progress.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_pipeline_status_label.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_projects_tree_menu.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_report_error.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_report_issue_popup.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_selection_checkbox.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_show_advanced.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_show_advanced_api_response.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_show_advanced_cli_example.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_show_advanced_curl_example.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_show_advanced_metadata.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_show_advanced_python_example.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_show_api.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_show_attributes.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_show_object_button.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_show_object_description_cell.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_show_recent.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_svg_div.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_tab_line_buttons.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/_title_and_buttons.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/api_error.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/api_error.json.erb [new file with mode: 0644]
apps/workbench/app/views/application/destroy.js.erb [new file with mode: 0644]
apps/workbench/app/views/application/error.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/error.json.erb [new file with mode: 0644]
apps/workbench/app/views/application/index.html.erb [new file with mode: 0644]
apps/workbench/app/views/application/report_issue_popup.js.erb [new file with mode: 0644]
apps/workbench/app/views/application/show.html.erb [new file with mode: 0644]
apps/workbench/app/views/authorized_keys/create.js.erb [new file with mode: 0644]
apps/workbench/app/views/authorized_keys/edit.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/_choose.js.erb [new symlink]
apps/workbench/app/views/collections/_choose_rows.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/_index_tbody.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/_sharing_button.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/_show_chooser_preview.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/_show_files.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/_show_provenance_graph.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/_show_recent.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/_show_source_summary.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/_show_upload.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/_show_used_by.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/graph.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/hash_matches.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/index.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/index.js.erb [new file with mode: 0644]
apps/workbench/app/views/collections/sharing_popup.js.erb [new file with mode: 0644]
apps/workbench/app/views/collections/show.html.erb [new file with mode: 0644]
apps/workbench/app/views/collections/show_file_links.html.erb [new file with mode: 0644]
apps/workbench/app/views/groups/_choose_rows.html.erb [new file with mode: 0644]
apps/workbench/app/views/groups/_show_recent.html.erb [new file with mode: 0644]
apps/workbench/app/views/issue_reporter/send_report.text.erb [new file with mode: 0644]
apps/workbench/app/views/jobs/_show_details.html.erb [new file with mode: 0644]
apps/workbench/app/views/jobs/_show_job_buttons.html.erb [new file with mode: 0644]
apps/workbench/app/views/jobs/_show_log.html.erb [new file with mode: 0644]
apps/workbench/app/views/jobs/_show_object_description_cell.html.erb [new file with mode: 0644]
apps/workbench/app/views/jobs/_show_provenance.html.erb [new file with mode: 0644]
apps/workbench/app/views/jobs/_show_recent.html.erb [new file with mode: 0644]
apps/workbench/app/views/jobs/_show_status.html.erb [new file with mode: 0644]
apps/workbench/app/views/jobs/show.html.erb [new file with mode: 0644]
apps/workbench/app/views/keep_disks/_content_layout.html.erb [new file with mode: 0644]
apps/workbench/app/views/layouts/application.html.erb [new file with mode: 0644]
apps/workbench/app/views/layouts/body.html.erb [new file with mode: 0644]
apps/workbench/app/views/links/_breadcrumb_page_name.html.erb [new file with mode: 0644]
apps/workbench/app/views/notifications/_collections_notification.html.erb [new file with mode: 0644]
apps/workbench/app/views/notifications/_jobs_notification.html.erb [new file with mode: 0644]
apps/workbench/app/views/notifications/_pipelines_notification.html.erb [new file with mode: 0644]
apps/workbench/app/views/notifications/_ssh_key_notification.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/_component_labels.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/_running_component.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/_show_compare.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/_show_components.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/_show_components_editable.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/_show_components_json.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/_show_components_running.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/_show_graph.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/_show_inputs.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/_show_log.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/_show_object_description_cell.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/_show_recent.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/_show_recent_rows.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/_show_tab_buttons.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/compare.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/index.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/show.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_instances/show.js.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_templates/_choose.js.erb [new symlink]
apps/workbench/app/views/pipeline_templates/_choose_rows.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_templates/_show_attributes.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_templates/_show_chooser_preview.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_templates/_show_components.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_templates/_show_pipelines.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_templates/_show_recent.html.erb [new file with mode: 0644]
apps/workbench/app/views/pipeline_templates/show.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_choose.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_choose.js.erb [new symlink]
apps/workbench/app/views/projects/_compute_node_status.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_compute_node_summary.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_index_jobs_and_pipelines.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_index_projects.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_show_contents_rows.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_show_dashboard.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_show_data_collections.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_show_featured.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_show_jobs_and_pipelines.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_show_other_objects.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_show_pipeline_templates.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_show_sharing.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_show_subprojects.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/_show_tab_contents.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/index.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/remove_items.js.erb [new file with mode: 0644]
apps/workbench/app/views/projects/show.html.erb [new file with mode: 0644]
apps/workbench/app/views/projects/tab_counts.js.erb [new file with mode: 0644]
apps/workbench/app/views/repositories/_show_help.html.erb [new file with mode: 0644]
apps/workbench/app/views/search/_choose_rows.html.erb [new file with mode: 0644]
apps/workbench/app/views/sessions/index.html.erb [new file with mode: 0644]
apps/workbench/app/views/user_agreements/index.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/_add_ssh_key_popup.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/_choose_rows.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/_home.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/_manage_account.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/_manage_current_token.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/_manage_repositories.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/_manage_ssh_keys.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/_manage_virtual_machines.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/_setup_popup.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/_show_activity.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/_show_admin.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/_tables.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/activity.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/add_ssh_key.js.erb [new file with mode: 0644]
apps/workbench/app/views/users/add_ssh_key_popup.js.erb [new file with mode: 0644]
apps/workbench/app/views/users/home.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/home.js.erb [new file with mode: 0644]
apps/workbench/app/views/users/inactive.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/manage_account.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/profile.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/setup.js.erb [new file with mode: 0644]
apps/workbench/app/views/users/setup_popup.js.erb [new file with mode: 0644]
apps/workbench/app/views/users/storage.html.erb [new file with mode: 0644]
apps/workbench/app/views/users/welcome.html.erb [new file with mode: 0644]
apps/workbench/app/views/virtual_machines/_show_help.html.erb [new file with mode: 0644]
apps/workbench/app/views/websocket/index.html.erb [new file with mode: 0644]
apps/workbench/config.ru [new file with mode: 0644]
apps/workbench/config/application.default.yml [new file with mode: 0644]
apps/workbench/config/application.rb [new file with mode: 0644]
apps/workbench/config/application.yml.example [new file with mode: 0644]
apps/workbench/config/boot.rb [new file with mode: 0644]
apps/workbench/config/database.yml [new file with mode: 0644]
apps/workbench/config/environment.rb [new file with mode: 0644]
apps/workbench/config/environments/development.rb.example [new file with mode: 0644]
apps/workbench/config/environments/production.rb.example [new file with mode: 0644]
apps/workbench/config/environments/test.rb [new symlink]
apps/workbench/config/environments/test.rb.example [new file with mode: 0644]
apps/workbench/config/initializers/backtrace_silencers.rb [new file with mode: 0644]
apps/workbench/config/initializers/inflections.rb [new file with mode: 0644]
apps/workbench/config/initializers/mime_types.rb [new file with mode: 0644]
apps/workbench/config/initializers/redcloth.rb [new file with mode: 0644]
apps/workbench/config/initializers/secret_token.rb.example [new file with mode: 0644]
apps/workbench/config/initializers/session_store.rb [new file with mode: 0644]
apps/workbench/config/initializers/wrap_parameters.rb [new file with mode: 0644]
apps/workbench/config/load_config.rb [new file with mode: 0644]
apps/workbench/config/locales/en.bootstrap.yml [new file with mode: 0644]
apps/workbench/config/locales/en.yml [new file with mode: 0644]
apps/workbench/config/piwik.yml.example [new file with mode: 0644]
apps/workbench/config/routes.rb [new file with mode: 0644]
apps/workbench/db/schema.rb [new file with mode: 0644]
apps/workbench/db/seeds.rb [new file with mode: 0644]
apps/workbench/lib/assets/.gitkeep [new file with mode: 0644]
apps/workbench/lib/tasks/.gitkeep [new file with mode: 0644]
apps/workbench/lib/tasks/config_check.rake [new file with mode: 0644]
apps/workbench/log/.gitkeep [new file with mode: 0644]
apps/workbench/public/404.html [new file with mode: 0644]
apps/workbench/public/422.html [new file with mode: 0644]
apps/workbench/public/500.html [new file with mode: 0644]
apps/workbench/public/d3.v3.min.js [new file with mode: 0644]
apps/workbench/public/favicon.ico [new file with mode: 0644]
apps/workbench/public/graph-example.html [new file with mode: 0644]
apps/workbench/public/robots.txt [new file with mode: 0644]
apps/workbench/script/rails [new file with mode: 0755]
apps/workbench/test/controllers/actions_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/api_client_authorizations_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/application_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/authorized_keys_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/collections_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/groups_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/humans_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/job_tasks_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/jobs_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/keep_disks_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/links_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/logs_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/nodes_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/pipeline_instances_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/pipeline_templates_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/projects_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/repositories_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/search_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/sessions_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/specimens_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/traits_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/user_agreements_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/users_controller_test.rb [new file with mode: 0644]
apps/workbench/test/controllers/virtual_machines_controller_test.rb [new file with mode: 0644]
apps/workbench/test/diagnostics/pipeline_test.rb [new file with mode: 0644]
apps/workbench/test/diagnostics_test_helper.rb [new file with mode: 0644]
apps/workbench/test/fixtures/.gitkeep [new file with mode: 0644]
apps/workbench/test/helpers/pipeline_instances_helper_test.rb [new file with mode: 0644]
apps/workbench/test/helpers/search_helper_test.rb [new file with mode: 0644]
apps/workbench/test/integration/.gitkeep [new file with mode: 0644]
apps/workbench/test/integration/application_layout_test.rb [new file with mode: 0644]
apps/workbench/test/integration/collection_upload_test.rb [new file with mode: 0644]
apps/workbench/test/integration/collections_test.rb [new file with mode: 0644]
apps/workbench/test/integration/errors_test.rb [new file with mode: 0644]
apps/workbench/test/integration/filterable_infinite_scroll_test.rb [new file with mode: 0644]
apps/workbench/test/integration/jobs_test.rb [new file with mode: 0644]
apps/workbench/test/integration/logins_test.rb [new file with mode: 0644]
apps/workbench/test/integration/pipeline_instances_test.rb [new file with mode: 0644]
apps/workbench/test/integration/pipeline_templates_test.rb [new file with mode: 0644]
apps/workbench/test/integration/projects_test.rb [new file with mode: 0644]
apps/workbench/test/integration/report_issue_test.rb [new file with mode: 0644]
apps/workbench/test/integration/search_box_test.rb [new file with mode: 0644]
apps/workbench/test/integration/smoke_test.rb [new file with mode: 0644]
apps/workbench/test/integration/user_agreements_test.rb [new file with mode: 0644]
apps/workbench/test/integration/user_manage_account_test.rb [new file with mode: 0644]
apps/workbench/test/integration/user_profile_test.rb [new file with mode: 0644]
apps/workbench/test/integration/users_test.rb [new file with mode: 0644]
apps/workbench/test/integration/virtual_machines_test.rb [new file with mode: 0644]
apps/workbench/test/integration/websockets_test.rb [new file with mode: 0644]
apps/workbench/test/integration_helper.rb [new file with mode: 0644]
apps/workbench/test/performance/browsing_test.rb [new file with mode: 0644]
apps/workbench/test/performance_test_helper.rb [new file with mode: 0644]
apps/workbench/test/test_helper.rb [new file with mode: 0644]
apps/workbench/test/unit/.gitkeep [new file with mode: 0644]
apps/workbench/test/unit/arvados_resource_list_test.rb [new file with mode: 0644]
apps/workbench/test/unit/collection_test.rb [new file with mode: 0644]
apps/workbench/test/unit/group_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/api_client_authorizations_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/authorized_keys_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/collections_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/groups_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/humans_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/job_tasks_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/jobs_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/keep_disks_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/links_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/logs_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/nodes_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/pipeline_instances_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/pipeline_templates_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/projects_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/repositories_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/sessions_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/specimens_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/traits_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/user_agreements_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/users_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/helpers/virtual_machines_helper_test.rb [new file with mode: 0644]
apps/workbench/test/unit/job_test.rb [new file with mode: 0644]
apps/workbench/test/unit/link_test.rb [new file with mode: 0644]
apps/workbench/test/unit/pipeline_instance_test.rb [new file with mode: 0644]
apps/workbench/test/unit/user_test.rb [new file with mode: 0644]
apps/workbench/vendor/assets/javascripts/.gitkeep [new file with mode: 0644]
apps/workbench/vendor/assets/javascripts/jquery.number.min.js [new file with mode: 0644]
apps/workbench/vendor/assets/stylesheets/.gitkeep [new file with mode: 0644]
apps/workbench/vendor/plugins/.gitkeep [new file with mode: 0644]
by-sa-3.0.txt [new file with mode: 0644]
crunch_scripts/GATK2-VariantFiltration [new file with mode: 0755]
crunch_scripts/GATK2-bqsr [new file with mode: 0755]
crunch_scripts/GATK2-merge-call [new file with mode: 0755]
crunch_scripts/GATK2-realign [new file with mode: 0755]
crunch_scripts/arvados-bcbio-nextgen.py [new file with mode: 0755]
crunch_scripts/arvados_bwa.py [new file with mode: 0644]
crunch_scripts/arvados_gatk2.py [new file with mode: 0644]
crunch_scripts/arvados_ipc.py [new file with mode: 0644]
crunch_scripts/arvados_picard.py [new file with mode: 0644]
crunch_scripts/arvados_samtools.py [new file with mode: 0644]
crunch_scripts/bwa-aln [new file with mode: 0755]
crunch_scripts/bwa-index [new file with mode: 0755]
crunch_scripts/collection-merge [new file with mode: 0755]
crunch_scripts/crunchutil/__init__.py [new file with mode: 0644]
crunch_scripts/crunchutil/robust_put.py [new file with mode: 0644]
crunch_scripts/crunchutil/subst.py [new file with mode: 0644]
crunch_scripts/crunchutil/vwd.py [new file with mode: 0644]
crunch_scripts/decompress-all.py [new file with mode: 0755]
crunch_scripts/file-select [new file with mode: 0755]
crunch_scripts/grep [new file with mode: 0755]
crunch_scripts/hash [new file with mode: 0755]
crunch_scripts/pgp-survey-import [new file with mode: 0755]
crunch_scripts/pgp-survey-parse [new file with mode: 0755]
crunch_scripts/picard-gatk2-prep [new file with mode: 0755]
crunch_scripts/pyrtg.py [new file with mode: 0644]
crunch_scripts/rtg-fasta2sdf [new file with mode: 0755]
crunch_scripts/rtg-fastq2sdf [new file with mode: 0755]
crunch_scripts/rtg-map [new file with mode: 0755]
crunch_scripts/rtg-snp [new file with mode: 0755]
crunch_scripts/run-command [new file with mode: 0755]
crunch_scripts/split-fastq.py [new file with mode: 0755]
doc/Gemfile [new file with mode: 0644]
doc/Gemfile.lock [new file with mode: 0644]
doc/README.textile [new file with mode: 0644]
doc/Rakefile [new file with mode: 0644]
doc/_config.yml [new file with mode: 0644]
doc/_includes/_0_filter_py.liquid [new file with mode: 0644]
doc/_includes/_alert-incomplete.liquid [new file with mode: 0644]
doc/_includes/_alert_stub.liquid [new file with mode: 0644]
doc/_includes/_concurrent_hash_script_py.liquid [new file with mode: 0644]
doc/_includes/_example_docker.liquid [new file with mode: 0644]
doc/_includes/_example_sdk_go.liquid [new file with mode: 0644]
doc/_includes/_example_sdk_go_imports.liquid [new file with mode: 0644]
doc/_includes/_navbar_left.liquid [new file with mode: 0644]
doc/_includes/_navbar_top.liquid [new file with mode: 0644]
doc/_includes/_notebox_begin.liquid [new file with mode: 0644]
doc/_includes/_notebox_end.liquid [new file with mode: 0644]
doc/_includes/_run_command_foreach_example.liquid [new file with mode: 0644]
doc/_includes/_run_command_simple_example.liquid [new file with mode: 0644]
doc/_includes/_run_md5sum_py.liquid [new file with mode: 0644]
doc/_includes/_skip_sso_server_install.liquid [new file with mode: 0644]
doc/_includes/_ssh_addkey.liquid [new file with mode: 0644]
doc/_includes/_ssh_intro.liquid [new file with mode: 0644]
doc/_includes/_tutorial_bwa_sortsam_pipeline.liquid [new file with mode: 0644]
doc/_includes/_tutorial_expectations.liquid [new file with mode: 0644]
doc/_includes/_tutorial_hash_script_py.liquid [new file with mode: 0644]
doc/_includes/_tutorial_submit_job.liquid [new file with mode: 0644]
doc/_includes/_webring.liquid [new file with mode: 0644]
doc/_layouts/default.html.liquid [new file with mode: 0644]
doc/admin/cheat_sheet.html.textile.liquid [new file with mode: 0644]
doc/admin/index.html.md.liquid [new file with mode: 0644]
doc/api/authentication.html.textile.liquid [new file with mode: 0644]
doc/api/crunch-scripts.html.textile.liquid [new file with mode: 0644]
doc/api/index.html.textile.liquid [new file with mode: 0644]
doc/api/methods.html.textile.liquid [new file with mode: 0644]
doc/api/methods/api_client_authorizations.html.textile.liquid [new file with mode: 0644]
doc/api/methods/api_clients.html.textile.liquid [new file with mode: 0644]
doc/api/methods/authorized_keys.html.textile.liquid [new file with mode: 0644]
doc/api/methods/collections.html.textile.liquid [new file with mode: 0644]
doc/api/methods/groups.html.textile.liquid [new file with mode: 0644]
doc/api/methods/humans.html.textile.liquid [new file with mode: 0644]
doc/api/methods/job_tasks.html.textile.liquid [new file with mode: 0644]
doc/api/methods/jobs.html.textile.liquid [new file with mode: 0644]
doc/api/methods/keep_disks.html.textile.liquid [new file with mode: 0644]
doc/api/methods/keep_services.html.textile.liquid [new file with mode: 0644]
doc/api/methods/links.html.textile.liquid [new file with mode: 0644]
doc/api/methods/logs.html.textile.liquid [new file with mode: 0644]
doc/api/methods/nodes.html.textile.liquid [new file with mode: 0644]
doc/api/methods/pipeline_instances.html.textile.liquid [new file with mode: 0644]
doc/api/methods/pipeline_templates.html.textile.liquid [new file with mode: 0644]
doc/api/methods/repositories.html.textile.liquid [new file with mode: 0644]
doc/api/methods/specimens.html.textile.liquid [new file with mode: 0644]
doc/api/methods/traits.html.textile.liquid [new file with mode: 0644]
doc/api/methods/users.html.textile.liquid [new file with mode: 0644]
doc/api/methods/virtual_machines.html.textile.liquid [new file with mode: 0644]
doc/api/permission-model.html.textile.liquid [new file with mode: 0644]
doc/api/resources.html.textile.liquid [new file with mode: 0644]
doc/api/schema/ApiClient.html.textile.liquid [new file with mode: 0644]
doc/api/schema/ApiClientAuthorization.html.textile.liquid [new file with mode: 0644]
doc/api/schema/AuthorizedKey.html.textile.liquid [new file with mode: 0644]
doc/api/schema/Collection.html.textile.liquid [new file with mode: 0644]
doc/api/schema/Group.html.textile.liquid [new file with mode: 0644]
doc/api/schema/Human.html.textile.liquid [new file with mode: 0644]
doc/api/schema/Job.html.textile.liquid [new file with mode: 0644]
doc/api/schema/JobTask.html.textile.liquid [new file with mode: 0644]
doc/api/schema/KeepDisk.html.textile.liquid [new file with mode: 0644]
doc/api/schema/KeepService.html.textile.liquid [new file with mode: 0644]
doc/api/schema/Link.html.textile.liquid [new file with mode: 0644]
doc/api/schema/Log.html.textile.liquid [new file with mode: 0644]
doc/api/schema/Node.html.textile.liquid [new file with mode: 0644]
doc/api/schema/PipelineInstance.html.textile.liquid [new file with mode: 0644]
doc/api/schema/PipelineTemplate.html.textile.liquid [new file with mode: 0644]
doc/api/schema/Repository.html.textile.liquid [new file with mode: 0644]
doc/api/schema/Specimen.html.textile.liquid [new file with mode: 0644]
doc/api/schema/Trait.html.textile.liquid [new file with mode: 0644]
doc/api/schema/User.html.textile.liquid [new file with mode: 0644]
doc/api/schema/VirtualMachine.html.textile.liquid [new file with mode: 0644]
doc/css/badges.css [new file with mode: 0644]
doc/css/bootstrap-theme.css [new file with mode: 0644]
doc/css/bootstrap-theme.css.map [new file with mode: 0644]
doc/css/bootstrap-theme.min.css [new file with mode: 0644]
doc/css/bootstrap.css [new file with mode: 0644]
doc/css/bootstrap.css.map [new file with mode: 0644]
doc/css/bootstrap.min.css [new file with mode: 0644]
doc/css/code.css [new file with mode: 0644]
doc/css/font-awesome.css [new file with mode: 0644]
doc/css/nav-list.css [new file with mode: 0644]
doc/examples/pipeline_templates/gatk-exome-fq-snp.json [new file with mode: 0644]
doc/examples/pipeline_templates/rtg-fq-snp.json [new file with mode: 0644]
doc/examples/ruby/list-active-nodes.rb [new file with mode: 0755]
doc/fonts/FontAwesome.otf [new file with mode: 0644]
doc/fonts/fontawesome-webfont.eot [new file with mode: 0755]
doc/fonts/fontawesome-webfont.svg [new file with mode: 0755]
doc/fonts/fontawesome-webfont.ttf [new file with mode: 0755]
doc/fonts/fontawesome-webfont.woff [new file with mode: 0755]
doc/fonts/glyphicons-halflings-regular.eot [new file with mode: 0644]
doc/fonts/glyphicons-halflings-regular.svg [new file with mode: 0644]
doc/fonts/glyphicons-halflings-regular.ttf [new file with mode: 0644]
doc/fonts/glyphicons-halflings-regular.woff [new file with mode: 0644]
doc/gen_api_method_docs.py [new file with mode: 0755]
doc/gen_api_schema_docs.py [new file with mode: 0755]
doc/images/dax-reading-book.png [new file with mode: 0644]
doc/images/dax.png [new file with mode: 0644]
doc/images/doc-bg.jpg [new file with mode: 0644]
doc/images/favicon.ico [new file with mode: 0644]
doc/images/glyphicons-halflings-white.png [new file with mode: 0644]
doc/images/glyphicons-halflings.png [new file with mode: 0644]
doc/images/ssh-adding-public-key.png [new file with mode: 0644]
doc/images/workbench-dashboard.png [new file with mode: 0644]
doc/images/workbench-move-selected.png [new file with mode: 0644]
doc/index.html.liquid [new file with mode: 0644]
doc/install/client.html.textile.liquid [new file with mode: 0644]
doc/install/create-standard-objects.html.textile.liquid [new file with mode: 0644]
doc/install/index.html.textile.liquid [new file with mode: 0644]
doc/install/install-api-server.html.textile.liquid [new file with mode: 0644]
doc/install/install-crunch-dispatch.html.textile.liquid [new file with mode: 0644]
doc/install/install-docker.html.textile.liquid [new file with mode: 0644]
doc/install/install-keepproxy.html.textile.liquid [new file with mode: 0644]
doc/install/install-keepstore.html.textile.liquid [new file with mode: 0644]
doc/install/install-manual-overview.html.textile.liquid [new file with mode: 0644]
doc/install/install-manual-prerequisites-ruby.html.textile.liquid [new file with mode: 0644]
doc/install/install-manual-prerequisites.html.textile.liquid [new file with mode: 0644]
doc/install/install-shell-server.html.textile.liquid [new file with mode: 0644]
doc/install/install-sso.html.textile.liquid [new file with mode: 0644]
doc/install/install-workbench-app.html.textile.liquid [new file with mode: 0644]
doc/js/bootstrap.js [new file with mode: 0644]
doc/js/bootstrap.min.js [new file with mode: 0644]
doc/js/jquery.min.js [new file with mode: 0644]
doc/sdk/cli/index.html.textile.liquid [new file with mode: 0644]
doc/sdk/cli/install.html.textile.liquid [new file with mode: 0644]
doc/sdk/cli/reference.html.textile.liquid [new file with mode: 0644]
doc/sdk/cli/subcommands.html.textile.liquid [new file with mode: 0644]
doc/sdk/go/index.html.textile.liquid [new file with mode: 0644]
doc/sdk/index.html.textile.liquid [new file with mode: 0644]
doc/sdk/java/index.html.textile.liquid [new file with mode: 0644]
doc/sdk/perl/index.html.textile.liquid [new file with mode: 0644]
doc/sdk/python/crunch-utility-libraries.html.textile.liquid [new file with mode: 0644]
doc/sdk/python/python.html.textile.liquid [new file with mode: 0644]
doc/sdk/python/sdk-python.html.textile.liquid [new file with mode: 0644]
doc/sdk/ruby/index.html.textile.liquid [new file with mode: 0644]
doc/user/copying/LICENSE-2.0.html [new file with mode: 0644]
doc/user/copying/agpl-3.0.html [new file with mode: 0644]
doc/user/copying/by-sa-3.0.html [new file with mode: 0644]
doc/user/copying/copying.html.textile.liquid [new file with mode: 0644]
doc/user/examples/crunch-examples.html.textile.liquid [new file with mode: 0644]
doc/user/getting_started/check-environment.html.textile.liquid [new file with mode: 0644]
doc/user/getting_started/community.html.textile.liquid [new file with mode: 0644]
doc/user/getting_started/ssh-access-unix.html.textile.liquid [new file with mode: 0644]
doc/user/getting_started/ssh-access-windows.html.textile.liquid [new file with mode: 0644]
doc/user/getting_started/workbench.html.textile.liquid [new file with mode: 0644]
doc/user/index.html.textile.liquid [new file with mode: 0644]
doc/user/reference/api-tokens.html.textile.liquid [new file with mode: 0644]
doc/user/reference/job-pipeline-ref.html.textile.liquid [new file with mode: 0644]
doc/user/topics/arv-docker.html.textile.liquid [new file with mode: 0644]
doc/user/topics/arv-run.html.textile.liquid [new file with mode: 0644]
doc/user/topics/keep.html.textile.liquid [new file with mode: 0644]
doc/user/topics/run-command.html.textile.liquid [new file with mode: 0644]
doc/user/topics/running-pipeline-command-line.html.textile.liquid [new file with mode: 0644]
doc/user/topics/tutorial-gatk-variantfiltration.html.textile.liquid [new file with mode: 0644]
doc/user/topics/tutorial-job1.html.textile.liquid [new file with mode: 0644]
doc/user/topics/tutorial-parallel.html.textile.liquid [new file with mode: 0644]
doc/user/topics/tutorial-trait-search.html.textile.liquid [new file with mode: 0644]
doc/user/tutorials/intro-crunch.html.textile.liquid [new file with mode: 0644]
doc/user/tutorials/running-external-program.html.textile.liquid [new file with mode: 0644]
doc/user/tutorials/tutorial-firstscript.html.textile.liquid [new file with mode: 0644]
doc/user/tutorials/tutorial-keep-get.html.textile.liquid [new file with mode: 0644]
doc/user/tutorials/tutorial-keep-mount.html.textile.liquid [new file with mode: 0644]
doc/user/tutorials/tutorial-keep.html.textile.liquid [new file with mode: 0644]
doc/user/tutorials/tutorial-pipeline-workbench.html.textile.liquid [new file with mode: 0644]
doc/user/tutorials/tutorial-submit-job.html.textile.liquid [new file with mode: 0644]
doc/zenweb-liquid.rb [new file with mode: 0644]
doc/zenweb-textile.rb [new file with mode: 0644]
docker/.gitignore [new file with mode: 0644]
docker/README.md [new file with mode: 0644]
docker/api/Dockerfile [new file with mode: 0644]
docker/api/apache2_foreground.sh [new file with mode: 0755]
docker/api/apache2_vhost.in [new file with mode: 0644]
docker/api/application.yml.in [new file with mode: 0644]
docker/api/apt.arvados.org.list [new file with mode: 0644]
docker/api/arvados-clients.yml.in [new file with mode: 0644]
docker/api/config_databases.sh.in [new file with mode: 0755]
docker/api/crunch-dispatch-run.sh [new file with mode: 0755]
docker/api/database.yml.in [new file with mode: 0644]
docker/api/keep_server_0.json [new file with mode: 0644]
docker/api/keep_server_1.json [new file with mode: 0644]
docker/api/munge.key [new file with mode: 0644]
docker/api/omniauth.rb.in [new file with mode: 0644]
docker/api/setup-gitolite.sh.in [new file with mode: 0755]
docker/api/setup.sh.in [new file with mode: 0755]
docker/api/slurm.conf.in [new file with mode: 0644]
docker/api/superuser_token.in [new file with mode: 0644]
docker/api/supervisor.conf [new file with mode: 0644]
docker/api/update-gitolite.rb [new file with mode: 0755]
docker/arvdock [new file with mode: 0755]
docker/base/Dockerfile [new file with mode: 0644]
docker/base/apt.arvados.org.list [new file with mode: 0644]
docker/bcbio-nextgen/Dockerfile [new file with mode: 0644]
docker/build.sh [new file with mode: 0755]
docker/build_tools/Makefile [new file with mode: 0644]
docker/build_tools/build.rb [new file with mode: 0755]
docker/build_tools/config.rb [new file with mode: 0755]
docker/compute/Dockerfile [new file with mode: 0644]
docker/compute/fuse.conf [new file with mode: 0644]
docker/compute/setup.sh.in [new file with mode: 0755]
docker/compute/supervisor.conf [new file with mode: 0644]
docker/compute/wrapdocker [new file with mode: 0755]
docker/config.yml.example [new file with mode: 0644]
docker/doc/Dockerfile [new file with mode: 0644]
docker/doc/apache2_foreground.sh [new file with mode: 0755]
docker/doc/apache2_vhost [new file with mode: 0644]
docker/install_sdk.sh [new file with mode: 0755]
docker/java-bwa-samtools/Dockerfile [new file with mode: 0644]
docker/jobs/Dockerfile [new file with mode: 0644]
docker/keep/Dockerfile [new file with mode: 0644]
docker/keep/keep_signing_secret.in [new file with mode: 0644]
docker/keep/run-keep.in [new file with mode: 0755]
docker/mkimage-debootstrap.sh [new file with mode: 0755]
docker/passenger/Dockerfile [new file with mode: 0644]
docker/postgresql/Dockerfile [new file with mode: 0644]
docker/postgresql/pg_hba.conf [new file with mode: 0644]
docker/postgresql/postgresql.conf [new file with mode: 0644]
docker/shell/Dockerfile [new file with mode: 0644]
docker/shell/fuse.conf [new file with mode: 0644]
docker/shell/setup.sh.in [new file with mode: 0755]
docker/shell/superuser_token.in [new file with mode: 0644]
docker/shell/supervisor.conf [new file with mode: 0644]
docker/slurm/Dockerfile [new file with mode: 0644]
docker/slurm/munge.key [new file with mode: 0644]
docker/slurm/slurm.conf.in [new file with mode: 0644]
docker/slurm/supervisor.conf [new file with mode: 0644]
docker/sso/Dockerfile [new file with mode: 0644]
docker/sso/apache2_foreground.sh [new file with mode: 0755]
docker/sso/apache2_vhost.in [new file with mode: 0644]
docker/sso/secret_token.rb.in [new file with mode: 0644]
docker/sso/seeds.rb.in [new file with mode: 0644]
docker/workbench/.gitignore [new file with mode: 0644]
docker/workbench/Dockerfile [new file with mode: 0644]
docker/workbench/apache2_foreground.sh [new file with mode: 0755]
docker/workbench/apache2_vhost.in [new file with mode: 0644]
docker/workbench/application.yml.in [new file with mode: 0644]
docker/workbench/production.rb.in [new file with mode: 0644]
docker/workbench/secret_token.rb.in [new file with mode: 0644]
docker/workbench/workbench_rails_env.in [new file with mode: 0644]
sdk/cli/.gitignore [new file with mode: 0644]
sdk/cli/Gemfile [new file with mode: 0644]
sdk/cli/Rakefile [new file with mode: 0644]
sdk/cli/arvados-cli.gemspec [new file with mode: 0644]
sdk/cli/bin/arv [new file with mode: 0755]
sdk/cli/bin/arv-copy [new symlink]
sdk/cli/bin/arv-crunch-job [new file with mode: 0755]
sdk/cli/bin/arv-get [new symlink]
sdk/cli/bin/arv-keepdocker [new symlink]
sdk/cli/bin/arv-ls [new symlink]
sdk/cli/bin/arv-mount [new symlink]
sdk/cli/bin/arv-normalize [new symlink]
sdk/cli/bin/arv-put [new symlink]
sdk/cli/bin/arv-run-pipeline-instance [new file with mode: 0755]
sdk/cli/bin/arv-tag [new file with mode: 0755]
sdk/cli/bin/arv-ws [new symlink]
sdk/cli/bin/crunch-job [new file with mode: 0755]
sdk/cli/test/test_arv-collection-create.rb [new file with mode: 0644]
sdk/cli/test/test_arv-get.rb [new file with mode: 0644]
sdk/cli/test/test_arv-put.rb [new file with mode: 0644]
sdk/cli/test/test_arv-run-pipeline-instance.rb [new file with mode: 0644]
sdk/cli/test/test_arv-tag.rb [new file with mode: 0644]
sdk/cli/test/test_arv-ws.rb [new file with mode: 0644]
sdk/go/arvadosclient/arvadosclient.go [new file with mode: 0644]
sdk/go/arvadosclient/arvadosclient_test.go [new file with mode: 0644]
sdk/go/keepclient/hashcheck.go [new file with mode: 0644]
sdk/go/keepclient/hashcheck_test.go [new file with mode: 0644]
sdk/go/keepclient/keepclient.go [new file with mode: 0644]
sdk/go/keepclient/keepclient_test.go [new file with mode: 0644]
sdk/go/keepclient/root_sorter.go [new file with mode: 0644]
sdk/go/keepclient/root_sorter_test.go [new file with mode: 0644]
sdk/go/keepclient/support.go [new file with mode: 0644]
sdk/go/streamer/streamer.go [new file with mode: 0644]
sdk/go/streamer/streamer_test.go [new file with mode: 0644]
sdk/go/streamer/transfer.go [new file with mode: 0644]
sdk/java/.classpath [new file with mode: 0644]
sdk/java/.project [new file with mode: 0644]
sdk/java/.settings/org.eclipse.jdt.core.prefs [new file with mode: 0644]
sdk/java/ArvadosSDKJavaExample.java [new file with mode: 0644]
sdk/java/ArvadosSDKJavaExampleWithPrompt.java [new file with mode: 0644]
sdk/java/README [new file with mode: 0644]
sdk/java/pom.xml [new file with mode: 0644]
sdk/java/src/main/java/org/arvados/sdk/java/Arvados.java [new file with mode: 0644]
sdk/java/src/main/java/org/arvados/sdk/java/MethodDetails.java [new file with mode: 0644]
sdk/java/src/main/resources/log4j.properties [new file with mode: 0644]
sdk/java/src/test/java/org/arvados/sdk/java/ArvadosTest.java [new file with mode: 0644]
sdk/java/src/test/resources/first_pipeline.json [new file with mode: 0644]
sdk/perl/Makefile.PL [new file with mode: 0644]
sdk/perl/lib/Arvados.pm [new file with mode: 0644]
sdk/perl/lib/Arvados/Request.pm [new file with mode: 0644]
sdk/perl/lib/Arvados/ResourceAccessor.pm [new file with mode: 0644]
sdk/perl/lib/Arvados/ResourceMethod.pm [new file with mode: 0644]
sdk/perl/lib/Arvados/ResourceProxy.pm [new file with mode: 0644]
sdk/perl/lib/Arvados/ResourceProxyList.pm [new file with mode: 0644]
sdk/python/.gitignore [new file with mode: 0644]
sdk/python/MANIFEST.in [new file with mode: 0644]
sdk/python/README.rst [new file with mode: 0644]
sdk/python/arvados/__init__.py [new file with mode: 0644]
sdk/python/arvados/api.py [new file with mode: 0644]
sdk/python/arvados/arvfile.py [new file with mode: 0644]
sdk/python/arvados/collection.py [new file with mode: 0644]
sdk/python/arvados/commands/__init__.py [new file with mode: 0644]
sdk/python/arvados/commands/_util.py [new file with mode: 0644]
sdk/python/arvados/commands/arv_copy.py [new file with mode: 0755]
sdk/python/arvados/commands/keepdocker.py [new file with mode: 0644]
sdk/python/arvados/commands/ls.py [new file with mode: 0755]
sdk/python/arvados/commands/put.py [new file with mode: 0644]
sdk/python/arvados/commands/run.py [new file with mode: 0644]
sdk/python/arvados/commands/ws.py [new file with mode: 0644]
sdk/python/arvados/config.py [new file with mode: 0644]
sdk/python/arvados/errors.py [new file with mode: 0644]
sdk/python/arvados/events.py [new file with mode: 0644]
sdk/python/arvados/keep.py [new file with mode: 0644]
sdk/python/arvados/retry.py [new file with mode: 0644]
sdk/python/arvados/stream.py [new file with mode: 0644]
sdk/python/arvados/timer.py [new file with mode: 0644]
sdk/python/arvados/util.py [new file with mode: 0644]
sdk/python/bin/arv-copy [new file with mode: 0755]
sdk/python/bin/arv-get [new file with mode: 0755]
sdk/python/bin/arv-keepdocker [new file with mode: 0755]
sdk/python/bin/arv-ls [new file with mode: 0755]
sdk/python/bin/arv-normalize [new file with mode: 0755]
sdk/python/bin/arv-put [new file with mode: 0755]
sdk/python/bin/arv-run [new file with mode: 0755]
sdk/python/bin/arv-ws [new file with mode: 0755]
sdk/python/setup.py [new file with mode: 0644]
sdk/python/tests/__init__.py [new file with mode: 0644]
sdk/python/tests/arvados_testutil.py [new file with mode: 0644]
sdk/python/tests/data/1000G_ref_manifest [new file with mode: 0644]
sdk/python/tests/data/jlake_manifest [new file with mode: 0644]
sdk/python/tests/run_test_server.py [new file with mode: 0644]
sdk/python/tests/test_api.py [new file with mode: 0644]
sdk/python/tests/test_arv_ls.py [new file with mode: 0644]
sdk/python/tests/test_arv_put.py [new file with mode: 0644]
sdk/python/tests/test_collections.py [new file with mode: 0644]
sdk/python/tests/test_keep_client.py [new file with mode: 0644]
sdk/python/tests/test_keep_locator.py [new file with mode: 0644]
sdk/python/tests/test_pipeline_template.py [new file with mode: 0644]
sdk/python/tests/test_retry.py [new file with mode: 0644]
sdk/python/tests/test_sdk.py [new file with mode: 0644]
sdk/python/tests/test_stream.py [new file with mode: 0644]
sdk/python/tests/test_util.py [new file with mode: 0644]
sdk/python/tests/test_websockets.py [new file with mode: 0644]
sdk/ruby/.gitignore [new file with mode: 0644]
sdk/ruby/Gemfile [new file with mode: 0644]
sdk/ruby/README [new file with mode: 0644]
sdk/ruby/Rakefile [new file with mode: 0644]
sdk/ruby/arvados.gemspec [new file with mode: 0644]
sdk/ruby/lib/arvados.rb [new file with mode: 0644]
sdk/ruby/lib/arvados/keep.rb [new file with mode: 0644]
sdk/ruby/test/test_big_request.rb [new file with mode: 0644]
sdk/ruby/test/test_keep_manifest.rb [new file with mode: 0644]
services/api/.gitignore [new file with mode: 0644]
services/api/Gemfile [new file with mode: 0644]
services/api/Gemfile.lock [new file with mode: 0644]
services/api/README [new file with mode: 0644]
services/api/Rakefile [new file with mode: 0644]
services/api/app/assets/images/logo.png [new file with mode: 0644]
services/api/app/assets/images/rails.png [new file with mode: 0644]
services/api/app/assets/stylesheets/api_client_authorizations.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/api_clients.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/application.css [new file with mode: 0644]
services/api/app/assets/stylesheets/authorized_keys.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/collections.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/commit_ancestors.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/commits.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/groups.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/humans.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/job_tasks.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/jobs.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/keep_disks.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/links.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/logs.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/nodes.css [new file with mode: 0644]
services/api/app/assets/stylesheets/nodes.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/pipeline_instances.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/pipeline_templates.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/repositories.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/scaffolds.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/specimens.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/traits.css.scss [new file with mode: 0644]
services/api/app/assets/stylesheets/virtual_machines.css.scss [new file with mode: 0644]
services/api/app/controllers/application_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/api_client_authorizations_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/api_clients_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/authorized_keys_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/collections_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/groups_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/humans_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/job_tasks_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/jobs_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/keep_disks_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/keep_services_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/links_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/logs_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/nodes_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/pipeline_instances_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/pipeline_templates_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/repositories_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/schema_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/specimens_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/traits_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/user_agreements_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/users_controller.rb [new file with mode: 0644]
services/api/app/controllers/arvados/v1/virtual_machines_controller.rb [new file with mode: 0644]
services/api/app/controllers/database_controller.rb [new file with mode: 0644]
services/api/app/controllers/static_controller.rb [new file with mode: 0644]
services/api/app/controllers/user_sessions_controller.rb [new file with mode: 0644]
services/api/app/helpers/api_client_authorizations_helper.rb [new file with mode: 0644]
services/api/app/helpers/api_clients_helper.rb [new file with mode: 0644]
services/api/app/helpers/application_helper.rb [new file with mode: 0644]
services/api/app/helpers/authorized_keys_helper.rb [new file with mode: 0644]
services/api/app/helpers/collections_helper.rb [new file with mode: 0644]
services/api/app/helpers/commit_ancestors_helper.rb [new file with mode: 0644]
services/api/app/helpers/commits_helper.rb [new file with mode: 0644]
services/api/app/helpers/groups_helper.rb [new file with mode: 0644]
services/api/app/helpers/humans_helper.rb [new file with mode: 0644]
services/api/app/helpers/job_tasks_helper.rb [new file with mode: 0644]
services/api/app/helpers/jobs_helper.rb [new file with mode: 0644]
services/api/app/helpers/keep_disks_helper.rb [new file with mode: 0644]
services/api/app/helpers/links_helper.rb [new file with mode: 0644]
services/api/app/helpers/logs_helper.rb [new file with mode: 0644]
services/api/app/helpers/nodes_helper.rb [new file with mode: 0644]
services/api/app/helpers/pipeline_instances_helper.rb [new file with mode: 0644]
services/api/app/helpers/pipeline_templates_helper.rb [new file with mode: 0644]
services/api/app/helpers/repositories_helper.rb [new file with mode: 0644]
services/api/app/helpers/specimens_helper.rb [new file with mode: 0644]
services/api/app/helpers/traits_helper.rb [new file with mode: 0644]
services/api/app/helpers/virtual_machines_helper.rb [new file with mode: 0644]
services/api/app/mailers/.gitkeep [new file with mode: 0644]
services/api/app/mailers/admin_notifier.rb [new file with mode: 0644]
services/api/app/mailers/profile_notifier.rb [new file with mode: 0644]
services/api/app/mailers/user_notifier.rb [new file with mode: 0644]
services/api/app/middlewares/arvados_api_token.rb [new file with mode: 0644]
services/api/app/middlewares/rack_socket.rb [new file with mode: 0644]
services/api/app/models/.gitkeep [new file with mode: 0644]
services/api/app/models/api_client.rb [new file with mode: 0644]
services/api/app/models/api_client_authorization.rb [new file with mode: 0644]
services/api/app/models/arvados_model.rb [new file with mode: 0644]
services/api/app/models/authorized_key.rb [new file with mode: 0644]
services/api/app/models/blob.rb [new file with mode: 0644]
services/api/app/models/collection.rb [new file with mode: 0644]
services/api/app/models/commit.rb [new file with mode: 0644]
services/api/app/models/commit_ancestor.rb [new file with mode: 0644]
services/api/app/models/database_seeds.rb [new file with mode: 0644]
services/api/app/models/group.rb [new file with mode: 0644]
services/api/app/models/human.rb [new file with mode: 0644]
services/api/app/models/job.rb [new file with mode: 0644]
services/api/app/models/job_task.rb [new file with mode: 0644]
services/api/app/models/keep_disk.rb [new file with mode: 0644]
services/api/app/models/keep_service.rb [new file with mode: 0644]
services/api/app/models/link.rb [new file with mode: 0644]
services/api/app/models/log.rb [new file with mode: 0644]
services/api/app/models/node.rb [new file with mode: 0644]
services/api/app/models/pipeline_instance.rb [new file with mode: 0644]
services/api/app/models/pipeline_template.rb [new file with mode: 0644]
services/api/app/models/repository.rb [new file with mode: 0644]
services/api/app/models/specimen.rb [new file with mode: 0644]
services/api/app/models/trait.rb [new file with mode: 0644]
services/api/app/models/user.rb [new file with mode: 0644]
services/api/app/models/user_agreement.rb [new file with mode: 0644]
services/api/app/models/virtual_machine.rb [new file with mode: 0644]
services/api/app/views/admin_notifier/new_inactive_user.text.erb [new file with mode: 0644]
services/api/app/views/admin_notifier/new_user.text.erb [new file with mode: 0644]
services/api/app/views/layouts/application.html.erb [new file with mode: 0644]
services/api/app/views/profile_notifier/profile_created.text.erb [new file with mode: 0644]
services/api/app/views/static/intro.html.erb [new file with mode: 0644]
services/api/app/views/static/login_failure.html.erb [new file with mode: 0644]
services/api/app/views/user_notifier/account_is_setup.text.erb [new file with mode: 0644]
services/api/app/views/user_sessions/failure.html.erb [new file with mode: 0644]
services/api/config.ru [new file with mode: 0644]
services/api/config/application.default.yml [new file with mode: 0644]
services/api/config/application.rb [new file with mode: 0644]
services/api/config/application.yml.example [new file with mode: 0644]
services/api/config/boot.rb [new file with mode: 0644]
services/api/config/database.yml.sample [new file with mode: 0644]
services/api/config/environment.rb [new file with mode: 0644]
services/api/config/environments/development.rb.example [new file with mode: 0644]
services/api/config/environments/production.rb.example [new file with mode: 0644]
services/api/config/environments/test.rb.example [new file with mode: 0644]
services/api/config/initializers/andand.rb [new file with mode: 0644]
services/api/config/initializers/authorization.rb [new file with mode: 0644]
services/api/config/initializers/backtrace_silencers.rb [new file with mode: 0644]
services/api/config/initializers/common_api_template.rb [new file with mode: 0644]
services/api/config/initializers/current_api_client.rb [new file with mode: 0644]
services/api/config/initializers/eventbus.rb [new file with mode: 0644]
services/api/config/initializers/hardcoded_api_tokens.rb.example [new file with mode: 0644]
services/api/config/initializers/inflections.rb [new file with mode: 0644]
services/api/config/initializers/kind_and_etag.rb [new file with mode: 0644]
services/api/config/initializers/mime_types.rb [new file with mode: 0644]
services/api/config/initializers/net_http.rb [new file with mode: 0644]
services/api/config/initializers/omniauth.rb.example [new file with mode: 0644]
services/api/config/initializers/schema_discovery_cache.rb [new file with mode: 0644]
services/api/config/initializers/session_store.rb [new file with mode: 0644]
services/api/config/initializers/wrap_parameters.rb [new file with mode: 0644]
services/api/config/initializers/zz_load_config.rb [new file with mode: 0644]
services/api/config/initializers/zz_preload_all_models.rb [new file with mode: 0644]
services/api/config/locales/en.yml [new file with mode: 0644]
services/api/config/routes.rb [new file with mode: 0644]
services/api/config/unbound.template [new file with mode: 0644]
services/api/db/migrate/20121016005009_create_collections.rb [new file with mode: 0644]
services/api/db/migrate/20130105203021_create_metadata.rb [new file with mode: 0644]
services/api/db/migrate/20130105224358_rename_metadata_class.rb [new file with mode: 0644]
services/api/db/migrate/20130105224618_rename_collection_created_by_client.rb [new file with mode: 0644]
services/api/db/migrate/20130107181109_add_uuid_to_collections.rb [new file with mode: 0644]
services/api/db/migrate/20130107212832_create_nodes.rb [new file with mode: 0644]
services/api/db/migrate/20130109175700_create_pipelines.rb [new file with mode: 0644]
services/api/db/migrate/20130109220548_create_pipeline_invocations.rb [new file with mode: 0644]
services/api/db/migrate/20130113214204_add_index_to_collections_and_metadata.rb [new file with mode: 0644]
services/api/db/migrate/20130116024233_create_specimens.rb [new file with mode: 0644]
services/api/db/migrate/20130116215213_create_projects.rb [new file with mode: 0644]
services/api/db/migrate/20130118002239_rename_metadata_attributes.rb [new file with mode: 0644]
services/api/db/migrate/20130122020042_create_users.rb [new file with mode: 0644]
services/api/db/migrate/20130122201442_create_logs.rb [new file with mode: 0644]
services/api/db/migrate/20130122221616_add_modified_at_to_logs.rb [new file with mode: 0644]
services/api/db/migrate/20130123174514_add_uuid_index_to_users.rb [new file with mode: 0644]
services/api/db/migrate/20130123180224_create_api_clients.rb [new file with mode: 0644]
services/api/db/migrate/20130123180228_create_api_client_authorizations.rb [new file with mode: 0644]
services/api/db/migrate/20130125220425_rename_created_by_to_owner.rb [new file with mode: 0644]
services/api/db/migrate/20130128202518_rename_metadata_to_links.rb [new file with mode: 0644]
services/api/db/migrate/20130128231343_add_properties_to_specimen.rb [new file with mode: 0644]
services/api/db/migrate/20130130205749_add_manifest_text_to_collection.rb [new file with mode: 0644]
services/api/db/migrate/20130203104818_create_jobs.rb [new file with mode: 0644]
services/api/db/migrate/20130203104824_create_job_steps.rb [new file with mode: 0644]
services/api/db/migrate/20130203115329_add_priority_to_jobs.rb [new file with mode: 0644]
services/api/db/migrate/20130207195855_add_index_on_timestamps.rb [new file with mode: 0644]
services/api/db/migrate/20130218181504_add_properties_to_pipeline_invocations.rb [new file with mode: 0644]
services/api/db/migrate/20130226170000_remove_native_target_from_links.rb [new file with mode: 0644]
services/api/db/migrate/20130313175417_rename_projects_to_groups.rb [new file with mode: 0644]
services/api/db/migrate/20130315155820_add_is_locked_by_to_jobs.rb [new file with mode: 0644]
services/api/db/migrate/20130315183626_add_log_to_jobs.rb [new file with mode: 0644]
services/api/db/migrate/20130315213205_add_tasks_summary_to_jobs.rb [new file with mode: 0644]
services/api/db/migrate/20130318002138_add_resource_limits_to_jobs.rb [new file with mode: 0644]
services/api/db/migrate/20130319165853_rename_job_command_to_script.rb [new file with mode: 0644]
services/api/db/migrate/20130319180730_rename_pipeline_invocation_to_pipeline_instance.rb [new file with mode: 0644]
services/api/db/migrate/20130319194637_rename_pipelines_to_pipeline_templates.rb [new file with mode: 0644]
services/api/db/migrate/20130319201431_rename_job_steps_to_job_tasks.rb [new file with mode: 0644]
services/api/db/migrate/20130319235957_add_default_owner_to_users.rb [new file with mode: 0644]
services/api/db/migrate/20130320000107_add_default_owner_to_api_client_authorizations.rb [new file with mode: 0644]
services/api/db/migrate/20130326173804_create_commits.rb [new file with mode: 0644]
services/api/db/migrate/20130326182917_create_commit_ancestors.rb [new file with mode: 0644]
services/api/db/migrate/20130415020241_rename_orvos_to_arvados.rb [new file with mode: 0644]
services/api/db/migrate/20130425024459_create_keep_disks.rb [new file with mode: 0644]
services/api/db/migrate/20130425214427_add_service_host_and_service_port_and_service_ssl_flag_to_keep_disks.rb [new file with mode: 0644]
services/api/db/migrate/20130523060112_add_created_by_job_task_to_job_tasks.rb [new file with mode: 0644]
services/api/db/migrate/20130523060213_add_qsequence_to_job_tasks.rb [new file with mode: 0644]
services/api/db/migrate/20130524042319_fix_job_task_qsequence_type.rb [new file with mode: 0644]
services/api/db/migrate/20130528134100_update_nodes_index.rb [new file with mode: 0644]
services/api/db/migrate/20130606183519_create_authorized_keys.rb [new file with mode: 0644]
services/api/db/migrate/20130608053730_create_virtual_machines.rb [new file with mode: 0644]
services/api/db/migrate/20130610202538_create_repositories.rb [new file with mode: 0644]
services/api/db/migrate/20130611163736_rename_authorized_key_authorized_user_to_authorized_user_uuid.rb [new file with mode: 0644]
services/api/db/migrate/20130612042554_add_name_unique_index_to_repositories.rb [new file with mode: 0644]
services/api/db/migrate/20130617150007_add_is_trusted_to_api_clients.rb [new file with mode: 0644]
services/api/db/migrate/20130626002829_add_is_active_to_users.rb [new file with mode: 0644]
services/api/db/migrate/20130626022810_activate_all_admins.rb [new file with mode: 0644]
services/api/db/migrate/20130627154537_create_traits.rb [new file with mode: 0644]
services/api/db/migrate/20130627184333_create_humans.rb [new file with mode: 0644]
services/api/db/migrate/20130708163414_rename_foreign_uuid_attributes.rb [new file with mode: 0644]
services/api/db/migrate/20130708182912_rename_job_foreign_uuid_attributes.rb [new file with mode: 0644]
services/api/db/migrate/20130708185153_rename_user_default_owner.rb [new file with mode: 0644]
services/api/db/migrate/20130724153034_add_scopes_to_api_client_authorizations.rb [new file with mode: 0644]
services/api/db/migrate/20131007180607_rename_resource_limits_to_runtime_constraints.rb [new file with mode: 0644]
services/api/db/migrate/20140117231056_normalize_collection_uuid.rb [new file with mode: 0644]
services/api/db/migrate/20140124222114_fix_link_kind_underscores.rb [new file with mode: 0644]
services/api/db/migrate/20140129184311_normalize_collection_uuids_in_script_parameters.rb [new file with mode: 0644]
services/api/db/migrate/20140317135600_add_nondeterministic_column_to_job.rb [new file with mode: 0644]
services/api/db/migrate/20140319160547_separate_repository_from_script_version.rb [new file with mode: 0644]
services/api/db/migrate/20140321191343_add_repository_column_to_job.rb [new file with mode: 0644]
services/api/db/migrate/20140324024606_add_output_is_persistent_to_job.rb [new file with mode: 0644]
services/api/db/migrate/20140325175653_remove_kind_columns.rb [new file with mode: 0644]
services/api/db/migrate/20140402001908_add_system_group.rb [new file with mode: 0644]
services/api/db/migrate/20140407184311_rename_log_info_to_properties.rb [new file with mode: 0644]
services/api/db/migrate/20140421140924_add_group_class_to_groups.rb [new file with mode: 0644]
services/api/db/migrate/20140421151939_rename_auth_keys_user_index.rb [new file with mode: 0644]
services/api/db/migrate/20140421151940_timestamps_not_null.rb [new file with mode: 0644]
services/api/db/migrate/20140422011506_pipeline_instance_state.rb [new file with mode: 0644]
services/api/db/migrate/20140423132913_add_object_owner_to_logs.rb [new file with mode: 0644]
services/api/db/migrate/20140423133559_new_scope_format.rb [new file with mode: 0644]
services/api/db/migrate/20140501165548_add_unique_name_index_to_links.rb [new file with mode: 0644]
services/api/db/migrate/20140519205916_create_keep_services.rb [new file with mode: 0644]
services/api/db/migrate/20140527152921_add_description_to_pipeline_templates.rb [new file with mode: 0644]
services/api/db/migrate/20140530200539_add_supplied_script_version.rb [new file with mode: 0644]
services/api/db/migrate/20140601022548_remove_name_from_collections.rb [new file with mode: 0644]
services/api/db/migrate/20140602143352_remove_active_and_success_from_pipeline_instances.rb [new file with mode: 0644]
services/api/db/migrate/20140607150616_rename_folder_to_project.rb [new file with mode: 0644]
services/api/db/migrate/20140611173003_add_docker_locator_to_jobs.rb [new file with mode: 0644]
services/api/db/migrate/20140627210837_anonymous_group.rb [new file with mode: 0644]
services/api/db/migrate/20140709172343_job_task_serial_qsequence.rb [new file with mode: 0644]
services/api/db/migrate/20140714184006_empty_collection.rb [new file with mode: 0644]
services/api/db/migrate/20140811184643_collection_use_regular_uuids.rb [new file with mode: 0644]
services/api/db/migrate/20140817035914_add_unique_name_constraints.rb [new file with mode: 0644]
services/api/db/migrate/20140818125735_add_not_null_constraint_to_group_name.rb [new file with mode: 0644]
services/api/db/migrate/20140826180337_remove_output_is_persistent_column.rb [new file with mode: 0644]
services/api/db/migrate/20140828141043_job_priority_fixup.rb [new file with mode: 0644]
services/api/db/migrate/20140909183946_add_start_finish_time_to_tasks_and_pipelines.rb [new file with mode: 0644]
services/api/db/migrate/20140911221252_add_description_to_pipeline_instances_and_jobs.rb [new file with mode: 0644]
services/api/db/migrate/20140918141529_change_user_owner_uuid_not_null.rb [new file with mode: 0644]
services/api/db/migrate/20140918153541_add_properties_to_node.rb [new file with mode: 0644]
services/api/db/migrate/20140918153705_add_state_to_job.rb [new file with mode: 0644]
services/api/db/migrate/20140924091559_add_job_uuid_to_nodes.rb [new file with mode: 0644]
services/api/db/migrate/20141111133038_add_arvados_sdk_version_to_jobs.rb [new file with mode: 0644]
services/api/db/migrate/20141208164553_owner_uuid_index.rb [new file with mode: 0644]
services/api/db/seeds.rb [new file with mode: 0644]
services/api/db/structure.sql [new file with mode: 0644]
services/api/lib/assets/.gitkeep [new file with mode: 0644]
services/api/lib/can_be_an_owner.rb [new file with mode: 0644]
services/api/lib/common_api_template.rb [new file with mode: 0644]
services/api/lib/current_api_client.rb [new file with mode: 0644]
services/api/lib/eventbus.rb [new file with mode: 0644]
services/api/lib/has_uuid.rb [new file with mode: 0644]
services/api/lib/josh_id.rb [new file with mode: 0644]
services/api/lib/kind_and_etag.rb [new file with mode: 0644]
services/api/lib/load_param.rb [new file with mode: 0644]
services/api/lib/record_filters.rb [new file with mode: 0644]
services/api/lib/simulate_job_log.rb [new file with mode: 0644]
services/api/lib/tasks/.gitkeep [new file with mode: 0644]
services/api/lib/tasks/config_check.rake [new file with mode: 0644]
services/api/lib/tasks/replay_job_log.rake [new file with mode: 0644]
services/api/log/.gitkeep [new file with mode: 0644]
services/api/public/404.html [new file with mode: 0644]
services/api/public/422.html [new file with mode: 0644]
services/api/public/500.html [new file with mode: 0644]
services/api/public/favicon.ico [new file with mode: 0644]
services/api/public/robots.txt [new file with mode: 0644]
services/api/script/cancel_stale_jobs.rb [new file with mode: 0755]
services/api/script/create_superuser_token.rb [new file with mode: 0755]
services/api/script/crunch-dispatch.rb [new file with mode: 0755]
services/api/script/crunch_failure_report.py [new file with mode: 0755]
services/api/script/get_anonymous_user_token.rb [new file with mode: 0755]
services/api/script/rails [new file with mode: 0755]
services/api/script/rake_test.sh [new file with mode: 0755]
services/api/script/restart-dns-server [new file with mode: 0755]
services/api/script/setup-new-user.rb [new file with mode: 0755]
services/api/test/factories/api_client.rb [new file with mode: 0644]
services/api/test/factories/api_client_authorization.rb [new file with mode: 0644]
services/api/test/factories/group.rb [new file with mode: 0644]
services/api/test/factories/link.rb [new file with mode: 0644]
services/api/test/factories/user.rb [new file with mode: 0644]
services/api/test/fixtures/.gitkeep [new file with mode: 0644]
services/api/test/fixtures/api_client_authorizations.yml [new file with mode: 0644]
services/api/test/fixtures/api_clients.yml [new file with mode: 0644]
services/api/test/fixtures/authorized_keys.yml [new file with mode: 0644]
services/api/test/fixtures/collections.yml [new file with mode: 0644]
services/api/test/fixtures/groups.yml [new file with mode: 0644]
services/api/test/fixtures/jobs.yml [new file with mode: 0644]
services/api/test/fixtures/keep_disks.yml [new file with mode: 0644]
services/api/test/fixtures/keep_services.yml [new file with mode: 0644]
services/api/test/fixtures/links.yml [new file with mode: 0644]
services/api/test/fixtures/logs.yml [new file with mode: 0644]
services/api/test/fixtures/nodes.yml [new file with mode: 0644]
services/api/test/fixtures/pipeline_instances.yml [new file with mode: 0644]
services/api/test/fixtures/pipeline_templates.yml [new file with mode: 0644]
services/api/test/fixtures/repositories.yml [new file with mode: 0644]
services/api/test/fixtures/specimens.yml [new file with mode: 0644]
services/api/test/fixtures/traits.yml [new file with mode: 0644]
services/api/test/fixtures/users.yml [new file with mode: 0644]
services/api/test/fixtures/virtual_machines.yml [new file with mode: 0644]
services/api/test/functional/.gitkeep [new file with mode: 0644]
services/api/test/functional/application_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/api_client_authorizations_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/authorized_keys_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/collections_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/commits_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/filters_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/groups_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/humans_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/job_reuse_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/job_tasks_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/jobs_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/keep_disks_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/keep_services_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/links_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/logs_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/nodes_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/pipeline_instances_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/pipeline_templates_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/repositories_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/schema_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/specimens_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/traits_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/user_agreements_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/users_controller_test.rb [new file with mode: 0644]
services/api/test/functional/arvados/v1/virtual_machines_controller_test.rb [new file with mode: 0644]
services/api/test/functional/database_controller_test.rb [new file with mode: 0644]
services/api/test/functional/user_sessions_controller_test.rb [new file with mode: 0644]
services/api/test/helpers/git_test_helper.rb [new file with mode: 0644]
services/api/test/helpers/users_test_helper.rb [new file with mode: 0644]
services/api/test/integration/.gitkeep [new file with mode: 0644]
services/api/test/integration/api_client_authorizations_api_test.rb [new file with mode: 0644]
services/api/test/integration/api_client_authorizations_scopes_test.rb [new file with mode: 0644]
services/api/test/integration/collections_api_test.rb [new file with mode: 0644]
services/api/test/integration/cross_origin_test.rb [new file with mode: 0644]
services/api/test/integration/crunch_dispatch_test.rb [new file with mode: 0644]
services/api/test/integration/database_reset_test.rb [new file with mode: 0644]
services/api/test/integration/errors_test.rb [new file with mode: 0644]
services/api/test/integration/groups_test.rb [new file with mode: 0644]
services/api/test/integration/jobs_api_test.rb [new file with mode: 0644]
services/api/test/integration/keep_proxy_test.rb [new file with mode: 0644]
services/api/test/integration/login_workflow_test.rb [new file with mode: 0644]
services/api/test/integration/permissions_test.rb [new file with mode: 0644]
services/api/test/integration/pipeline_test.rb [new file with mode: 0644]
services/api/test/integration/reader_tokens_test.rb [new file with mode: 0644]
services/api/test/integration/select_test.rb [new file with mode: 0644]
services/api/test/integration/serialized_encoding_test.rb [new file with mode: 0644]
services/api/test/integration/user_sessions_test.rb [new file with mode: 0644]
services/api/test/integration/users_test.rb [new file with mode: 0644]
services/api/test/integration/valid_links_test.rb [new file with mode: 0644]
services/api/test/integration/websocket_test.rb [new file with mode: 0644]
services/api/test/job_logs/crunchstatshort.log [new file with mode: 0644]
services/api/test/performance/browsing_test.rb [new file with mode: 0644]
services/api/test/test.git.tar [new file with mode: 0644]
services/api/test/test_helper.rb [new file with mode: 0644]
services/api/test/unit/.gitkeep [new file with mode: 0644]
services/api/test/unit/api_client_authorization_test.rb [new file with mode: 0644]
services/api/test/unit/api_client_test.rb [new file with mode: 0644]
services/api/test/unit/application_test.rb [new file with mode: 0644]
services/api/test/unit/arvados_model_test.rb [new file with mode: 0644]
services/api/test/unit/authorized_key_test.rb [new file with mode: 0644]
services/api/test/unit/blob_test.rb [new file with mode: 0644]
services/api/test/unit/collection_test.rb [new file with mode: 0644]
services/api/test/unit/commit_ancestor_test.rb [new file with mode: 0644]
services/api/test/unit/commit_test.rb [new file with mode: 0644]
services/api/test/unit/group_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/api_client_authorizations_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/api_clients_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/authorized_keys_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/collections_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/commit_ancestors_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/commits_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/groups_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/humans_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/job_tasks_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/jobs_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/keep_disks_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/links_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/logs_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/nodes_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/pipeline_instances_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/pipeline_templates_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/repositories_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/specimens_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/traits_helper_test.rb [new file with mode: 0644]
services/api/test/unit/helpers/virtual_machines_helper_test.rb [new file with mode: 0644]
services/api/test/unit/human_test.rb [new file with mode: 0644]
services/api/test/unit/job_task_test.rb [new file with mode: 0644]
services/api/test/unit/job_test.rb [new file with mode: 0644]
services/api/test/unit/keep_disk_test.rb [new file with mode: 0644]
services/api/test/unit/keep_service_test.rb [new file with mode: 0644]
services/api/test/unit/link_test.rb [new file with mode: 0644]
services/api/test/unit/log_test.rb [new file with mode: 0644]
services/api/test/unit/node_test.rb [new file with mode: 0644]
services/api/test/unit/owner_test.rb [new file with mode: 0644]
services/api/test/unit/permission_test.rb [new file with mode: 0644]
services/api/test/unit/pipeline_instance_test.rb [new file with mode: 0644]
services/api/test/unit/pipeline_template_test.rb [new file with mode: 0644]
services/api/test/unit/repository_test.rb [new file with mode: 0644]
services/api/test/unit/specimen_test.rb [new file with mode: 0644]
services/api/test/unit/trait_test.rb [new file with mode: 0644]
services/api/test/unit/user_notifier_test.rb [new file with mode: 0644]
services/api/test/unit/user_test.rb [new file with mode: 0644]
services/api/test/unit/virtual_machine_test.rb [new file with mode: 0644]
services/api/test/websocket_runner.rb [new file with mode: 0644]
services/api/vendor/assets/stylesheets/.gitkeep [new file with mode: 0644]
services/api/vendor/plugins/.gitkeep [new file with mode: 0644]
services/crunch/crunch-job [new symlink]
services/crunchstat/crunchstat.go [new file with mode: 0644]
services/crunchstat/crunchstat_test.go [new file with mode: 0644]
services/datamanager/experimental/datamanager.py [new file with mode: 0755]
services/datamanager/experimental/datamanager_test.py [new file with mode: 0755]
services/fuse/.gitignore [new symlink]
services/fuse/MANIFEST.in [new file with mode: 0644]
services/fuse/README.rst [new file with mode: 0644]
services/fuse/arvados_fuse/__init__.py [new file with mode: 0644]
services/fuse/bin/arv-mount [new file with mode: 0755]
services/fuse/setup.py [new file with mode: 0644]
services/fuse/tests/__init__.py [new file with mode: 0644]
services/fuse/tests/run_test_server.py [new symlink]
services/fuse/tests/test_mount.py [new file with mode: 0644]
services/keep/tools/traffic_test.py [new file with mode: 0755]
services/keepproxy/keepproxy.go [new file with mode: 0644]
services/keepproxy/keepproxy_test.go [new file with mode: 0644]
services/keepstore/handler_test.go [new file with mode: 0644]
services/keepstore/handlers.go [new file with mode: 0644]
services/keepstore/keepstore.go [new file with mode: 0644]
services/keepstore/keepstore_test.go [new file with mode: 0644]
services/keepstore/logging_router.go [new file with mode: 0644]
services/keepstore/perms.go [new file with mode: 0644]
services/keepstore/perms_test.go [new file with mode: 0644]
services/keepstore/volume.go [new file with mode: 0644]
services/keepstore/volume_unix.go [new file with mode: 0644]
services/keepstore/volume_unix_test.go [new file with mode: 0644]
services/keepstore/work_queue.go [new file with mode: 0644]
services/keepstore/work_queue_test.go [new file with mode: 0644]
services/nodemanager/.gitignore [new symlink]
services/nodemanager/README.rst [new file with mode: 0644]
services/nodemanager/arvnodeman/__init__.py [new file with mode: 0644]
services/nodemanager/arvnodeman/clientactor.py [new file with mode: 0644]
services/nodemanager/arvnodeman/computenode/__init__.py [new file with mode: 0644]
services/nodemanager/arvnodeman/computenode/dispatch/__init__.py [new file with mode: 0644]
services/nodemanager/arvnodeman/computenode/dispatch/slurm.py [new file with mode: 0644]
services/nodemanager/arvnodeman/computenode/driver/__init__.py [new file with mode: 0644]
services/nodemanager/arvnodeman/computenode/driver/dummy.py [new file with mode: 0644]
services/nodemanager/arvnodeman/computenode/driver/ec2.py [new file with mode: 0644]
services/nodemanager/arvnodeman/config.py [new file with mode: 0644]
services/nodemanager/arvnodeman/daemon.py [new file with mode: 0644]
services/nodemanager/arvnodeman/jobqueue.py [new file with mode: 0644]
services/nodemanager/arvnodeman/launcher.py [new file with mode: 0644]
services/nodemanager/arvnodeman/nodelist.py [new file with mode: 0644]
services/nodemanager/arvnodeman/timedcallback.py [new file with mode: 0644]
services/nodemanager/bin/arvados-node-manager [new file with mode: 0644]
services/nodemanager/doc/ec2.example.cfg [new file with mode: 0644]
services/nodemanager/doc/local.example.cfg [new file with mode: 0644]
services/nodemanager/setup.py [new file with mode: 0644]
services/nodemanager/tests/__init__.py [new file with mode: 0644]
services/nodemanager/tests/test_clientactor.py [new file with mode: 0644]
services/nodemanager/tests/test_computenode.py [new file with mode: 0644]
services/nodemanager/tests/test_computenode_dispatch.py [new file with mode: 0644]
services/nodemanager/tests/test_computenode_dispatch_slurm.py [new file with mode: 0644]
services/nodemanager/tests/test_computenode_driver_ec2.py [new file with mode: 0644]
services/nodemanager/tests/test_config.py [new file with mode: 0644]
services/nodemanager/tests/test_daemon.py [new file with mode: 0644]
services/nodemanager/tests/test_jobqueue.py [new file with mode: 0644]
services/nodemanager/tests/test_nodelist.py [new file with mode: 0644]
services/nodemanager/tests/test_timedcallback.py [new file with mode: 0644]
services/nodemanager/tests/testutil.py [new file with mode: 0644]

diff --git a/.gitignore b/.gitignore
new file mode 100644 (file)
index 0000000..8cc6b89
--- /dev/null
@@ -0,0 +1,19 @@
+.bundle
+.rvmrc
+*~
+*.pyc
+docker/*/generated
+docker/config.yml
+doc/.site
+doc/sdk/python/arvados
+sdk/perl/MYMETA.*
+sdk/perl/Makefile
+sdk/perl/blib
+sdk/perl/pm_to_blib
+*/vendor/bundle
+sdk/java/target
+*.class
+apps/workbench/vendor/bundle
+services/api/vendor/bundle
+sdk/java/log
+sdk/cli/vendor
diff --git a/COPYING b/COPYING
new file mode 100644 (file)
index 0000000..4006e68
--- /dev/null
+++ b/COPYING
@@ -0,0 +1,11 @@
+Server-side components of Arvados contained in the apps/ and services/
+directories, including the API Server, Workbench, and Crunch, are licensed
+under the GNU Affero General Public License version 3 (see agpl-3.0.txt)
+
+The Arvados client Software Development Kits contained in the sdk/ directory,
+example scripts in the crunch_scripts/ directory, and code samples in the
+Arvados documentation are licensed under the Apache License, Version 2.0 (see
+LICENSE-2.0.txt)
+
+The Arvados Documentation located in the doc/ directory is licensed under the
+Creative Commons Attribution-Share Alike 3.0 United States (see by-sa-3.0.txt)
\ No newline at end of file
diff --git a/LICENSE-2.0.txt b/LICENSE-2.0.txt
new file mode 100644 (file)
index 0000000..d645695
--- /dev/null
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/README b/README
index 826092d447b14b04df32da905dd13f5179b8df80..c7a36c355b4a2b94dfab45c9748330022a788c91 100644 (file)
--- a/README
+++ b/README
@@ -1,28 +1,21 @@
-Run a web service from Arvados.
+Welcome to Arvados!
 
-usage: arv-web.py [-h] --project PROJECT [--port PORT] --image IMAGE
+The main Arvados web site is 
+  https://arvados.org
 
-optional arguments:
-  -h, --help         show this help message and exit
-  --project PROJECT  Project to watch
-  --port PORT        Local bind port
-  --image IMAGE      Docker image to run
+The Arvados public wiki is located at 
+  https://arvados.org/projects/arvados/wiki
 
+The Arvados public bug tracker is located at 
+  https://arvados.org/projects/arvados/issues
 
-This queries an Arvados project and FUSE mounts the most recently modified
-collection into a temporary directory.  It then runs the supplied Docker image
-with the collection bind mounted to /mnt inside the container.
+For support see 
+  http://doc.arvados.org/user/getting_started/community.html
 
-When a new collection is added to the project, or an existing project is
-updated, it will detect the change, it will stop the running Docker container,
-unmount the old collection, mount the new most recently modified collection,
-and restart the Docker container with the new mount.
+Installation documentation is located at 
+  http://doc.arvados.org/install
 
-The supplied Dockerfile builds a Docker image that runs Apache with /mnt as the
-DocumentRoot.  It is configured to run web applications based on Python WSGI,
-Ruby Rack, CGI, to serve static HTML, or simply browse the contents of the
-/public subdirectory of the collection using Apache's default index pages.
+If you wish to build the documentation yourself, follow the instructions in
+doc/README to build the documentation, then consult the "Install Guide".
 
-To build the Docker image:
-
-$ docker build -t arvados/arv-web .
+See COPYING for information about Arvados Free Software licenses.
diff --git a/agpl-3.0.txt b/agpl-3.0.txt
new file mode 100644 (file)
index 0000000..dba13ed
--- /dev/null
@@ -0,0 +1,661 @@
+                    GNU AFFERO GENERAL PUBLIC LICENSE
+                       Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+  A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate.  Many developers of free software are heartened and
+encouraged by the resulting cooperation.  However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+  The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community.  It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server.  Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+  An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals.  This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU Affero General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as "you".  "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it.  "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+  You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+  5. Conveying Modified Source Versions.
+
+  You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+    a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.
+
+    b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    "keep intact all notices".
+
+    c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.
+
+    d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.
+
+  A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+  6. Conveying Non-Source Forms.
+
+  You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+    a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.
+
+    b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.
+
+    c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.
+
+    d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.
+
+    e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.
+
+  A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+  A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+  "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+  If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+  The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+  Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+  7. Additional Terms.
+
+  "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+  When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+  Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+    a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or
+
+    b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or
+
+    c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or
+
+    d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or
+
+    e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or
+
+    f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.
+
+  All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term.  If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+  If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+  Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+  8. Termination.
+
+  You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+  However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+  Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+  Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License.  If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+  9. Acceptance Not Required for Having Copies.
+
+  You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+  10. Automatic Licensing of Downstream Recipients.
+
+  Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.
+
+  An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+  You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+  11. Patents.
+
+  A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's "contributor version".
+
+  A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+  Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+  In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+  If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+  If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+  A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+  Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+  12. No Surrender of Others' Freedom.
+
+  If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+  13. Remote Network Interaction; Use with the GNU General Public License.
+
+  Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software.  This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+  Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work.  The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+  14. Revised Versions of this License.
+
+  The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time.  Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+  Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation.  If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source.  For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code.  There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<http://www.gnu.org/licenses/>.
similarity index 100%
rename from Dockerfile
rename to apps/arv-web/Dockerfile
diff --git a/apps/arv-web/README b/apps/arv-web/README
new file mode 100644 (file)
index 0000000..826092d
--- /dev/null
@@ -0,0 +1,28 @@
+Run a web service from Arvados.
+
+usage: arv-web.py [-h] --project PROJECT [--port PORT] --image IMAGE
+
+optional arguments:
+  -h, --help         show this help message and exit
+  --project PROJECT  Project to watch
+  --port PORT        Local bind port
+  --image IMAGE      Docker image to run
+
+
+This queries an Arvados project and FUSE mounts the most recently modified
+collection into a temporary directory.  It then runs the supplied Docker image
+with the collection bind mounted to /mnt inside the container.
+
+When a new collection is added to the project, or an existing project is
+updated, it will detect the change, stop the running Docker container,
+unmount the old collection, mount the new most recently modified collection,
+and restart the Docker container with the new mount.
+
+The supplied Dockerfile builds a Docker image that runs Apache with /mnt as the
+DocumentRoot.  It is configured to run web applications based on Python WSGI,
+Ruby Rack, CGI, to serve static HTML, or simply browse the contents of the
+/public subdirectory of the collection using Apache's default index pages.
+
+To build the Docker image:
+
+$ docker build -t arvados/arv-web .
similarity index 100%
rename from apache2_vhost
rename to apps/arv-web/apache2_vhost
similarity index 100%
rename from arv-web.py
rename to apps/arv-web/arv-web.py
diff --git a/apps/workbench/.gitignore b/apps/workbench/.gitignore
new file mode 100644 (file)
index 0000000..24a7a84
--- /dev/null
@@ -0,0 +1,37 @@
+# Ignore the default SQLite database.
+/db/*.sqlite3
+
+# Ignore all logfiles and tempfiles.
+/log/*.log
+/tmp
+
+/config/.secret_token
+/config/initializers/secret_token.rb
+
+/public/assets
+
+/config/environments/development.rb
+/config/environments/production.rb
+/config/application.yml
+
+# Workbench doesn't need one anyway, so this shouldn't come up, but...
+/config/database.yml
+
+/config/piwik.yml
+
+# Capistrano files are coming from another repo
+/Capfile*
+/config/deploy*
+
+# Themes are coming from another repo
+/themes/*
+
+# This can be a symlink to ../../../doc/.site in dev setups
+/public/doc
+
+# SimpleCov reports
+/coverage
+
+# Dev/test SSL certificates
+/self-signed.key
+/self-signed.pem
diff --git a/apps/workbench/Gemfile b/apps/workbench/Gemfile
new file mode 100644 (file)
index 0000000..49f82f5
--- /dev/null
@@ -0,0 +1,102 @@
+source 'https://rubygems.org'
+
+gem 'rails', '~> 4.1.0'
+gem 'minitest', '>= 5.0.0'
+
+gem 'arvados', '>= 0.1.20141114230720'
+
+# Bundle edge Rails instead:
+# gem 'rails', :git => 'git://github.com/rails/rails.git'
+
+gem 'sqlite3'
+
+gem 'multi_json'
+gem 'oj'
+gem 'sass'
+
+# Note: keeping this out of the "group :assets" section "may" allow us
+to use CoffeeScript for UJS responses. It also prevents a
+# warning/problem when running tests: "WARN: tilt autoloading
+# 'coffee_script' in a non thread-safe way; explicit require
+# 'coffee_script' suggested."
+gem 'coffee-rails'
+
+# Gems used only for assets and not required
+# in production environments by default.
+group :assets do
+  gem 'sass-rails', '~> 4.0.4'
+
+  # See https://github.com/sstephenson/execjs#readme for more supported runtimes
+  gem 'therubyracer', :platforms => :ruby
+
+  gem 'uglifier', '>= 1.0.3'
+end
+
+group :development do
+  gem 'byebug'
+  gem 'ruby-debug-passenger'
+end
+
+group :test, :diagnostics do
+  gem 'selenium-webdriver'
+  gem 'capybara'
+  gem 'poltergeist'
+  gem 'headless'
+end
+
+group :test, :performance do
+  gem 'rails-perftest'
+  gem 'ruby-prof'
+end
+
+group :test do
+  gem 'rvm-capistrano'
+  # Note: "require: false" here tells bundler not to automatically
+  # 'require' the packages during application startup. Installation is
+  # still mandatory.
+  gem 'simplecov', '~> 0.7.1', require: false
+  gem 'simplecov-rcov', require: false
+  gem 'mocha', require: false
+end
+
+gem 'jquery-rails'
+gem 'bootstrap-sass', '~> 3.1.0'
+gem 'bootstrap-x-editable-rails'
+gem 'bootstrap-tab-history-rails'
+
+gem 'angularjs-rails'
+
+gem 'less'
+gem 'less-rails'
+gem 'wiselinks'
+gem 'sshkey'
+
+# To use ActiveModel has_secure_password
+# gem 'bcrypt-ruby', '~> 3.0.0'
+
+# To use Jbuilder templates for JSON
+# gem 'jbuilder'
+
+# Use unicorn as the app server
+# gem 'unicorn'
+
+# Deploy with Capistrano
+# gem 'capistrano'
+
+# To use debugger
+#gem 'byebug'
+
+gem 'passenger', :group => :production
+gem 'andand'
+gem 'RedCloth'
+
+gem 'piwik_analytics'
+gem 'httpclient', '~> 2.5.0'
+
+# This fork has Rails 4 compatible routes
+gem 'themes_for_rails', git: 'https://github.com/holtkampw/themes_for_rails', ref: '1fd2d7897d75ae0d6375f4c390df87b8e91ad417'
+
+gem "deep_merge", :require => 'deep_merge/rails_compat'
+
+gem 'morrisjs-rails'
+gem 'raphael-rails'
diff --git a/apps/workbench/Gemfile.lock b/apps/workbench/Gemfile.lock
new file mode 100644 (file)
index 0000000..d29c16e
--- /dev/null
@@ -0,0 +1,284 @@
+GIT
+  remote: https://github.com/holtkampw/themes_for_rails
+  revision: 1fd2d7897d75ae0d6375f4c390df87b8e91ad417
+  ref: 1fd2d7897d75ae0d6375f4c390df87b8e91ad417
+  specs:
+    themes_for_rails (0.5.1)
+      rails (>= 3.0.0)
+
+GEM
+  remote: https://rubygems.org/
+  specs:
+    RedCloth (4.2.9)
+    actionmailer (4.1.8)
+      actionpack (= 4.1.8)
+      actionview (= 4.1.8)
+      mail (~> 2.5, >= 2.5.4)
+    actionpack (4.1.8)
+      actionview (= 4.1.8)
+      activesupport (= 4.1.8)
+      rack (~> 1.5.2)
+      rack-test (~> 0.6.2)
+    actionview (4.1.8)
+      activesupport (= 4.1.8)
+      builder (~> 3.1)
+      erubis (~> 2.7.0)
+    activemodel (4.1.8)
+      activesupport (= 4.1.8)
+      builder (~> 3.1)
+    activerecord (4.1.8)
+      activemodel (= 4.1.8)
+      activesupport (= 4.1.8)
+      arel (~> 5.0.0)
+    activesupport (4.1.8)
+      i18n (~> 0.6, >= 0.6.9)
+      json (~> 1.7, >= 1.7.7)
+      minitest (~> 5.1)
+      thread_safe (~> 0.1)
+      tzinfo (~> 1.1)
+    addressable (2.3.6)
+    andand (1.3.3)
+    angularjs-rails (1.3.3)
+    arel (5.0.1.20140414130214)
+    arvados (0.1.20141114230720)
+      activesupport (>= 3.2.13)
+      andand (~> 1.3, >= 1.3.3)
+      google-api-client (~> 0.6.3, >= 0.6.3)
+      json (~> 1.7, >= 1.7.7)
+      jwt (>= 0.1.5, < 1.0.0)
+    autoparse (0.3.3)
+      addressable (>= 2.3.1)
+      extlib (>= 0.9.15)
+      multi_json (>= 1.0.0)
+    bootstrap-sass (3.1.1.1)
+      sass (~> 3.2)
+    bootstrap-tab-history-rails (0.1.0)
+      railties (>= 3.1)
+    bootstrap-x-editable-rails (1.5.1.1)
+      railties (>= 3.0)
+    builder (3.2.2)
+    byebug (3.5.1)
+      columnize (~> 0.8)
+      debugger-linecache (~> 1.2)
+      slop (~> 3.6)
+    capistrano (2.15.5)
+      highline
+      net-scp (>= 1.0.0)
+      net-sftp (>= 2.0.0)
+      net-ssh (>= 2.0.14)
+      net-ssh-gateway (>= 1.1.0)
+    capybara (2.4.4)
+      mime-types (>= 1.16)
+      nokogiri (>= 1.3.3)
+      rack (>= 1.0.0)
+      rack-test (>= 0.5.4)
+      xpath (~> 2.0)
+    childprocess (0.5.5)
+      ffi (~> 1.0, >= 1.0.11)
+    cliver (0.3.2)
+    coffee-rails (4.1.0)
+      coffee-script (>= 2.2.0)
+      railties (>= 4.0.0, < 5.0)
+    coffee-script (2.3.0)
+      coffee-script-source
+      execjs
+    coffee-script-source (1.8.0)
+    columnize (0.8.9)
+    commonjs (0.2.7)
+    daemon_controller (1.2.0)
+    debugger-linecache (1.2.0)
+    deep_merge (1.0.1)
+    erubis (2.7.0)
+    execjs (2.2.2)
+    extlib (0.9.16)
+    faraday (0.8.9)
+      multipart-post (~> 1.2.0)
+    ffi (1.9.6)
+    google-api-client (0.6.4)
+      addressable (>= 2.3.2)
+      autoparse (>= 0.3.3)
+      extlib (>= 0.9.15)
+      faraday (~> 0.8.4)
+      jwt (>= 0.1.5)
+      launchy (>= 2.1.1)
+      multi_json (>= 1.0.0)
+      signet (~> 0.4.5)
+      uuidtools (>= 2.1.0)
+    headless (1.0.2)
+    highline (1.6.21)
+    hike (1.2.3)
+    httpclient (2.5.3.3)
+    i18n (0.6.11)
+    jquery-rails (3.1.2)
+      railties (>= 3.0, < 5.0)
+      thor (>= 0.14, < 2.0)
+    json (1.8.1)
+    jwt (0.1.13)
+      multi_json (>= 1.5)
+    launchy (2.4.3)
+      addressable (~> 2.3)
+    less (2.6.0)
+      commonjs (~> 0.2.7)
+    less-rails (2.6.0)
+      actionpack (>= 3.1)
+      less (~> 2.6.0)
+    libv8 (3.16.14.7)
+    mail (2.6.3)
+      mime-types (>= 1.16, < 3)
+    metaclass (0.0.4)
+    mime-types (2.4.3)
+    mini_portile (0.6.1)
+    minitest (5.4.3)
+    mocha (1.1.0)
+      metaclass (~> 0.0.1)
+    morrisjs-rails (0.5.1)
+      railties (> 3.1, < 5)
+    multi_json (1.10.1)
+    multipart-post (1.2.0)
+    net-scp (1.2.1)
+      net-ssh (>= 2.6.5)
+    net-sftp (2.1.2)
+      net-ssh (>= 2.6.5)
+    net-ssh (2.9.1)
+    net-ssh-gateway (1.2.0)
+      net-ssh (>= 2.6.5)
+    nokogiri (1.6.4.1)
+      mini_portile (~> 0.6.0)
+    oj (2.11.1)
+    passenger (4.0.53)
+      daemon_controller (>= 1.2.0)
+      rack
+      rake (>= 0.8.1)
+    piwik_analytics (1.0.2)
+      actionpack
+      activesupport
+      rails (>= 3.0.0)
+    poltergeist (1.5.1)
+      capybara (~> 2.1)
+      cliver (~> 0.3.1)
+      multi_json (~> 1.0)
+      websocket-driver (>= 0.2.0)
+    rack (1.5.2)
+    rack-test (0.6.2)
+      rack (>= 1.0)
+    rails (4.1.8)
+      actionmailer (= 4.1.8)
+      actionpack (= 4.1.8)
+      actionview (= 4.1.8)
+      activemodel (= 4.1.8)
+      activerecord (= 4.1.8)
+      activesupport (= 4.1.8)
+      bundler (>= 1.3.0, < 2.0)
+      railties (= 4.1.8)
+      sprockets-rails (~> 2.0)
+    rails-perftest (0.0.5)
+    railties (4.1.8)
+      actionpack (= 4.1.8)
+      activesupport (= 4.1.8)
+      rake (>= 0.8.7)
+      thor (>= 0.18.1, < 2.0)
+    rake (10.4.0)
+    raphael-rails (2.1.2)
+    ref (1.0.5)
+    ruby-debug-passenger (0.2.0)
+    ruby-prof (0.15.2)
+    rubyzip (1.1.6)
+    rvm-capistrano (1.5.5)
+      capistrano (~> 2.15.4)
+    sass (3.2.19)
+    sass-rails (4.0.4)
+      railties (>= 4.0.0, < 5.0)
+      sass (~> 3.2.2)
+      sprockets (~> 2.8, < 2.12)
+      sprockets-rails (~> 2.0)
+    selenium-webdriver (2.44.0)
+      childprocess (~> 0.5)
+      multi_json (~> 1.0)
+      rubyzip (~> 1.0)
+      websocket (~> 1.0)
+    signet (0.4.5)
+      addressable (>= 2.2.3)
+      faraday (~> 0.8.1)
+      jwt (>= 0.1.5)
+      multi_json (>= 1.0.0)
+    simplecov (0.7.1)
+      multi_json (~> 1.0)
+      simplecov-html (~> 0.7.1)
+    simplecov-html (0.7.1)
+    simplecov-rcov (0.2.3)
+      simplecov (>= 0.4.1)
+    slop (3.6.0)
+    sprockets (2.11.3)
+      hike (~> 1.2)
+      multi_json (~> 1.0)
+      rack (~> 1.0)
+      tilt (~> 1.1, != 1.3.0)
+    sprockets-rails (2.2.0)
+      actionpack (>= 3.0)
+      activesupport (>= 3.0)
+      sprockets (>= 2.8, < 4.0)
+    sqlite3 (1.3.10)
+    sshkey (1.6.1)
+    therubyracer (0.12.1)
+      libv8 (~> 3.16.14.0)
+      ref
+    thor (0.19.1)
+    thread_safe (0.3.4)
+    tilt (1.4.1)
+    tzinfo (1.2.2)
+      thread_safe (~> 0.1)
+    uglifier (2.5.3)
+      execjs (>= 0.3.0)
+      json (>= 1.8.0)
+    uuidtools (2.1.5)
+    websocket (1.2.1)
+    websocket-driver (0.4.0)
+    wiselinks (1.2.1)
+    xpath (2.0.0)
+      nokogiri (~> 1.3)
+
+PLATFORMS
+  ruby
+
+DEPENDENCIES
+  RedCloth
+  andand
+  angularjs-rails
+  arvados (>= 0.1.20141114230720)
+  bootstrap-sass (~> 3.1.0)
+  bootstrap-tab-history-rails
+  bootstrap-x-editable-rails
+  byebug
+  capybara
+  coffee-rails
+  deep_merge
+  headless
+  httpclient (~> 2.5.0)
+  jquery-rails
+  less
+  less-rails
+  minitest (>= 5.0.0)
+  mocha
+  morrisjs-rails
+  multi_json
+  oj
+  passenger
+  piwik_analytics
+  poltergeist
+  rails (~> 4.1.0)
+  rails-perftest
+  raphael-rails
+  ruby-debug-passenger
+  ruby-prof
+  rvm-capistrano
+  sass
+  sass-rails (~> 4.0.4)
+  selenium-webdriver
+  simplecov (~> 0.7.1)
+  simplecov-rcov
+  sqlite3
+  sshkey
+  themes_for_rails!
+  therubyracer
+  uglifier (>= 1.0.3)
+  wiselinks
diff --git a/apps/workbench/README.textile b/apps/workbench/README.textile
new file mode 100644 (file)
index 0000000..7991978
--- /dev/null
@@ -0,0 +1,23 @@
+h1. Developing Workbench
+
+This document includes information to help developers who would like to contribute to Workbench.  If you just want to install it, please refer to our "Workbench installation guide":http://doc.arvados.org/install/install-workbench-app.html.
+
+h2. Running tests
+
+The Workbench application includes a series of integration tests.  When you run these, it starts the API server in a test environment, with all of its fixtures loaded, then tests Workbench by starting that server and making requests against it.
+
+In order for this to work, you must have Firefox installed (or Iceweasel, if you're running Debian), as well as the X Virtual Frame Buffer driver.
+
+<pre>
+$ sudo apt-get install iceweasel xvfb
+</pre>
+
+If you install the Workbench Bundle in deployment mode, you must also install the API server Bundle in deployment mode, and vice versa.  If your Bundle installs have mismatched modes, the integration tests will fail with "Gem not found" errors.
+
+h2. Writing tests
+
+Integration tests are written with Capybara, which drives a fully-featured Web browser to interact with Workbench exactly as a user would.
+
+If your test requires JavaScript support, your test method should start with the line @Capybara.current_driver = Capybara.javascript_driver@.  Otherwise, Capybara defaults to a simpler browser for speed.
+
+In most tests, you can directly call "Capybara's Session methods":http://rubydoc.info/github/jnicklas/capybara/Capybara/Session to drive the browser and check its state.  If you need finer-grained control, refer to the "full Capybara documentation":http://rubydoc.info/github/jnicklas/capybara/Capybara.
diff --git a/apps/workbench/Rakefile b/apps/workbench/Rakefile
new file mode 100644 (file)
index 0000000..752f3d8
--- /dev/null
@@ -0,0 +1,7 @@
+#!/usr/bin/env rake
+# Add your own tasks in files placed in lib/tasks ending in .rake,
+# for example lib/tasks/capistrano.rake, and they will automatically be available to Rake.
+
+require File.expand_path('../config/application', __FILE__)
+
+ArvadosWorkbench::Application.load_tasks
diff --git a/apps/workbench/app/assets/images/dax.png b/apps/workbench/app/assets/images/dax.png
new file mode 100644 (file)
index 0000000..c511f0e
Binary files /dev/null and b/apps/workbench/app/assets/images/dax.png differ
diff --git a/apps/workbench/app/assets/images/rails.png b/apps/workbench/app/assets/images/rails.png
new file mode 100644 (file)
index 0000000..d5edc04
Binary files /dev/null and b/apps/workbench/app/assets/images/rails.png differ
diff --git a/apps/workbench/app/assets/images/spinner_32px.gif b/apps/workbench/app/assets/images/spinner_32px.gif
new file mode 100644 (file)
index 0000000..3288d10
Binary files /dev/null and b/apps/workbench/app/assets/images/spinner_32px.gif differ
diff --git a/apps/workbench/app/assets/javascripts/angular_shim.js b/apps/workbench/app/assets/javascripts/angular_shim.js
new file mode 100644 (file)
index 0000000..8665c73
--- /dev/null
@@ -0,0 +1,13 @@
+// Compile any new HTML content that was loaded via jQuery.ajax().
+// Currently this only works for tabs, and only because they emit an
+// arv:pane:loaded event after updating the DOM.
+
+$(document).on('arv:pane:loaded', function(event, $updatedElement) {
+    if (angular && $updatedElement) {
+        angular.element($updatedElement).injector().invoke([
+            '$compile', function($compile) {
+                var scope = angular.element($updatedElement).scope();
+                $compile($updatedElement)(scope);
+            }]);
+    }
+});
diff --git a/apps/workbench/app/assets/javascripts/application.js b/apps/workbench/app/assets/javascripts/application.js
new file mode 100644 (file)
index 0000000..63887b3
--- /dev/null
@@ -0,0 +1,240 @@
+// This is a manifest file that'll be compiled into application.js, which will include all the files
+// listed below.
+//
+// Any JavaScript/Coffee file within this directory, lib/assets/javascripts, vendor/assets/javascripts,
+// or vendor/assets/javascripts of plugins, if any, can be referenced here using a relative path.
+//
+// It's not advisable to add code directly here, but if you do, it'll appear at the bottom of
+// the compiled file.
+//
+// WARNING: THE FIRST BLANK LINE MARKS THE END OF WHAT'S TO BE PROCESSED, ANY BLANK LINE SHOULD
+// GO AFTER THE REQUIRES BELOW.
+//
+//= require jquery
+//= require jquery_ujs
+//= require bootstrap
+//= require bootstrap/dropdown
+//= require bootstrap/tab
+//= require bootstrap/tooltip
+//= require bootstrap/popover
+//= require bootstrap/collapse
+//= require bootstrap/modal
+//= require bootstrap/button
+//= require bootstrap3-editable/bootstrap-editable
+//= require bootstrap-tab-history
+//= require wiselinks
+//= require angular
+//= require raphael
+//= require morris
+//= require jquery.number.min
+//= require_tree .
+
+jQuery(function($){
+    $(document).ajaxStart(function(){
+      $('.modal-with-loading-spinner .spinner').show();
+    }).ajaxStop(function(){
+      $('.modal-with-loading-spinner .spinner').hide();
+    });
+
+    $('[data-toggle=tooltip]').tooltip();
+
+    $('.expand-collapse-row').on('click', function(event) {
+        var targets = $('#' + $(this).attr('data-id'));
+        if (targets.css('display') == 'none') {
+            $(this).addClass('icon-minus-sign');
+            $(this).removeClass('icon-plus-sign');
+        } else {
+            $(this).addClass('icon-plus-sign');
+            $(this).removeClass('icon-minus-sign');
+        }
+        targets.fadeToggle(200);
+    });
+
+    var ajaxCount = 0;
+
+    $(document).
+        on('ajax:send', function(e, xhr) {
+            ajaxCount += 1;
+            if (ajaxCount == 1) {
+                $('.loading').fadeTo('fast', 1);
+            }
+        }).
+        on('ajax:complete', function(e, status) {
+            ajaxCount -= 1;
+            if (ajaxCount == 0) {
+                $('.loading').fadeOut('fast', 0);
+            }
+        }).
+        on('ajaxSend', function(e, xhr) {
+            // jQuery triggers 'ajaxSend' event when starting an ajax call, but
+            // rails-generated ajax triggers generate 'ajax:send'.  Workbench
+            // event listeners currently expect 'ajax:send', so trigger the
+            // rails event in response to the jQuery one.
+            $(document).trigger('ajax:send');
+        }).
+        on('ajaxComplete', function(e, xhr) {
+            // See comment above about ajaxSend/ajax:send
+            $(document).trigger('ajax:complete');
+        }).
+        on('click', '.removable-tag a', function(e) {
+            var tag_span = $(this).parents('[data-tag-link-uuid]').eq(0)
+            tag_span.fadeTo('fast', 0.2);
+            $.ajax('/links/' + tag_span.attr('data-tag-link-uuid'),
+                   {dataType: 'json',
+                    type: 'POST',
+                    data: { '_method': 'DELETE' },
+                    context: tag_span}).
+                done(function(data, status, jqxhr) {
+                    this.remove();
+                }).
+                fail(function(jqxhr, status, error) {
+                    this.addClass('label-danger').fadeTo('fast', '1');
+                });
+            return false;
+        }).
+        on('click', 'a.add-tag-button', function(e) {
+            var jqxhr;
+            var new_tag_uuid = 'new-tag-' + Math.random();
+            var tag_head_uuid = $(this).parents('tr').attr('data-object-uuid');
+            var new_tag = window.prompt("Add tag for collection "+
+                                    tag_head_uuid,
+                                    "");
+            if (new_tag == null)
+                return false;
+            var new_tag_span =
+                $('<span class="label label-info removable-tag"></span>').
+                attr('data-tag-link-uuid', new_tag_uuid).
+                text(new_tag).
+                css('opacity', '0.2').
+                append('&nbsp;<span class="removable-tag"><a title="Delete tag"><i class="fa fa-fw fa-trash-o"></i></a></span>');
+            $(this).
+                parent().
+                find('>span').
+                append(new_tag_span).
+                append(' ');
+            $.ajax($(this).attr('data-remote-href'),
+                           {dataType: 'json',
+                            type: $(this).attr('data-remote-method'),
+                            data: {
+                                'link[head_uuid]': tag_head_uuid,
+                                'link[link_class]': 'tag',
+                                'link[name]': new_tag
+                            },
+                            context: new_tag_span}).
+                done(function(data, status, jqxhr) {
+                    this.attr('data-tag-link-uuid', data.uuid).
+                        fadeTo('fast', '1');
+                }).
+                fail(function(jqxhr, status, error) {
+                    this.addClass('label-danger').fadeTo('fast', '1');
+                });
+            return false;
+        });
+
+    $(document).
+        on('ajax:complete ready', function() {
+            // See http://getbootstrap.com/javascript/#buttons
+            $('.btn').button();
+        }).
+        on('ready ajax:complete', function() {
+            $('[data-toggle~=tooltip]').tooltip({container:'body'});
+        }).
+        on('ready ajax:complete', function() {
+            // This makes the dialog close on Esc key, obviously.
+            $('.modal').attr('tabindex', '-1')
+        });
+
+    HeaderRowFixer = function(selector) {
+        this.duplicateTheadTr = function() {
+            $(selector).each(function() {
+                var the_table = this;
+                if ($('>tbody>tr:first>th', the_table).length > 0)
+                    return;
+                $('>tbody', the_table).
+                    prepend($('>thead>tr', the_table).
+                            clone().
+                            css('opacity', 0));
+            });
+        }
+        this.fixThead = function() {
+            $(selector).each(function() {
+                var widths = [];
+                $('> tbody > tr:eq(1) > td', this).each( function(i,v){
+                    widths.push($(v).width());
+                });
+                for(i=0;i<widths.length;i++) {
+                    $('thead th:eq('+i+')', this).width(widths[i]);
+                }
+            });
+        }
+    }
+
+    var fixer = new HeaderRowFixer('.table-fixed-header-row');
+    fixer.duplicateTheadTr();
+    fixer.fixThead();
+    $(window).resize(function(){
+        fixer.fixThead();
+    });
+    $(document).on('ajax:complete', function(e, status) {
+        fixer.duplicateTheadTr();
+        fixer.fixThead();
+    });
+
+    $(document).ready(function() {
+        /* When wiselinks is initialized, selection.js is not working. Since we want to stop
+           using selection.js in the near future, let's not initialize wiselinks for now. */
+
+        // window.wiselinks = new Wiselinks();
+
+        $(document).off('page:loading').on('page:loading', function(event, $target, render, url){
+            $("#page-wrapper").fadeOut(200);
+        });
+
+        $(document).off('page:redirected').on('page:redirected', function(event, $target, render, url){
+        });
+
+        $(document).off('page:always').on('page:always', function(event, xhr, settings){
+            $("#page-wrapper").fadeIn(200);
+        });
+
+        $(document).off('page:done').on('page:done', function(event, $target, status, url, data){
+        });
+
+        $(document).off('page:fail').on('page:fail', function(event, $target, status, url, error, code){
+        });
+    });
+
+    $(document).on('click', '.compute-detail', function(e) {
+        $(e.target).collapse('hide');
+    });
+
+    $(document).on('click', '.compute-node-summary', function(e) {
+        $(e.target.href).collapse('toggle');
+    });
+
+    $(document).on('click', '.force-cache-reload', function(e) {
+        history.replaceState( { nocache: true }, '' );
+    });
+});
+
+window.addEventListener("DOMContentLoaded", function(e) {
+    if(history.state) {
+        if(history.state.nocache) {
+            showLoadingModal();
+            history.replaceState( {}, '' );
+            location.reload(true);
+        }
+    }
+});
+
+function showLoadingModal() {
+    $('#loading-modal').modal('show');
+}
+
+function hideLoadingModal() {
+    $('#loading-modal').modal('hide');
+}
+
+function hasHTML5History() {
+    return !!(window.history && window.history.pushState);
+}
diff --git a/apps/workbench/app/assets/javascripts/arvados_client.js b/apps/workbench/app/assets/javascripts/arvados_client.js
new file mode 100644 (file)
index 0000000..63f1de1
--- /dev/null
@@ -0,0 +1,101 @@
+angular.
+    module('Arvados', []).
+    service('ArvadosClient', ArvadosClient);
+
+ArvadosClient.$inject = ['arvadosApiToken', 'arvadosDiscoveryUri']
+function ArvadosClient(arvadosApiToken, arvadosDiscoveryUri) {
+    $.extend(this, {
+        apiPromise: apiPromise,
+        uniqueNameForManifest: uniqueNameForManifest
+    });
+    return this;
+    ////////////////////////////////
+
+    var that = this;
+    var promiseDiscovery;
+    var discoveryDoc;
+
+    function apiPromise(controller, action, params) {
+        // Start an API call. Return a promise that will resolve with
+        // the API response.
+        return getDiscoveryDoc().then(function() {
+            var meth = discoveryDoc.resources[controller].methods[action];
+            var data = $.extend({}, params, {_method: meth.httpMethod});
+            $.each(data, function(k, v) {
+                if (typeof(v) === 'object') {
+                    data[k] = JSON.stringify(v);
+                }
+            });
+            var path = meth.path.replace(/{(.*?)}/, function(_, key) {
+                var val = data[key];
+                delete data[key];
+                return encodeURIComponent(val);
+            });
+            return $.ajax({
+                url: discoveryDoc.baseUrl + path,
+                type: 'POST',
+                crossDomain: true,
+                dataType: 'json',
+                data: data,
+                headers: {
+                    Authorization: 'OAuth2 ' + arvadosApiToken
+                }
+            });
+        });
+    }
+
+    function uniqueNameForManifest(manifest, newStreamName, origName) {
+        // Return an (escaped) filename starting with (unescaped)
+        // origName that won't conflict with any existing names in the
+        // manifest if saved under newStreamName. newStreamName must
+        // be exactly as given in the manifest, e.g., "." or "./foo"
+        // or "./foo/bar".
+        //
+        // Example:
+        //
+        // uniqueNameForManifest('./foo [...] 0:0:bar\\040baz.txt\n', '.',
+        //                       'foo/bar baz.txt')
+        // =>
+        // 'foo/bar\\040baz\\040(1).txt'
+        var newName;
+        var nameStub = origName;
+        var suffixInt = null;
+        var ok = false;
+        var lineMatch, linesRe = /(\S+).*/gm;
+        var fileTokenMatch, fileTokensRe = / \d+:\d+:(\S+)/g;
+        while (!ok) {
+            ok = true;
+            // Add ' (N)' before the filename extension, if any.
+            newName = (!suffixInt ? nameStub :
+                       nameStub.replace(/(\.[^.]*)?$/, ' ('+suffixInt+')$1')).
+                replace(/ /g, '\\040');
+            while (ok && null !==
+                   (lineMatch = linesRe.exec(manifest))) {
+                // lineMatch is [theEntireLine, streamName]
+                while (ok && null !==
+                       (fileTokenMatch = fileTokensRe.exec(lineMatch[0]))) {
+                    // fileTokenMatch is [theEntireToken, fileName]
+                    if (lineMatch[1] + '/' + fileTokenMatch[1]
+                        ===
+                        newStreamName + '/' + newName) {
+                        ok = false;
+                    }
+                }
+            }
+            suffixInt = (suffixInt || 0) + 1;
+        }
+        return newName;
+    }
+
+    function getDiscoveryDoc() {
+        if (!promiseDiscovery) {
+            promiseDiscovery = $.ajax({
+                url: arvadosDiscoveryUri,
+                crossDomain: true
+            }).then(function(data, status, xhr) {
+                discoveryDoc = data;
+            });
+        }
+        return promiseDiscovery;
+    }
+}
diff --git a/apps/workbench/app/assets/javascripts/bootstrap.js.coffee b/apps/workbench/app/assets/javascripts/bootstrap.js.coffee
new file mode 100644 (file)
index 0000000..c9404a8
--- /dev/null
@@ -0,0 +1,4 @@
+jQuery ->
+  $("a[rel=popover]").popover()
+  $(".tooltip").tooltip()
+  $("a[rel=tooltip]").tooltip()
\ No newline at end of file
diff --git a/apps/workbench/app/assets/javascripts/collections.js b/apps/workbench/app/assets/javascripts/collections.js
new file mode 100644 (file)
index 0000000..865f121
--- /dev/null
@@ -0,0 +1,55 @@
+jQuery(function($){
+    $(document).on('click', '.toggle-persist button', function() {
+        var toggle_group = $(this).parents('[data-remote-href]').first();
+        var want_persist = !toggle_group.find('button').hasClass('active');
+        var want_state = want_persist ? 'persistent' : 'cache';
+        toggle_group.find('button').
+            toggleClass('active', want_persist).
+            html(want_persist ? 'Persistent' : 'Cache');
+        $.ajax(toggle_group.attr('data-remote-href'),
+               {dataType: 'json',
+                type: 'POST',
+                data: {
+                    value: want_state
+                },
+                context: {
+                    toggle_group: toggle_group,
+                    want_state: want_state,
+                    button: this
+                }
+               }).
+            done(function(data, status, jqxhr) {
+                var context = this;
+                // Remove "danger" status in case a previous action failed
+                $('.btn-danger', context.toggle_group).
+                    addClass('btn-info').
+                    removeClass('btn-danger');
+                // Update last-saved-state
+                context.toggle_group.
+                    attr('data-persistent-state', context.want_state);
+            }).
+            fail(function(jqxhr, status, error) {
+                var context = this;
+                var saved_state;
+                // Add a visual indication that something failed
+                $(context.button).
+                    addClass('btn-danger').
+                    removeClass('btn-info');
+                // Change to the last-saved-state
+                saved_state = context.toggle_group.attr('data-persistent-state');
+                $(context.button).
+                    toggleClass('active', saved_state == 'persistent').
+                    html(saved_state == 'persistent' ? 'Persistent' : 'Cache');
+
+                if (jqxhr.readyState == 0 || jqxhr.status == 0) {
+                    // Request cancelled due to page reload.
+                    // Displaying an alert would be rather annoying.
+                } else if (jqxhr.responseJSON && jqxhr.responseJSON.errors) {
+                    window.alert("Request failed: " +
+                                 jqxhr.responseJSON.errors.join("; "));
+                } else {
+                    window.alert("Request failed.");
+                }
+            });
+    });
+});
diff --git a/apps/workbench/app/assets/javascripts/dates.js b/apps/workbench/app/assets/javascripts/dates.js
new file mode 100644 (file)
index 0000000..5e4b804
--- /dev/null
@@ -0,0 +1,25 @@
+jQuery(function($){
+$(document).on('ajax:complete arv:pane:loaded ready', function() {
+    $('[data-utc-date]').each(function(i, elm) {
+        // Try matching the date using a couple of different formats.
+        var v = $(elm).attr('data-utc-date').match(/(\d\d\d\d)-(\d\d)-(\d\d) (\d\d):(\d\d):(\d\d) UTC/);
+        if (!v) {
+            v = $(elm).attr('data-utc-date').match(/(\d\d\d\d)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)Z/);
+        }
+
+        if (v) {
+            // Create a new date object from the timestamp so the browser can
+            // render the date based on the locale/timezone.
+            var ts = new Date(Date.UTC(v[1], v[2]-1, v[3], v[4], v[5], v[6]));
+            if ($(elm).attr('data-utc-date-opts') && $(elm).attr('data-utc-date-opts').match(/noseconds/)) {
+                $(elm).text((ts.getHours() > 12 ? (ts.getHours()-12) : ts.getHours())
+                            + ":" + (ts.getMinutes() < 10 ? '0' : '') + ts.getMinutes()
+                            + (ts.getHours() >= 12 ? " PM " : " AM ")
+                            + ts.toLocaleDateString());
+            } else {
+                $(elm).text(ts.toLocaleTimeString() + " " + ts.toLocaleDateString());
+            }
+        }
+    });
+});
+});
diff --git a/apps/workbench/app/assets/javascripts/editable.js b/apps/workbench/app/assets/javascripts/editable.js
new file mode 100644 (file)
index 0000000..dc54bda
--- /dev/null
@@ -0,0 +1,117 @@
+$.fn.editable.defaults.ajaxOptions = {type: 'post', dataType: 'json'};
+$.fn.editable.defaults.send = 'always';
+
+// Default for editing is popup.  I experimented with inline which is a little
+// nicer in that it shows up right under the mouse instead of nearby.  However,
+// the inline box is taller than the regular content, which causes the page
+// layout to shift unless we make the table rows tall, which leaves a lot of
+// wasted space when not editing.  Also inline can get cut off if the page is
+// too narrow, when the popup box will just move to do the right thing.
+//$.fn.editable.defaults.mode = 'inline';
+
+$.fn.editable.defaults.success = function (response, newValue) {
+    $(document).trigger('editable:success', [this, response, newValue]);
+};
+
+$.fn.editable.defaults.params = function (params) {
+    var a = {};
+    var key = params.pk.key;
+    a.id = $(this).attr('data-object-uuid') || params.pk.id;
+    a[key] = params.pk.defaults || {};
+    // Remove null values. Otherwise they get transmitted as empty
+    // strings in request params.
+    for (i in a[key]) {
+        if (a[key][i] == null)
+            delete a[key][i];
+    }
+    a[key][params.name] = params.value;
+    if (!a.id) {
+        a['_method'] = 'post';
+    } else {
+        a['_method'] = 'put';
+    }
+    return a;
+};
+
+$.fn.editable.defaults.validate = function (value) {
+    if (value == "***invalid***") {
+        return "Invalid selection";
+    }
+}
+
+$(document).
+    on('ready ajax:complete', function() {
+        $('.editable').
+            not('.editable-done-setup').
+            addClass('editable-done-setup').
+            editable({
+                success: function(response, newValue) {
+                    // If we just created a new object, stash its UUID
+                    // so we edit it next time instead of creating
+                    // another new object.
+                    if (!$(this).attr('data-object-uuid') && response.uuid) {
+                        $(this).attr('data-object-uuid', response.uuid);
+                    }
+                    if (response.href) {
+                        $(this).editable('option', 'url', response.href);
+                    }
+                    if ($(this).attr('data-name')) {
+                        var textileAttr = $(this).attr('data-name') + 'Textile';
+                        if (response[textileAttr]) {
+                            $(this).attr('data-textile', response[textileAttr]);
+                        }
+                    }
+                    return;
+                },
+                error: function(response, newValue) {
+                    var errlist = response.responseJSON.errors;
+                    var errmsg;
+                    if (Array.isArray(errlist)) {
+                        errmsg = errlist.join();
+                    } else {
+                        errmsg = ("The server returned an error when making " +
+                                  "this update (status " + response.status +
+                                  ": " + errlist + ").");
+                    }
+                    return errmsg;
+                }
+            }).
+            on('hidden', function(e, reason) {
+                // After saving a new attribute, update the same
+                // information if it appears elsewhere on the page.
+                if (reason != 'save') return;
+                var html = $(this).html();
+                if( $(this).attr('data-textile') ) {
+                    html = $(this).attr('data-textile');
+                    $(this).html(html);
+                }
+                var uuid = $(this).attr('data-object-uuid');
+                var attr = $(this).attr('data-name');
+                var edited = this;
+                if (uuid && attr) {
+                    $("[data-object-uuid='" + uuid + "']" +
+                      "[data-name='" + attr + "']").each(function() {
+                          if (this != edited)
+                              $(this).html(html);
+                      });
+                }
+            });
+    }).
+    on('ready ajax:complete', function() {
+        $("[data-toggle~='x-editable']").
+            not('.editable-done-setup').
+            addClass('editable-done-setup').
+            click(function(e) {
+                e.stopPropagation();
+                $($(this).attr('data-toggle-selector')).editable('toggle');
+            });
+    });
+
+$.fn.editabletypes.text.defaults.tpl = '<input type="text" name="editable-text">'
+
+$.fn.editableform.buttons = '\
+<button type="submit" class="btn btn-primary btn-sm editable-submit" \
+  id="editable-submit"><i class="glyphicon glyphicon-ok"></i></button>\
+<button type="button" class="btn btn-default btn-sm editable-cancel" \
+  id="editable-cancel"><i class="glyphicon glyphicon-remove"></i></button>\
+'
diff --git a/apps/workbench/app/assets/javascripts/event_log.js b/apps/workbench/app/assets/javascripts/event_log.js
new file mode 100644 (file)
index 0000000..29ea74c
--- /dev/null
@@ -0,0 +1,370 @@
+/*
+ * This js establishes a websockets connection with the API Server.
+ */
+
+/* Subscribe to websockets event log.  Do nothing if already connected. */
+function subscribeToEventLog () {
+    // if websockets are not supported by the browser, do not subscribe for events
+    websocketsSupported = ('WebSocket' in window);
+    if (websocketsSupported == false) {
+        return;
+    }
+
+    // check if websocket connection is already stored on the window
+    event_log_disp = $(window).data("arv-websocket");
+    if (event_log_disp == null) {
+        // need to create new websocket and event log dispatcher
+        websocket_url = $('meta[name=arv-websocket-url]').attr("content");
+        if (websocket_url == null)
+            return;
+
+        event_log_disp = new WebSocket(websocket_url);
+
+        event_log_disp.onopen = onEventLogDispatcherOpen;
+        event_log_disp.onmessage = onEventLogDispatcherMessage;
+
+        // store websocket in window to allow reuse when multiple divs subscribe for events
+        $(window).data("arv-websocket", event_log_disp);
+    }
+}
+
+/* Send subscribe message to the websockets server.  Without any filters
+   arguments, this subscribes to all events */
+function onEventLogDispatcherOpen(event) {
+    this.send('{"method":"subscribe"}');
+}
+
+/* Trigger event for all applicable elements waiting for this event */
+function onEventLogDispatcherMessage(event) {
+    parsedData = JSON.parse(event.data);
+    object_uuid = parsedData.object_uuid;
+
+    if (!object_uuid) {
+        return;
+    }
+
+    // if there are any listeners for this object uuid or "all", trigger the event
+    matches = ".arv-log-event-listener[data-object-uuid=\"" + object_uuid + "\"],.arv-log-event-listener[data-object-uuids~=\"" + object_uuid + "\"],.arv-log-event-listener[data-object-uuid=\"all\"],.arv-log-event-listener[data-object-kind=\"" + parsedData.object_kind + "\"]";
+    $(matches).trigger('arv-log-event', parsedData);
+}
+
+/* Automatically connect if there are any elements on the page that want to
+   receive event log events. */
+$(document).on('ajax:complete ready', function() {
+    var a = $('.arv-log-event-listener');
+    if (a.length > 0) {
+        subscribeToEventLog();
+    }
+});
+
+/* Assumes existence of:
+  window.jobGraphData = [];
+  window.jobGraphSeries = [];
+  window.jobGraphSortedSeries = [];
+  window.jobGraphMaxima = {};
+ */
+function processLogLineForChart( logLine ) {
+    try {
+        var match = logLine.match(/^(\S+) (\S+) (\S+) (\S+) stderr crunchstat: (\S+) (.*)/);
+        if( !match ) {
+            match = logLine.match(/^((?:Sun|Mon|Tue|Wed|Thu|Fri|Sat) (?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) \d{1,2} \d\d:\d\d:\d\d \d{4}) (\S+) (\S+) (\S+) stderr crunchstat: (\S+) (.*)/);
+            if( match ) {
+                match[1] = (new Date(match[1] + ' UTC')).toISOString().replace('Z','');
+            }
+        }
+        if( match ) {
+            var rawDetailData = '';
+            var datum = null;
+
+            // the timestamp comes first
+            var timestamp = match[1].replace('_','T') + 'Z';
+
+            // we are interested in "-- interval" recordings
+            var intervalMatch = match[6].match(/(.*) -- interval (.*)/);
+            if( intervalMatch ) {
+                var intervalData = intervalMatch[2].trim().split(' ');
+                var dt = parseFloat(intervalData[0]);
+                var dsum = 0.0;
+                for(var i=2; i < intervalData.length; i += 2 ) {
+                    dsum += parseFloat(intervalData[i]);
+                }
+                datum = dsum/dt;
+
+                if( datum < 0 ) {
+                    // not interested in negative deltas
+                    return;
+                }
+
+                rawDetailData = intervalMatch[2];
+
+                // for the series name use the task number (4th term) and then the first word after 'crunchstat:'
+                var series = 'T' + match[4] + '-' + match[5];
+
+                // special calculation for cpus
+                if( /-cpu$/.test(series) ) {
+                    // divide the stat by the number of cpus unless the time count is less than the interval length
+                    if( dsum.toFixed(1) > dt.toFixed(1) ) {
+                        var cpuCountMatch = intervalMatch[1].match(/(\d+) cpus/);
+                        if( cpuCountMatch ) {
+                            datum = datum / cpuCountMatch[1];
+                        }
+                    }
+                }
+
+                addJobGraphDatum( timestamp, datum, series, rawDetailData );
+            } else {
+                // we are also interested in memory ("mem") recordings
+                var memoryMatch = match[6].match(/(\d+) cache (\d+) swap (\d+) pgmajfault (\d+) rss/);
+                if( memoryMatch ) {
+                    rawDetailData = match[6];
+                    // one datapoint for rss and one for swap - only show the rawDetailData for rss
+                    addJobGraphDatum( timestamp, parseInt(memoryMatch[4]), 'T' + match[4] + "-rss", rawDetailData );
+                    addJobGraphDatum( timestamp, parseInt(memoryMatch[2]), 'T' + match[4] + "-swap", '' );
+                } else {
+                    // not interested
+                    return;
+                }
+            }
+
+            window.redraw = true;
+        }
+    } catch( err ) {
+        console.log( 'Ignoring error trying to process log line: ' + err);
+    }
+}
+
// Record one raw datum for one graph series at the given timestamp.
//
// Rescalable series are stored scaled into the 0..1 range (the chart is
// created with ymax 1.0 in createJobGraph); jobGraphMaxima[series]
// remembers the raw value that currently maps to 1.0. CPU series are
// exempt from scaling (see isJobSeriesRescalable). This function also
// maintains the sliding ten-minute window of jobGraphData and the
// task-sorted series order used by the hover tooltip.
//
//   timestamp     -- x-axis key; compared with === and < (ISO strings
//                    sort correctly that way)
//   datum         -- raw numeric value for this series at this time
//   series        -- series name, expected to match /^T(\d+)-(.*)/
//   rawDetailData -- human-readable detail stored for the hover tooltip
//
// Reads/writes file-level globals: jobGraphData, jobGraphSeries,
// jobGraphSortedSeries, jobGraphMaxima, window.recreate.
function addJobGraphDatum(timestamp, datum, series, rawDetailData) {
    // check for new series
    if( $.inArray( series, jobGraphSeries ) < 0 ) {
        var newIndex = jobGraphSeries.push(series) - 1;
        jobGraphSortedSeries.push(newIndex);
        // Keep hover rows ordered by task number (zero-padded to six
        // digits so the string comparison orders numerically), then by
        // metric name.
        jobGraphSortedSeries.sort( function(a,b) {
            var matchA = jobGraphSeries[a].match(/^T(\d+)-(.*)/);
            var matchB = jobGraphSeries[b].match(/^T(\d+)-(.*)/);
            var termA = ('000000' + matchA[1]).slice(-6) + matchA[2];
            var termB = ('000000' + matchB[1]).slice(-6) + matchB[2];
            return termA > termB ? 1 : -1;
        });
        jobGraphMaxima[series] = null;
        // A new series means the whole chart must be recreated.
        window.recreate = true;
    }

    if( datum !== 0 && ( jobGraphMaxima[series] === null || jobGraphMaxima[series] < datum ) ) {
        if( isJobSeriesRescalable(series) ) {
            // use old maximum to get a scale conversion
            var scaleConversion = jobGraphMaxima[series]/datum;
            // set new maximum and rescale the series
            jobGraphMaxima[series] = datum;
            rescaleJobGraphSeries( series, scaleConversion );
        }
    }

    // scale
    var scaledDatum = null;
    if( isJobSeriesRescalable(series) && jobGraphMaxima[series] !== null && jobGraphMaxima[series] !== 0 ) {
        scaledDatum = datum/jobGraphMaxima[series]
    } else {
        scaledDatum = datum;
    }
    // identify x axis point, searching from the end of the array (most recent)
    var found = false;
    for( var i = jobGraphData.length - 1; i >= 0; i-- ) {
        if( jobGraphData[i]['t'] === timestamp ) {
            found = true;
            jobGraphData[i][series] = scaledDatum;
            jobGraphData[i]['raw-'+series] = rawDetailData;
            break;
        } else if( jobGraphData[i]['t'] < timestamp  ) {
            // we've gone far enough back in time and this data is supposed to be sorted
            break;
        }
    }
    // index counter from previous loop will have gone one too far, so add one
    // (relies on `var` being function-scoped, so i survives the loop; it is
    // -1 when the loop ran off the front of the array).
    var insertAt = i+1;
    if(!found) {
        // create a new x point for this previously unrecorded timestamp
        var entry = { 't': timestamp };
        entry[series] = scaledDatum;
        entry['raw-'+series] = rawDetailData;
        jobGraphData.splice( insertAt, 0, entry );
        var shifted = [];
        // now let's see about "scrolling" the graph, dropping entries that are too old (>10 minutes)
        while( jobGraphData.length > 0
                 && (Date.parse( jobGraphData[0]['t'] ) + 10*60000 < Date.parse( jobGraphData[jobGraphData.length-1]['t'] )) ) {
            shifted.push(jobGraphData.shift());
        }
        if( shifted.length > 0 ) {
            // from those that we dropped, were any of them maxima? if so we need to rescale
            jobGraphSeries.forEach( function(series) {
                // test that every shifted entry in this series was either not a number (in which case we don't care)
                // or else approximately (to 2 decimal places) smaller than the scaled maximum (i.e. 1),
                // because otherwise we just scrolled off something that was a maximum point
                // and so we need to recalculate a new maximum point by looking at all remaining displayed points in the series
                if( isJobSeriesRescalable(series) && jobGraphMaxima[series] !== null
                      && !shifted.every( function(e) { return( !$.isNumeric(e[series]) || e[series].toFixed(2) < 1.0 ) } ) ) {
                    // check the remaining displayed points and find the new (scaled) maximum
                    var seriesMax = null;
                    jobGraphData.forEach( function(entry) {
                        if( $.isNumeric(entry[series]) && (seriesMax === null || entry[series] > seriesMax)) {
                            seriesMax = entry[series];
                        }
                    });
                    if( seriesMax !== null && seriesMax !== 0 ) {
                        // set new actual maximum using the new maximum as the conversion factor and rescale the series
                        jobGraphMaxima[series] *= seriesMax;
                        var scaleConversion = 1/seriesMax;
                        rescaleJobGraphSeries( series, scaleConversion );
                    }
                    else {
                        // we no longer have any data points displaying for this series
                        jobGraphMaxima[series] = null;
                    }
                }
            });
        }
        // add a 10 minute old null data point to keep the chart honest if the oldest point is less than 9.9 minutes old
        if( jobGraphData.length > 0 ) {
            var earliestTimestamp = jobGraphData[0]['t'];
            var mostRecentTimestamp = jobGraphData[jobGraphData.length-1]['t'];
            if( (Date.parse( earliestTimestamp ) + 9.9*60000 > Date.parse( mostRecentTimestamp )) ) {
                var tenMinutesBefore = (new Date(Date.parse( mostRecentTimestamp ) - 600*1000)).toISOString();
                jobGraphData.unshift( { 't': tenMinutesBefore } );
            }
        }
    }

}
+
// (Re)create the Morris line chart inside the element with id
// `elementName`, plotting the current jobGraphData/jobGraphSeries
// globals. When there is no data yet, a placeholder datum and an empty
// series name are temporarily pushed so Morris still renders an empty,
// axis-less chart; the placeholders are removed again afterwards.
function createJobGraph(elementName) {
    // Drop the reference to any previously created chart object.
    // (Qualified as window.jobGraph: `delete` on an unqualified name is
    // a SyntaxError in strict mode.)
    delete window.jobGraph;
    var emptyGraph = false;
    if( jobGraphData.length === 0 ) {
        // If there is no data we still want to show an empty graph,
        // so add an empty datum and placeholder series to fool it into displaying itself.
        // Note that when finally a new series is added, the graph will be recreated anyway.
        jobGraphData.push( {} );
        jobGraphSeries.push( '' );
        emptyGraph = true;
    }
    var graphteristics = {
        element: elementName,
        data: jobGraphData,
        // Rescalable series are stored scaled into 0..1 (see
        // addJobGraphDatum), so the y axis is fixed and unlabeled.
        ymax: 1.0,
        yLabelFormat: function () { return ''; },
        xkey: 't',
        ykeys: jobGraphSeries,
        labels: jobGraphSeries,
        resize: true,
        hideHover: 'auto',
        parseTime: true,
        // Custom tooltip: list series in task order and convert plotted
        // values back to raw numbers for display.
        hoverCallback: function(index, options, content) {
            var s = "<div class='morris-hover-row-label'>";
            s += options.data[index][options.xkey];
            s += "</div> ";
            // Fix: declare the loop index; the original `for( i = 0; ...)`
            // leaked an implicit global `i`.
            for( var i = 0; i < jobGraphSortedSeries.length; i++ ) {
                var sortedIndex = jobGraphSortedSeries[i];
                var series = options.ykeys[sortedIndex];
                var datum = options.data[index][series];
                var point = '';
                point += "<div class='morris-hover-point' style='color: ";
                point += options.lineColors[sortedIndex % options.lineColors.length];
                point += "'>";
                var labelMatch = options.labels[sortedIndex].match(/^T(\d+)-(.*)/);
                point += 'Task ' + labelMatch[1] + ' ' + labelMatch[2];
                point += ": ";
                if ( datum !== undefined ) {
                    if( isJobSeriesRescalable( series ) ) {
                        // Convert the plotted 0..1 value back to raw.
                        datum *= jobGraphMaxima[series];
                    }
                    if( parseFloat(datum) !== 0 ) {
                        if( /-cpu$/.test(series) ){
                            datum = $.number(datum * 100, 1) + '%';
                        } else if( datum < 10 ) {
                            datum = $.number(datum, 2);
                        } else {
                            datum = $.number(datum);
                        }
                        if(options.data[index]['raw-'+series]) {
                            datum += ' (' + options.data[index]['raw-'+series] + ')';
                        }
                    }
                    point += datum;
                } else {
                    // This series has no value at this x point.
                    continue;
                }
                point += "</div> ";
                s += point;
            }
            return s;
        }
    };
    if( emptyGraph ) {
        graphteristics['axes'] = false;
        graphteristics['parseTime'] = false;
        graphteristics['hideHover'] = 'always';
    }
    window.jobGraph = Morris.Line( graphteristics );
    if( emptyGraph ) {
        // Undo the placeholders so real data starts from a clean slate.
        jobGraphData = [];
        jobGraphSeries = [];
    }
}
+
// Multiply every recorded point of `series` by `scaleConversion`.
// Called when the series' raw maximum changes so that already-plotted
// points stay correct relative to the new 0..1 scale.
function rescaleJobGraphSeries( series, scaleConversion ) {
    // Fix: the original called isJobSeriesRescalable() with no argument,
    // which always returned true ("undefined" does not end in "-cpu");
    // pass the series so cpu series are genuinely never rescaled.
    if( isJobSeriesRescalable(series) ) {
        $.each( jobGraphData, function( i, entry ) {
            if( entry[series] !== null && entry[series] !== undefined ) {
                entry[series] *= scaleConversion;
            }
        });
    }
}
+
// CPU series are already reported as a 0..1 fraction, so they are the
// one kind of series that is never rescaled against a running maximum.
function isJobSeriesRescalable( series ) {
    var cpuSeriesPattern = /-cpu$/;
    return cpuSeriesPattern.test(series) === false;
}
+
// Feed every line of an incoming log event to the chart parser.
$(document).on('arv-log-event', '#log_graph_div', function(event, eventData) {
    var text = eventData.properties.text;
    if (!text) {
        return;
    }
    var logLines = text.split('\n');
    for (var i = 0; i < logLines.length; i++) {
        processLogLineForChart(logLines[i]);
    }
});
+
// On page load (or after any ajax completion), initialize the log graph
// for each #log_graph_div that has not been set up yet: reset chart
// state, draw an empty chart, replay existing job logs through the
// arv-log-event listeners, and start a 5-second poll that rebuilds or
// redraws the chart when new data has arrived.
$(document).on('ready ajax:complete', function() {
    $('#log_graph_div').not('.graph-is-setup').addClass('graph-is-setup').each(function(index, graphDiv) {
        // Fresh chart state for this page/tab.
        window.jobGraphData = [];
        window.jobGraphSeries = [];
        window.jobGraphSortedSeries = [];
        window.jobGraphMaxima = {};
        window.recreate = false;
        window.redraw = false;

        createJobGraph($(graphDiv).attr('id'));
        var objectUuid = $(graphDiv).data('object-uuid');
        // if there are any listeners for this object uuid or "all", we will trigger the event
        var listenerSelector = '.arv-log-event-listener[data-object-uuid="' + objectUuid + '"],.arv-log-event-listener[data-object-uuids~="' + objectUuid + '"]';

        $(document).trigger('ajax:send');
        $.get('/jobs/' + objectUuid + '/logs.json', function(data) {
            data.forEach(function(entry) {
                $(listenerSelector).trigger('arv-log-event', entry);
            });
        });

        // Poll for pending chart work instead of redrawing per datum.
        setInterval(function() {
            if (recreate) {
                window.recreate = false;
                window.redraw = false;
                // series have changed, draw entirely new graph
                $(graphDiv).html('');
                createJobGraph($(graphDiv).attr('id'));
            } else if (redraw) {
                window.redraw = false;
                jobGraph.setData(jobGraphData);
            }
        }, 5000);
    });
});
diff --git a/apps/workbench/app/assets/javascripts/filterable.js b/apps/workbench/app/assets/javascripts/filterable.js
new file mode 100644 (file)
index 0000000..34075ca
--- /dev/null
@@ -0,0 +1,178 @@
+// filterable.js shows/hides content when the user operates
+// search/select widgets. For "infinite scroll" content, it passes the
+// filters to the server and retrieves new content. For other content,
+// it filters the existing DOM elements using jQuery show/hide.
+//
+// Usage:
+//
+// 1. Add the "filterable" class to each filterable content item.
+// Typically, each item is a 'tr' or a 'div class="row"'.
+//
+// <div id="results">
+//   <div class="filterable row">First row</div>
+//   <div class="filterable row">Second row</div>
+// </div>
+//
+// 2. Add the "filterable-control" class to each search/select widget.
+// Also add a data-filterable-target attribute with a jQuery selector
+// for an ancestor of the filterable items, i.e., the container in
+// which this widget should apply filtering.
+//
+// <input class="filterable-control" data-filterable-target="#results"
+//        type="text" />
+//
+// Supported widgets:
+//
+// <input type="text" ... />
+//
+// The input value is used as a regular expression. Rows with content
+// matching the regular expression are shown.
+//
+// <select ... data-filterable-attribute="data-example-attr">
+//  <option value="foo">Foo</option>
+//  <option value="">Show all</option>
+// </select>
+//
+// When the user selects the "Foo" option, rows with
+// data-example-attr="foo" are shown, and all others are hidden. When
+// the user selects the "Show all" option, all rows are shown.
+//
+// Notes:
+//
+// When multiple filterable-control widgets operate on the same
+// data-filterable-target, items must pass _all_ filters in order to
+// be shown.
+//
+// If one data-filterable-target is the parent of another
+// data-filterable-target, results are undefined. Don't do this.
+//
+// Combining "select" filterable-controls with infinite-scroll is not
+// yet supported.
+
// Promote the pending ("new") filter query on $target to the active
// one, and store the corresponding server-side filter expression where
// the infinite-content loader will pick it up on the next page-1 load.
function updateFilterableQueryNow($target) {
    var pendingQuery = $target.data('filterable-query-new');
    var contentParams = $target.data('infinite-content-params-filterable');
    if (!contentParams) {
        contentParams = {};
    }
    contentParams.filters = [['any', 'ilike', '%' + pendingQuery + '%']];
    $target.data('infinite-content-params-filterable', contentParams);
    $target.data('filterable-query', pendingQuery);
}
+
// Event wiring for the filterable widgets described at the top of this
// file. One delegated handler per concern:
//  - 'ready ajax:success': seed data-filterable-query[-new] from any
//    pre-filled text inputs; make refresh-content apply a pending query
//    immediately instead of waiting out the cooloff timer.
//  - 'paste keyup input' on text controls: server-side filtering (with
//    a 250 ms cooloff) for infinite-scroll targets, client-side RegExp
//    show/hide otherwise.
//  - 'refresh' on containers: re-apply regexp and attribute filters.
//  - 'change' on select controls: record an attribute filter, refresh.
//  - 'ajax:complete': re-apply text filters to newly arrived content.
$(document).
    on('ready ajax:success', function() {
        // Copy any initial input values into
        // data-filterable-query[-new].
        $('input[type=text].filterable-control').each(function() {
            var $this = $(this);
            var $target = $($this.attr('data-filterable-target'));
            if ($target.data('filterable-query-new') === undefined) {
                $target.data('filterable-query', $this.val());
                $target.data('filterable-query-new', $this.val());
                updateFilterableQueryNow($target);
            }
        });
        $('[data-infinite-scroller]').on('refresh-content', '[data-filterable-query]', function(e) {
            // If some other event causes a refresh-content event while there
            // is a new query waiting to cooloff, we should use the new query
            // right away -- otherwise we'd launch an extra ajax request that
            // would have to be reloaded as soon as the cooloff period ends.
            if (this != e.target)
                return;
            if ($(this).data('filterable-query') == $(this).data('filterable-query-new'))
                return;
            updateFilterableQueryNow($(this));
        });
    }).
    on('paste keyup input', 'input[type=text].filterable-control', function(e) {
        var regexp;
        if (this != e.target) return;
        var $target = $($(this).attr('data-filterable-target'));
        var currentquery = $target.data('filterable-query');
        if (currentquery === undefined) currentquery = '';
        if ($target.is('[data-infinite-scroller]')) {
            // We already know how to load content dynamically, so we
            // can do all filtering on the server side.

            if ($target.data('infinite-cooloff-timer') > 0) {
                // Clear a stale refresh-after-delay timer.
                clearTimeout($target.data('infinite-cooloff-timer'));
            }
            // Stash the new query string in the filterable container.
            $target.data('filterable-query-new', $(this).val());
            if (currentquery == $(this).val()) {
                // Don't mess with existing results or queries in
                // progress.
                return;
            }
            $target.data('infinite-cooloff-timer', setTimeout(function() {
                // If the user doesn't do any query-changing actions
                // in the next 1/4 second (like type or erase
                // characters in the search box), hide the stale
                // content and ask the server for new results.
                updateFilterableQueryNow($target);
                $target.trigger('refresh-content');
            }, 250));
        } else {
            // Target does not have infinite-scroll capability. Just
            // filter the rows in the browser using a RegExp.
            regexp = undefined;
            try {
                regexp = new RegExp($(this).val(), 'i');
            } catch(e) {
                if (e instanceof SyntaxError) {
                    // Invalid/partial regexp. See 'has-error' below.
                } else {
                    throw e;
                }
            }
            $target.
                toggleClass('has-error', regexp === undefined).
                addClass('filterable-container').
                data('q', regexp).
                trigger('refresh');
        }
    }).on('refresh', '.filterable-container', function() {
        // Re-evaluate which .filterable rows should be visible, given
        // the stored regexp ('q') and attribute filters ('filters').
        var $container = $(this);
        var q = $(this).data('q');
        var filters = $(this).data('filters');
        $('.filterable', this).hide().filter(function() {
            var $row = $(this);
            var pass = true;
            if (q && !$row.text().match(q))
                return false;
            if (filters) {
                // A row passes an attribute filter if its attribute
                // equals any of the space-separated accepted values.
                $.each(filters, function(filterby, val) {
                    if (!val) return;
                    if (!pass) return;
                    pass = false;
                    $.each(val.split(" "), function(i, e) {
                        if ($row.attr(filterby) == e)
                            pass = true;
                    });
                });
            }
            return pass;
        }).show();

        // Show/hide each section heading depending on whether any
        // content rows are visible in that section.
        $('.row[data-section-heading]', this).each(function(){
            $(this).toggle($('.row.filterable[data-section-name="' +
                             $(this).attr('data-section-name') +
                             '"]:visible').length > 0);
        });

        // Load more content if the last result is showing.
        $('.infinite-scroller').add(window).trigger('scroll');
    }).on('change', 'select.filterable-control', function() {
        // Record the selected attribute filter and refresh the target.
        var val = $(this).val();
        var filterby = $(this).attr('data-filterable-attribute');
        var $target = $($(this).attr('data-filterable-target')).
            addClass('filterable-container');
        var filters = $target.data('filters') || {};
        filters[filterby] = val;
        $target.
            data('filters', filters).
            trigger('refresh');
    }).on('ajax:complete', function() {
        // Re-apply text filters to freshly loaded content.
        $('.filterable-control').trigger('input');
    });
diff --git a/apps/workbench/app/assets/javascripts/infinite_scroll.js b/apps/workbench/app/assets/javascripts/infinite_scroll.js
new file mode 100644 (file)
index 0000000..81a3a46
--- /dev/null
@@ -0,0 +1,269 @@
// Scroll/resize handler: when the scroller is within 50px of the bottom,
// fetch the next page of content from the container's
// data-infinite-content-href and append it.
//
// `this` is the DOM element that heard the event (a div or window);
// event.data.container is the element holding the result rows and the
// data-infinite-* attributes.
function maybe_load_more_content(event) {
    var scroller = this;
    var $container = $(event.data.container);
    var src;                     // url for retrieving content
    var scrollHeight;
    var spinner, colspan;
    // Serial number lets a late response detect that a newer request
    // has superseded it.
    var serial = Date.now();
    var params;
    scrollHeight = scroller.scrollHeight || $('body')[0].scrollHeight;
    if ($(scroller).scrollTop() + $(scroller).height()
        >
        scrollHeight - 50)
    {
        if (!$container.attr('data-infinite-content-href0')) {
            // Remember the first page source url, so we can refresh
            // from page 1 later.
            $container.attr('data-infinite-content-href0',
                            $container.attr('data-infinite-content-href'));
        }
        src = $container.attr('data-infinite-content-href');
        if (!src || !$container.is(':visible'))
            // Finished
            return;

        // Don't start another request until this one finishes
        $container.attr('data-infinite-content-href', null);
        spinner = '<div class="spinner spinner-32px spinner-h-center"></div>';
        if ($container.is('table,tbody,thead,tfoot')) {
            // Hack to determine how many columns a new tr should have
            // in order to reach full width.
            colspan = $container.closest('table').
                find('tr').eq(0).find('td,th').length;
            if (colspan == 0)
                colspan = '*';
            spinner = ('<tr class="spinner"><td colspan="' + colspan + '">' +
                       spinner +
                       '</td></tr>');
        }
        $container.find(".spinner").detach();
        $container.append(spinner);
        $container.attr('data-infinite-serial', serial);

        if (src == $container.attr('data-infinite-content-href0')) {
            // If we're loading the first page, collect filters from
            // various sources.
            params = mergeInfiniteContentParams($container);
            $.each(params, function(k,v) {
                if (v instanceof Object) {
                    params[k] = JSON.stringify(v);
                }
            });
        } else {
            // If we're loading page >1, ignore other filtering
            // mechanisms and just use the "next page" URI from the
            // previous page's response. Aside from avoiding race
            // conditions (where page 2 could have different filters
            // than page 1), this allows the server to use filters in
            // the "next page" URI to achieve paging. (To apply any
            // new filters effectively, we need to load page 1 again
            // anyway.)
            params = {};
        }

        $.ajax(src,
               {dataType: 'json',
                type: 'GET',
                data: params,
                context: {container: $container, src: src, serial: serial}}).
            fail(function(jqxhr, status, error) {
                var $faildiv;
                var $container = this.container;
                // Fix: declare `message`; the original assigned an
                // undeclared (implicitly global) variable here.
                var message;
                if ($container.attr('data-infinite-serial') != this.serial) {
                    // A newer request is already in progress.
                    return;
                }
                if (jqxhr.readyState == 0 || jqxhr.status == 0) {
                    message = "Cancelled.";
                } else if (jqxhr.responseJSON && jqxhr.responseJSON.errors) {
                    message = jqxhr.responseJSON.errors.join("; ");
                } else {
                    message = "Request failed.";
                }
                // TODO: report the message to the user.
                console.log(message);
                $faildiv = $('<div />').
                    attr('data-infinite-content-href', this.src).
                    addClass('infinite-retry').
                    append('<span class="fa fa-warning" /> Oops, request failed. <button class="btn btn-xs btn-primary">Retry</button>');
                $container.find('div.spinner').replaceWith($faildiv);
            }).
            done(function(data, status, jqxhr) {
                if ($container.attr('data-infinite-serial') != this.serial) {
                    // A newer request is already in progress.
                    return;
                }
                $container.find(".spinner").detach();
                $container.append(data.content);
                $container.attr('data-infinite-content-href', data.next_page_href);
            });
    }
}
+
// Poke every scroll listener that might need updating. The window
// element can't carry the infinite-scroller class, so it is added to
// the set explicitly here.
function ping_all_scrollers() {
    var $listeners = $('.infinite-scroller').add(window);
    $listeners.trigger('scroll');
}
+
// Collect and merge every infiniteContentParams* hash attached to
// $container into a single params object for the next page-1 load.
//
// This mechanism allows each of several components to set and update
// its own set of filters, without having to worry about stomping on
// some other component's filters. For example, filterable.js writes
// filters in infiniteContentParamsFilterable ("search for text foo")
// without worrying about clobbering the filters set up by the tab
// pane ("only show jobs and pipelines in this tab").
//
// Merge rules per key: arrays concatenate, objects merge, scalars
// overwrite.
function mergeInfiniteContentParams($container) {
    var params = {};
    $.each($container.data(), function(datakey, datavalue) {
        // Note: We attach these data to DOM elements using
        // <element data-foo-bar="baz">. We store/retrieve them
        // using $('element').data('foo-bar'), although
        // .data('fooBar') would also work. The "all data" hash
        // returned by $('element').data(), however, always has
        // keys like 'fooBar'. In other words, where we have a
        // choice, we stick with the 'foo-bar' style to be
        // consistent with HTML. Here, our only option is
        // 'fooBar'.
        if (/^infiniteContentParams/.exec(datakey)) {
            if (datavalue instanceof Object) {
                $.each(datavalue, function(hkey, hvalue) {
                    if (hvalue instanceof Array) {
                        params[hkey] = (params[hkey] || []).
                            concat(hvalue);
                    } else if (hvalue instanceof Object) {
                        // Fix: the original `$.extend(params[hkey], hvalue)`
                        // with an undefined target extended a fresh object
                        // and discarded the return value, silently dropping
                        // hvalue when no params[hkey] existed yet.
                        params[hkey] = $.extend(params[hkey] || {}, hvalue);
                    } else {
                        params[hkey] = hvalue;
                    }
                });
            }
        }
    });
    return params;
}
+
// Record a column sort on an infinite table and return the resulting
// order-by clause as a string.
//   $container: element holding the infinite table data attributes
//               (e.g. the tbody)
//   $header:    the th carrying a preset data-sort-order attribute
//   direction:  'asc' or 'desc'
function setColumnSort( $container, $header, direction ) {
    // Clear the direction marker on every sortable header...
    $('th[data-sort-order]').removeData('sort-order-direction');
    // ...then mark the newly selected one.
    $header.data('sort-order-direction', direction);

    // Rewrite the ordering parameter consumed by the infinite-content
    // loader: each column in the comma-separated list gets the chosen
    // direction appended.
    var paramsAttr = 'infinite-content-params-' + $container.data('infinite-content-params-attr');
    var params = $container.data(paramsAttr) || {};
    var columns = $header.data('sort-order').split(",");
    params.order = columns.join(' ' + direction + ', ') + ' ' + direction;
    $container.data(paramsAttr, params);

    // Refresh the sort icons next to the column headers.
    $container.trigger('sort-icons');

    return params.order;
}
+
// Event wiring for infinite scrolling and sortable column headers:
//  - click on a retry button: restore the failed page's href, swap the
//    failure notice back to a spinner, and re-trigger loading.
//  - 'refresh-content': reset a container to page 1 and reload.
//  - 'ready ajax:complete': one-time setup of each scroller, including
//    restoring any column sort saved in history.state.
//  - 'shown.bs.tab': kick scrollers in a newly revealed tab pane.
//  - click on a sortable th: toggle sort direction and reload.
//  - 'sort-icons': refresh the direction icons on sortable headers.
$(document).
    on('click', 'div.infinite-retry button', function() {
        var $retry_div = $(this).closest('.infinite-retry');
        var $container = $(this).closest('.infinite-scroller-ready')
        $container.attr('data-infinite-content-href',
                        $retry_div.attr('data-infinite-content-href'));
        $retry_div.
            replaceWith('<div class="spinner spinner-32px spinner-h-center" />');
        ping_all_scrollers();
    }).
    on('refresh-content', '[data-infinite-scroller]', function() {
        // Clear all rows, reset source href to initial state, and
        // (if the container is visible) start loading content.
        var first_page_href = $(this).attr('data-infinite-content-href0');
        if (!first_page_href)
            first_page_href = $(this).attr('data-infinite-content-href');
        $(this).
            html('').
            attr('data-infinite-content-href', first_page_href);
        ping_all_scrollers();
    }).
    on('ready ajax:complete', function() {
        $('[data-infinite-scroller]').each(function() {
            // Only set each scroller up once.
            if ($(this).hasClass('infinite-scroller-ready'))
                return;
            $(this).addClass('infinite-scroller-ready');

            // deal with sorting if there is any, and if it was set on this page for this tab already
            if( $('th[data-sort-order]').length ) {
                var tabId = $(this).closest('div.tab-pane').attr('id');
                if( hasHTML5History() && history.state !== undefined && history.state !== null && history.state.order !== undefined && history.state.order[tabId] !== undefined ) {
                    // we will use the list of one or more table columns associated with this header to find the right element
                    // see sortable_columns as it is passed to render_pane in the various tab .erbs (e.g. _show_jobs_and_pipelines.html.erb)
                    var strippedColumns = history.state.order[tabId].replace(/\s|\basc\b|\bdesc\b/g,'');
                    var sortDirection = history.state.order[tabId].split(" ")[1].replace(/,/,'');
                    // NOTE(review): $columnHeader is assigned without `var`,
                    // creating an implicit global — confirm before tightening.
                    $columnHeader = $(this).closest('table').find('[data-sort-order="'+ strippedColumns +'"]');
                    setColumnSort( $(this), $columnHeader, sortDirection );
                } else {
                    // otherwise just reset the sort icons
                    $(this).trigger('sort-icons');
                }
            }

            // $scroller is the DOM element that hears "scroll"
            // events: sometimes it's a div, sometimes it's
            // window. Here, "this" is the DOM element containing the
            // result rows. We pass it to maybe_load_more_content in
            // event.data.
            var $scroller = $($(this).attr('data-infinite-scroller'));
            if (!$scroller.hasClass('smart-scroll') &&
                'scroll' != $scroller.css('overflow-y'))
                $scroller = $(window);
            $scroller.
                addClass('infinite-scroller').
                on('scroll resize', { container: this }, maybe_load_more_content).
                trigger('scroll');
        });
    }).
    on('shown.bs.tab', 'a[data-toggle="tab"]', function(event) {
        $(event.target.getAttribute('href') + ' [data-infinite-scroller]').
            trigger('scroll');
    }).
    on('click', 'th[data-sort-order]', function() {
        var direction = $(this).data('sort-order-direction');
        // reverse the current direction, or do ascending if none
        if( direction === undefined || direction === 'desc' ) {
            direction = 'asc';
        } else {
            direction = 'desc';
        }

        var $container = $(this).closest('table').find('[data-infinite-content-params-attr]');

        var order = setColumnSort( $container, $(this), direction );

        // put it in the browser history state if browser allows it
        if( hasHTML5History() ) {
            var tabId = $(this).closest('div.tab-pane').attr('id');
            var state =  history.state || {};
            if( state.order === undefined ) {
                state.order = {};
            }
            state.order[tabId] = order;
            history.replaceState( state, null, null );
        }

        $container.trigger('refresh-content');
    }).
    on('sort-icons', function() {
        // set or reset the icon next to each sortable column header according to the current direction attribute
        $('th[data-sort-order]').each(function() {
            $(this).find('i').remove();
            var direction = $(this).data('sort-order-direction');
            if( direction !== undefined ) {
                $(this).append('<i class="fa fa-sort-' + direction + '"/>');
            } else {
                $(this).append('<i class="fa fa-sort"/>');
            }
        });
    });
diff --git a/apps/workbench/app/assets/javascripts/keep_disks.js.coffee b/apps/workbench/app/assets/javascripts/keep_disks.js.coffee
new file mode 100644 (file)
index 0000000..e4aa4b4
--- /dev/null
@@ -0,0 +1,28 @@
# Convert an age in milliseconds to a (fractional) number of days.
cache_age_in_days = (milliseconds_age) ->
  MS_PER_DAY = 1000 * 60 * 60 * 24
  milliseconds_age / MS_PER_DAY
+
# Hover text for the histogram: cache age in days, one decimal place.
cache_age_hover = (milliseconds_age) ->
  days = cache_age_in_days(milliseconds_age).toFixed(1)
  'Cache age ' + days + ' days.'
+
# X-axis label: cache age rounded to whole days.
cache_age_axis_label = (milliseconds_age) ->
  wholeDays = cache_age_in_days(milliseconds_age).toFixed(0)
  wholeDays + ' days'
+
# Format a 0..1 proportion as a percentage string for the y axis.
# NOTE(review): toFixed(4) returns a string, and '* 100' coerces it back
# to a float, so values like 0.1 can render as "10.000000000000002%".
# Probably (proportion * 100).toFixed(2) was intended — confirm the
# desired display format before changing.
float_as_percentage = (proportion) ->
  (proportion.toFixed(4) * 100) + '%'
+
# Render the keep-disk utilization histogram as a Morris stacked area
# chart in the #cache-age-vs-disk-histogram element. Each histogram_data
# row carries an 'age' x value plus 'persisted' and 'cache' y values in
# the 0..1 range (formatted as percentages on the y axis).
$.renderHistogram = (histogram_data) ->
  Morris.Area({
    element: 'cache-age-vs-disk-histogram',
    pointSize: 0,
    lineWidth: 0,
    data: histogram_data,
    xkey: 'age',
    ykeys: ['persisted', 'cache'],
    labels: ['Persisted Storage Disk Utilization', 'Cached Storage Disk Utilization'],
    ymax: 1,
    ymin: 0,
    xLabelFormat: cache_age_axis_label,
    yLabelFormat: float_as_percentage,
    dateFormat: cache_age_hover
  })
diff --git a/apps/workbench/app/assets/javascripts/list.js b/apps/workbench/app/assets/javascripts/list.js
new file mode 100644 (file)
index 0000000..d8ea7ba
--- /dev/null
@@ -0,0 +1,1474 @@
+;(function(){
+
+/**
+ * Require the given path.
+ *
+ * @param {String} path
+ * @return {Object} exports
+ * @api public
+ */
+
+function require(path, parent, orig) {
+  var resolved = require.resolve(path);
+
+  // lookup failed
+  if (null == resolved) {
+    orig = orig || path;
+    parent = parent || 'root';
+    var err = new Error('Failed to require "' + orig + '" from "' + parent + '"');
+    err.path = orig;
+    err.parent = parent;
+    err.require = true;
+    throw err;
+  }
+
+  var module = require.modules[resolved];
+
+  // perform real require()
+  // by invoking the module's
+  // registered function
+  if (!module._resolving && !module.exports) {
+    var mod = {};
+    mod.exports = {};
+    mod.client = mod.component = true;
+    module._resolving = true;
+    module.call(this, mod.exports, require.relative(resolved), mod);
+    delete module._resolving;
+    module.exports = mod.exports;
+  }
+
+  return module.exports;
+}
+
+/**
+ * Registered modules.
+ */
+
+require.modules = {};
+
+/**
+ * Registered aliases.
+ */
+
+require.aliases = {};
+
+/**
+ * Resolve `path`.
+ *
+ * Lookup:
+ *
+ *   - PATH/index.js
+ *   - PATH.js
+ *   - PATH
+ *
+ * @param {String} path
+ * @return {String} path or null
+ * @api private
+ */
+
+require.resolve = function(path) {
+  if (path.charAt(0) === '/') path = path.slice(1);
+
+  var paths = [
+    path,
+    path + '.js',
+    path + '.json',
+    path + '/index.js',
+    path + '/index.json'
+  ];
+
+  for (var i = 0; i < paths.length; i++) {
+    var path = paths[i];
+    if (require.modules.hasOwnProperty(path)) return path;
+    if (require.aliases.hasOwnProperty(path)) return require.aliases[path];
+  }
+};
+
+/**
+ * Normalize `path` relative to the current path.
+ *
+ * @param {String} curr
+ * @param {String} path
+ * @return {String}
+ * @api private
+ */
+
+require.normalize = function(curr, path) {
+  var segs = [];
+
+  if ('.' != path.charAt(0)) return path;
+
+  curr = curr.split('/');
+  path = path.split('/');
+
+  for (var i = 0; i < path.length; ++i) {
+    if ('..' == path[i]) {
+      curr.pop();
+    } else if ('.' != path[i] && '' != path[i]) {
+      segs.push(path[i]);
+    }
+  }
+
+  return curr.concat(segs).join('/');
+};
+
+/**
+ * Register module at `path` with callback `definition`.
+ *
+ * @param {String} path
+ * @param {Function} definition
+ * @api private
+ */
+
+require.register = function(path, definition) {
+  require.modules[path] = definition;
+};
+
+/**
+ * Alias a module definition.
+ *
+ * @param {String} from
+ * @param {String} to
+ * @api private
+ */
+
+require.alias = function(from, to) {
+  if (!require.modules.hasOwnProperty(from)) {
+    throw new Error('Failed to alias "' + from + '", it does not exist');
+  }
+  require.aliases[to] = from;
+};
+
+/**
+ * Return a require function relative to the `parent` path.
+ *
+ * @param {String} parent
+ * @return {Function}
+ * @api private
+ */
+
+require.relative = function(parent) {
+  var p = require.normalize(parent, '..');
+
+  /**
+   * lastIndexOf helper.
+   */
+
+  function lastIndexOf(arr, obj) {
+    var i = arr.length;
+    while (i--) {
+      if (arr[i] === obj) return i;
+    }
+    return -1;
+  }
+
+  /**
+   * The relative require() itself.
+   */
+
+  function localRequire(path) {
+    var resolved = localRequire.resolve(path);
+    return require(resolved, parent, path);
+  }
+
+  /**
+   * Resolve relative to the parent.
+   */
+
+  localRequire.resolve = function(path) {
+    var c = path.charAt(0);
+    if ('/' == c) return path.slice(1);
+    if ('.' == c) return require.normalize(p, path);
+
+    // resolve deps by returning
+    // the dep in the nearest "deps"
+    // directory
+    var segs = parent.split('/');
+    var i = lastIndexOf(segs, 'deps') + 1;
+    if (!i) i = 0;
+    path = segs.slice(0, i + 1).join('/') + '/deps/' + path;
+    return path;
+  };
+
+  /**
+   * Check if module is defined at `path`.
+   */
+
+  localRequire.exists = function(path) {
+    return require.modules.hasOwnProperty(localRequire.resolve(path));
+  };
+
+  return localRequire;
+};
+require.register("component-classes/index.js", function(exports, require, module){
+/**
+ * Module dependencies.
+ */
+
+var index = require('indexof');
+
+/**
+ * Whitespace regexp.
+ */
+
+var re = /\s+/;
+
+/**
+ * toString reference.
+ */
+
+var toString = Object.prototype.toString;
+
+/**
+ * Wrap `el` in a `ClassList`.
+ *
+ * @param {Element} el
+ * @return {ClassList}
+ * @api public
+ */
+
+module.exports = function(el){
+  return new ClassList(el);
+};
+
+/**
+ * Initialize a new ClassList for `el`.
+ *
+ * @param {Element} el
+ * @api private
+ */
+
+function ClassList(el) {
+  if (!el) throw new Error('A DOM element reference is required');
+  this.el = el;
+  this.list = el.classList;
+}
+
+/**
+ * Add class `name` if not already present.
+ *
+ * @param {String} name
+ * @return {ClassList}
+ * @api public
+ */
+
+ClassList.prototype.add = function(name){
+  // classList
+  if (this.list) {
+    this.list.add(name);
+    return this;
+  }
+
+  // fallback
+  var arr = this.array();
+  var i = index(arr, name);
+  if (!~i) arr.push(name);
+  this.el.className = arr.join(' ');
+  return this;
+};
+
+/**
+ * Remove class `name` when present, or
+ * pass a regular expression to remove
+ * any which match.
+ *
+ * @param {String|RegExp} name
+ * @return {ClassList}
+ * @api public
+ */
+
+ClassList.prototype.remove = function(name){
+  if ('[object RegExp]' == toString.call(name)) {
+    return this.removeMatching(name);
+  }
+
+  // classList
+  if (this.list) {
+    this.list.remove(name);
+    return this;
+  }
+
+  // fallback
+  var arr = this.array();
+  var i = index(arr, name);
+  if (~i) arr.splice(i, 1);
+  this.el.className = arr.join(' ');
+  return this;
+};
+
+/**
+ * Remove all classes matching `re`.
+ *
+ * @param {RegExp} re
+ * @return {ClassList}
+ * @api private
+ */
+
+ClassList.prototype.removeMatching = function(re){
+  var arr = this.array();
+  for (var i = 0; i < arr.length; i++) {
+    if (re.test(arr[i])) {
+      this.remove(arr[i]);
+    }
+  }
+  return this;
+};
+
+/**
+ * Toggle class `name`, can force state via `force`.
+ *
+ * For browsers that support classList, but do not support `force` yet,
+ * the mistake will be detected and corrected.
+ *
+ * @param {String} name
+ * @param {Boolean} force
+ * @return {ClassList}
+ * @api public
+ */
+
+ClassList.prototype.toggle = function(name, force){
+  // classList
+  if (this.list) {
+    if ("undefined" !== typeof force) {
+      if (force !== this.list.toggle(name, force)) {
+        this.list.toggle(name); // toggle again to correct
+      }
+    } else {
+      this.list.toggle(name);
+    }
+    return this;
+  }
+
+  // fallback
+  if ("undefined" !== typeof force) {
+    if (!force) {
+      this.remove(name);
+    } else {
+      this.add(name);
+    }
+  } else {
+    if (this.has(name)) {
+      this.remove(name);
+    } else {
+      this.add(name);
+    }
+  }
+
+  return this;
+};
+
+/**
+ * Return an array of classes.
+ *
+ * @return {Array}
+ * @api public
+ */
+
+ClassList.prototype.array = function(){
+  var str = this.el.className.replace(/^\s+|\s+$/g, '');
+  var arr = str.split(re);
+  if ('' === arr[0]) arr.shift();
+  return arr;
+};
+
+/**
+ * Check if class `name` is present.
+ *
+ * @param {String} name
+ * @return {ClassList}
+ * @api public
+ */
+
+ClassList.prototype.has =
+ClassList.prototype.contains = function(name){
+  return this.list
+    ? this.list.contains(name)
+    : !! ~index(this.array(), name);
+};
+
+});
+require.register("segmentio-extend/index.js", function(exports, require, module){
+
+module.exports = function extend (object) {
+    // Takes an unlimited number of extenders.
+    var args = Array.prototype.slice.call(arguments, 1);
+
+    // For each extender, copy their properties on our object.
+    for (var i = 0, source; source = args[i]; i++) {
+        if (!source) continue;
+        for (var property in source) {
+            object[property] = source[property];
+        }
+    }
+
+    return object;
+};
+});
+require.register("component-indexof/index.js", function(exports, require, module){
+module.exports = function(arr, obj){
+  if (arr.indexOf) return arr.indexOf(obj);
+  for (var i = 0; i < arr.length; ++i) {
+    if (arr[i] === obj) return i;
+  }
+  return -1;
+};
+});
+require.register("component-event/index.js", function(exports, require, module){
+var bind = window.addEventListener ? 'addEventListener' : 'attachEvent',
+    unbind = window.removeEventListener ? 'removeEventListener' : 'detachEvent',
+    prefix = bind !== 'addEventListener' ? 'on' : '';
+
+/**
+ * Bind `el` event `type` to `fn`.
+ *
+ * @param {Element} el
+ * @param {String} type
+ * @param {Function} fn
+ * @param {Boolean} capture
+ * @return {Function}
+ * @api public
+ */
+
+exports.bind = function(el, type, fn, capture){
+  el[bind](prefix + type, fn, capture || false);
+  return fn;
+};
+
+/**
+ * Unbind `el` event `type`'s callback `fn`.
+ *
+ * @param {Element} el
+ * @param {String} type
+ * @param {Function} fn
+ * @param {Boolean} capture
+ * @return {Function}
+ * @api public
+ */
+
+exports.unbind = function(el, type, fn, capture){
+  el[unbind](prefix + type, fn, capture || false);
+  return fn;
+};
+});
+require.register("timoxley-to-array/index.js", function(exports, require, module){
+/**
+ * Convert an array-like object into an `Array`.
+ * If `collection` is already an `Array`, then will return a clone of `collection`.
+ *
+ * @param {Array | Mixed} collection An `Array` or array-like object to convert e.g. `arguments` or `NodeList`
+ * @return {Array} Naive conversion of `collection` to a new `Array`.
+ * @api public
+ */
+
+module.exports = function toArray(collection) {
+  if (typeof collection === 'undefined') return []
+  if (collection === null) return [null]
+  if (collection === window) return [window]
+  if (typeof collection === 'string') return [collection]
+  if (isArray(collection)) return collection
+  if (typeof collection.length != 'number') return [collection]
+  if (typeof collection === 'function' && collection instanceof Function) return [collection]
+
+  var arr = []
+  for (var i = 0; i < collection.length; i++) {
+    if (Object.prototype.hasOwnProperty.call(collection, i) || i in collection) {
+      arr.push(collection[i])
+    }
+  }
+  if (!arr.length) return []
+  return arr
+}
+
+function isArray(arr) {
+  return Object.prototype.toString.call(arr) === "[object Array]";
+}
+
+});
+require.register("javve-events/index.js", function(exports, require, module){
+var events = require('event'),
+  toArray = require('to-array');
+
+/**
+ * Bind `el` event `type` to `fn`.
+ *
+ * @param {Element} el, NodeList, HTMLCollection or Array
+ * @param {String} type
+ * @param {Function} fn
+ * @param {Boolean} capture
+ * @api public
+ */
+
+exports.bind = function(el, type, fn, capture){
+  el = toArray(el);
+  for ( var i = 0; i < el.length; i++ ) {
+    events.bind(el[i], type, fn, capture);
+  }
+};
+
+/**
+ * Unbind `el` event `type`'s callback `fn`.
+ *
+ * @param {Element} el, NodeList, HTMLCollection or Array
+ * @param {String} type
+ * @param {Function} fn
+ * @param {Boolean} capture
+ * @api public
+ */
+
+exports.unbind = function(el, type, fn, capture){
+  el = toArray(el);
+  for ( var i = 0; i < el.length; i++ ) {
+    events.unbind(el[i], type, fn, capture);
+  }
+};
+
+});
+require.register("javve-get-by-class/index.js", function(exports, require, module){
+/**
+ * Find all elements with class `className` inside `container`.
+ * Use `single = true` to increase performance in older browsers
+ * when only one element is needed.
+ *
+ * @param {String} className
+ * @param {Element} container
+ * @param {Boolean} single
+ * @api public
+ */
+
+module.exports = (function() {
+  if (document.getElementsByClassName) {
+    return function(container, className, single) {
+      if (single) {
+        return container.getElementsByClassName(className)[0];
+      } else {
+        return container.getElementsByClassName(className);
+      }
+    };
+  } else if (document.querySelector) {
+    return function(container, className, single) {
+      className = '.' + className;
+      if (single) {
+        return container.querySelector(className);
+      } else {
+        return container.querySelectorAll(className);
+      }
+    };
+  } else {
+    return function(container, className, single) {
+      var classElements = [],
+        tag = '*';
+      if (container == null) {
+        container = document;
+      }
+      var els = container.getElementsByTagName(tag);
+      var elsLen = els.length;
+      var pattern = new RegExp("(^|\\s)"+className+"(\\s|$)");
+      for (var i = 0, j = 0; i < elsLen; i++) {
+        if ( pattern.test(els[i].className) ) {
+          if (single) {
+            return els[i];
+          } else {
+            classElements[j] = els[i];
+            j++;
+          }
+        }
+      }
+      return classElements;
+    };
+  }
+})();
+
+});
+require.register("javve-get-attribute/index.js", function(exports, require, module){
+/**
+ * Return the value for `attr` at `element`.
+ *
+ * @param {Element} el
+ * @param {String} attr
+ * @api public
+ */
+
+module.exports = function(el, attr) {
+  var result = (el.getAttribute && el.getAttribute(attr)) || null;
+  if( !result ) {
+    var attrs = el.attributes;
+    var length = attrs.length;
+    for(var i = 0; i < length; i++) {
+      if (attr[i] !== undefined) {
+        if(attr[i].nodeName === attr) {
+          result = attr[i].nodeValue;
+        }
+      }
+    }
+  }
+  return result;
+}
+});
+require.register("javve-natural-sort/index.js", function(exports, require, module){
+/*
+ * Natural Sort algorithm for Javascript - Version 0.7 - Released under MIT license
+ * Author: Jim Palmer (based on chunking idea from Dave Koelle)
+ */
+
+module.exports = function(a, b, options) {
+  var re = /(^-?[0-9]+(\.?[0-9]*)[df]?e?[0-9]?$|^0x[0-9a-f]+$|[0-9]+)/gi,
+    sre = /(^[ ]*|[ ]*$)/g,
+    dre = /(^([\w ]+,?[\w ]+)?[\w ]+,?[\w ]+\d+:\d+(:\d+)?[\w ]?|^\d{1,4}[\/\-]\d{1,4}[\/\-]\d{1,4}|^\w+, \w+ \d+, \d{4})/,
+    hre = /^0x[0-9a-f]+$/i,
+    ore = /^0/,
+    options = options || {},
+    i = function(s) { return options.insensitive && (''+s).toLowerCase() || ''+s },
+    // convert all to strings strip whitespace
+    x = i(a).replace(sre, '') || '',
+    y = i(b).replace(sre, '') || '',
+    // chunk/tokenize
+    xN = x.replace(re, '\0$1\0').replace(/\0$/,'').replace(/^\0/,'').split('\0'),
+    yN = y.replace(re, '\0$1\0').replace(/\0$/,'').replace(/^\0/,'').split('\0'),
+    // numeric, hex or date detection
+    xD = parseInt(x.match(hre)) || (xN.length != 1 && x.match(dre) && Date.parse(x)),
+    yD = parseInt(y.match(hre)) || xD && y.match(dre) && Date.parse(y) || null,
+    oFxNcL, oFyNcL,
+    mult = options.desc ? -1 : 1;
+  // first try and sort Hex codes or Dates
+  if (yD)
+    if ( xD < yD ) return -1 * mult;
+    else if ( xD > yD ) return 1 * mult;
+  // natural sorting through split numeric strings and default strings
+  for(var cLoc=0, numS=Math.max(xN.length, yN.length); cLoc < numS; cLoc++) {
+    // find floats not starting with '0', string or 0 if not defined (Clint Priest)
+    oFxNcL = !(xN[cLoc] || '').match(ore) && parseFloat(xN[cLoc]) || xN[cLoc] || 0;
+    oFyNcL = !(yN[cLoc] || '').match(ore) && parseFloat(yN[cLoc]) || yN[cLoc] || 0;
+    // handle numeric vs string comparison - number < string - (Kyle Adams)
+    if (isNaN(oFxNcL) !== isNaN(oFyNcL)) { return (isNaN(oFxNcL)) ? 1 : -1; }
+    // rely on string comparison if different types - i.e. '02' < 2 != '02' < '2'
+    else if (typeof oFxNcL !== typeof oFyNcL) {
+      oFxNcL += '';
+      oFyNcL += '';
+    }
+    if (oFxNcL < oFyNcL) return -1 * mult;
+    if (oFxNcL > oFyNcL) return 1 * mult;
+  }
+  return 0;
+};
+
+/*
+var defaultSort = getSortFunction();
+
+module.exports = function(a, b, options) {
+  if (arguments.length == 1) {
+    options = a;
+    return getSortFunction(options);
+  } else {
+    return defaultSort(a,b);
+  }
+}
+*/
+});
+require.register("javve-to-string/index.js", function(exports, require, module){
+module.exports = function(s) {
+    s = (s === undefined) ? "" : s;
+    s = (s === null) ? "" : s;
+    s = s.toString();
+    return s;
+};
+
+});
+require.register("component-type/index.js", function(exports, require, module){
+/**
+ * toString ref.
+ */
+
+var toString = Object.prototype.toString;
+
+/**
+ * Return the type of `val`.
+ *
+ * @param {Mixed} val
+ * @return {String}
+ * @api public
+ */
+
+module.exports = function(val){
+  switch (toString.call(val)) {
+    case '[object Date]': return 'date';
+    case '[object RegExp]': return 'regexp';
+    case '[object Arguments]': return 'arguments';
+    case '[object Array]': return 'array';
+    case '[object Error]': return 'error';
+  }
+
+  if (val === null) return 'null';
+  if (val === undefined) return 'undefined';
+  if (val !== val) return 'nan';
+  if (val && val.nodeType === 1) return 'element';
+
+  return typeof val.valueOf();
+};
+
+});
+require.register("list.js/index.js", function(exports, require, module){
+/*
+ListJS with beta 1.0.0
+By Jonny Strömberg (www.jonnystromberg.com, www.listjs.com)
+*/
+(function( window, undefined ) {
+"use strict";
+
+var document = window.document,
+    getByClass = require('get-by-class'),
+    extend = require('extend'),
+    indexOf = require('indexof');
+
+var List = function(id, options, values) {
+
+    var self = this,
+               init,
+        Item = require('./src/item')(self),
+        addAsync = require('./src/add-async')(self),
+        parse = require('./src/parse')(self);
+
+    init = {
+        start: function() {
+            self.listClass      = "list";
+            self.searchClass    = "search";
+            self.sortClass      = "sort";
+            self.page           = 200;
+            self.i              = 1;
+            self.items          = [];
+            self.visibleItems   = [];
+            self.matchingItems  = [];
+            self.searched       = false;
+            self.filtered       = false;
+            self.handlers       = { 'updated': [] };
+            self.plugins        = {};
+            self.helpers        = {
+                getByClass: getByClass,
+                extend: extend,
+                indexOf: indexOf
+            };
+
+            extend(self, options);
+
+            self.listContainer = (typeof(id) === 'string') ? document.getElementById(id) : id;
+            if (!self.listContainer) { return; }
+            self.list           = getByClass(self.listContainer, self.listClass, true);
+
+            self.templater      = require('./src/templater')(self);
+            self.search         = require('./src/search')(self);
+            self.filter         = require('./src/filter')(self);
+            self.sort           = require('./src/sort')(self);
+
+            this.items();
+            self.update();
+            this.plugins();
+        },
+        items: function() {
+            parse(self.list);
+            if (values !== undefined) {
+                self.add(values);
+            }
+        },
+        plugins: function() {
+            for (var i = 0; i < self.plugins.length; i++) {
+                var plugin = self.plugins[i];
+                self[plugin.name] = plugin;
+                plugin.init(self);
+            }
+        }
+    };
+
+
+    /*
+    * Add object to list
+    */
+    this.add = function(values, callback) {
+        if (callback) {
+            addAsync(values, callback);
+            return;
+        }
+        var added = [],
+            notCreate = false;
+        if (values[0] === undefined){
+            values = [values];
+        }
+        for (var i = 0, il = values.length; i < il; i++) {
+            var item = null;
+            if (values[i] instanceof Item) {
+                item = values[i];
+                item.reload();
+            } else {
+                notCreate = (self.items.length > self.page) ? true : false;
+                item = new Item(values[i], undefined, notCreate);
+            }
+            self.items.push(item);
+            added.push(item);
+        }
+        self.update();
+        return added;
+    };
+
+       this.show = function(i, page) {
+               this.i = i;
+               this.page = page;
+               self.update();
+        return self;
+       };
+
+    /* Removes object from list.
+    * Loops through the list and removes objects where
+    * property "valuename" === value
+    */
+    this.remove = function(valueName, value, options) {
+        var found = 0;
+        for (var i = 0, il = self.items.length; i < il; i++) {
+            if (self.items[i].values()[valueName] == value) {
+                self.templater.remove(self.items[i], options);
+                self.items.splice(i,1);
+                il--;
+                i--;
+                found++;
+            }
+        }
+        self.update();
+        return found;
+    };
+
+    /* Gets the objects in the list which
+    * property "valueName" === value
+    */
+    this.get = function(valueName, value) {
+        var matchedItems = [];
+        for (var i = 0, il = self.items.length; i < il; i++) {
+            var item = self.items[i];
+            if (item.values()[valueName] == value) {
+                matchedItems.push(item);
+            }
+        }
+        return matchedItems;
+    };
+
+    /*
+    * Get size of the list
+    */
+    this.size = function() {
+        return self.items.length;
+    };
+
+    /*
+    * Removes all items from the list
+    */
+    this.clear = function() {
+        self.templater.clear();
+        self.items = [];
+        return self;
+    };
+
+    this.on = function(event, callback) {
+        self.handlers[event].push(callback);
+        return self;
+    };
+
+    this.off = function(event, callback) {
+        var e = self.handlers[event];
+        var index = indexOf(e, callback);
+        if (index > -1) {
+            e.splice(index, 1);
+        }
+        return self;
+    };
+
+    this.trigger = function(event) {
+        var i = self.handlers[event].length;
+        while(i--) {
+            self.handlers[event][i](self);
+        }
+        return self;
+    };
+
+    this.reset = {
+        filter: function() {
+            var is = self.items,
+                il = is.length;
+            while (il--) {
+                is[il].filtered = false;
+            }
+            return self;
+        },
+        search: function() {
+            var is = self.items,
+                il = is.length;
+            while (il--) {
+                is[il].found = false;
+            }
+            return self;
+        }
+    };
+
+    this.update = function() {
+        var is = self.items,
+                       il = is.length;
+
+        self.visibleItems = [];
+        self.matchingItems = [];
+        self.templater.clear();
+        for (var i = 0; i < il; i++) {
+            if (is[i].matching() && ((self.matchingItems.length+1) >= self.i && self.visibleItems.length < self.page)) {
+                is[i].show();
+                self.visibleItems.push(is[i]);
+                self.matchingItems.push(is[i]);
+                       } else if (is[i].matching()) {
+                self.matchingItems.push(is[i]);
+                is[i].hide();
+                       } else {
+                is[i].hide();
+                       }
+        }
+        self.trigger('updated');
+        return self;
+    };
+
+    init.start();
+};
+
+module.exports = List;
+
+})(window);
+
+});
+require.register("list.js/src/search.js", function(exports, require, module){
+var events = require('events'),
+    getByClass = require('get-by-class'),
+    toString = require('to-string');
+
+module.exports = function(list) {
+    var item,
+        text,
+        columns,
+        searchString,
+        customSearch;
+
+    var prepare = {
+        resetList: function() {
+            list.i = 1;
+            list.templater.clear();
+            customSearch = undefined;
+        },
+        setOptions: function(args) {
+            if (args.length == 2 && args[1] instanceof Array) {
+                columns = args[1];
+            } else if (args.length == 2 && typeof(args[1]) == "function") {
+                customSearch = args[1];
+            } else if (args.length == 3) {
+                columns = args[1];
+                customSearch = args[2];
+            }
+        },
+        setColumns: function() {
+            columns = (columns === undefined) ? prepare.toArray(list.items[0].values()) : columns;
+        },
+        setSearchString: function(s) {
+            s = toString(s).toLowerCase();
+            s = s.replace(/[-[\]{}()*+?.,\\^$|#]/g, "\\$&"); // Escape regular expression characters
+            searchString = s;
+        },
+        toArray: function(values) {
+            var tmpColumn = [];
+            for (var name in values) {
+                tmpColumn.push(name);
+            }
+            return tmpColumn;
+        }
+    };
+    var search = {
+        list: function() {
+            for (var k = 0, kl = list.items.length; k < kl; k++) {
+                search.item(list.items[k]);
+            }
+        },
+        item: function(item) {
+            item.found = false;
+            for (var j = 0, jl = columns.length; j < jl; j++) {
+                if (search.values(item.values(), columns[j])) {
+                    item.found = true;
+                    return;
+                }
+            }
+        },
+        values: function(values, column) {
+            if (values.hasOwnProperty(column)) {
+                text = toString(values[column]).toLowerCase();
+                if ((searchString !== "") && (text.search(searchString) > -1)) {
+                    return true;
+                }
+            }
+            return false;
+        },
+        reset: function() {
+            list.reset.search();
+            list.searched = false;
+        }
+    };
+
+    var searchMethod = function(str) {
+        list.trigger('searchStart');
+
+        prepare.resetList();
+        prepare.setSearchString(str);
+        prepare.setOptions(arguments); // str, cols|searchFunction, searchFunction
+        prepare.setColumns();
+
+        if (searchString === "" ) {
+            search.reset();
+        } else {
+            list.searched = true;
+            if (customSearch) {
+                customSearch(searchString, columns);
+            } else {
+                search.list();
+            }
+        }
+
+        list.update();
+        list.trigger('searchComplete');
+        return list.visibleItems;
+    };
+
+    list.handlers.searchStart = list.handlers.searchStart || [];
+    list.handlers.searchComplete = list.handlers.searchComplete || [];
+
+    events.bind(getByClass(list.listContainer, list.searchClass), 'keyup', function(e) {
+        var target = e.target || e.srcElement, // IE have srcElement
+            alreadyCleared = (target.value === "" && !list.searched);
+        if (!alreadyCleared) { // If oninput already have resetted the list, do nothing
+            searchMethod(target.value);
+        }
+    });
+
+    // Used to detect click on HTML5 clear button
+    events.bind(getByClass(list.listContainer, list.searchClass), 'input', function(e) {
+        var target = e.target || e.srcElement;
+        if (target.value === "") {
+            searchMethod('');
+        }
+    });
+
+    list.helpers.toString = toString;
+    return searchMethod;
+};
+
+});
+require.register("list.js/src/sort.js", function(exports, require, module){
+var naturalSort = require('natural-sort'),
+    classes = require('classes'),
+    events = require('events'),
+    getByClass = require('get-by-class'),
+    getAttribute = require('get-attribute');
+
+module.exports = function(list) {
+    list.sortFunction = list.sortFunction || function(itemA, itemB, options) {
+        options.desc = options.order == "desc" ? true : false; // Natural sort uses this format
+        return naturalSort(itemA.values()[options.valueName], itemB.values()[options.valueName], options);
+    };
+
+    var buttons = {
+        els: undefined,
+        clear: function() {
+            for (var i = 0, il = buttons.els.length; i < il; i++) {
+                classes(buttons.els[i]).remove('asc');
+                classes(buttons.els[i]).remove('desc');
+            }
+        },
+        getOrder: function(btn) {
+            var predefinedOrder = getAttribute(btn, 'data-order');
+            if (predefinedOrder == "asc" || predefinedOrder == "desc") {
+                return predefinedOrder;
+            } else if (classes(btn).has('desc')) {
+                return "asc";
+            } else if (classes(btn).has('asc')) {
+                return "desc";
+            } else {
+                return "asc";
+            }
+        },
+        getInSensitive: function(btn, options) {
+            var insensitive = getAttribute(btn, 'data-insensitive');
+            if (insensitive === "true") {
+                options.insensitive = true;
+            } else {
+                options.insensitive = false;
+            }
+        },
+        setOrder: function(options) {
+            for (var i = 0, il = buttons.els.length; i < il; i++) {
+                var btn = buttons.els[i];
+                if (getAttribute(btn, 'data-sort') !== options.valueName) {
+                    continue;
+                }
+                var predefinedOrder = getAttribute(btn, 'data-order');
+                if (predefinedOrder == "asc" || predefinedOrder == "desc") {
+                    if (predefinedOrder == options.order) {
+                        classes(btn).add(options.order);
+                    }
+                } else {
+                    classes(btn).add(options.order);
+                }
+            }
+        }
+    };
+    var sort = function() {
+        list.trigger('sortStart');
+        options = {};
+
+        var target = arguments[0].currentTarget || arguments[0].srcElement || undefined;
+
+        if (target) {
+            options.valueName = getAttribute(target, 'data-sort');
+            buttons.getInSensitive(target, options);
+            options.order = buttons.getOrder(target);
+        } else {
+            options = arguments[1] || options;
+            options.valueName = arguments[0];
+            options.order = options.order || "asc";
+            options.insensitive = (typeof options.insensitive == "undefined") ? true : options.insensitive;
+        }
+        buttons.clear();
+        buttons.setOrder(options);
+
+        options.sortFunction = options.sortFunction || list.sortFunction;
+        list.items.sort(function(a, b) {
+            return options.sortFunction(a, b, options);
+        });
+        list.update();
+        list.trigger('sortComplete');
+    };
+
+    // Add handlers
+    list.handlers.sortStart = list.handlers.sortStart || [];
+    list.handlers.sortComplete = list.handlers.sortComplete || [];
+
+    buttons.els = getByClass(list.listContainer, list.sortClass);
+    events.bind(buttons.els, 'click', sort);
+    list.on('searchStart', buttons.clear);
+    list.on('filterStart', buttons.clear);
+
+    // Helpers
+    list.helpers.classes = classes;
+    list.helpers.naturalSort = naturalSort;
+    list.helpers.events = events;
+    list.helpers.getAttribute = getAttribute;
+
+    return sort;
+};
+
+});
+require.register("list.js/src/item.js", function(exports, require, module){
+module.exports = function(list) {
+    return function(initValues, element, notCreate) {
+        var item = this;
+
+        this._values = {};
+
+        this.found = false; // Show if list.searched == true and this.found == true
+        this.filtered = false;// Show if list.filtered == true and this.filtered == true
+
+        var init = function(initValues, element, notCreate) {
+            if (element === undefined) {
+                if (notCreate) {
+                    item.values(initValues, notCreate);
+                } else {
+                    item.values(initValues);
+                }
+            } else {
+                item.elm = element;
+                var values = list.templater.get(item, initValues);
+                item.values(values);
+            }
+        };
+        this.values = function(newValues, notCreate) {
+            if (newValues !== undefined) {
+                for(var name in newValues) {
+                    item._values[name] = newValues[name];
+                }
+                if (notCreate !== true) {
+                    list.templater.set(item, item.values());
+                }
+            } else {
+                return item._values;
+            }
+        };
+        this.show = function() {
+            list.templater.show(item);
+        };
+        this.hide = function() {
+            list.templater.hide(item);
+        };
+        this.matching = function() {
+            return (
+                (list.filtered && list.searched && item.found && item.filtered) ||
+                (list.filtered && !list.searched && item.filtered) ||
+                (!list.filtered && list.searched && item.found) ||
+                (!list.filtered && !list.searched)
+            );
+        };
+        this.visible = function() {
+            return (item.elm.parentNode == list.list) ? true : false;
+        };
+        init(initValues, element, notCreate);
+    };
+};
+
+});
+require.register("list.js/src/templater.js", function(exports, require, module){
+var getByClass = require('get-by-class');
+
+var Templater = function(list) {
+    var itemSource = getItemSource(list.item),
+        templater = this;
+
+    function getItemSource(item) {
+        if (item === undefined) {
+            var nodes = list.list.childNodes,
+                items = [];
+
+            for (var i = 0, il = nodes.length; i < il; i++) {
+                // Only textnodes have a data attribute
+                if (nodes[i].data === undefined) {
+                    return nodes[i];
+                }
+            }
+            return null;
+        } else if (item.indexOf("<") !== -1) { // Try create html element of list, do not work for tables!!
+            var div = document.createElement('div');
+            div.innerHTML = item;
+            return div.firstChild;
+        } else {
+            return document.getElementById(list.item);
+        }
+    }
+
+    /* Get values from element */
+    this.get = function(item, valueNames) {
+        templater.create(item);
+        var values = {};
+        for(var i = 0, il = valueNames.length; i < il; i++) {
+            var elm = getByClass(item.elm, valueNames[i], true);
+            values[valueNames[i]] = elm ? elm.innerHTML : "";
+        }
+        return values;
+    };
+
+    /* Sets values at element */
+    this.set = function(item, values) {
+        if (!templater.create(item)) {
+            for(var v in values) {
+                if (values.hasOwnProperty(v)) {
+                    // TODO speed up if possible
+                    var elm = getByClass(item.elm, v, true);
+                    if (elm) {
+                        /* src attribute for image tag & text for other tags */
+                        if (elm.tagName === "IMG" && values[v] !== "") {
+                            elm.src = values[v];
+                        } else {
+                            elm.innerHTML = values[v];
+                        }
+                    }
+                }
+            }
+        }
+    };
+
+    this.create = function(item) {
+        if (item.elm !== undefined) {
+            return false;
+        }
+        /* If item source does not exists, use the first item in list as
+        source for new items */
+        var newItem = itemSource.cloneNode(true);
+        newItem.removeAttribute('id');
+        item.elm = newItem;
+        templater.set(item, item.values());
+        return true;
+    };
+    this.remove = function(item) {
+        list.list.removeChild(item.elm);
+    };
+    this.show = function(item) {
+        templater.create(item);
+        list.list.appendChild(item.elm);
+    };
+    this.hide = function(item) {
+        if (item.elm !== undefined && item.elm.parentNode === list.list) {
+            list.list.removeChild(item.elm);
+        }
+    };
+    this.clear = function() {
+        /* .innerHTML = ''; fucks up IE */
+        if (list.list.hasChildNodes()) {
+            while (list.list.childNodes.length >= 1)
+            {
+                list.list.removeChild(list.list.firstChild);
+            }
+        }
+    };
+};
+
+module.exports = function(list) {
+    return new Templater(list);
+};
+
+});
+require.register("list.js/src/filter.js", function(exports, require, module){
+module.exports = function(list) {
+
+    // Add handlers
+    list.handlers.filterStart = list.handlers.filterStart || [];
+    list.handlers.filterComplete = list.handlers.filterComplete || [];
+
+    return function(filterFunction) {
+        list.trigger('filterStart');
+        list.i = 1; // Reset paging
+        list.reset.filter();
+        if (filterFunction === undefined) {
+            list.filtered = false;
+        } else {
+            list.filtered = true;
+            var is = list.items;
+            for (var i = 0, il = is.length; i < il; i++) {
+                var item = is[i];
+                if (filterFunction(item)) {
+                    item.filtered = true;
+                } else {
+                    item.filtered = false;
+                }
+            }
+        }
+        list.update();
+        list.trigger('filterComplete');
+        return list.visibleItems;
+    };
+};
+
+});
+require.register("list.js/src/add-async.js", function(exports, require, module){
+module.exports = function(list) {
+    return function addAsync(values, callback, items) {
+        var valuesToAdd = values.splice(0, 100);
+        items = items || [];
+        items = items.concat(list.add(valuesToAdd));
+        if (values.length > 0) {
+            setTimeout(function() {
+                addAsync(values, callback, items);
+            }, 10);
+        } else {
+            list.update();
+            callback(items);
+        }
+    };
+};
+});
+require.register("list.js/src/parse.js", function(exports, require, module){
+module.exports = function(list) {
+
+    var Item = require('./item')(list);
+
+    var getChildren = function(parent) {
+        var nodes = parent.childNodes,
+            items = [];
+        for (var i = 0, il = nodes.length; i < il; i++) {
+            // Only textnodes have a data attribute
+            if (nodes[i].data === undefined) {
+                items.push(nodes[i]);
+            }
+        }
+        return items;
+    };
+
+    var parse = function(itemElements, valueNames) {
+        for (var i = 0, il = itemElements.length; i < il; i++) {
+            list.items.push(new Item(valueNames, itemElements[i]));
+        }
+    };
+    var parseAsync = function(itemElements, valueNames) {
+        var itemsToIndex = itemElements.splice(0, 100); // TODO: If < 100 items, what happens in IE etc?
+        parse(itemsToIndex, valueNames);
+        if (itemElements.length > 0) {
+            setTimeout(function() {
+                parseAsync(itemElements, valueNames);
+            }, 10);
+        } else {
+            list.update();
+            // TODO: Add indexed callback
+        }
+    };
+
+    return function() {
+        var itemsToIndex = getChildren(list.list),
+            valueNames = list.valueNames;
+
+        if (list.indexAsync) {
+            parseAsync(itemsToIndex, valueNames);
+        } else {
+            parse(itemsToIndex, valueNames);
+        }
+    };
+};
+
+});
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+require.alias("component-classes/index.js", "list.js/deps/classes/index.js");
+require.alias("component-classes/index.js", "classes/index.js");
+require.alias("component-indexof/index.js", "component-classes/deps/indexof/index.js");
+
+require.alias("segmentio-extend/index.js", "list.js/deps/extend/index.js");
+require.alias("segmentio-extend/index.js", "extend/index.js");
+
+require.alias("component-indexof/index.js", "list.js/deps/indexof/index.js");
+require.alias("component-indexof/index.js", "indexof/index.js");
+
+require.alias("javve-events/index.js", "list.js/deps/events/index.js");
+require.alias("javve-events/index.js", "events/index.js");
+require.alias("component-event/index.js", "javve-events/deps/event/index.js");
+
+require.alias("timoxley-to-array/index.js", "javve-events/deps/to-array/index.js");
+
+require.alias("javve-get-by-class/index.js", "list.js/deps/get-by-class/index.js");
+require.alias("javve-get-by-class/index.js", "get-by-class/index.js");
+
+require.alias("javve-get-attribute/index.js", "list.js/deps/get-attribute/index.js");
+require.alias("javve-get-attribute/index.js", "get-attribute/index.js");
+
+require.alias("javve-natural-sort/index.js", "list.js/deps/natural-sort/index.js");
+require.alias("javve-natural-sort/index.js", "natural-sort/index.js");
+
+require.alias("javve-to-string/index.js", "list.js/deps/to-string/index.js");
+require.alias("javve-to-string/index.js", "list.js/deps/to-string/index.js");
+require.alias("javve-to-string/index.js", "to-string/index.js");
+require.alias("javve-to-string/index.js", "javve-to-string/index.js");
+require.alias("component-type/index.js", "list.js/deps/type/index.js");
+require.alias("component-type/index.js", "type/index.js");
+if (typeof exports == "object") {
+  module.exports = require("list.js");
+} else if (typeof define == "function" && define.amd) {
+  define(function(){ return require("list.js"); });
+} else {
+  this["List"] = require("list.js");
+}})();
\ No newline at end of file
diff --git a/apps/workbench/app/assets/javascripts/log_viewer.js b/apps/workbench/app/assets/javascripts/log_viewer.js
new file mode 100644 (file)
index 0000000..0e12f9c
--- /dev/null
@@ -0,0 +1,282 @@
+function newTaskState() {
+    return {"complete_count": 0,
+            "failure_count": 0,
+            "task_count": 0,
+            "incomplete_count": 0,
+            "nodes": []};
+}
+
+function addToLogViewer(logViewer, lines, taskState) {
+    var re = /((\d\d\d\d)-(\d\d)-(\d\d))_((\d\d):(\d\d):(\d\d)) ([a-z0-9]{5}-[a-z0-9]{5}-[a-z0-9]{15}) (\d+) (\d+)? (.*)/;
+
+    var items = [];
+    var count = logViewer.items.length;
+    for (var a in lines) {
+        var v = lines[a].match(re);
+        if (v != null) {
+
+            var ts = new Date(Date.UTC(v[2], v[3]-1, v[4], v[6], v[7], v[8]));
+
+            var v11 = v[11];
+            if (typeof v[11] === 'undefined') {
+                v11 = "";
+            } else {
+                v11 = Number(v11);
+            }
+
+            var message = v[12];
+            var type = "";
+            var node = "";
+            var slot = "";
+            if (v11 !== "") {
+                if (!taskState.hasOwnProperty(v11)) {
+                    taskState[v11] = {};
+                    taskState.task_count += 1;
+                }
+
+                if (/^stderr /.test(message)) {
+                    message = message.substr(7);
+                    if (/^crunchstat: /.test(message)) {
+                        type = "crunchstat";
+                        message = message.substr(12);
+                    } else if (/^srun: /.test(message) || /^slurmd/.test(message)) {
+                        type = "task-dispatch";
+                    } else {
+                        type = "task-print";
+                    }
+                } else {
+                    var m;
+                    if (m = /^success in (\d+) second/.exec(message)) {
+                        taskState[v11].outcome = "success";
+                        taskState[v11].runtime = Number(m[1]);
+                        taskState.complete_count += 1;
+                    }
+                    else if (m = /^failure \(\#\d+, (temporary|permanent)\) after (\d+) second/.exec(message)) {
+                        taskState[v11].outcome = "failure";
+                        taskState[v11].runtime = Number(m[2]);
+                        taskState.failure_count += 1;
+                        if (m[1] == "permanent") {
+                            taskState.incomplete_count += 1;
+                        }
+                    }
+                    else if (m = /^child \d+ started on ([^.]*)\.(\d+)/.exec(message)) {
+                        taskState[v11].node = m[1];
+                        taskState[v11].slot = m[2];
+                        if (taskState.nodes.indexOf(m[1], 0) == -1) {
+                            taskState.nodes.push(m[1]);
+                        }
+                        for (var i in items) {
+                            if (i > 0) {
+                                if (items[i].taskid === v11) {
+                                    items[i].node = m[1];
+                                    items[i].slot = m[2];
+                                }
+                            }
+                        }
+                    }
+                    type = "task-dispatch";
+                }
+                node = taskState[v11].node;
+                slot = taskState[v11].slot;
+            } else {
+                type = "crunch";
+            }
+
+            items.push({
+                id: count,
+                ts: ts,
+                timestamp: ts.toLocaleDateString() + " " + ts.toLocaleTimeString(),
+                taskid: v11,
+                node: node,
+                slot: slot,
+                message: message.replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;'),
+                type: type
+            });
+            count += 1;
+        } else {
+            console.log("Did not parse line " + a + ": " + lines[a]);
+        }
+    }
+    logViewer.add(items);
+}
+
+function sortById(a, b, opt) {
+    a = a.values();
+    b = b.values();
+
+    if (a["id"] > b["id"]) {
+        return 1;
+    }
+    if (a["id"] < b["id"]) {
+        return -1;
+    }
+    return 0;
+}
+
+function sortByTask(a, b, opt) {
+    var aa = a.values();
+    var bb = b.values();
+
+    if (aa["taskid"] === "" && bb["taskid"] !== "") {
+        return -1;
+    }
+    if (aa["taskid"] !== "" && bb["taskid"] === "") {
+        return 1;
+    }
+
+    if (aa["taskid"] !== "" && bb["taskid"] !== "") {
+        if (aa["taskid"] > bb["taskid"]) {
+            return 1;
+        }
+        if (aa["taskid"] < bb["taskid"]) {
+            return -1;
+        }
+    }
+
+    return sortById(a, b, opt);
+}
+
+function sortByNode(a, b, opt) {
+    var aa = a.values();
+    var bb = b.values();
+
+    if (aa["node"] === "" && bb["node"] !== "") {
+        return -1;
+    }
+    if (aa["node"] !== "" && bb["node"] === "") {
+        return 1;
+    }
+
+    if (aa["node"] !== "" && bb["node"] !== "") {
+        if (aa["node"] > bb["node"]) {
+            return 1;
+        }
+        if (aa["node"] < bb["node"]) {
+            return -1;
+        }
+    }
+
+    if (aa["slot"] !== "" && bb["slot"] !== "") {
+        if (aa["slot"] > bb["slot"]) {
+            return 1;
+        }
+        if (aa["slot"] < bb["slot"]) {
+            return -1;
+        }
+    }
+
+    return sortById(a, b, opt);
+}
+
+
+function dumbPluralize(n, s, p) {
+    if (typeof p === 'undefined') {
+        p = "s";
+    }
+    if (n == 0 || n > 1) {
+        return n + " " + (s + p);
+    } else {
+        return n + " " + s;
+    }
+}
+
+function generateJobOverview(id, logViewer, taskState) {
+    var html = "";
+
+    if (logViewer.items.length > 2) {
+        var first = logViewer.items[1];
+        var last = logViewer.items[logViewer.items.length-1];
+        var duration = (last.values().ts.getTime() - first.values().ts.getTime()) / 1000;
+
+        var hours = 0;
+        var minutes = 0;
+        var seconds;
+
+        if (duration >= 3600) {
+            hours = Math.floor(duration / 3600);
+            duration -= (hours * 3600);
+        }
+        if (duration >= 60) {
+            minutes = Math.floor(duration / 60);
+            duration -= (minutes * 60);
+        }
+        seconds = duration;
+
+        var tcount = taskState.task_count;
+
+        html += "<p>";
+        html += "Started at " + first.values().timestamp + ".  ";
+        html += "Ran " + dumbPluralize(tcount, " task") + " over ";
+        if (hours > 0) {
+            html += dumbPluralize(hours, " hour");
+        }
+        if (minutes > 0) {
+            html += " " + dumbPluralize(minutes, " minute");
+        }
+        if (seconds > 0) {
+            html += " " + dumbPluralize(seconds, " second");
+        }
+
+        html += " using " + dumbPluralize(taskState.nodes.length, " node");
+
+        html += ".  " + dumbPluralize(taskState.complete_count, "task") + " completed";
+        html += ",  " + dumbPluralize(taskState.incomplete_count, "task") +  " incomplete";
+        html += " (" + dumbPluralize(taskState.failure_count, " failure") + ")";
+
+        html += ".  Finished at " + last.values().timestamp + ".";
+        html += "</p>";
+    } else {
+       html = "<p>Job log is empty or failed to load.</p>";
+    }
+
+    $(id).html(html);
+}
+
+function gotoPage(n, logViewer, page, id) {
+    if (n < 0) { return; }
+    if (n*page > logViewer.matchingItems.length) { return; }
+    logViewer.page_offset = n;
+    logViewer.show(n*page, page);
+}
+
+function updatePaging(id, logViewer, page) {
+    var p = "";
+    var i = logViewer.matchingItems.length;
+    var n;
+    for (n = 0; (n*page) < i; n += 1) {
+        if (n == logViewer.page_offset) {
+            p += "<span class='log-viewer-page-num'>" + (n+1) + "</span> ";
+        } else {
+            p += "<a href=\"#\" class='log-viewer-page-num log-viewer-page-" + n + "'>" + (n+1) + "</a> ";
+        }
+    }
+    $(id).html(p);
+    for (n = 0; (n*page) < i; n += 1) {
+        (function(n) {
+            $(".log-viewer-page-" + n).on("click", function() {
+                gotoPage(n, logViewer, page, id);
+                return false;
+            });
+        })(n);
+    }
+
+    if (logViewer.page_offset == 0) {
+        $(".log-viewer-page-up").addClass("text-muted");
+    } else {
+        $(".log-viewer-page-up").removeClass("text-muted");
+    }
+
+    if (logViewer.page_offset == (n-1)) {
+        $(".log-viewer-page-down").addClass("text-muted");
+    } else {
+        $(".log-viewer-page-down").removeClass("text-muted");
+    }
+}
+
+function nextPage(logViewer, page, id) {
+    gotoPage(logViewer.page_offset+1, logViewer, page, id);
+}
+
+function prevPage(logViewer, page, id) {
+    gotoPage(logViewer.page_offset-1, logViewer, page, id);
+}
diff --git a/apps/workbench/app/assets/javascripts/permission_toggle.js b/apps/workbench/app/assets/javascripts/permission_toggle.js
new file mode 100644 (file)
index 0000000..00c1718
--- /dev/null
@@ -0,0 +1,55 @@
+$(document).
+    on('click', '[data-toggle-permission] input[type=checkbox]', function() {
+        var data = {};
+        var keys = ['data-permission-uuid',
+                    'data-permission-name',
+                    'data-permission-head',
+                    'data-permission-tail'];
+        var attr;
+        for(var i in keys) {
+            attr = keys[i];
+            data[attr] = $(this).closest('[' + attr + ']').attr(attr);
+            if (data[attr] === undefined) {
+                console.log(["Error: no " + attr + " established here.", this]);
+                return;
+            }
+        }
+        var is_checked = $(this).prop('checked');
+
+        if (is_checked) {
+            $.ajax('/links',
+                   {dataType: 'json',
+                    type: 'POST',
+                    data: {'link[tail_uuid]': data['data-permission-tail'],
+                           'link[head_uuid]': data['data-permission-head'],
+                           'link[link_class]': 'permission',
+                           'link[name]': data['data-permission-name']},
+                    context: this}).
+                fail(function(jqxhr, status, error) {
+                    $(this).prop('checked', false);
+                }).
+                done(function(data, status, jqxhr) {
+                    $(this).attr('data-permission-uuid', data['uuid']);
+                }).
+                always(function() {
+                    $(this).prop('disabled', false);
+                });
+        }
+        else {
+            $.ajax('/links/' + data['data-permission-uuid'],
+                   {dataType: 'json',
+                    type: 'POST',
+                    data: {'_method': 'DELETE'},
+                    context: this}).
+                fail(function(jqxhr, status, error) {
+                    $(this).prop('checked', true);
+                }).
+                done(function(data, status, jqxhr) {
+                    $(this).attr('data-permission-uuid', 'x');
+                }).
+                always(function() {
+                    $(this).prop('disabled', false);
+                });
+        }
+        $(this).prop('disabled', true);
+    });
diff --git a/apps/workbench/app/assets/javascripts/pipeline_instances.js b/apps/workbench/app/assets/javascripts/pipeline_instances.js
new file mode 100644 (file)
index 0000000..e820ba9
--- /dev/null
@@ -0,0 +1,129 @@
+function run_pipeline_button_state() {
+    var a = $('a.editable.required.editable-empty,input.form-control.required[value=""]');
+    if (a.length > 0) {
+        $(".run-pipeline-button").addClass("disabled");
+    }
+    else {
+        $(".run-pipeline-button").removeClass("disabled");
+    }
+}
+
+$(document).on('editable:success', function(event, tag, response, newValue) {
+    var $tag = $(tag);
+    if ($('.run-pipeline-button').length == 0)
+        return;
+    if ($tag.hasClass("required")) {
+        if (newValue && newValue.trim() != "") {
+            $tag.removeClass("editable-empty");
+            $tag.parent().css("background-color", "");
+            $tag.parent().prev().css("background-color", "");
+        }
+        else {
+            $tag.addClass("editable-empty");
+            $tag.parent().css("background-color", "#ffdddd");
+            $tag.parent().prev().css("background-color", "#ffdddd");
+        }
+    }
+    if ($tag.attr('data-name')) {
+        // Update other inputs representing the same piece of data
+        $('.editable[data-name="' + $tag.attr('data-name') + '"]').
+            editable('setValue', newValue);
+    }
+    run_pipeline_button_state();
+});
+
+$(document).on('ready ajax:complete', function() {
+    $('a.editable.required').each(function() {
+        var $tag = $(this);
+        if ($tag.hasClass("editable-empty")) {
+            $tag.parent().css("background-color", "#ffdddd");
+            $tag.parent().prev().css("background-color", "#ffdddd");
+        }
+        else {
+            $tag.parent().css("background-color", "");
+            $tag.parent().prev().css("background-color", "");
+        }
+    });
+    run_pipeline_button_state();
+});
+
+$(document).on('arv-log-event', '.arv-refresh-on-state-change', function(event, eventData) {
+    if (this != event.target) {
+        // Not interested in events sent to child nodes.
+        return;
+    }
+    if (eventData.event_type == "update" &&
+        eventData.properties.old_attributes.state != eventData.properties.new_attributes.state)
+    {
+        $(event.target).trigger('arv:pane:reload');
+    }
+});
+
+$(document).on('arv-log-event', '.arv-log-event-subscribe-to-pipeline-job-uuids', function(event, eventData){
+    if (this != event.target) {
+        // Not interested in events sent to child nodes.
+        return;
+    }
+    if (!((eventData.object_kind == 'arvados#pipelineInstance') &&
+          (eventData.event_type == "create" ||
+           eventData.event_type == "update") &&
+         eventData.properties &&
+         eventData.properties.new_attributes &&
+         eventData.properties.new_attributes.components)) {
+        return;
+    }
+    var objs = "";
+    var components = eventData.properties.new_attributes.components;
+    for (a in components) {
+        if (components[a].job && components[a].job.uuid) {
+            objs += " " + components[a].job.uuid;
+        }
+    }
+    $(event.target).attr("data-object-uuids", eventData.object_uuid + objs);
+});
+
+$(document).on('ready ajax:success', function() {
+    $('.arv-log-refresh-control').each(function() {
+        var uuids = $(this).attr('data-object-uuids');
+        var $pane = $(this).closest('[data-pane-content-url]');
+        $pane.attr('data-object-uuids', uuids);
+    });
+});
+
+$(document).on('arv-log-event', '.arv-log-event-handler-append-logs', function(event, eventData){
+    if (this != event.target) {
+        // Not interested in events sent to child nodes.
+        return;
+    }
+    var wasatbottom = ($(this).scrollTop() + $(this).height() >= this.scrollHeight);
+
+    if (eventData.event_type == "stderr" || eventData.event_type == "stdout") {
+        if( eventData.prepend ) {
+            $(this).prepend(eventData.properties.text);
+        } else {
+            $(this).append(eventData.properties.text);
+        }
+    }
+
+    if (wasatbottom) {
+        this.scrollTop = this.scrollHeight;
+    }
+});
+
+// Set up all events for the pipeline instances compare button.
+(function() {
+    var compare_form = '#compare';
+    var compare_inputs = '#comparedInstances :checkbox[name="uuids[]"]';
+    var update_button = function(event) {
+        var $form = $(compare_form);
+        var $checked_inputs = $(compare_inputs).filter(':checked');
+        $(':submit', $form).prop('disabled', (($checked_inputs.length < 2) ||
+                                              ($checked_inputs.length > 3)));
+        $('input[name="uuids[]"]', $form).remove();
+        $form.append($checked_inputs.clone()
+                     .removeAttr('id').attr('type', 'hidden'));
+    };
+    $(document)
+        .on('ready ajax:success', compare_form, update_button)
+        .on('change', compare_inputs, update_button);
+})();
diff --git a/apps/workbench/app/assets/javascripts/report_issue.js b/apps/workbench/app/assets/javascripts/report_issue.js
new file mode 100644 (file)
index 0000000..f3c323c
--- /dev/null
@@ -0,0 +1,30 @@
+$(document).
+  on('click', "#report-issue-submit", function(e){
+    $(this).html('Sending');
+    $(this).prop('disabled', true);
+    var $cancelButton = $('#report-issue-cancel');
+    if ($cancelButton) {
+      $cancelButton.html('Close');
+    }
+    $('div').remove('.modal-footer-status');
+
+    $.ajax('/').
+      success(function(data, status, jqxhr) {
+        var $sendButton = $('#report-issue-submit');
+        $sendButton.html('Report sent');
+        $('div').remove('.modal-footer-status');
+        $('.modal-footer').append('<div><br/></div><div class="modal-footer-status alert alert-success"><p class="contain-align-left">Thanks for reporting this issue!</p></div>');
+      }).
+      fail(function(jqxhr, status, error) {
+        var $sendButton = $('#report-issue-submit');
+        if ($sendButton && $sendButton.prop('disabled')) {
+          $('div').remove('.modal-footer-status');
+          $('.modal-footer').append('<div><br/></div><div class="modal-footer-status alert alert-danger"><p class="contain-align-left">We are sorry. We could not submit your report! We really want this to work, though -- please try again.</p></div>');
+          $sendButton.html('Send problem report');
+          $sendButton.prop('disabled', false);
+        }
+        var $cancelButton = $('#report-issue-cancel');
+        $cancelButton.html('Cancel');
+      });
+
+  });
diff --git a/apps/workbench/app/assets/javascripts/select_modal.js b/apps/workbench/app/assets/javascripts/select_modal.js
new file mode 100644 (file)
index 0000000..3b51faa
--- /dev/null
@@ -0,0 +1,179 @@
+$(document).on('click', '.selectable', function() {
+    var any;
+    var $this = $(this);
+    var $container = $(this).closest('.selectable-container');
+    if (!$container.hasClass('multiple')) {
+        $container.
+            find('.selectable').
+            removeClass('active');
+    }
+    $this.toggleClass('active');
+
+    if (!$this.hasClass('use-preview-selection')) {
+      any = ($container.
+           find('.selectable.active').length > 0)
+    }
+
+    if (!$container.hasClass('preview-selectable-container')) {
+      $this.
+        closest('.modal').
+        find('[data-enable-if-selection]').
+        prop('disabled', !any);
+
+      if ($this.hasClass('active')) {
+        var no_preview_available = '<div class="spinner-h-center spinner-v-center"><center>(No preview available)</center></div>';
+        if (!$this.attr('data-preview-href')) {
+            $(".modal-dialog-preview-pane").html(no_preview_available);
+            return;
+        }
+        $(".modal-dialog-preview-pane").html('<div class="spinner spinner-32px spinner-h-center spinner-v-center"></div>');
+        $.ajax($this.attr('data-preview-href'),
+               {dataType: "html"}).
+            done(function(data, status, jqxhr) {
+                $(".modal-dialog-preview-pane").html(data);
+            }).
+            fail(function(data, status, jqxhr) {
+                $(".modal-dialog-preview-pane").html(no_preview_available);
+            });
+      }
+    } else {
+      any = ($container.
+           find('.preview-selectable.active').length > 0)
+      $(this).
+          closest('.modal').
+          find('[data-enable-if-selection]').
+          prop('disabled', !any);
+    }
+
+}).on('click', '.modal button[data-action-href]', function() {
+    var selection = [];
+    var data = [];
+    var $modal = $(this).closest('.modal');
+    var http_method = $(this).attr('data-method').toUpperCase();
+    var action_data = $(this).data('action-data');
+    var action_data_from_params = $(this).data('action-data-from-params');
+    var selection_param = action_data.selection_param;
+    $modal.find('.modal-error').removeClass('hide').hide();
+
+    var $preview_selections = $modal.find('.preview-selectable.active');
+    if ($preview_selections.length > 0) {
+      data.push({name: selection_param, value: $preview_selections.first().attr('href')});
+    }
+
+    if (data.length == 0) {   // not using preview selection option
+      $modal.find('.selectable.active[data-object-uuid]').each(function() {
+        var val = $(this).attr('data-object-uuid');
+        data.push({name: selection_param, value: val});
+      });
+    }
+    $.each($.extend({}, action_data, action_data_from_params),
+           function(key, value) {
+               if (value instanceof Array && key[-1] != ']') {
+                   for (var i in value) {
+                       data.push({name: key + '[]', value: value[i]});
+                   }
+               } else {
+                   data.push({name: key, value: value});
+               }
+           });
+    if (http_method === 'PATCH') {
+        // Some user agents do not support HTTP PATCH (notably,
+        // phantomjs silently ignores our "data" and sends an empty
+        // request body) so we use POST instead, and supply a
+        // _method=PATCH param to tell Rails what we really want.
+        data.push({name: '_method', value: http_method});
+        http_method = 'POST';
+    }
+    $.ajax($(this).attr('data-action-href'),
+           {dataType: 'json',
+            type: http_method,
+            data: data,
+            traditional: false,
+            context: {modal: $modal, action_data: action_data}}).
+        fail(function(jqxhr, status, error) {
+            if (jqxhr.readyState == 0 || jqxhr.status == 0) {
+                message = "Cancelled."
+            } else if (jqxhr.responseJSON && jqxhr.responseJSON.errors) {
+                message = jqxhr.responseJSON.errors.join("; ");
+            } else {
+                message = "Request failed.";
+            }
+            this.modal.find('.modal-error').
+                html('<div class="alert alert-danger">' + message + '</div>').
+                show();
+        }).
+        done(function(data, status, jqxhr) {
+            var event_name = this.action_data.success;
+            this.modal.find('.modal-error').hide();
+            $(document).trigger(event_name!=null ? event_name : 'page-refresh',
+                                [data, status, jqxhr, this.action_data]);
+        });
+}).on('click', '.chooser-show-project', function() {
+    var params = {};
+    var project_uuid = $(this).attr('data-project-uuid');
+    $(this).attr('href', '#');  // Skip normal click handler
+    if (project_uuid) {
+        params = {'filters': [['owner_uuid',
+                               '=',
+                               project_uuid]],
+                  'project_uuid': project_uuid
+                 };
+    }
+    // Use current selection as dropdown button label
+    $(this).
+        closest('.dropdown-menu').
+        prev('button').
+        html($(this).text() + ' <span class="caret"></span>');
+    // Set (or unset) filter params and refresh filterable rows
+    $($(this).closest('[data-filterable-target]').attr('data-filterable-target')).
+        data('infinite-content-params-from-project-dropdown', params).
+        trigger('refresh-content');
+}).on('ready', function() {
+    $('form[data-search-modal] a').on('click', function() {
+        $(this).closest('form').submit();
+        return false;
+    });
+    $('form[data-search-modal]').on('submit', function() {
+        // Ask the server for a Search modal. When it arrives, copy
+        // the search string from the top nav input into the modal's
+        // search query field.
+        var $form = $(this);
+        var searchq = $form.find('input').val();
+        var is_a_uuid = /^([0-9a-f]{32}(\+\S+)?|[0-9a-z]{5}-[0-9a-z]{5}-[0-9a-z]{15})$/;
+        if (searchq.trim().match(is_a_uuid)) {
+            window.location = '/actions?uuid=' + encodeURIComponent(searchq.trim());
+            // Show the "loading" indicator. TODO: better page transition hook
+            $(document).trigger('ajax:send');
+            return false;
+        }
+        if ($form.find('a[data-remote]').length > 0) {
+            // A search dialog is already loading.
+            return false;
+        }
+        $('<a />').
+            attr('href', $form.attr('data-search-modal')).
+            attr('data-remote', 'true').
+            attr('data-method', 'GET').
+            hide().
+            appendTo($form).
+            on('ajax:success', function(data, status, xhr) {
+                $('body > .modal-container input[type=text]').
+                    val($form.find('input').val()).
+                    focus();
+                $form.find('input').val('');
+            }).on('ajax:complete', function() {
+                $(this).detach();
+            }).
+            click();
+        return false;
+    });
+}).on('page-refresh', function(event, data, status, jqxhr, action_data) {
+    window.location.reload();
+}).on('tab-refresh', function(event, data, status, jqxhr, action_data) {
+    $(document).trigger('arv:pane:reload:all');
+    $('body > .modal-container .modal').modal('hide');
+}).on('redirect-to-created-object', function(event, data, status, jqxhr, action_data) {
+    window.location.href = data.href.replace(/^[^\/]*\/\/[^\/]*/, '');
+}).on('shown.bs.modal', 'body > .modal-container .modal', function() {
+    $('.focus-on-display', this).focus();
+});
diff --git a/apps/workbench/app/assets/javascripts/selection.js.erb b/apps/workbench/app/assets/javascripts/selection.js.erb
new file mode 100644 (file)
index 0000000..0068b73
--- /dev/null
@@ -0,0 +1,74 @@
+//= require jquery
+//= require jquery_ujs
+
+/** Javascript for selection. */
+
+jQuery(function($){
+    $(document).
+        on('change', '.persistent-selection:checkbox', function(e) {
+            $(document).trigger('selections-updated');
+        });
+});
+
+function dispatch_selection_action() {
+    // Build a new "href" attribute for this link by starting with the
+    // "data-href" attribute and appending ?foo[]=bar&foo[]=baz (or
+    // &foo=... as appropriate) to reflect the current object
+    // selections.
+    var data = [];
+    var param_name = $(this).attr('data-selection-param-name');
+    var href = $(this).attr('data-href');
+    if ($(this).closest('.disabled').length > 0) {
+        return false;
+    }
+    $(this).
+        closest('.selection-action-container').
+        find(':checkbox:checked:visible').
+        each(function() {
+            data.push({name: param_name, value: $(this).val()});
+        });
+    if (href.indexOf('?') >= 0)
+        href += '&';
+    else
+        href += '?';
+    href += $.param(data, true);
+    $(this).attr('href', href);
+    return true;
+}
+
+// Enable/disable the items in this selection-action menu container
+// according to what is currently checked. The '[value*=-xxxxx-]'
+// filters match uuid type infixes in the checkbox values;
+// NOTE(review): '-d1hrv-', '-j7d0g-' and '-4zz18-' presumably
+// correspond to pipeline instances, groups/projects and collections
+// -- confirm against the Arvados uuid schema.
+function enable_disable_selection_actions() {
+    var $container = $(this);
+    var $checked = $('.persistent-selection:checkbox:checked', $container);
+    // Disable every action when nothing at all is checked.
+    $('[data-selection-action]', $container).
+        closest('div.btn-group-sm').
+        find('ul li').
+        toggleClass('disabled', ($checked.length == 0));
+    // "Compare" needs at least two '-d1hrv-' selections and nothing else.
+    $('[data-selection-action=compare]', $container).
+        closest('li').
+        toggleClass('disabled',
+                    ($checked.filter('[value*=-d1hrv-]').length < 2) ||
+                    ($checked.not('[value*=-d1hrv-]').length > 0));
+    <% unless Group.copies_to_projects? %>
+        // This branch is included only when Group.copies_to_projects?
+        // is false (ERB is evaluated server side): "Copy" is disabled
+        // if any '-j7d0g-' item is selected, or nothing is selected.
+        $('[data-selection-action=copy]', $container).
+            closest('li').
+            toggleClass('disabled',
+                        ($checked.filter('[value*=-j7d0g-]').length > 0) ||
+                        ($checked.length < 1));
+    <% end %>
+    // "Combine" requires at least one selection, and all selections
+    // must be '-4zz18-' items.
+    $('[data-selection-action=combine-project-contents]', $container).
+        closest('li').
+        toggleClass('disabled',
+                    ($checked.filter('[value*=-4zz18-]').length < 1) ||
+                    ($checked.length != $checked.filter('[value*=-4zz18-]').length));
+}
+
+$(document).
+    on('selections-updated', function() {
+        $('.selection-action-container').each(enable_disable_selection_actions);
+    }).
+    on('ready ajax:complete', function() {
+        $('[data-selection-action]').
+            off('click', dispatch_selection_action).
+            on('click', dispatch_selection_action);
+        $(this).trigger('selections-updated');
+    });
diff --git a/apps/workbench/app/assets/javascripts/sizing.js b/apps/workbench/app/assets/javascripts/sizing.js
new file mode 100644 (file)
index 0000000..2341628
--- /dev/null
@@ -0,0 +1,31 @@
+function graph_zoom(divId, svgId, scale) {
+    var pg = document.getElementById(divId);
+    vcenter = (pg.scrollTop + (pg.scrollHeight - pg.scrollTopMax)/2.0) / pg.scrollHeight;
+    hcenter = (pg.scrollLeft + (pg.scrollWidth - pg.scrollLeftMax)/2.0) / pg.scrollWidth;
+    var g = document.getElementById(svgId);
+    g.setAttribute("height", parseFloat(g.getAttribute("height")) * scale);
+    g.setAttribute("width", parseFloat(g.getAttribute("width")) * scale);
+    pg.scrollTop = (vcenter * pg.scrollHeight) - (pg.scrollHeight - pg.scrollTopMax)/2.0;
+    pg.scrollLeft = (hcenter * pg.scrollWidth) - (pg.scrollWidth - pg.scrollLeftMax)/2.0;
+    smart_scroll_fixup();
+}
+
+function smart_scroll_fixup(s) {
+
+    if (s != null && s.type == 'shown.bs.tab') {
+        s = [s.target];
+    }
+    else {
+        s = $(".smart-scroll");
+    }
+
+    s.each(function(i, a) {
+        a = $(a);
+        var h = window.innerHeight - a.offset().top - a.attr("data-smart-scroll-padding-bottom");
+        height = String(h) + "px";
+        a.css('max-height', height);
+    });
+}
+
+$(window).on('load ready resize scroll ajax:complete', smart_scroll_fixup);
+$(document).on('shown.bs.tab', 'ul.nav-tabs > li > a', smart_scroll_fixup);
diff --git a/apps/workbench/app/assets/javascripts/tab_panes.js b/apps/workbench/app/assets/javascripts/tab_panes.js
new file mode 100644 (file)
index 0000000..6565ea9
--- /dev/null
@@ -0,0 +1,197 @@
+// Load tab panes on demand. See app/views/application/_content.html.erb
+
+// Fire when a tab is selected/clicked.
+$(document).on('shown.bs.tab', '[data-toggle="tab"]', function(event) {
+    // reload the pane (unless it's already loaded)
+    $($(event.target).attr('href')).
+        not('.pane-loaded').
+        trigger('arv:pane:reload');
+});
+
+// Ask a refreshable pane to reload via ajax.
+//
+// Target of this event is the DOM element to be updated. A reload
+// consists of an AJAX call to load the "data-pane-content-url" and
+// replace the content of the target element with the retrieved HTML.
+//
+// There are four CSS classes set on the element to indicate its state:
+// pane-loading, pane-stale, pane-loaded, pane-reload-pending
+//
+// There are five states based on the presence or absence of css classes:
+//
+// 1. Absence of any pane-* states means the pane is empty, and should
+// be loaded as soon as it becomes visible.
+//
+// 2. "pane-loading" means an AJAX call has been made to reload the
+// pane and we are waiting on a result.
+//
+// 3. "pane-loading pane-stale" means the pane is loading, but has
+// already been invalidated and should schedule a reload as soon as
+// possible after the current load completes. (This happens when there
+// is a cluster of events, where the reload is triggered by the first
+// event, but we want ensure that we eventually load the final
+// quiescent state).
+//
+// 4. "pane-loaded" means the pane is up to date.
+//
+// 5. "pane-loaded pane-reload-pending" means a reload is needed, and
+// has been scheduled, but has not started because the pane's
+// minimum-time-between-reloads throttle has not yet been reached.
+//
+$(document).on('arv:pane:reload', '[data-pane-content-url]', function(e) {
+    if (this != e.target) {
+        // An arv:pane:reload event was sent to an element (e.target)
+        // which happens to have an ancestor (this) matching the above
+        // '[data-pane-content-url]' selector. This happens because
+        // events bubble up the DOM on their way to document. However,
+        // here we only care about events delivered directly to _this_
+        // selected element (i.e., this==e.target), not ones delivered
+        // to its children. The event "e" is uninteresting here.
+        return;
+    }
+
+    // $pane, the event target, is an element whose content is to be
+    // replaced. Pseudoclasses on $pane (pane-loading, etc) encode the
+    // current loading state.
+    var $pane = $(this);
+
+    if ($pane.hasClass('pane-loading')) {
+        // Already loading, mark stale to schedule a reload after this one.
+        $pane.addClass('pane-stale');
+        return;
+    }
+
+    // The default throttle (mininum milliseconds between refreshes)
+    // can be overridden by an .arv-log-refresh-control element inside
+    // the pane -- or, failing that, the pane element itself -- with a
+    // data-load-throttle attribute. This allows the server to adjust
+    // the throttle depending on the pane content.
+    var throttle =
+        $pane.find('.arv-log-refresh-control').attr('data-load-throttle') ||
+        $pane.attr('data-load-throttle') ||
+        15000;
+    var now = (new Date()).getTime();
+    var loaded_at = $pane.attr('data-loaded-at');
+    var since_last_load = now - loaded_at;
+    if (loaded_at && (since_last_load < throttle)) {
+        // Too soon since the last load: schedule a single deferred
+        // reload for when the throttle interval has elapsed.
+        if (!$pane.hasClass('pane-reload-pending')) {
+            $pane.addClass('pane-reload-pending');
+            setTimeout((function() {
+                $pane.trigger('arv:pane:reload');
+            }), throttle - since_last_load);
+        }
+        return;
+    }
+
+    // We know this doesn't have 'pane-loading' because we tested for it above
+    $pane.removeClass('pane-reload-pending');
+    $pane.removeClass('pane-loaded');
+    $pane.removeClass('pane-stale');
+
+    if (!$pane.hasClass('active') &&
+        $pane.parent().hasClass('tab-content')) {
+        // $pane is one of the content areas in a bootstrap tabs
+        // widget, and it isn't the currently selected tab. If and
+        // when the user does select the corresponding tab, it will
+        // get a shown.bs.tab event, which will invoke this reload
+        // function again (see handler above). For now, we just insert
+        // a spinner, which will be displayed while the new content is
+        // loading.
+        $pane.html('<div class="spinner spinner-32px spinner-h-center"></div>');
+        return;
+    }
+
+    $pane.addClass('pane-loading');
+
+    // Fetch the replacement HTML. Passing context: $pane makes "this"
+    // refer to the pane element inside the done/fail callbacks below.
+    var content_url = $pane.attr('data-pane-content-url');
+    $.ajax(content_url, {dataType: 'html', type: 'GET', context: $pane}).
+        done(function(data, status, jqxhr) {
+            // Preserve collapsed state
+            var $pane = this;
+            var collapsable = {};
+            $(".collapse", this).each(function(i, c) {
+                collapsable[c.id] = $(c).hasClass('in');
+            });
+            var tmp = $(data);
+            $(".collapse", tmp).each(function(i, c) {
+                if (collapsable[c.id]) {
+                    $(c).addClass('in');
+                } else {
+                    $(c).removeClass('in');
+                }
+            });
+            $pane.html(tmp);
+            $pane.removeClass('pane-loading');
+            $pane.addClass('pane-loaded');
+            $pane.attr('data-loaded-at', (new Date()).getTime());
+            $pane.trigger('arv:pane:loaded', [$pane]);
+
+            if ($pane.hasClass('pane-stale')) {
+                // Another reload was requested while this one was in
+                // flight; do it now.
+                $pane.trigger('arv:pane:reload');
+            }
+        }).fail(function(jqxhr, status, error) {
+            var $pane = this;
+            var errhtml;
+            var contentType = jqxhr.getResponseHeader('Content-Type');
+            if (contentType && contentType.match(/\btext\/html\b/)) {
+                // HTML error page: show just the page-wrapper div if
+                // the response has one, otherwise the whole body.
+                var $response = $(jqxhr.responseText);
+                var $wrapper = $('div#page-wrapper', $response);
+                if ($wrapper.length) {
+                    errhtml = $wrapper.html();
+                } else {
+                    errhtml = jqxhr.responseText;
+                }
+            } else {
+                // Plain-text error: HTML-escape it before display.
+                errhtml = ("An error occurred: " +
+                           (jqxhr.responseText || status)).
+                    replace(/&/g, '&amp;').
+                    replace(/</g, '&lt;').
+                    replace(/>/g, '&gt;');
+            }
+            $pane.html('<div><p>' +
+                      '<a href="#" class="btn btn-primary tab_reload">' +
+                      '<i class="fa fa-fw fa-refresh"></i> ' +
+                      'Reload tab</a></p><iframe style="width: 100%"></iframe></div>');
+            $('.tab_reload', $pane).click(function() {
+                $(this).
+                    html('<div class="spinner spinner-32px spinner-h-center"></div>').
+                    closest('.pane-loaded').
+                    attr('data-loaded-at', 0).
+                    trigger('arv:pane:reload');
+            });
+            // We want to render the error in an iframe, in order to
+            // avoid conflicts with the main page's element ids, etc.
+            // In order to do that dynamically, we have to set a
+            // timeout on the iframe window to load our HTML *after*
+            // the default source (e.g., about:blank) has loaded.
+            var iframe = $('iframe', $pane)[0];
+            iframe.contentWindow.setTimeout(function() {
+                $('body', iframe.contentDocument).html(errhtml);
+                iframe.height = iframe.contentDocument.body.scrollHeight + "px";
+            }, 1);
+            $pane.removeClass('pane-loading');
+            $pane.addClass('pane-loaded');
+        });
+});
+
+// Mark all panes as stale/dirty. Refresh any 'active' panes.
+$(document).on('arv:pane:reload:all', function() {
+    $('[data-pane-content-url]').trigger('arv:pane:reload');
+});
+
+$(document).on('arv-log-event', '.arv-refresh-on-log-event', function(event) {
+    if (this != event.target) {
+        // Not interested in events sent to child nodes.
+        return;
+    }
+    // Panes marked arv-refresh-on-log-event should be refreshed
+    $(event.target).trigger('arv:pane:reload');
+});
+
+// If there is a 'tab counts url' in the nav-tabs element then use it to get some javascript that will update them
+$(document).on('ready count-change', function() {
+    var tabCountsUrl = $('ul.nav-tabs').data('tab-counts-url');
+    if( tabCountsUrl && tabCountsUrl.length ) {
+        $.get( tabCountsUrl );
+    }
+});
diff --git a/apps/workbench/app/assets/javascripts/upload_to_collection.js b/apps/workbench/app/assets/javascripts/upload_to_collection.js
new file mode 100644 (file)
index 0000000..d4333fa
--- /dev/null
@@ -0,0 +1,476 @@
+// Register the Workbench Angular module (depending on the Arvados
+// module), the upload controller, and the arv-uuid directive.
+var app = angular.module('Workbench', ['Arvados']);
+app.controller('UploadToCollection', UploadToCollection);
+app.directive('arvUuid', arvUuid);
+
+function arvUuid() {
+    // Copy the given uuid into the current $scope.
+    return {
+        restrict: 'A',
+        link: function(scope, element, attributes) {
+            scope.uuid = attributes.arvUuid;
+        }
+    };
+}
+
+// Angular controller for the upload-to-collection panel. Dependencies
+// are listed explicitly so minification does not break injection.
+UploadToCollection.$inject = ['$scope', '$filter', '$q', '$timeout',
+                              'ArvadosClient', 'arvadosApiToken'];
+function UploadToCollection($scope, $filter, $q, $timeout,
+                            ArvadosClient, arvadosApiToken) {
+    // Public scope API used by the template.
+    $.extend($scope, {
+        uploadQueue: [],
+        uploader: new QueueUploader(),
+        addFilesToQueue: function(files) {
+            // Angular binding doesn't work its usual magic for file
+            // inputs, so we need to $scope.$apply() this update.
+            $scope.$apply(function(){
+                var i, nItemsTodo;
+                // Add these new files after the items already waiting
+                // in the queue -- but before the items that are
+                // 'Done' and have therefore been pushed to the
+                // bottom.
+                for (nItemsTodo = 0;
+                     (nItemsTodo < $scope.uploadQueue.length &&
+                      $scope.uploadQueue[nItemsTodo].state !== 'Done'); ) {
+                    nItemsTodo++;
+                }
+                for (i=0; i<files.length; i++) {
+                    $scope.uploadQueue.splice(nItemsTodo+i, 0,
+                        new FileUploader(files[i]));
+                }
+            });
+        },
+        go: function() {
+            $scope.uploader.go();
+        },
+        stop: function() {
+            $scope.uploader.stop();
+        },
+        removeFileFromQueue: function(index) {
+            // Stop the item, drop it from the queue, and resume the
+            // rest of the queue if uploading was in progress.
+            // NOTE(review): uploader.running is not part of
+            // QueueUploader's public API shown above -- verify.
+            var wasRunning = $scope.uploader.running;
+            $scope.uploadQueue[index].stop();
+            $scope.uploadQueue.splice(index, 1);
+            if (wasRunning)
+                $scope.go();
+        },
+        countInStates: function(want_states) {
+            // Return how many queue items are in any of the given
+            // states (e.g. ['Queued', 'Uploading']).
+            var found = 0;
+            $.each($scope.uploadQueue, function() {
+                if (want_states.indexOf(this.state) >= 0) {
+                    ++found;
+                }
+            });
+            return found;
+        }
+    });
+    ////////////////////////////////
+
+    // Keep proxy service record used to build upload URLs.
+    // NOTE(review): initialized elsewhere (not visible here) --
+    // confirm it is set before the first upload starts.
+    var keepProxy;
+
+    function SliceReader(_slice) {
+        var that = this;
+        $.extend(this, {
+            go: go
+        });
+        ////////////////////////////////
+        var _deferred;
+        var _reader;
+        function go() {
+            // Return a promise, which will be resolved with the
+            // requested slice data.
+            _deferred = $.Deferred();
+            _reader = new FileReader();
+            _reader.onload = resolve;
+            _reader.onerror = _deferred.reject;
+            _reader.onprogress = _deferred.notify;
+            _reader.readAsArrayBuffer(_slice.blob);
+            return _deferred.promise();
+        }
+        function resolve() {
+            if (that._reader.result.length !== that._slice.size) {
+                // Sometimes we get an onload event even if the read
+                // did not return the desired number of bytes. We
+                // treat that as a fail.
+                _deferred.reject(
+                    null, "Read error",
+                    "Short read: wanted " + _slice.size +
+                        ", received " + _reader.result.length);
+                return;
+            }
+            return _deferred.resolve(_reader.result);
+        }
+    }
+
+    // Uploads one slice of data to the Keep proxy, retrying up to
+    // _failMax times. _label appears only in log/error messages;
+    // _data is the request body; _dataSize is its size in bytes
+    // (echoed back in progress and resolve notifications).
+    function SliceUploader(_label, _data, _dataSize) {
+        $.extend(this, {
+            go: go,
+            stop: stop
+        });
+        ////////////////////////////////
+        var that = this;
+        var _deferred;
+        var _failCount = 0;
+        var _failMax = 3;
+        var _jqxhr;
+        function go() {
+            // Send data to the Keep proxy. Retry a few times on
+            // fail. Return a promise that will get resolved with
+            // resolve(locator) when the block is accepted by the
+            // proxy.
+            _deferred = $.Deferred();
+            goSend();
+            return _deferred.promise();
+        }
+        function stop() {
+            // Abort the in-flight request and suppress further
+            // retries. NOTE(review): assumes go() was called first
+            // (_jqxhr would be undefined otherwise).
+            _failMax = 0;
+            _jqxhr.abort();
+            _deferred.reject({
+                textStatus: 'stopped',
+                err: 'interrupted at slice '+_label
+            });
+        }
+        function goSend() {
+            _jqxhr = $.ajax({
+                url: proxyUriBase(),
+                type: 'POST',
+                crossDomain: true,
+                headers: {
+                    'Authorization': 'OAuth2 '+arvadosApiToken,
+                    'Content-Type': 'application/octet-stream',
+                    'X-Keep-Desired-Replicas': '2'
+                },
+                xhr: function() {
+                    // Make an xhr that reports upload progress
+                    var xhr = $.ajaxSettings.xhr();
+                    if (xhr.upload) {
+                        xhr.upload.onprogress = onSendProgress;
+                    }
+                    return xhr;
+                },
+                processData: false,
+                data: _data
+            });
+            _jqxhr.then(onSendResolve, onSendReject);
+        }
+        function onSendProgress(xhrProgressEvent) {
+            _deferred.notify(xhrProgressEvent.loaded, _dataSize);
+        }
+        function onSendResolve(data, textStatus, jqxhr) {
+            // "data" is the response body returned by the proxy
+            // (treated as the block locator by the caller).
+            _deferred.resolve(data, _dataSize);
+        }
+        function onSendReject(xhr, textStatus, err) {
+            if (++_failCount < _failMax) {
+                // TODO: nice to tell the user that retry is happening.
+                console.log('slice ' + _label + ': ' +
+                            textStatus + ', retry ' + _failCount);
+                goSend();
+            } else {
+                _deferred.reject(
+                    {xhr: xhr, textStatus: textStatus, err: err});
+            }
+        }
+        function proxyUriBase() {
+            // Build http(s)://host:port/ from the discovered
+            // keepProxy service record.
+            return ((keepProxy.service_ssl_flag ? 'https' : 'http') +
+                    '://' + keepProxy.service_host + ':' +
+                    keepProxy.service_port + '/');
+        }
+    }
+
+    // Drives the upload of a single file: slices it into chunks of at
+    // most _maxBlobSize bytes, uploads each via SliceUploader,
+    // collects the returned locators, and maintains the progress and
+    // statistics fields shown by the UI.
+    function FileUploader(file) {
+        $.extend(this, {
+            file: file,
+            locators: [],
+            progress: 0.0,
+            state: 'Queued',    // Queued, Uploading, Paused, Uploaded, Done
+            statistics: null,
+            go: go,
+            stop: stop          // User wants to stop.
+        });
+        ////////////////////////////////
+        var that = this;
+        var _currentUploader;
+        var _currentSlice;
+        var _deferred;
+        var _maxBlobSize = Math.pow(2,26);  // 2^26 = 64 MiB per slice
+        var _bytesDone = 0;     // NOTE(review): appears unused in this function
+        var _queueTime = Date.now();
+        var _startTime;
+        var _startByte;
+        var _finishTime;
+        var _readPos = 0;       // number of bytes confirmed uploaded
+        function go() {
+            // Start (or restart) uploading from the last confirmed
+            // position. Returns a promise resolved when all slices
+            // have been uploaded.
+            if (_deferred)
+                _deferred.reject({textStatus: 'restarted'});
+            _deferred = $.Deferred();
+            that.state = 'Uploading';
+            _startTime = Date.now();
+            _startByte = _readPos;
+            setProgress();
+            goSlice();
+            return _deferred.promise().always(function() { _deferred = null; });
+        }
+        function stop() {
+            if (_deferred) {
+                that.state = 'Paused';
+                _deferred.reject({textStatus: 'stopped', err: 'interrupted'});
+            }
+            if (_currentUploader) {
+                _currentUploader.stop();
+                _currentUploader = null;
+            }
+        }
+        function goSlice() {
+            // Ensure this._deferred gets resolved or rejected --
+            // either right here, or when a new promise arranged right
+            // here is fulfilled.
+            _currentSlice = nextSlice();
+            if (!_currentSlice) {
+                // All slices have been uploaded, but the work won't
+                // be truly Done until the target collection has been
+                // updated by the QueueUploader. This state is called:
+                that.state = 'Uploaded';
+                setProgress(_readPos);
+                _currentUploader = null;
+                _deferred.resolve([that]);
+                return;
+            }
+            _currentUploader = new SliceUploader(
+                _readPos.toString(),
+                _currentSlice.blob,
+                _currentSlice.size);
+            _currentUploader.go().then(
+                onUploaderResolve,
+                onUploaderReject,
+                onUploaderProgress);
+        }
+        function onUploaderResolve(locator, dataSize) {
+            // Sanity-check the locator's size hint against the number
+            // of bytes we sent before advancing to the next slice.
+            var sizeHint = (''+locator).split('+')[1];
+            if (!locator || parseInt(sizeHint) !== dataSize) {
+                console.log("onUploaderResolve, but locator '" + locator +
+                            "' with size hint '" + sizeHint +
+                            "' does not look right for dataSize=" + dataSize);
+                return onUploaderReject({
+                    textStatus: "error",
+                    err: "Bad response from slice upload"
+                });
+            }
+            that.locators.push(locator);
+            _readPos += dataSize;
+            _currentUploader = null;
+            goSlice();
+        }
+        function onUploaderReject(reason) {
+            that.state = 'Paused';
+            setProgress(_readPos);
+            _currentUploader = null;
+            if (_deferred)
+                _deferred.reject(reason);
+        }
+        function onUploaderProgress(sliceDone, sliceSize) {
+            setProgress(_readPos + sliceDone);
+        }
+        function nextSlice() {
+            // Return {blob, size} for the next unconfirmed chunk of
+            // the file, or false when the whole file has been sent.
+            var size = Math.min(
+                _maxBlobSize,
+                that.file.size - _readPos);
+            setProgress(_readPos);
+            if (size === 0) {
+                return false;
+            }
+            var blob = that.file.slice(
+                _readPos, _readPos+size,
+                'application/octet-stream; charset=x-user-defined');
+            return {blob: blob, size: size};
+        }
+        function setProgress(bytesDone) {
+            // Update this.progress (percent) and this.statistics
+            // (human-readable speed/ETA) from the given byte count.
+            var kBps;
+            if (that.file.size == 0)
+                that.progress = 100;
+            else
+                that.progress = Math.min(100, 100 * bytesDone / that.file.size);
+            if (bytesDone > _startByte) {
+                // bytes per millisecond; numerically close to KiB/s,
+                // which is how it is labeled below.
+                kBps = (bytesDone - _startByte) /
+                    (Date.now() - _startTime);
+                that.statistics = (
+                    '' + $filter('number')(bytesDone/1024, '0') + ' KiB ' +
+                        'at ~' + $filter('number')(kBps, '0') + ' KiB/s')
+                if (that.state === 'Paused') {
+                    that.statistics += ', paused';
+                } else if (that.state === 'Uploading') {
+                    that.statistics += ', ETA ' +
+                        $filter('date')(
+                            new Date(
+                                Date.now() + (that.file.size - bytesDone) / kBps),
+                            'shortTime')
+                }
+            } else {
+                that.statistics = that.state;
+            }
+            if (that.state === 'Uploaded') {
+                // 'Uploaded' gets reported as 'finished', which is a
+                // little misleading because the collection hasn't
+                // been updated yet. But FileUploader's portion of the
+                // work (and the time when it makes sense to show
+                // speed and ETA) is finished.
+                that.statistics += ', finished ' +
+                    $filter('date')(Date.now(), 'shortTime');
+                _finishTime = Date.now();
+            }
+            if (_deferred)
+                _deferred.notify();
+        }
+    }
+
+    function QueueUploader() {
+        $.extend(this, {
+            state: 'Idle',      // Idle, Running, Stopped, Failed
+            stateReason: null,
+            statusSuccess: null,
+            go: go,
+            stop: stop
+        });
+        ////////////////////////////////
+        var that = this;
+        var _deferred;          // the one we promise to go()'s caller
+        var _deferredAppend;    // tracks current appendToCollection
+        function go() {
+            if (_deferred) return _deferred.promise();
+            if (_deferredAppend) return _deferredAppend.promise();
+            _deferred = $.Deferred();
+            that.state = 'Running';
+            ArvadosClient.apiPromise(
+                'keep_services', 'list',
+                {filters: [['service_type','=','proxy']]}).
+                then(doQueueWithProxy);
+            onQueueProgress();
+            return _deferred.promise().always(function() { _deferred = null; });
+        }
+        function stop() {
+            that.state = 'Stopped';
+            if (_deferred) {
+                _deferred.reject({});
+            }
+            for (var i=0; i<$scope.uploadQueue.length; i++)
+                $scope.uploadQueue[i].stop();
+            onQueueProgress();
+        }
+        function doQueueWithProxy(data) {
+            keepProxy = data.items[0];
+            if (!keepProxy) {
+                that.state = 'Failed';
+                that.stateReason =
+                    'There seems to be no Keep proxy service available.';
+                _deferred.reject(null, 'error', that.stateReason);
+                return;
+            }
+            return doQueueWork();
+        }
+        function doQueueWork() {
+            // If anything is not Done, do it.
+            if ($scope.uploadQueue.length > 0 &&
+                $scope.uploadQueue[0].state !== 'Done') {
+                if (_deferred) {
+                    that.stateReason = null;
+                    return $scope.uploadQueue[0].go().
+                        then(appendToCollection, null, onQueueProgress).
+                        then(doQueueWork, onQueueReject);
+                } else {
+                    // Queue work has been stopped. Just update the
+                    // view.
+                    onQueueProgress();
+                    return;
+                }
+            }
+            // If everything is Done, resolve the promise and clean
+            // up. Note this can happen even after the _deferred
+            // promise has been rejected: specifically, when stop() is
+            // called too late to prevent completion of the last
+            // upload. In that case we want to update state to "Idle",
+            // rather than leave it at "Stopped".
+            onQueueResolve();
+        }
+        function onQueueReject(reason) {
+            if (!_deferred) {
+                // Outcome has already been decided (by stop()).
+                return;
+            }
+
+            that.state = 'Failed';
+            that.stateReason = (
+                (reason.textStatus || 'Error') +
+                    (reason.xhr && reason.xhr.options
+                     ? (' (from ' + reason.xhr.options.url + ')')
+                     : '') +
+                    ': ' +
+                    (reason.err || ''));
+            if (reason.xhr && reason.xhr.responseText)
+                that.stateReason += ' -- ' + reason.xhr.responseText;
+            _deferred.reject(reason);
+            onQueueProgress();
+        }
+        function onQueueResolve() {
+            that.state = 'Idle';
+            that.stateReason = 'Done!';
+            if (_deferred)
+                _deferred.resolve();
+            onQueueProgress();
+        }
+        function onQueueProgress() {
+            // Ensure updates happen after FileUpload promise callbacks.
+            $timeout(function(){$scope.$apply();});
+        }
+        function appendToCollection(uploads) {
+            _deferredAppend = $.Deferred();
+            ArvadosClient.apiPromise(
+                'collections', 'get',
+                { uuid: $scope.uuid }).
+                then(function(collection) {
+                    var manifestText = '';
+                    $.each(uploads, function(_, upload) {
+                        var locators = upload.locators;
+                        if (locators.length === 0) {
+                            // Every stream must have at least one
+                            // data locator, even if it is zero bytes
+                            // long:
+                            locators = ['d41d8cd98f00b204e9800998ecf8427e+0'];
+                        }
+                        filename = ArvadosClient.uniqueNameForManifest(
+                            collection.manifest_text,
+                            '.', upload.file.name);
+                        collection.manifest_text += '. ' +
+                            locators.join(' ') +
+                            ' 0:' + upload.file.size.toString() + ':' +
+                            filename +
+                            '\n';
+                    });
+                    return ArvadosClient.apiPromise(
+                        'collections', 'update',
+                        { uuid: $scope.uuid,
+                          collection:
+                          { manifest_text:
+                            collection.manifest_text }
+                        });
+                }).
+                then(function() {
+                    // Mark the completed upload(s) as Done and push
+                    // them to the bottom of the queue.
+                    var i, qLen = $scope.uploadQueue.length;
+                    for (i=0; i<qLen; i++) {
+                        if (uploads.indexOf($scope.uploadQueue[i]) >= 0) {
+                            $scope.uploadQueue[i].state = 'Done';
+                            $scope.uploadQueue.push.apply(
+                                $scope.uploadQueue,
+                                $scope.uploadQueue.splice(i, 1));
+                            --i;
+                            --qLen;
+                        }
+                    }
+                }).
+                then(_deferredAppend.resolve,
+                     _deferredAppend.reject);
+            return _deferredAppend.promise().
+                always(function() {
+                    _deferredAppend = null;
+                });
+        }
+    }
+}
diff --git a/apps/workbench/app/assets/javascripts/user_agreements.js b/apps/workbench/app/assets/javascripts/user_agreements.js
new file mode 100644 (file)
index 0000000..1b9ce4b
--- /dev/null
@@ -0,0 +1,7 @@
+function enable_okbutton() {
+    var $div = $('#open_user_agreement');
+    var allchecked = $('input[name="checked[]"]', $div).not(':checked').length == 0;
+    $('input[type=submit]', $div).prop('disabled', !allchecked);
+}
+$(document).on('click keyup input', '#open_user_agreement input', enable_okbutton);
+$(document).on('ready ajax:complete', enable_okbutton);
diff --git a/apps/workbench/app/assets/javascripts/users.js b/apps/workbench/app/assets/javascripts/users.js
new file mode 100644 (file)
index 0000000..ee1c7dd
--- /dev/null
@@ -0,0 +1,47 @@
+$(document).
+    on('notifications:recount',
+       function() {
+           var menu = $('.notification-menu');
+           n = $('.notification', menu).not('.empty').length;
+           $('.notification-count', menu).html(n>0 ? n : '');
+       }).
+    on('ajax:success', 'form.new_authorized_key',
+       function(e, data, status, xhr) {
+           $(e.target).parents('.notification').eq(0).fadeOut('slow', function() {
+               $('<li class="alert alert-success daxalert">SSH key added.</li>').hide().replaceAll(this).fadeIn('slow');
+               $(document).trigger('notifications:recount');
+           });
+       }).
+    on('ajax:complete', 'form.new_authorized_key',
+       function(e, data, status, xhr) {
+           $($('input[name=disable_element]', e.target).val()).
+               fadeTo(200, 1.0);
+       }).
+    on('ajax:error', 'form.new_authorized_key',
+       function(e, xhr, status, error) {
+           var error_div;
+           response = $.parseJSON(xhr.responseText);
+           error_div = $(e.target).parent().find('div.ajax-errors');
+           if (error_div.length == 0) {
+               $(e.target).parent().append('<div class="alert alert-error ajax-errors"></div>');
+               error_div = $(e.target).parent().find('div.ajax-errors');
+           }
+           if (response.errors) {
+               error_div.html($('<p/>').text(response.errors).html());
+           } else {
+               error_div.html('<p>Sorry, request failed.');
+           }
+           error_div.show();
+           $($('input[name=disable_element]', e.target).val()).
+               fadeTo(200, 1.0);
+       }).
+    on('click', 'form[data-remote] input[type=submit]',
+       function(e) {
+           $(e.target).parents('form').eq(0).parent().find('div.ajax-errors').html('').hide();
+           $($(e.target).
+             parents('form').
+             find('input[name=disable_element]').
+             val()).
+               fadeTo(200, 0.3);
+           return true;
+       });
diff --git a/apps/workbench/app/assets/stylesheets/api_client_authorizations.css.scss b/apps/workbench/app/assets/stylesheets/api_client_authorizations.css.scss
new file mode 100644 (file)
index 0000000..fd2c9d8
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the ApiClientAuthorizations controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/apps/workbench/app/assets/stylesheets/application.css.scss b/apps/workbench/app/assets/stylesheets/application.css.scss
new file mode 100644 (file)
index 0000000..9bc93e3
--- /dev/null
@@ -0,0 +1,282 @@
+/*
+ * This is a manifest file that'll be compiled into application.css, which will include all the files
+ * listed below.
+ *
+ * Any CSS and SCSS file within this directory, lib/assets/stylesheets, vendor/assets/stylesheets,
+ * or vendor/assets/stylesheets of plugins, if any, can be referenced here using a relative path.
+ *
+ * You're free to add application-wide styles to this file and they'll appear at the top of the
+ * compiled file, but it's generally better to create a new file per style scope.
+ *
+ *= require_self
+ *= require bootstrap
+ *= require bootstrap3-editable/bootstrap-editable
+ *= require morris
+ *= require_tree .
+ */
+
+.contain-align-left {
+    text-align: left;
+}
+table.topalign>tbody>tr>td {
+    vertical-align: top;
+}
+table.topalign>thead>tr>td {
+    vertical-align: bottom;
+}
+tr.cell-valign-center>td {
+    vertical-align: middle;
+}
+tr.cell-noborder>td,tr.cell-noborder>th {
+    border: none;
+}
+table.table-justforlayout>tr>td,
+table.table-justforlayout>tr>th,
+table.table-justforlayout>thead>tr>td,
+table.table-justforlayout>thead>tr>th,
+table.table-justforlayout>tbody>tr>td,
+table.table-justforlayout>tbody>tr>th{
+    border: none;
+}
+table.table-justforlayout {
+    margin-bottom: 0;
+}
+.smaller-text {
+    font-size: .8em;
+}
+.deemphasize {
+    font-size: .8em;
+    color: #888;
+}
+.lighten {
+    color: #888;
+}
+.arvados-filename,
+.arvados-uuid {
+    font-size: .8em;
+    font-family: monospace;
+}
+table .data-size, .table .data-size {
+    text-align: right;
+}
+body .editable-empty {
+    color: #999;
+}
+body .editable-empty:hover {
+    color: #0088cc;
+}
+table.arv-index tbody td.arv-object-AuthorizedKey.arv-attr-public_key {
+    overflow-x: hidden;
+    max-width: 120px;
+}
+table.arv-index > thead > tr > th {
+    border-top: none;
+}
+table.table-fixedlayout {
+    white-space: nowrap;
+    table-layout: fixed;
+}
+table.table-fixedlayout td {
+    overflow: hidden;
+    overflow-x: hidden;
+    text-overflow: ellipsis;
+}
+table.table-smallcontent td {
+    font-size: 85%;
+}
+form input.search-mini {
+    padding: 0 6px;
+}
+form.small-form-margin {
+    margin-bottom: 2px;
+}
+.nowrap {
+    white-space: nowrap;
+}
+
+/* top nav */
+$top-nav-bg: #3c163d;
+$top-nav-bg-bottom: #260027;
+nav.navbar-fixed-top .navbar-brand {
+    color: #79537a;
+    letter-spacing: 0.4em;
+}
+nav.navbar-fixed-top {
+    background: $top-nav-bg;
+    background: linear-gradient(to bottom, $top-nav-bg 0%,$top-nav-bg-bottom 100%);
+}
+.navbar.breadcrumbs {
+    line-height: 50px;
+    border-radius: 0;
+    margin-bottom: 0;
+    border-right: 0;
+    border-left: 0;
+}
+.navbar.breadcrumbs .nav > li > a,
+.navbar.breadcrumbs .nav > li {
+    color: #000;
+}
+.navbar.breadcrumbs .nav > li.nav-separator > i {
+    color: #bbb;
+}
+.navbar.breadcrumbs .navbar-form {
+  margin-top: 0px;
+  margin-bottom: 0px;
+}
+.navbar.breadcrumbs .navbar-text {
+  margin-top: 0px;
+  margin-bottom: 0px;
+}
+
+nav.navbar-fixed-top .navbar-nav.navbar-right > li.open > a,
+nav.navbar-fixed-top .navbar-nav.navbar-right > li.open > a:focus,
+nav.navbar-fixed-top .navbar-nav.navbar-right > li.open > a:hover {
+    background: lighten($top-nav-bg, 5%);
+}
+nav.navbar-fixed-top .navbar-nav.navbar-right > li > a,
+nav.navbar-fixed-top .navbar-nav.navbar-right > li > a:focus,
+nav.navbar-fixed-top .navbar-nav.navbar-right > li > a:hover {
+    color: #fff;
+}
+
+.dax {
+    max-width: 10%;
+    margin-right: 1em;
+    float: left
+}
+
+.smart-scroll {
+    overflow: auto;
+    margin-bottom: -15px;
+}
+
+.infinite-scroller .fa-warning {
+    color: #800;
+}
+
+th[data-sort-order] {
+    cursor: pointer;
+}
+
+.inline-progress-container div.progress {
+    margin-bottom: 0;
+}
+
+.inline-progress-container {
+    width: 100%;
+    display:inline-block;
+}
+
+td.add-tag-button {
+    white-space: normal;
+}
+td.add-tag-button .add-tag-button {
+    margin-right: 4px;
+    opacity: 0.2;
+}
+td.add-tag-button .add-tag-button:hover {
+    opacity: 1;
+}
+span.removable-tag-container {
+    line-height: 1.6;
+}
+.label.removable-tag a {
+    color: #fff;
+    cursor: pointer;
+}
+
+li.notification {
+    padding: 10px;
+}
+
+// See HeaderRowFixer in application.js
+table.table-fixed-header-row {
+    width: 100%;
+    border-spacing: 0px;
+    margin:0;
+}
+table.table-fixed-header-row thead {
+    position:fixed;
+    background: #fff;
+}
+table.table-fixed-header-row tbody {
+    position:relative;
+    top:1.5em;
+}
+
+.dropdown-menu {
+    max-height: 30em;
+    overflow-y: auto;
+}
+
+.row-fill-height, .row-fill-height>div[class*='col-'] {
+    display: flex;
+}
+.row-fill-height>div[class*='col-']>div {
+    width: 100%;
+}
+
+/* Show editable popover above side-nav */
+.editable-popup.popover {
+    z-index:1055;
+}
+
+/* Do not leave space for left-nav */
+div#wrapper {
+  padding-left: 0;
+}
+
+.arv-description-as-subtitle {
+  padding-bottom: 1em;
+}
+.arv-description-in-table {
+  height: 4em;
+  overflow-x: hidden;
+  overflow-y: hidden;
+}
+.arv-description-in-table:hover {
+  overflow-y: auto;
+}
+
+.btn.btn-nodecorate {
+  border: none;
+}
+svg text {
+    font-size: 6pt;
+}
+
+div.pane-content iframe {
+  width: 100%;
+  border: none;
+}
+span.editable-textile {
+  display: inline-block;
+}
+.text-overflow-ellipsis {
+  white-space: nowrap;
+  overflow: hidden;
+  text-overflow: ellipsis;
+}
+.time-label-divider {
+  font-size: 80%;
+  min-width: 1em;
+  padding: 0px 2px 0px 0px;
+}
+.task-summary-status {
+  font-size: 80%;
+}
+#page-wrapper > div > h2 {
+  margin-top: 0px;
+}
+
+.compute-summary-numbers td {
+  font-size: 150%;
+}
+
+.arv-log-refresh-control {
+  display: none;
+}
+
+[ng\:cloak], [ng-cloak], .ng-cloak {
+    display: none !important;
+}
diff --git a/apps/workbench/app/assets/stylesheets/authorized_keys.css.scss b/apps/workbench/app/assets/stylesheets/authorized_keys.css.scss
new file mode 100644 (file)
index 0000000..390576b
--- /dev/null
@@ -0,0 +1,10 @@
+// Place all the styles related to the AuthorizedKeys controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
+form .table input[type=text] {
+    width: 600px;
+}
+form .table textarea {
+    width: 600px;
+    height: 10em;
+}
diff --git a/apps/workbench/app/assets/stylesheets/badges.css.scss b/apps/workbench/app/assets/stylesheets/badges.css.scss
new file mode 100644 (file)
index 0000000..82c4ab0
--- /dev/null
@@ -0,0 +1,28 @@
+/* Colors
+ * Contextual variations of badges
+ * Bootstrap 3.0 removed contexts for badges; we re-introduce them, based on what is done for labels
+ */
+
+.badge.badge-error {
+  background-color: #b94a48;
+}
+
+.badge.badge-warning {
+  background-color: #f89406;
+}
+
+.badge.badge-success {
+  background-color: #468847;
+}
+
+.badge.badge-info {
+  background-color: #3a87ad;
+}
+
+.badge.badge-inverse {
+  background-color: #333333;
+}
+
+.badge.badge-alert {
+    background: red;
+}
diff --git a/apps/workbench/app/assets/stylesheets/cards.css.scss b/apps/workbench/app/assets/stylesheets/cards.css.scss
new file mode 100644 (file)
index 0000000..c9560ad
--- /dev/null
@@ -0,0 +1,85 @@
+.card {
+    padding-top: 20px;
+    margin: 10px 0 20px 0;
+    background-color: #ffffff;
+    border: 1px solid #d8d8d8;
+    border-top-width: 0;
+    border-bottom-width: 2px;
+    -webkit-border-radius: 3px;
+    -moz-border-radius: 3px;
+    border-radius: 3px;
+    -webkit-box-shadow: none;
+    -moz-box-shadow: none;
+    box-shadow: none;
+    -webkit-box-sizing: border-box;
+    -moz-box-sizing: border-box;
+    box-sizing: border-box;
+}
+.card.arvados-object {
+    position: relative;
+    display: inline-block;
+    width: 170px;
+    height: 175px;
+    padding-top: 0;
+    margin-left: 20px;
+    overflow: hidden;
+    vertical-align: top;
+}
+.card.arvados-object .card-top.green {
+    background-color: #53a93f;
+}
+.card.arvados-object .card-top.blue {
+    background-color: #427fed;
+}
+.card.arvados-object .card-top {
+    position: absolute;
+    top: 0;
+    left: 0;
+    display: inline-block;
+    width: 170px;
+    height: 25px;
+    background-color: #ffffff;
+}
+.card.arvados-object .card-info {
+    position: absolute;
+    top: 25px;
+    display: inline-block;
+    width: 100%;
+    height: 101px;
+    overflow: hidden;
+    background: #ffffff;
+    -webkit-box-sizing: border-box;
+    -moz-box-sizing: border-box;
+    box-sizing: border-box;
+}
+.card.arvados-object .card-info .title {
+    display: block;
+    margin: 8px 14px 0 14px;
+    overflow: hidden;
+    font-size: 16px;
+    font-weight: bold;
+    line-height: 18px;
+    color: #404040;
+}
+.card.arvados-object .card-info .desc {
+    display: block;
+    margin: 8px 14px 0 14px;
+    overflow: hidden;
+    font-size: 12px;
+    line-height: 16px;
+    color: #737373;
+    text-overflow: ellipsis;
+}
+.card.arvados-object .card-bottom {
+    position: absolute;
+    bottom: 0;
+    left: 0;
+    display: inline-block;
+    width: 100%;
+    padding: 10px 20px;
+    line-height: 29px;
+    text-align: center;
+    -webkit-box-sizing: border-box;
+    -moz-box-sizing: border-box;
+    box-sizing: border-box;
+}
diff --git a/apps/workbench/app/assets/stylesheets/collections.css.scss b/apps/workbench/app/assets/stylesheets/collections.css.scss
new file mode 100644 (file)
index 0000000..35c2249
--- /dev/null
@@ -0,0 +1,66 @@
+/* Style for _show_files tree view. */
+
+ul#collection_files {
+  padding: 0 .5em;
+}
+
+ul.collection_files {
+  line-height: 2.5em;
+  list-style-type: none;
+  padding-left: 2.3em;
+}
+
+ul.collection_files li {
+  clear: both;
+}
+
+.collection_files_row {
+  padding: 1px;  /* Replaced by border for :hover */
+}
+
+.collection_files_row:hover {
+  background-color: #D9EDF7;
+  padding: 0px;
+  border: 1px solid #BCE8F1;
+  border-radius: 3px;
+}
+
+.collection_files_inline {
+  clear: both;
+  width: 80%;
+  margin: 0 3em;
+}
+
+.collection_files_inline img {
+  max-height: 15em;
+}
+
+.collection_files_name {
+  padding-left: .5em;
+  white-space: nowrap;
+  overflow: hidden;
+  text-overflow: ellipsis;
+}
+
+.collection_files_name i.fa-fw:first-child {
+  width: 1.6em;
+}
+
+/*
+  "active" and "inactive" colors are too similar for a toggle switch
+  in the default bootstrap theme.
+  */
+
+$inactive-bg: #5bc0de;
+$active-bg: #39b3d7;
+
+.btn-group.toggle-persist .btn {
+    width: 6em;
+}
+.btn-group.toggle-persist .btn-info {
+    background-color: lighten($inactive-bg, 15%);
+}
+
+.btn-group.toggle-persist .btn-info.active {
+    background-color: $active-bg;
+}
diff --git a/apps/workbench/app/assets/stylesheets/groups.css.scss b/apps/workbench/app/assets/stylesheets/groups.css.scss
new file mode 100644 (file)
index 0000000..6795636
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the Groups controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/apps/workbench/app/assets/stylesheets/humans.css.scss b/apps/workbench/app/assets/stylesheets/humans.css.scss
new file mode 100644 (file)
index 0000000..f8afcdf
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the Humans controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/apps/workbench/app/assets/stylesheets/job_tasks.css.scss b/apps/workbench/app/assets/stylesheets/job_tasks.css.scss
new file mode 100644 (file)
index 0000000..ab340e9
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the JobTasks controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/apps/workbench/app/assets/stylesheets/jobs.css.scss b/apps/workbench/app/assets/stylesheets/jobs.css.scss
new file mode 100644 (file)
index 0000000..cf93f20
--- /dev/null
@@ -0,0 +1,15 @@
+.arv-job-log-window {
+    height: 40em;
+    white-space: pre;
+    overflow: scroll;
+    background: black;
+    color: white;
+    font-family: monospace;
+    font-size: .8em;
+    border: 2px solid black;
+}
+
+.morris-hover-point {
+    text-align: left;
+    width: 100%;
+}
\ No newline at end of file
diff --git a/apps/workbench/app/assets/stylesheets/keep_disks.css.scss b/apps/workbench/app/assets/stylesheets/keep_disks.css.scss
new file mode 100644 (file)
index 0000000..e7a1b12
--- /dev/null
@@ -0,0 +1,11 @@
+// Place all the styles related to the KeepDisks controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
+
+/* Margin gives us some space between this and the table above. */
+div.graph {
+    margin-top: 20px;
+}
+div.graph h3, div.graph h4 {
+    text-align: center;
+}
diff --git a/apps/workbench/app/assets/stylesheets/links.css.scss b/apps/workbench/app/assets/stylesheets/links.css.scss
new file mode 100644 (file)
index 0000000..220eb70
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the Links controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/apps/workbench/app/assets/stylesheets/loading.css.scss.erb b/apps/workbench/app/assets/stylesheets/loading.css.scss.erb
new file mode 100644 (file)
index 0000000..9f74866
--- /dev/null
@@ -0,0 +1,68 @@
+.loading {
+    opacity: 0;
+}
+
+.spinner {
+    /* placeholder for stuff like $.find('.spinner').detach() */
+}
+
+.spinner-32px {
+    background-image: url('<%= asset_path('spinner_32px.gif') %>');
+    background-repeat: no-repeat;
+    width: 32px;
+    height: 32px;
+}
+
+.spinner-h-center {
+    margin-left: auto;
+    margin-right: auto;
+}
+
+.spinner-v-center {
+    position: relative;
+    top: 45%;
+}
+
+.rotating {
+    color: #f00;
+    /* Chrome and Firefox, at least in Linux, render a horrible shaky
+       mess -- better not to bother.
+
+      animation-name: rotateThis;
+      animation-duration: 2s;
+      animation-iteration-count: infinite;
+      animation-timing-function: linear;
+      -moz-animation-name: rotateThis;
+      -moz-animation-duration: 2s;
+      -moz-animation-iteration-count: infinite;
+      -moz-animation-timing-function: linear;
+      -ms-animation-name: rotateThis;
+      -ms-animation-duration: 2s;
+      -ms-animation-iteration-count: infinite;
+      -ms-animation-timing-function: linear;
+      -webkit-animation-name: rotateThis;
+      -webkit-animation-duration: 2s;
+      -webkit-animation-iteration-count: infinite;
+      -webkit-animation-timing-function: linear;
+      */
+}
+
+@keyframes rotateThis {
+  from { transform: rotate( 0deg );   }
+  to   { transform: rotate( 360deg ); }
+}
+
+@-webkit-keyframes rotateThis {
+  from { -webkit-transform: rotate( 0deg );   }
+  to   { -webkit-transform: rotate( 360deg ); }
+}
+
+@-moz-keyframes rotateThis {
+  from { -moz-transform: rotate( 0deg );   }
+  to   { -moz-transform: rotate( 360deg ); }
+}
+
+@-ms-keyframes rotateThis {
+  from { -ms-transform: rotate( 0deg );   }
+  to   { -ms-transform: rotate( 360deg ); }
+}
diff --git a/apps/workbench/app/assets/stylesheets/log_viewer.scss b/apps/workbench/app/assets/stylesheets/log_viewer.scss
new file mode 100644 (file)
index 0000000..318f6e7
--- /dev/null
@@ -0,0 +1,64 @@
+.log-viewer-table {
+ width: 100%;
+ font-family: "Lucida Console", Monaco, monospace;
+ font-size: 11px;
+ table-layout: fixed;
+ thead tr {
+   th {
+     padding-right: 1em;
+   }
+   th.id {
+     display: none;
+   }
+   th.timestamp {
+     width: 15em;
+   }
+   th.type {
+     width: 8em;
+   }
+   th.taskid {
+     width: 4em;
+   }
+   th.node {
+     width: 8em;
+   }
+   th.slot {
+     width: 3em;
+   }
+   th.message {
+     width: auto;
+   }
+ }
+ tbody tr {
+   vertical-align: top;
+   td {
+     padding-right: 1em;
+   }
+   td.id {
+     display: none;
+   }
+   td.taskid {
+     text-align: right;
+   }
+   td.slot {
+     text-align: right;
+   }
+   td.message {
+     word-wrap: break-word;
+   }
+ }
+}
+
+.log-viewer-button {
+  width: 12em;
+}
+
+.log-viewer-paging-div {
+  font-size: 18px;
+  text-align: center;
+}
+
+.log-viewer-page-num {
+  padding-left: .3em;
+  padding-right: .3em;
+}
\ No newline at end of file
diff --git a/apps/workbench/app/assets/stylesheets/logs.css.scss b/apps/workbench/app/assets/stylesheets/logs.css.scss
new file mode 100644 (file)
index 0000000..4aaccac
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the Logs controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/apps/workbench/app/assets/stylesheets/nodes.css.scss b/apps/workbench/app/assets/stylesheets/nodes.css.scss
new file mode 100644 (file)
index 0000000..7210602
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the Nodes controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/apps/workbench/app/assets/stylesheets/pipeline_instances.css.scss b/apps/workbench/app/assets/stylesheets/pipeline_instances.css.scss
new file mode 100644 (file)
index 0000000..c89e93d
--- /dev/null
@@ -0,0 +1,33 @@
+// Place all the styles related to the PipelineInstances controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
+
+.pipeline-compare-headrow div {
+    padding-top: .5em;
+    padding-bottom: .5em;
+}
+.pipeline-compare-headrow:first-child {
+    border-bottom: 1px solid black;
+}
+.pipeline-compare-row .notnormal {
+    background: #ffffaa;
+}
+
+.pipeline_color_legend {
+    margin-top: 0.2em;
+    padding: 0.2em 1em;
+    border: 1px solid #000;
+}
+.pipeline_color_legend a {
+    color: #000;
+}
+
+.col-md-1.pipeline-instance-spacing {
+  padding: 0px;
+  margin: 0px;
+}
+
+.col-md-3.pipeline-instance-spacing > .progress {
+  padding: 0px;
+  margin: 0px;
+}
\ No newline at end of file
diff --git a/apps/workbench/app/assets/stylesheets/pipeline_templates.css.scss b/apps/workbench/app/assets/stylesheets/pipeline_templates.css.scss
new file mode 100644 (file)
index 0000000..c70377a
--- /dev/null
@@ -0,0 +1,30 @@
+// Place all the styles related to the PipelineTemplates controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
+
+.pipeline_color_legend {
+    padding-left: 1em;
+    padding-right: 1em;
+}
+
+table.pipeline-components-table {
+  width: 100%;
+  table-layout: fixed;
+  overflow: hidden;
+}
+
+table.pipeline-components-table thead th {
+  text-align: bottom;
+}
+table.pipeline-components-table div.progress {
+  margin-bottom: 0;
+}
+
+table.pipeline-components-table td {
+  overflow: hidden;
+  text-overflow: ellipsis;
+}
+
+td.required {
+  background: #ffdddd;
+}
diff --git a/apps/workbench/app/assets/stylesheets/projects.css.scss b/apps/workbench/app/assets/stylesheets/projects.css.scss
new file mode 100644 (file)
index 0000000..cf1d15e
--- /dev/null
@@ -0,0 +1,67 @@
+.arv-project-list > .row {
+    padding-top: 5px;
+    padding-bottom: 5px;
+    padding-right: 1em;
+}
+.arv-project-list > .row.project:hover {
+    background: #d9edf7;
+}
+div.scroll-20em {
+    height: 20em;
+    overflow-y: scroll;
+}
+
+.compute-summary {
+    margin: 0.15em 0em 0.15em 0em;
+    display: inline-block;
+}
+
+.compute-summary-head {
+    margin-left: 0.3em;
+}
+
+.compute-detail {
+    border: 1px solid;
+    border-color: #DDD;
+    border-radius: 3px;
+    padding: 0.2em;
+    position: absolute;
+    z-index: 1;
+    background: white;
+}
+
+.compute-detail:hover {
+   cursor: pointer;
+}
+
+.compute-node-summary:hover {
+  cursor: pointer;
+}
+
+.compute-summary-numbers .panel {
+  margin-bottom: 0px;
+}
+
+.compute-summary-numbers table {
+  width: 100%;
+  td,th {
+    text-align: center;
+  }
+}
+
+.compute-summary-nodelist {
+  margin-bottom: 10px
+}
+
+.dashboard-panel-info-row {
+  padding: .5em;
+  border-radius: .3em;
+}
+
+.dashboard-panel-info-row:hover {
+  background-color: #D9EDF7;
+}
+
+.progress-bar.progress-bar-default {
+  background-color: #999;
+}
\ No newline at end of file
diff --git a/apps/workbench/app/assets/stylesheets/repositories.css.scss b/apps/workbench/app/assets/stylesheets/repositories.css.scss
new file mode 100644 (file)
index 0000000..85e38d2
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the Repositories controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/apps/workbench/app/assets/stylesheets/sb-admin.css.scss b/apps/workbench/app/assets/stylesheets/sb-admin.css.scss
new file mode 100644 (file)
index 0000000..9bae214
--- /dev/null
@@ -0,0 +1,164 @@
+/* 
+Author: Start Bootstrap - http://startbootstrap.com
+'SB Admin' HTML Template by Start Bootstrap
+
+All Start Bootstrap themes are licensed under Apache 2.0. 
+For more info and more free Bootstrap 3 HTML themes, visit http://startbootstrap.com!
+*/
+
+/* ATTN: This is mobile first CSS - to update 786px and up screen width use the media query near the bottom of the document! */
+
+/* Global Styles */
+
+body {
+  margin-top: 50px;
+}
+
+#wrapper {
+  padding-left: 0;
+}
+
+#page-wrapper {
+  width: 100%;
+  padding: 5px 15px;
+}
+
+/* Nav Messages */
+
+.messages-dropdown .dropdown-menu .message-preview .avatar,
+.messages-dropdown .dropdown-menu .message-preview .name,
+.messages-dropdown .dropdown-menu .message-preview .message,
+.messages-dropdown .dropdown-menu .message-preview .time {
+  display: block;
+}
+
+.messages-dropdown .dropdown-menu .message-preview .avatar {
+  float: left;
+  margin-right: 15px;
+}
+
+.messages-dropdown .dropdown-menu .message-preview .name {
+  font-weight: bold;
+}
+
+.messages-dropdown .dropdown-menu .message-preview .message {
+  font-size: 12px;
+}
+
+.messages-dropdown .dropdown-menu .message-preview .time {
+  font-size: 12px;
+}
+
+
+/* Nav Announcements */
+
+.announcement-heading {
+  font-size: 50px;
+  margin: 0;
+}
+
+.announcement-text {
+  margin: 0;
+}
+
+/* Table Headers */
+
+table.tablesorter thead {
+  cursor: pointer;
+}
+
+table.tablesorter thead tr th:hover {
+  background-color: #f5f5f5;
+}
+
+/* Flot Chart Containers */
+
+.flot-chart {
+  display: block;
+  height: 400px;
+}
+
+.flot-chart-content {
+  width: 100%;
+  height: 100%;
+}
+
+/* Edit Below to Customize Widths > 768px */
+@media (min-width:768px) {
+
+  /* Wrappers */
+
+  #wrapper {
+        padding-left: 225px;
+  }
+
+  #page-wrapper {
+        padding: 15px 25px;
+  }
+
+  /* Side Nav */
+
+  .side-nav {
+        margin-left: -225px;
+        left: 225px;
+        width: 225px;
+        position: fixed;
+        top: 50px;
+        height: calc(100% - 50px);
+        border-radius: 0;
+        border: none;
+        background-color: #f8f8f8;
+        overflow-y: auto;
+        overflow-x: hidden; /* no left nav scroll bar */
+  }
+
+  /* Bootstrap Default Overrides - Customized Dropdowns for the Side Nav */
+
+  .side-nav>li.dropdown>ul.dropdown-menu {
+        position: relative;
+        min-width: 225px;
+        margin: 0;
+        padding: 0;
+        border: none;
+        border-radius: 0;
+        background-color: transparent;
+        box-shadow: none;
+        -webkit-box-shadow: none;
+  }
+
+  .side-nav>li.dropdown>ul.dropdown-menu>li>a {
+        color: #777777;
+        padding: 15px 15px 15px 25px;
+  }
+
+  .side-nav>li.dropdown>ul.dropdown-menu>li>a:hover,
+  .side-nav>li.dropdown>ul.dropdown-menu>li>a.active,
+  .side-nav>li.dropdown>ul.dropdown-menu>li>a:focus {
+        background-color: #ffffff;
+  }
+
+  .side-nav>li>a {
+        width: 225px;
+  }
+
+  .navbar-default .navbar-nav.side-nav>li>a:hover,
+  .navbar-default .navbar-nav.side-nav>li>a:focus {
+        background-color: #ffffff;
+  }
+
+  /* Nav Messages */
+
+  .messages-dropdown .dropdown-menu {
+        min-width: 300px;
+  }
+
+  .messages-dropdown .dropdown-menu li a {
+        white-space: normal;
+  }
+
+  .navbar-collapse {
+    padding-left: 15px !important;
+    padding-right: 15px !important;
+  }
+
+}
diff --git a/apps/workbench/app/assets/stylesheets/scaffolds.css.scss b/apps/workbench/app/assets/stylesheets/scaffolds.css.scss
new file mode 100644 (file)
index 0000000..d3915bd
--- /dev/null
@@ -0,0 +1,5 @@
+/*
+  We don't want the default Rails CSS, so the rules are deleted. This
+  empty file is left here so Rails doesn't re-add it next time it
+  generates a scaffold.
+  */
diff --git a/apps/workbench/app/assets/stylesheets/select_modal.css.scss b/apps/workbench/app/assets/stylesheets/select_modal.css.scss
new file mode 100644 (file)
index 0000000..425793a
--- /dev/null
@@ -0,0 +1,23 @@
+.selectable-container > .row {
+    padding-top: 5px;
+    padding-bottom: 5px;
+    padding-right: 1em;
+    color: #888;
+}
+.selectable-container > .row.selectable {
+    color: #000;
+}
+.selectable.active, .selectable:hover {
+    background: #d9edf7;
+    cursor: pointer;
+}
+.selectable.active,
+.selectable.active *,
+.selectable.active:hover,
+.selectable.active:hover * {
+    background: #428bca;
+    color: #fff;
+}
+.selectable-container > .row.class-separator {
+    background: #ddd;
+}
diff --git a/apps/workbench/app/assets/stylesheets/sessions.css.scss b/apps/workbench/app/assets/stylesheets/sessions.css.scss
new file mode 100644 (file)
index 0000000..ccb1ed2
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the Sessions controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/apps/workbench/app/assets/stylesheets/specimens.css.scss b/apps/workbench/app/assets/stylesheets/specimens.css.scss
new file mode 100644 (file)
index 0000000..460e42e
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the Specimens controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/apps/workbench/app/assets/stylesheets/traits.css.scss b/apps/workbench/app/assets/stylesheets/traits.css.scss
new file mode 100644 (file)
index 0000000..5f30857
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the Traits controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/apps/workbench/app/assets/stylesheets/user_agreements.css.scss b/apps/workbench/app/assets/stylesheets/user_agreements.css.scss
new file mode 100644 (file)
index 0000000..98edb29
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the user_agreements controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/apps/workbench/app/assets/stylesheets/users.css.scss b/apps/workbench/app/assets/stylesheets/users.css.scss
new file mode 100644 (file)
index 0000000..31a2eac
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the Users controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/apps/workbench/app/assets/stylesheets/virtual_machines.css.scss b/apps/workbench/app/assets/stylesheets/virtual_machines.css.scss
new file mode 100644 (file)
index 0000000..5532eb9
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the VirtualMachines controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/apps/workbench/app/controllers/actions_controller.rb b/apps/workbench/app/controllers/actions_controller.rb
new file mode 100644 (file)
index 0000000..59dcbb9
--- /dev/null
@@ -0,0 +1,209 @@
+class ActionsController < ApplicationController
+
+  skip_filter :require_thread_api_token, only: [:report_issue_popup, :report_issue]
+  skip_filter :check_user_agreements, only: [:report_issue_popup, :report_issue]
+
+  @@exposed_actions = {}
+  def self.expose_action method, &block
+    @@exposed_actions[method] = true
+    define_method method, block
+  end
+
+  def model_class
+    ArvadosBase::resource_class_for_uuid(params[:uuid])
+  end
+
+  def show
+    @object = model_class.andand.find(params[:uuid])
+    if @object.is_a? Link and
+        @object.link_class == 'name' and
+        ArvadosBase::resource_class_for_uuid(@object.head_uuid) == Collection
+      redirect_to collection_path(id: @object.uuid)
+    elsif @object
+      redirect_to @object
+    else
+      raise ActiveRecord::RecordNotFound
+    end
+  end
+
+  def post
+    params.keys.collect(&:to_sym).each do |param|
+      if @@exposed_actions[param]
+        return self.send(param)
+      end
+    end
+    redirect_to :back
+  end
+
+  expose_action :copy_selections_into_project do
+    move_or_copy :copy
+  end
+
+  expose_action :move_selections_into_project do
+    move_or_copy :move
+  end
+
+  def move_or_copy action
+    uuids_to_add = params["selection"]
+    uuids_to_add = [ uuids_to_add ] unless uuids_to_add.is_a? Array
+    uuids_to_add.
+      collect { |x| ArvadosBase::resource_class_for_uuid(x) }.
+      uniq.
+      each do |resource_class|
+      resource_class.filter([['uuid','in',uuids_to_add]]).each do |src|
+        if resource_class == Collection and not Collection.attribute_info.include?(:name)
+          dst = Link.new(owner_uuid: @object.uuid,
+                         tail_uuid: @object.uuid,
+                         head_uuid: src.uuid,
+                         link_class: 'name',
+                         name: src.uuid)
+        else
+          case action
+          when :copy
+            dst = src.dup
+            if dst.respond_to? :'name='
+              if dst.name
+                dst.name = "Copy of #{dst.name}"
+              else
+                dst.name = "Copy of unnamed #{dst.class_for_display.downcase}"
+              end
+            end
+            if resource_class == Collection
+              dst.manifest_text = Collection.select([:manifest_text]).where(uuid: src.uuid).first.manifest_text
+            end
+          when :move
+            dst = src
+          else
+            raise ArgumentError.new "Unsupported action #{action}"
+          end
+          dst.owner_uuid = @object.uuid
+          dst.tail_uuid = @object.uuid if dst.class == Link
+        end
+        begin
+          dst.save!
+        rescue
+          dst.name += " (#{Time.now.localtime})" if dst.respond_to? :name=
+          dst.save!
+        end
+      end
+    end
+    redirect_to @object
+  end
+
+  def arv_normalize mt, *opts
+    r = ""
+    env = Hash[ENV].
+      merge({'ARVADOS_API_HOST' =>
+              arvados_api_client.arvados_v1_base.
+              sub(/\/arvados\/v1/, '').
+              sub(/^https?:\/\//, ''),
+              'ARVADOS_API_TOKEN' => 'x',
+              'ARVADOS_API_HOST_INSECURE' =>
+              Rails.configuration.arvados_insecure_https ? 'true' : 'false'
+            })
+    IO.popen([env, 'arv-normalize'] + opts, 'w+b') do |io|
+      io.write mt
+      io.close_write
+      while buf = io.read(2**16)
+        r += buf
+      end
+    end
+    r
+  end
+
+  expose_action :combine_selected_files_into_collection do
+    uuids = []
+    pdhs = []
+    files = []
+    params["selection"].each do |s|
+      a = ArvadosBase::resource_class_for_uuid s
+      if a == Link
+        begin
+          if (m = CollectionsHelper.match(Link.find(s).head_uuid))
+            pdhs.append(m[1] + m[2])
+            files.append(m)
+          end
+        rescue
+        end
+      elsif (m = CollectionsHelper.match(s))
+        pdhs.append(m[1] + m[2])
+        files.append(m)
+      elsif (m = CollectionsHelper.match_uuid_with_optional_filepath(s))
+        uuids.append(m[1])
+        files.append(m)
+      end
+    end
+
+    pdhs = pdhs.uniq
+    uuids = uuids.uniq
+    chash = {}
+
+    Collection.select([:uuid, :manifest_text]).where(uuid: uuids).each do |c|
+      chash[c.uuid] = c
+    end
+
+    Collection.select([:portable_data_hash, :manifest_text]).where(portable_data_hash: pdhs).each do |c|
+      chash[c.portable_data_hash] = c
+    end
+
+    combined = ""
+    files.each do |m|
+      mt = chash[m[1]+m[2]].andand.manifest_text
+      if not m[4].nil? and m[4].size > 1
+        combined += arv_normalize mt, '--extract', ".#{m[4]}"
+      else
+        combined += mt
+      end
+    end
+
+    normalized = arv_normalize combined
+    newc = Collection.new({:manifest_text => normalized})
+    newc.name = newc.name || "Collection created at #{Time.now.localtime}"
+
+    # set owner_uuid to current project, provided it is writable
+    current_project_writable = false
+    action_data = JSON.parse(params['action_data']) if params['action_data']
+    if action_data && action_data['current_project_uuid']
+      current_project = Group.find(action_data['current_project_uuid']) rescue nil
+      if (current_project && current_project.writable_by.andand.include?(current_user.uuid))
+        newc.owner_uuid = action_data['current_project_uuid']
+        current_project_writable = true
+      end
+    end
+
+    newc.save!
+
+    chash.each do |k,v|
+      l = Link.new({
+                     tail_uuid: k,
+                     head_uuid: newc.uuid,
+                     link_class: "provenance",
+                     name: "provided"
+                   })
+      l.save!
+    end
+
+    msg = current_project_writable ?
+              "Created new collection in the project #{current_project.name}." :
+              "Created new collection in your Home project."
+
+    redirect_to newc, flash: {'message' => msg}
+  end
+
+  def report_issue_popup
+    respond_to do |format|
+      format.js
+      format.html
+    end
+  end
+
+  def report_issue
+    logger.warn "report_issue: #{params.inspect}"
+
+    respond_to do |format|
+      IssueReporter.send_report(current_user, params).deliver
+      format.js {render nothing: true}
+    end
+  end
+
+end
diff --git a/apps/workbench/app/controllers/api_client_authorizations_controller.rb b/apps/workbench/app/controllers/api_client_authorizations_controller.rb
new file mode 100644 (file)
index 0000000..85f52f2
--- /dev/null
@@ -0,0 +1,7 @@
+class ApiClientAuthorizationsController < ApplicationController
+
+  def index_pane_list
+    %w(Recent Help)
+  end
+
+end
diff --git a/apps/workbench/app/controllers/application_controller.rb b/apps/workbench/app/controllers/application_controller.rb
new file mode 100644 (file)
index 0000000..c757517
--- /dev/null
@@ -0,0 +1,1048 @@
+class ApplicationController < ActionController::Base
+  include ArvadosApiClientHelper
+  include ApplicationHelper
+
+  respond_to :html, :json, :js
+  protect_from_forgery
+
+  ERROR_ACTIONS = [:render_error, :render_not_found]
+
+  around_filter :thread_clear
+  around_filter :set_thread_api_token
+  # Methods that don't require login should
+  #   skip_around_filter :require_thread_api_token
+  around_filter :require_thread_api_token, except: ERROR_ACTIONS
+  before_filter :set_cache_buster
+  before_filter :accept_uuid_as_id_param, except: ERROR_ACTIONS
+  before_filter :check_user_agreements, except: ERROR_ACTIONS
+  before_filter :check_user_profile, except: ERROR_ACTIONS
+  before_filter :check_user_notifications, except: ERROR_ACTIONS
+  before_filter :load_filters_and_paging_params, except: ERROR_ACTIONS
+  before_filter :find_object_by_uuid, except: [:create, :index, :choose] + ERROR_ACTIONS
+  theme :select_theme
+
+  begin
+    rescue_from(ActiveRecord::RecordNotFound,
+                ActionController::RoutingError,
+                ActionController::UnknownController,
+                AbstractController::ActionNotFound,
+                with: :render_not_found)
+    rescue_from(Exception,
+                ActionController::UrlGenerationError,
+                with: :render_exception)
+  end
+
+  def set_cache_buster
+    response.headers["Cache-Control"] = "no-cache, no-store, max-age=0, must-revalidate"
+    response.headers["Pragma"] = "no-cache"
+    response.headers["Expires"] = "Fri, 01 Jan 1990 00:00:00 GMT"
+  end
+
+  def unprocessable(message=nil)
+    @errors ||= []
+
+    @errors << message if message
+    render_error status: 422
+  end
+
+  def render_error(opts={})
+    opts[:status] ||= 500
+    respond_to do |f|
+      # json must come before html here, so it gets used as the
+      # default format when js is requested by the client. This lets
+      # ajax:error callback parse the response correctly, even though
+      # the browser can't.
+      f.json { render opts.merge(json: {success: false, errors: @errors}) }
+      f.html { render({action: 'error'}.merge(opts)) }
+    end
+  end
+
+  def render_exception(e)
+    logger.error e.inspect
+    logger.error e.backtrace.collect { |x| x + "\n" }.join('') if e.backtrace
+    err_opts = {status: 422}
+    if e.is_a?(ArvadosApiClient::ApiError)
+      err_opts.merge!(action: 'api_error', locals: {api_error: e})
+      @errors = e.api_response[:errors]
+    elsif @object.andand.errors.andand.full_messages.andand.any?
+      @errors = @object.errors.full_messages
+    else
+      @errors = [e.to_s]
+    end
+    # Make user information available on the error page, falling back to the
+    # session cache if the API server is unavailable.
+    begin
+      load_api_token(session[:arvados_api_token])
+    rescue ArvadosApiClient::ApiError
+      unless session[:user].nil?
+        begin
+          Thread.current[:user] = User.new(session[:user])
+        rescue ArvadosApiClient::ApiError
+          # This can happen if User's columns are unavailable.  Nothing to do.
+        end
+      end
+    end
+    # Preload projects trees for the template.  If that's not doable, set empty
+    # trees so error page rendering can proceed.  (It's easier to rescue the
+    # exception here than in a template.)
+    unless current_user.nil?
+      begin
+        build_project_trees
+      rescue ArvadosApiClient::ApiError
+        # Fall back to the default-setting code later.
+      end
+    end
+    @my_project_tree ||= []
+    @shared_project_tree ||= []
+    render_error(err_opts)
+  end
+
+  def render_not_found(e=ActionController::RoutingError.new("Path not found"))
+    logger.error e.inspect
+    @errors = ["Path not found"]
+    set_thread_api_token do
+      self.render_error(action: '404', status: 404)
+    end
+  end
+
+  # params[:order]:
+  #
+  # The order can be left empty to allow it to default.
+  # Or it can be a comma separated list of real database column names, one per model.
+  # Column names should always be qualified by a table name and a direction is optional, defaulting to asc
+  # (e.g. "collections.name" or "collections.name desc").
+  # If a column name is specified, that table will be sorted by that column.
+  # If there are objects from different models that will be shown (such as in Jobs and Pipelines tab),
+  # then a sort column name can optionally be specified for each model, passed as an comma-separated list (e.g. "jobs.script, pipeline_instances.name")
+  # Currently only one sort column name and direction can be specified for each model.
+  def load_filters_and_paging_params
+    if params[:order].blank?
+      @order = 'created_at desc'
+    elsif params[:order].is_a? Array
+      @order = params[:order]
+    else
+      begin
+        @order = JSON.load(params[:order])
+      rescue
+        @order = params[:order].split(',')
+      end
+    end
+    @order = [@order] unless @order.is_a? Array
+
+    @limit ||= 200
+    if params[:limit]
+      @limit = params[:limit].to_i
+    end
+
+    @offset ||= 0
+    if params[:offset]
+      @offset = params[:offset].to_i
+    end
+
+    @filters ||= []
+    if params[:filters]
+      filters = params[:filters]
+      if filters.is_a? String
+        filters = Oj.load filters
+      elsif filters.is_a? Array
+        filters = filters.collect do |filter|
+          if filter.is_a? String
+            # Accept filters[]=["foo","=","bar"]
+            Oj.load filter
+          else
+            # Accept filters=[["foo","=","bar"]]
+            filter
+          end
+        end
+      end
+      # After this, params[:filters] can be trusted to be an array of arrays:
+      params[:filters] = filters
+      @filters += filters
+    end
+  end
+
+  def find_objects_for_index
+    @objects ||= model_class
+    @objects = @objects.filter(@filters).limit(@limit).offset(@offset)
+    @objects.fetch_multiple_pages(false)
+  end
+
+  def render_index
+    respond_to do |f|
+      f.json {
+        if params[:partial]
+          @next_page_href = next_page_href(partial: params[:partial], filters: @filters.to_json)
+          render json: {
+            content: render_to_string(partial: "show_#{params[:partial]}",
+                                      formats: [:html]),
+            next_page_href: @next_page_href
+          }
+        else
+          render json: @objects
+        end
+      }
+      f.html {
+        if params[:tab_pane]
+          render_pane params[:tab_pane]
+        else
+          render
+        end
+      }
+      f.js { render }
+    end
+  end
+
+  helper_method :render_pane
+  def render_pane tab_pane, opts={}
+    render_opts = {
+      partial: 'show_' + tab_pane.downcase,
+      locals: {
+        comparable: self.respond_to?(:compare),
+        objects: @objects,
+        tab_pane: tab_pane
+      }.merge(opts[:locals] || {})
+    }
+    if opts[:to_string]
+      render_to_string render_opts
+    else
+      render render_opts
+    end
+  end
+
+  def index
+    find_objects_for_index if !@objects
+    render_index
+  end
+
+  helper_method :next_page_offset
+  def next_page_offset objects=nil
+    if !objects
+      objects = @objects
+    end
+    if objects.respond_to?(:result_offset) and
+        objects.respond_to?(:result_limit) and
+        objects.respond_to?(:items_available)
+      next_offset = objects.result_offset + objects.result_limit
+      if next_offset < objects.items_available
+        next_offset
+      else
+        nil
+      end
+    end
+  end
+
+  helper_method :next_page_href
+  def next_page_href with_params={}
+    if next_page_offset
+      url_for with_params.merge(offset: next_page_offset)
+    end
+  end
+
+  def show
+    if !@object
+      return render_not_found("object not found")
+    end
+    respond_to do |f|
+      f.json do
+        extra_attrs = { href: url_for(action: :show, id: @object) }
+        @object.textile_attributes.each do |textile_attr|
+          extra_attrs.merge!({ "#{textile_attr}Textile" => view_context.render_markup(@object.attributes[textile_attr]) })
+        end
+        render json: @object.attributes.merge(extra_attrs)
+      end
+      f.html {
+        if params['tab_pane']
+          render_pane(if params['tab_pane'].is_a? Hash then params['tab_pane']["name"] else params['tab_pane'] end)
+        elsif request.method.in? ['GET', 'HEAD']
+          render
+        else
+          redirect_to (params[:return_to] ||
+                       polymorphic_url(@object,
+                                       anchor: params[:redirect_to_anchor]))
+        end
+      }
+      f.js { render }
+    end
+  end
+
+  def choose
+    params[:limit] ||= 40
+    respond_to do |f|
+      if params[:partial]
+        f.json {
+          find_objects_for_index if !@objects
+          render json: {
+            content: render_to_string(partial: "choose_rows.html",
+                                      formats: [:html]),
+            next_page_href: next_page_href(partial: params[:partial])
+          }
+        }
+      end
+      f.js {
+        find_objects_for_index if !@objects
+        render partial: 'choose', locals: {multiple: params[:multiple]}
+      }
+    end
+  end
+
+  def render_content
+    if !@object
+      return render_not_found("object not found")
+    end
+  end
+
+  def new
+    @object = model_class.new
+  end
+
+  def update
+    @updates ||= params[@object.resource_param_name.to_sym]
+    @updates.keys.each do |attr|
+      if @object.send(attr).is_a? Hash
+        if @updates[attr].is_a? String
+          @updates[attr] = Oj.load @updates[attr]
+        end
+        if params[:merge] || params["merge_#{attr}".to_sym]
+          # Merge provided Hash with current Hash, instead of
+          # replacing.
+          @updates[attr] = @object.send(attr).with_indifferent_access.
+            deep_merge(@updates[attr].with_indifferent_access)
+        end
+      end
+    end
+    if @object.update_attributes @updates
+      show
+    else
+      self.render_error status: 422
+    end
+  end
+
+  def create
+    @new_resource_attrs ||= params[model_class.to_s.underscore.singularize]
+    @new_resource_attrs ||= {}
+    @new_resource_attrs.reject! { |k,v| k.to_s == 'uuid' }
+    @object ||= model_class.new @new_resource_attrs, params["options"]
+
+    if @object.save
+      show
+    else
+      render_error status: 422
+    end
+  end
+
+  # Clone the given object, merging any attribute values supplied as
+  # with a create action.
+  def copy
+    @new_resource_attrs ||= params[model_class.to_s.underscore.singularize]
+    @new_resource_attrs ||= {}
+    @object = @object.dup
+    @object.update_attributes @new_resource_attrs
+    if not @new_resource_attrs[:name] and @object.respond_to? :name
+      if @object.name and @object.name != ''
+        @object.name = "Copy of #{@object.name}"
+      else
+        @object.name = ""
+      end
+    end
+    @object.save!
+    show
+  end
+
+  def destroy
+    if @object.destroy
+      respond_to do |f|
+        f.json { render json: @object }
+        f.html {
+          redirect_to(params[:return_to] || :back)
+        }
+        f.js { render }
+      end
+    else
+      self.render_error status: 422
+    end
+  end
+
+  def current_user
+    Thread.current[:user]
+  end
+
+  def model_class
+    controller_name.classify.constantize
+  end
+
+  def breadcrumb_page_name
+    (@breadcrumb_page_name ||
+     (@object.friendly_link_name if @object.respond_to? :friendly_link_name) ||
+     action_name)
+  end
+
+  def index_pane_list
+    %w(Recent)
+  end
+
+  def show_pane_list
+    %w(Attributes Advanced)
+  end
+
+  protected
+
+  def strip_token_from_path(path)
+    path.sub(/([\?&;])api_token=[^&;]*[&;]?/, '\1')
+  end
+
+  def redirect_to_login
+    respond_to do |f|
+      f.html {
+        if request.method.in? ['GET', 'HEAD']
+          redirect_to arvados_api_client.arvados_login_url(return_to: strip_token_from_path(request.url))
+        else
+          flash[:error] = "Either you are not logged in, or your session has timed out. I can't automatically log you in and re-attempt this request."
+          redirect_to :back
+        end
+      }
+      f.json {
+        @errors = ['You do not seem to be logged in. You did not supply an API token with this request, and your session (if any) has timed out.']
+        self.render_error status: 422
+      }
+    end
+    false  # For convenience to return from callbacks
+  end
+
+  def using_specific_api_token(api_token, opts={})
+    start_values = {}
+    [:arvados_api_token, :user].each do |key|
+      start_values[key] = Thread.current[key]
+    end
+    if opts.fetch(:load_user, true)
+      load_api_token(api_token)
+    else
+      Thread.current[:arvados_api_token] = api_token
+      Thread.current[:user] = nil
+    end
+    begin
+      yield
+    ensure
+      start_values.each_key { |key| Thread.current[key] = start_values[key] }
+    end
+  end
+
+
+  def accept_uuid_as_id_param
+    if params[:id] and params[:id].match /\D/
+      params[:uuid] = params.delete :id
+    end
+  end
+
+  def find_object_by_uuid
+    begin
+      if not model_class
+        @object = nil
+      elsif not params[:uuid].is_a?(String)
+        @object = model_class.where(uuid: params[:uuid]).first
+      elsif params[:uuid].empty?
+        @object = nil
+      elsif (model_class != Link and
+             resource_class_for_uuid(params[:uuid]) == Link)
+        @name_link = Link.find(params[:uuid])
+        @object = model_class.find(@name_link.head_uuid)
+      else
+        @object = model_class.find(params[:uuid])
+      end
+    rescue ArvadosApiClient::NotFoundException, RuntimeError => error
+      if error.is_a?(RuntimeError) and (error.message !~ /^argument to find\(/)
+        raise
+      end
+      render_not_found(error)
+      return false
+    end
+  end
+
+  def thread_clear
+    load_api_token(nil)
+    Rails.cache.delete_matched(/^request_#{Thread.current.object_id}_/)
+    yield
+    Rails.cache.delete_matched(/^request_#{Thread.current.object_id}_/)
+  end
+
+  # Set up the thread with the given API token and associated user object.
+  def load_api_token(new_token)
+    Thread.current[:arvados_api_token] = new_token
+    if new_token.nil?
+      Thread.current[:user] = nil
+    else
+      Thread.current[:user] = User.current
+    end
+  end
+
+  # If there's a valid api_token parameter, set up the session with that
+  # user's information.  Return true if the method redirects the request
+  # (usually a post-login redirect); false otherwise.
+  def setup_user_session
+    return false unless params[:api_token]
+    Thread.current[:arvados_api_token] = params[:api_token]
+    begin
+      user = User.current
+    rescue ArvadosApiClient::NotLoggedInException
+      false  # We may redirect to login, or not, based on the current action.
+    else
+      session[:arvados_api_token] = params[:api_token]
+      # If we later have trouble contacting the API server, we still want
+      # to be able to render basic user information in the UI--see
+      # render_exception above.  We store that in the session here.  This is
+      # not intended to be used as a general-purpose cache.  See #2891.
+      session[:user] = {
+        uuid: user.uuid,
+        email: user.email,
+        first_name: user.first_name,
+        last_name: user.last_name,
+        is_active: user.is_active,
+        is_admin: user.is_admin,
+        prefs: user.prefs
+      }
+
+      if !request.format.json? and request.method.in? ['GET', 'HEAD']
+        # Repeat this request with api_token in the (new) session
+        # cookie instead of the query string.  This prevents API
+        # tokens from appearing in (and being inadvisedly copied
+        # and pasted from) browser Location bars.
+        redirect_to strip_token_from_path(request.fullpath)
+        true
+      else
+        false
+      end
+    ensure
+      Thread.current[:arvados_api_token] = nil
+    end
+  end
+
+  # Save the session API token in thread-local storage, and yield.
+  # This method also takes care of session setup if the request
+  # provides a valid api_token parameter.
+  # If a token is unavailable or expired, the block is still run, with
+  # a nil token.
+  def set_thread_api_token
+    if Thread.current[:arvados_api_token]
+      yield   # An API token has already been found - pass it through.
+      return
+    elsif setup_user_session
+      return  # A new session was set up and received a response.
+    end
+
+    begin
+      load_api_token(session[:arvados_api_token])
+      yield
+    rescue ArvadosApiClient::NotLoggedInException
+      # If we got this error with a token, it must've expired.
+      # Retry the request without a token.
+      unless Thread.current[:arvados_api_token].nil?
+        load_api_token(nil)
+        yield
+      end
+    ensure
+      # Remove token in case this Thread is used for anything else.
+      load_api_token(nil)
+    end
+  end
+
+  # Redirect to login/welcome if client provided expired API token (or none at all)
+  def require_thread_api_token
+    if Thread.current[:arvados_api_token]
+      yield
+    elsif session[:arvados_api_token]
+      # Expired session. Clear it before refreshing login so that,
+      # if this login procedure fails, we end up showing the "please
+      # log in" page instead of getting stuck in a redirect loop.
+      session.delete :arvados_api_token
+      redirect_to_login
+    else
+      redirect_to welcome_users_path(return_to: request.fullpath)
+    end
+  end
+
+  def ensure_current_user_is_admin
+    unless current_user and current_user.is_admin
+      @errors = ['Permission denied']
+      self.render_error status: 401
+    end
+  end
+
+  helper_method :unsigned_user_agreements
+  def unsigned_user_agreements
+    @signed_ua_uuids ||= UserAgreement.signatures.map &:head_uuid
+    @unsigned_user_agreements ||= UserAgreement.all.map do |ua|
+      if not @signed_ua_uuids.index ua.uuid
+        Collection.find(ua.uuid)
+      end
+    end.compact
+  end
+
+  def check_user_agreements
+    if current_user && !current_user.is_active
+      if not current_user.is_invited
+        return redirect_to inactive_users_path(return_to: request.fullpath)
+      end
+      if unsigned_user_agreements.empty?
+        # No agreements to sign. Perhaps we just need to ask?
+        current_user.activate
+        if !current_user.is_active
+          logger.warn "#{current_user.uuid.inspect}: " +
+            "No user agreements to sign, but activate failed!"
+        end
+      end
+      if !current_user.is_active
+        redirect_to user_agreements_path(return_to: request.fullpath)
+      end
+    end
+    true
+  end
+
+  def check_user_profile
+    if request.method.downcase != 'get' || params[:partial] ||
+       params[:tab_pane] || params[:action_method] ||
+       params[:action] == 'setup_popup'
+      return true
+    end
+
+    if missing_required_profile?
+      redirect_to profile_user_path(current_user.uuid, return_to: request.fullpath)
+    end
+    true
+  end
+
+  helper_method :missing_required_profile?
+  def missing_required_profile?
+    missing_required = false
+
+    profile_config = Rails.configuration.user_profile_form_fields
+    if current_user && profile_config
+      current_user_profile = current_user.prefs[:profile]
+      profile_config.kind_of?(Array) && profile_config.andand.each do |entry|
+        if entry['required']
+          if !current_user_profile ||
+             !current_user_profile[entry['key'].to_sym] ||
+             current_user_profile[entry['key'].to_sym].empty?
+            missing_required = true
+            break
+          end
+        end
+      end
+    end
+
+    missing_required
+  end
+
+  def select_theme
+    return Rails.configuration.arvados_theme
+  end
+
+  @@notification_tests = []
+
+  @@notification_tests.push lambda { |controller, current_user|
+    AuthorizedKey.limit(1).where(authorized_user_uuid: current_user.uuid).each do
+      return nil
+    end
+    return lambda { |view|
+      view.render partial: 'notifications/ssh_key_notification'
+    }
+  }
+
+  @@notification_tests.push lambda { |controller, current_user|
+    Collection.limit(1).where(created_by: current_user.uuid).each do
+      return nil
+    end
+    return lambda { |view|
+      view.render partial: 'notifications/collections_notification'
+    }
+  }
+
+  @@notification_tests.push lambda { |controller, current_user|
+    PipelineInstance.limit(1).where(created_by: current_user.uuid).each do
+      return nil
+    end
+    return lambda { |view|
+      view.render partial: 'notifications/pipelines_notification'
+    }
+  }
+
+  def check_user_notifications
+    return if params['tab_pane']
+
+    @notification_count = 0
+    @notifications = []
+
+    if current_user.andand.is_active
+      @showallalerts = false
+      @@notification_tests.each do |t|
+        a = t.call(self, current_user)
+        if a
+          @notification_count += 1
+          @notifications.push a
+        end
+      end
+    end
+
+    if @notification_count == 0
+      @notification_count = ''
+    end
+  end
+
+  helper_method :all_projects
+  def all_projects
+    @all_projects ||= Group.
+      filter([['group_class','=','project']]).order('name')
+  end
+
+  helper_method :my_projects
+  def my_projects
+    return @my_projects if @my_projects
+    @my_projects = []
+    root_of = {}
+    all_projects.each do |g|
+      root_of[g.uuid] = g.owner_uuid
+      @my_projects << g
+    end
+    done = false
+    while not done
+      done = true
+      root_of = root_of.each_with_object({}) do |(child, parent), h|
+        if root_of[parent]
+          h[child] = root_of[parent]
+          done = false
+        else
+          h[child] = parent
+        end
+      end
+    end
+    @my_projects = @my_projects.select do |g|
+      root_of[g.uuid] == current_user.uuid
+    end
+  end
+
+  helper_method :projects_shared_with_me
+  def projects_shared_with_me
+    my_project_uuids = my_projects.collect &:uuid
+    all_projects.reject { |x| x.uuid.in? my_project_uuids }
+  end
+
+  helper_method :recent_jobs_and_pipelines
+  def recent_jobs_and_pipelines
+    (Job.limit(10) |
+     PipelineInstance.limit(10)).
+      sort_by do |x|
+      (x.finished_at || x.started_at rescue nil) || x.modified_at || x.created_at
+    end.reverse
+  end
+
+  helper_method :running_pipelines
+  def running_pipelines
+    pi = PipelineInstance.order(["started_at asc", "created_at asc"]).filter([["state", "in", ["RunningOnServer", "RunningOnClient"]]])
+    jobs = {}
+    pi.each do |pl|
+      pl.components.each do |k,v|
+        if v.is_a? Hash and v[:job]
+          jobs[v[:job][:uuid]] = {}
+        end
+      end
+    end
+
+    if jobs.keys.any?
+      Job.filter([["uuid", "in", jobs.keys]]).each do |j|
+        jobs[j[:uuid]] = j
+      end
+
+      pi.each do |pl|
+        pl.components.each do |k,v|
+          if v.is_a? Hash and v[:job]
+            v[:job] = jobs[v[:job][:uuid]]
+          end
+        end
+      end
+    end
+
+    pi
+  end
+
+  helper_method :finished_pipelines
+  def finished_pipelines lim
+    PipelineInstance.limit(lim).order(["finished_at desc"]).filter([["state", "in", ["Complete", "Failed", "Paused"]], ["finished_at", "!=", nil]])
+  end
+
+  helper_method :recent_collections
+  def recent_collections lim
+    c = Collection.limit(lim).order(["modified_at desc"]).filter([["owner_uuid", "is_a", "arvados#group"]])
+    own = {}
+    Group.filter([["uuid", "in", c.map(&:owner_uuid)]]).each do |g|
+      own[g[:uuid]] = g
+    end
+    {collections: c, owners: own}
+  end
+
+  helper_method :my_project_tree
+  def my_project_tree
+    build_project_trees
+    @my_project_tree
+  end
+
+  helper_method :shared_project_tree
+  def shared_project_tree
+    build_project_trees
+    @shared_project_tree
+  end
+
+  def build_project_trees
+    return if @my_project_tree and @shared_project_tree
+    parent_of = {current_user.uuid => 'me'}
+    all_projects.each do |ob|
+      parent_of[ob.uuid] = ob.owner_uuid
+    end
+    children_of = {false => [], 'me' => [current_user]}
+    all_projects.each do |ob|
+      if ob.owner_uuid != current_user.uuid and
+          not parent_of.has_key? ob.owner_uuid
+        parent_of[ob.uuid] = false
+      end
+      children_of[parent_of[ob.uuid]] ||= []
+      children_of[parent_of[ob.uuid]] << ob
+    end
+    buildtree = lambda do |children_of, root_uuid=false|
+      tree = {}
+      children_of[root_uuid].andand.each do |ob|
+        tree[ob] = buildtree.call(children_of, ob.uuid)
+      end
+      tree
+    end
+    sorted_paths = lambda do |tree, depth=0|
+      paths = []
+      tree.keys.sort_by { |ob|
+        ob.is_a?(String) ? ob : ob.friendly_link_name
+      }.each do |ob|
+        paths << {object: ob, depth: depth}
+        paths += sorted_paths.call tree[ob], depth+1
+      end
+      paths
+    end
+    @my_project_tree =
+      sorted_paths.call buildtree.call(children_of, 'me')
+    @shared_project_tree =
+      sorted_paths.call({'Projects shared with me' =>
+                          buildtree.call(children_of, false)})
+  end
+
+  helper_method :get_object
+  def get_object uuid
+    if @get_object.nil? and @objects
+      @get_object = @objects.each_with_object({}) do |object, h|
+        h[object.uuid] = object
+      end
+    end
+    @get_object ||= {}
+    @get_object[uuid]
+  end
+
+  helper_method :project_breadcrumbs
+  def project_breadcrumbs
+    crumbs = []
+    current = @name_link || @object
+    while current
+      # Halt if a group ownership loop is detected. API should refuse
+      # to produce this state, but it could still arise from a race
+      # condition when group ownership changes between our find()
+      # queries.
+      break if crumbs.collect(&:uuid).include? current.uuid
+
+      if current.is_a?(Group) and current.group_class == 'project'
+        crumbs.prepend current
+      end
+      if current.is_a? Link
+        current = Group.find?(current.tail_uuid)
+      else
+        current = Group.find?(current.owner_uuid)
+      end
+    end
+    crumbs
+  end
+
+  helper_method :current_project_uuid
+  def current_project_uuid
+    if @object.is_a? Group and @object.group_class == 'project'
+      @object.uuid
+    elsif @name_link.andand.tail_uuid
+      @name_link.tail_uuid
+    elsif @object and resource_class_for_uuid(@object.owner_uuid) == Group
+      @object.owner_uuid
+    else
+      nil
+    end
+  end
+
+  # helper method to get links for given object or uuid
+  helper_method :links_for_object
+  def links_for_object object_or_uuid
+    raise ArgumentError, 'No input argument' unless object_or_uuid
+    preload_links_for_objects([object_or_uuid])
+    uuid = object_or_uuid.is_a?(String) ? object_or_uuid : object_or_uuid.uuid
+    @all_links_for[uuid] ||= []
+  end
+
+  # helper method to preload links for given objects and uuids
+  helper_method :preload_links_for_objects
+  def preload_links_for_objects objects_and_uuids
+    @all_links_for ||= {}
+
+    raise ArgumentError, 'Argument is not an array' unless objects_and_uuids.is_a? Array
+    return @all_links_for if objects_and_uuids.empty?
+
+    uuids = objects_and_uuids.collect { |x| x.is_a?(String) ? x : x.uuid }
+
+    # if already preloaded for all of these uuids, return
+    if not uuids.select { |x| @all_links_for[x].nil? }.any?
+      return @all_links_for
+    end
+
+    uuids.each do |x|
+      @all_links_for[x] = []
+    end
+
+    # TODO: make sure we get every page of results from API server
+    Link.filter([['head_uuid', 'in', uuids]]).each do |link|
+      @all_links_for[link.head_uuid] << link
+    end
+    @all_links_for
+  end
+
+  # helper method to get a certain number of objects of a specific type
+  # this can be used to replace any uses of: "dataclass.limit(n)"
+  helper_method :get_n_objects_of_class
+  def get_n_objects_of_class dataclass, size
+    @objects_map_for ||= {}
+
+    raise ArgumentError, 'Argument is not a data class' unless dataclass.is_a? Class and dataclass < ArvadosBase
+    raise ArgumentError, 'Argument is not a valid limit size' unless (size && size>0)
+
+    # if the objects_map_for has a value for this dataclass, and the
+    # size used to retrieve those objects is equal, return it
+    size_key = "#{dataclass.name}_size"
+    if @objects_map_for[dataclass.name] && @objects_map_for[size_key] &&
+        (@objects_map_for[size_key] == size)
+      return @objects_map_for[dataclass.name]
+    end
+
+    @objects_map_for[size_key] = size
+    @objects_map_for[dataclass.name] = dataclass.limit(size)
+  end
+
+  # helper method to get collections for the given uuid
+  helper_method :collections_for_object
+  def collections_for_object uuid
+    raise ArgumentError, 'No input argument' unless uuid
+    preload_collections_for_objects([uuid])
+    @all_collections_for[uuid] ||= []
+  end
+
+  # helper method to preload collections for the given uuids
+  helper_method :preload_collections_for_objects
+  def preload_collections_for_objects uuids
+    @all_collections_for ||= {}
+
+    raise ArgumentError, 'Argument is not an array' unless uuids.is_a? Array
+    return @all_collections_for if uuids.empty?
+
+    # if already preloaded for all of these uuids, return
+    if not uuids.select { |x| @all_collections_for[x].nil? }.any?
+      return @all_collections_for
+    end
+
+    uuids.each do |x|
+      @all_collections_for[x] = []
+    end
+
+    # TODO: make sure we get every page of results from API server
+    Collection.where(uuid: uuids).each do |collection|
+      @all_collections_for[collection.uuid] << collection
+    end
+    @all_collections_for
+  end
+
+  # helper method to get log collections for the given log
+  helper_method :log_collections_for_object
+  def log_collections_for_object log
+    raise ArgumentError, 'No input argument' unless log
+
+    preload_log_collections_for_objects([log])
+
+    uuid = log
+    fixup = /([a-f0-9]{32}\+\d+)(\+?.*)/.match(log)
+    if fixup && fixup.size>1
+      uuid = fixup[1]
+    end
+
+    @all_log_collections_for[uuid] ||= []
+  end
+
+  # helper method to preload collections for the given uuids
+  helper_method :preload_log_collections_for_objects
+  def preload_log_collections_for_objects logs
+    @all_log_collections_for ||= {}
+
+    raise ArgumentError, 'Argument is not an array' unless logs.is_a? Array
+    return @all_log_collections_for if logs.empty?
+
+    uuids = []
+    logs.each do |log|
+      fixup = /([a-f0-9]{32}\+\d+)(\+?.*)/.match(log)
+      if fixup && fixup.size>1
+        uuids << fixup[1]
+      else
+        uuids << log
+      end
+    end
+
+    # if already preloaded for all of these uuids, return
+    if not uuids.select { |x| @all_log_collections_for[x].nil? }.any?
+      return @all_log_collections_for
+    end
+
+    uuids.each do |x|
+      @all_log_collections_for[x] = []
+    end
+
+    # TODO: make sure we get every page of results from API server
+    Collection.where(uuid: uuids).each do |collection|
+      @all_log_collections_for[collection.uuid] << collection
+    end
+    @all_log_collections_for
+  end
+
+  # helper method to get object of a given dataclass and uuid
+  helper_method :object_for_dataclass
+  def object_for_dataclass dataclass, uuid
+    raise ArgumentError, 'No input argument dataclass' unless (dataclass && uuid)
+    preload_objects_for_dataclass(dataclass, [uuid])
+    @objects_for[uuid]
+  end
+
+  # helper method to preload objects for given dataclass and uuids
+  helper_method :preload_objects_for_dataclass
+  def preload_objects_for_dataclass dataclass, uuids
+    @objects_for ||= {}
+
+    raise ArgumentError, 'Argument is not a data class' unless dataclass.is_a? Class
+    raise ArgumentError, 'Argument is not an array' unless uuids.is_a? Array
+
+    return @objects_for if uuids.empty?
+
+    # if already preloaded for all of these uuids, return
+    if not uuids.select { |x| @objects_for[x].nil? }.any?
+      return @objects_for
+    end
+
+    dataclass.where(uuid: uuids).each do |obj|
+      @objects_for[obj.uuid] = obj
+    end
+    @objects_for
+  end
+
+  def wiselinks_layout
+    'body'
+  end
+end
diff --git a/apps/workbench/app/controllers/authorized_keys_controller.rb b/apps/workbench/app/controllers/authorized_keys_controller.rb
new file mode 100644 (file)
index 0000000..6eaec1e
--- /dev/null
@@ -0,0 +1,17 @@
+class AuthorizedKeysController < ApplicationController
+  def index_pane_list
+    %w(Recent Help)
+  end
+
+  def new
+    super
+    @object.authorized_user_uuid = current_user.uuid if current_user
+    @object.key_type = 'SSH'
+  end
+
+  def create
+    defaults = { authorized_user_uuid: current_user.uuid, key_type: 'SSH' }
+    @object = AuthorizedKey.new defaults.merge(params[:authorized_key] || {})
+    super
+  end
+end
diff --git a/apps/workbench/app/controllers/collections_controller.rb b/apps/workbench/app/controllers/collections_controller.rb
new file mode 100644 (file)
index 0000000..f4aa039
--- /dev/null
@@ -0,0 +1,327 @@
+require "arvados/keep"
+
+class CollectionsController < ApplicationController
+  include ActionController::Live
+
+  skip_around_filter(:require_thread_api_token,
+                     only: [:show_file, :show_file_links])
+  skip_before_filter(:find_object_by_uuid,
+                     only: [:provenance, :show_file, :show_file_links])
+  # We depend on show_file to display the user agreement:
+  skip_before_filter :check_user_agreements, only: :show_file
+  skip_before_filter :check_user_profile, only: :show_file
+
+  RELATION_LIMIT = 5
+
+  def show_pane_list
+    panes = %w(Files Upload Provenance_graph Used_by Advanced)
+    panes = panes - %w(Upload) unless (@object.editable? rescue false)
+    panes
+  end
+
+  def set_persistent
+    case params[:value]
+    when 'persistent', 'cache'
+      persist_links = Link.filter([['owner_uuid', '=', current_user.uuid],
+                                   ['link_class', '=', 'resources'],
+                                   ['name', '=', 'wants'],
+                                   ['tail_uuid', '=', current_user.uuid],
+                                   ['head_uuid', '=', @object.uuid]])
+      logger.debug persist_links.inspect
+    else
+      return unprocessable "Invalid value #{value.inspect}"
+    end
+    if params[:value] == 'persistent'
+      if not persist_links.any?
+        Link.create(link_class: 'resources',
+                    name: 'wants',
+                    tail_uuid: current_user.uuid,
+                    head_uuid: @object.uuid)
+      end
+    else
+      persist_links.each do |link|
+        link.destroy || raise
+      end
+    end
+
+    respond_to do |f|
+      f.json { render json: @object }
+    end
+  end
+
+  def index
+    # API server index doesn't return manifest_text by default, but our
+    # callers want it unless otherwise specified.
+    @select ||= Collection.columns.map(&:name)
+    base_search = Collection.select(@select)
+    if params[:search].andand.length.andand > 0
+      tags = Link.where(any: ['contains', params[:search]])
+      @objects = (base_search.where(uuid: tags.collect(&:head_uuid)) |
+                      base_search.where(any: ['contains', params[:search]])).
+        uniq { |c| c.uuid }
+    else
+      if params[:limit]
+        limit = params[:limit].to_i
+      else
+        limit = 100
+      end
+
+      if params[:offset]
+        offset = params[:offset].to_i
+      else
+        offset = 0
+      end
+
+      @objects = base_search.limit(limit).offset(offset)
+    end
+    @links = Link.where(head_uuid: @objects.collect(&:uuid))
+    @collection_info = {}
+    @objects.each do |c|
+      @collection_info[c.uuid] = {
+        tag_links: [],
+        wanted: false,
+        wanted_by_me: false,
+        provenance: [],
+        links: []
+      }
+    end
+    @links.each do |link|
+      @collection_info[link.head_uuid] ||= {}
+      info = @collection_info[link.head_uuid]
+      case link.link_class
+      when 'tag'
+        info[:tag_links] << link
+      when 'resources'
+        info[:wanted] = true
+        info[:wanted_by_me] ||= link.tail_uuid == current_user.uuid
+      when 'provenance'
+        info[:provenance] << link.name
+      end
+      info[:links] << link
+    end
+    @request_url = request.url
+
+    render_index
+  end
+
+  def show_file_links
+    Thread.current[:reader_tokens] = [params[:reader_token]]
+    return if false.equal?(find_object_by_uuid)
+    render layout: false
+  end
+
+  def show_file
+    # We pipe from arv-get to send the file to the user.  Before we start it,
+    # we ask the API server if the file actually exists.  This serves two
+    # purposes: it lets us return a useful status code for common errors, and
+    # helps us figure out which token to provide to arv-get.
+    coll = nil
+    tokens = [Thread.current[:arvados_api_token], params[:reader_token]].compact
+    usable_token = find_usable_token(tokens) do
+      coll = Collection.find(params[:uuid])
+    end
+
+    file_name = params[:file].andand.sub(/^(\.\/|\/|)/, './')
+    if usable_token.nil?
+      return  # Response already rendered.
+    elsif file_name.nil? or not coll.manifest.has_file?(file_name)
+      return render_not_found
+    end
+
+    opts = params.merge(arvados_api_token: usable_token)
+
+    # Handle Range requests. Currently we support only 'bytes=0-....'
+    if request.headers.include? 'HTTP_RANGE'
+      if m = /^bytes=0-(\d+)/.match(request.headers['HTTP_RANGE'])
+        opts[:maxbytes] = m[1]
+        size = params[:size] || '*'
+        self.response.status = 206
+        self.response.headers['Content-Range'] = "bytes 0-#{m[1]}/#{size}"
+      end
+    end
+
+    ext = File.extname(params[:file])
+    self.response.headers['Content-Type'] =
+      Rack::Mime::MIME_TYPES[ext] || 'application/octet-stream'
+    if params[:size]
+      size = params[:size].to_i
+      if opts[:maxbytes]
+        size = [size, opts[:maxbytes].to_i].min
+      end
+      self.response.headers['Content-Length'] = size.to_s
+    end
+    self.response.headers['Content-Disposition'] = params[:disposition] if params[:disposition]
+    begin
+      file_enumerator(opts).each do |bytes|
+        response.stream.write bytes
+      end
+    ensure
+      response.stream.close
+    end
+  end
+
+  def sharing_scopes
+    ["GET /arvados/v1/collections/#{@object.uuid}", "GET /arvados/v1/collections/#{@object.uuid}/", "GET /arvados/v1/keep_services/accessible"]
+  end
+
+  def search_scopes
+    begin
+      ApiClientAuthorization.filter([['scopes', '=', sharing_scopes]]).results
+    rescue ArvadosApiClient::AccessForbiddenException
+      nil
+    end
+  end
+
+  def find_object_by_uuid
+    if not Keep::Locator.parse params[:id]
+      super
+    end
+  end
+
+  def show
+    return super if !@object
+    if current_user
+      if Keep::Locator.parse params["uuid"]
+        @same_pdh = Collection.filter([["portable_data_hash", "=", @object.portable_data_hash]])
+        if @same_pdh.results.size == 1
+          redirect_to collection_path(@same_pdh[0]["uuid"])
+          return
+        end
+        owners = @same_pdh.map(&:owner_uuid).to_a.uniq
+        preload_objects_for_dataclass Group, owners
+        preload_objects_for_dataclass User, owners
+        render 'hash_matches'
+        return
+      else
+        jobs_with = lambda do |conds|
+          Job.limit(RELATION_LIMIT).where(conds)
+            .results.sort_by { |j| j.finished_at || j.created_at }
+        end
+        @output_of = jobs_with.call(output: @object.portable_data_hash)
+        @log_of = jobs_with.call(log: @object.portable_data_hash)
+        @project_links = Link.limit(RELATION_LIMIT).order("modified_at DESC")
+          .where(head_uuid: @object.uuid, link_class: 'name').results
+        project_hash = Group.where(uuid: @project_links.map(&:tail_uuid)).to_hash
+        @projects = project_hash.values
+
+        @permissions = Link.limit(RELATION_LIMIT).order("modified_at DESC")
+          .where(head_uuid: @object.uuid, link_class: 'permission',
+                 name: 'can_read').results
+        @logs = Log.limit(RELATION_LIMIT).order("created_at DESC")
+          .where(object_uuid: @object.uuid).results
+        @is_persistent = Link.limit(1)
+          .where(head_uuid: @object.uuid, tail_uuid: current_user.uuid,
+                 link_class: 'resources', name: 'wants')
+          .results.any?
+        @search_sharing = search_scopes
+
+        if params["tab_pane"] == "Provenance_graph"
+          @prov_svg = ProvenanceHelper::create_provenance_graph(@object.provenance, "provenance_svg",
+                                                                {:request => request,
+                                                                  :direction => :bottom_up,
+                                                                  :combine_jobs => :script_only}) rescue nil
+        end
+        if params["tab_pane"] == "Used_by"
+          @used_by_svg = ProvenanceHelper::create_provenance_graph(@object.used_by, "used_by_svg",
+                                                                   {:request => request,
+                                                                     :direction => :top_down,
+                                                                     :combine_jobs => :script_only,
+                                                                     :pdata_only => true}) rescue nil
+        end
+      end
+    end
+    super
+  end
+
+  def sharing_popup
+    @search_sharing = search_scopes
+    render("sharing_popup.js", content_type: "text/javascript")
+  end
+
+  helper_method :download_link
+
+  def download_link
+    collections_url + "/download/#{@object.uuid}/#{@search_sharing.first.api_token}/"
+  end
+
+  def share
+    ApiClientAuthorization.create(scopes: sharing_scopes)
+    sharing_popup
+  end
+
+  def unshare
+    search_scopes.each do |s|
+      s.destroy
+    end
+    sharing_popup
+  end
+
+  protected
+
+  def find_usable_token(token_list)
+    # Iterate over every given token to make it the current token and
+    # yield the given block.
+    # If the block succeeds, return the token it used.
+    # Otherwise, render an error response based on the most specific
+    # error we encounter, and return nil.
+    most_specific_error = [401]
+    token_list.each do |api_token|
+      begin
+        # We can't load the corresponding user, because the token may not
+        # be scoped for that.
+        using_specific_api_token(api_token, load_user: false) do
+          yield
+          return api_token
+        end
+      rescue ArvadosApiClient::ApiError => error
+        if error.api_status >= most_specific_error.first
+          most_specific_error = [error.api_status, error]
+        end
+      end
+    end
+    case most_specific_error.shift
+    when 401, 403
+      redirect_to_login
+    when 404
+      render_not_found(*most_specific_error)
+    end
+    return nil
+  end
+
+  def file_enumerator(opts)
+    FileStreamer.new opts
+  end
+
+  class FileStreamer
+    include ArvadosApiClientHelper
+    def initialize(opts={})
+      @opts = opts
+    end
+    def each
+      return unless @opts[:uuid] && @opts[:file]
+
+      env = Hash[ENV].dup
+
+      require 'uri'
+      u = URI.parse(arvados_api_client.arvados_v1_base)
+      env['ARVADOS_API_HOST'] = "#{u.host}:#{u.port}"
+      env['ARVADOS_API_TOKEN'] = @opts[:arvados_api_token]
+      env['ARVADOS_API_HOST_INSECURE'] = "true" if Rails.configuration.arvados_insecure_https
+
+      bytesleft = @opts[:maxbytes].andand.to_i || 2**16
+      io = IO.popen([env, 'arv-get', "#{@opts[:uuid]}/#{@opts[:file]}"], 'rb')
+      while bytesleft > 0 && (buf = io.read([bytesleft, 2**16].min)) != nil
+        # shrink the bytesleft count, if we were given a maximum byte
+        # count to read
+        if @opts.include? :maxbytes
+          bytesleft = bytesleft - buf.length
+        end
+        yield buf
+      end
+      io.close
+      # "If ios is opened by IO.popen, close sets $?."
+      # http://www.ruby-doc.org/core-2.1.3/IO.html#method-i-close
+      Rails.logger.warn("#{@opts[:uuid]}/#{@opts[:file]}: #{$?}") if $? != 0
+    end
+  end
+end
diff --git a/apps/workbench/app/controllers/groups_controller.rb b/apps/workbench/app/controllers/groups_controller.rb
new file mode 100644 (file)
index 0000000..080386e
--- /dev/null
@@ -0,0 +1,17 @@
+class GroupsController < ApplicationController
+  def index
+    @groups = Group.filter [['group_class', '!=', 'project']]
+    @group_uuids = @groups.collect &:uuid
+    @links_from = Link.where link_class: 'permission', tail_uuid: @group_uuids
+    @links_to = Link.where link_class: 'permission', head_uuid: @group_uuids
+    render_index
+  end
+
+  def show
+    if @object.group_class == 'project'
+      redirect_to(project_path(@object))
+    else
+      super
+    end
+  end
+end
diff --git a/apps/workbench/app/controllers/humans_controller.rb b/apps/workbench/app/controllers/humans_controller.rb
new file mode 100644 (file)
index 0000000..e141655
--- /dev/null
@@ -0,0 +1,2 @@
+class HumansController < ApplicationController
+end
diff --git a/apps/workbench/app/controllers/job_tasks_controller.rb b/apps/workbench/app/controllers/job_tasks_controller.rb
new file mode 100644 (file)
index 0000000..5e8662b
--- /dev/null
@@ -0,0 +1,2 @@
+class JobTasksController < ApplicationController
+end
diff --git a/apps/workbench/app/controllers/jobs_controller.rb b/apps/workbench/app/controllers/jobs_controller.rb
new file mode 100644 (file)
index 0000000..08fb94d
--- /dev/null
@@ -0,0 +1,85 @@
+class JobsController < ApplicationController
+  include JobsHelper
+
+  def generate_provenance(jobs)
+    return if params['tab_pane'] != "Provenance"
+
+    nodes = {}
+    collections = []
+    hashes = []
+    jobs.each do |j|
+      nodes[j[:uuid]] = j
+      hashes << j[:output]
+      ProvenanceHelper::find_collections(j[:script_parameters]) do |hash, uuid|
+        collections << uuid if uuid
+        hashes << hash if hash
+      end
+      nodes[j[:script_version]] = {:uuid => j[:script_version]}
+    end
+
+    Collection.where(uuid: collections).each do |c|
+      nodes[c[:portable_data_hash]] = c
+    end
+
+    Collection.where(portable_data_hash: hashes).each do |c|
+      nodes[c[:portable_data_hash]] = c
+    end
+
+    @svg = ProvenanceHelper::create_provenance_graph nodes, "provenance_svg", {
+      :request => request,
+      :all_script_parameters => true,
+      :script_version_nodes => true}
+  end
+
+  def index
+    @svg = ""
+    if params[:uuid]
+      @objects = Job.where(uuid: params[:uuid])
+      generate_provenance(@objects)
+      render_index
+    else
+      @limit = 20
+      super
+    end
+  end
+
+  def cancel
+    @object.cancel
+    if params[:return_to]
+      redirect_to params[:return_to]
+    else
+      redirect_to @object
+    end
+  end
+
+  def show
+    generate_provenance([@object])
+    super
+  end
+
+  def logs
+    @logs = Log.select(%w(event_type object_uuid event_at properties))
+               .order('event_at DESC')
+               .filter([["event_type",  "=", "stderr"],
+                        ["object_uuid", "in", [@object.uuid]]])
+               .limit(500)
+               .results
+               .to_a
+               .map{ |e| e.serializable_hash.merge({ 'prepend' => true }) }
+    respond_to do |format|
+      format.json { render json: @logs }
+    end
+  end
+
+  def index_pane_list
+    if params[:uuid]
+      %w(Recent Provenance)
+    else
+      %w(Recent)
+    end
+  end
+
+  def show_pane_list
+    %w(Status Log Details Provenance Advanced)
+  end
+end
diff --git a/apps/workbench/app/controllers/keep_disks_controller.rb b/apps/workbench/app/controllers/keep_disks_controller.rb
new file mode 100644 (file)
index 0000000..f57455b
--- /dev/null
@@ -0,0 +1,54 @@
+class KeepDisksController < ApplicationController
+  def create
+    defaults = { is_readable: true, is_writable: true }
+    @object = KeepDisk.new defaults.merge(params[:keep_disk] || {})
+    super
+  end
+
+  def index
+    # Retrieve cache age histogram info from logs.
+
+    # In the logs we expect to find it in an ordered list with entries
+    # of the form (mtime, disk proportion free).
+
+    # An entry of the form (1388747781, 0.52) means that if we deleted
+    # the oldest non-persisted blocks until we had 52% of the disk
+    # free, then all blocks with an mtime greater than 1388747781
+    # would be preserved.
+
+    # The chart we want to produce will tell us how much of the disk
+    # will be free if we use a cache age of x days. Therefore we will
+    # produce output specifying the age, cache and persisted. age is
+    # specified in milliseconds. cache is the size of the cache if we
+    # delete all blocks older than age. persisted is the size of the
+    # persisted blocks. It is constant regardless of age, but it lets
+    # us show a stacked graph.
+
+    # Finally each entry in cache_age_histogram is a dictionary,
+    # because that's what our charting package wants.
+
+    @cache_age_histogram = []
+    @histogram_pretty_date = nil
+    histogram_log = Log.
+      filter([[:event_type, '=', 'block-age-free-space-histogram']]).
+      order(:created_at => :desc).
+      limit(1)
+    histogram_log.each do |log_entry|
+      # We expect this block to only execute at most once since we
+      # specified limit(1)
+      @cache_age_histogram = log_entry['properties'][:histogram]
+      # Javascript wants dates in milliseconds.
+      histogram_date_ms = log_entry['event_at'].to_i * 1000
+      @histogram_pretty_date = log_entry['event_at'].strftime('%b %-d, %Y')
+
+      total_free_cache = @cache_age_histogram[-1][1]
+      persisted_storage = 1 - total_free_cache
+      @cache_age_histogram.map! { |x| {:age => histogram_date_ms - x[0]*1000,
+          :cache => total_free_cache - x[1],
+          :persisted => persisted_storage} }
+    end
+
+    # Do the regular control work needed.
+    super
+  end
+end
diff --git a/apps/workbench/app/controllers/keep_services_controller.rb b/apps/workbench/app/controllers/keep_services_controller.rb
new file mode 100644 (file)
index 0000000..eac2e22
--- /dev/null
@@ -0,0 +1,2 @@
+class KeepServicesController < ApplicationController
+end
diff --git a/apps/workbench/app/controllers/links_controller.rb b/apps/workbench/app/controllers/links_controller.rb
new file mode 100644 (file)
index 0000000..fd51fb1
--- /dev/null
@@ -0,0 +1,9 @@
+class LinksController < ApplicationController
+  def show
+    if @object.link_class == 'name' and
+        Collection == ArvadosBase::resource_class_for_uuid(@object.head_uuid)
+      return redirect_to collection_path(@object.uuid)
+    end
+    super
+  end
+end
diff --git a/apps/workbench/app/controllers/logs_controller.rb b/apps/workbench/app/controllers/logs_controller.rb
new file mode 100644 (file)
index 0000000..48ad4ed
--- /dev/null
@@ -0,0 +1,3 @@
+class LogsController < ApplicationController
+  before_filter :ensure_current_user_is_admin
+end
diff --git a/apps/workbench/app/controllers/nodes_controller.rb b/apps/workbench/app/controllers/nodes_controller.rb
new file mode 100644 (file)
index 0000000..36348d6
--- /dev/null
@@ -0,0 +1,2 @@
+class NodesController < ApplicationController
+end
diff --git a/apps/workbench/app/controllers/pipeline_instances_controller.rb b/apps/workbench/app/controllers/pipeline_instances_controller.rb
new file mode 100644 (file)
index 0000000..25f5ee4
--- /dev/null
@@ -0,0 +1,299 @@
+class PipelineInstancesController < ApplicationController
+  skip_before_filter :find_object_by_uuid, only: :compare
+  before_filter :find_objects_by_uuid, only: :compare
+  include PipelineInstancesHelper
+  include PipelineComponentsHelper
+
+  def copy
+    template = PipelineTemplate.find?(@object.pipeline_template_uuid)
+
+    source = @object
+    @object = PipelineInstance.new
+    @object.pipeline_template_uuid = source.pipeline_template_uuid
+
+    if params['components'] == 'use_latest' and template
+      @object.components = template.components.deep_dup
+      @object.components.each do |cname, component|
+        # Go through the script parameters of each component
+        # that are marked as user input and copy them over.
+        # Skip any components that are not present in the
+        # source instance (there's nothing to copy)
+        if source.components.include? cname
+          component[:script_parameters].each do |pname, val|
+            if val.is_a? Hash and val[:dataclass]
+              # this is user-inputtable, so check the value from the source pipeline
+              srcvalue = source.components[cname][:script_parameters][pname]
+              if not srcvalue.nil?
+                component[:script_parameters][pname] = srcvalue
+              end
+            end
+          end
+        end
+      end
+    else
+      @object.components = source.components.deep_dup
+    end
+
+    if params['script'] == 'use_same'
+      # Go through each component and copy the script_version from each job.
+      @object.components.each do |cname, component|
+        if source.components.include? cname and source.components[cname][:job]
+          component[:script_version] = source.components[cname][:job][:script_version]
+        end
+      end
+    end
+
+    @object.components.each do |cname, component|
+      component.delete :job
+    end
+    @object.state = 'New'
+
+    # set owner_uuid to that of source, provided it is a project and writable by current user
+    current_project = Group.find(source.owner_uuid) rescue nil
+    if (current_project && current_project.writable_by.andand.include?(current_user.uuid))
+      @object.owner_uuid = source.owner_uuid
+    end
+
+    super
+  end
+
+  def update
+    @updates ||= params[@object.class.to_s.underscore.singularize.to_sym]
+    if (components = @updates[:components])
+      components.each do |cname, component|
+        if component[:script_parameters]
+          component[:script_parameters].each do |param, value_info|
+            if value_info.is_a? Hash
+              value_info_partitioned = value_info[:value].partition('/') if value_info[:value].andand.class.eql?(String)
+              value_info_value = value_info_partitioned ? value_info_partitioned[0] : value_info[:value]
+              value_info_class = resource_class_for_uuid value_info_value
+              if value_info_class == Link
+                # Use the link target, not the link itself, as script
+                # parameter; but keep the link info around as well.
+                link = Link.find value_info[:value]
+                value_info[:value] = link.head_uuid
+                value_info[:link_uuid] = link.uuid
+                value_info[:link_name] = link.name
+              else
+                # Delete stale link_uuid and link_name data.
+                value_info[:link_uuid] = nil
+                value_info[:link_name] = nil
+              end
+              if value_info_class == Collection
+                # to ensure reproducibility, the script_parameter for a
+                # collection should be the portable_data_hash
+                # keep the collection name and uuid for human-readability
+                obj = Collection.find value_info_value
+                if value_info_partitioned
+                  value_info[:value] = obj.portable_data_hash + value_info_partitioned[1] + value_info_partitioned[2]
+                  value_info[:selection_name] = obj.name ? obj.name + value_info_partitioned[1] + value_info_partitioned[2] : obj.name
+                else
+                  value_info[:value] = obj.portable_data_hash
+                  value_info[:selection_name] = obj.name
+                end
+                value_info[:selection_uuid] = obj.uuid
+              end
+            end
+          end
+        end
+      end
+    end
+    super
+  end
+
+  def graph(pipelines)
+    return nil, nil if params['tab_pane'] != "Graph"
+
+    provenance = {}
+    pips = {}
+    n = 1
+
+    # When comparing more than one pipeline, "pips" stores bit fields that
+    # indicate which objects are part of which pipelines.
+
+    pipelines.each do |p|
+      collections = []
+      hashes = []
+      jobs = []
+
+      p[:components].each do |k, v|
+        provenance["component_#{p[:uuid]}_#{k}"] = v
+
+        collections << v[:output_uuid] if v[:output_uuid]
+        jobs << v[:job][:uuid] if v[:job]
+      end
+
+      jobs = jobs.compact.uniq
+      if jobs.any?
+        Job.where(uuid: jobs).each do |j|
+          job_uuid = j.uuid
+
+          provenance[job_uuid] = j
+          pips[job_uuid] = 0 unless pips[job_uuid] != nil
+          pips[job_uuid] |= n
+
+          hashes << j[:output] if j[:output]
+          ProvenanceHelper::find_collections(j) do |hash, uuid|
+            collections << uuid if uuid
+            hashes << hash if hash
+          end
+
+          if j[:script_version]
+            script_uuid = j[:script_version]
+            provenance[script_uuid] = {:uuid => script_uuid}
+            pips[script_uuid] = 0 unless pips[script_uuid] != nil
+            pips[script_uuid] |= n
+          end
+        end
+      end
+
+      hashes = hashes.compact.uniq
+      if hashes.any?
+        Collection.where(portable_data_hash: hashes).each do |c|
+          hash_uuid = c.portable_data_hash
+          provenance[hash_uuid] = c
+          pips[hash_uuid] = 0 unless pips[hash_uuid] != nil
+          pips[hash_uuid] |= n
+        end
+      end
+
+      collections = collections.compact.uniq
+      if collections.any?
+        Collection.where(uuid: collections).each do |c|
+          collection_uuid = c.uuid
+          provenance[collection_uuid] = c
+          pips[collection_uuid] = 0 unless pips[collection_uuid] != nil
+          pips[collection_uuid] |= n
+        end
+      end
+
+      n = n << 1
+    end
+
+    return provenance, pips
+  end
+
+  def show
+    # the #show action can also be called by #compare, which does its own work to set up @pipelines
+    unless defined? @pipelines
+      @pipelines = [@object]
+    end
+
+    provenance, pips = graph(@pipelines)
+    if provenance
+      @prov_svg = ProvenanceHelper::create_provenance_graph provenance, "provenance_svg", {
+        :request => request,
+        :all_script_parameters => true,
+        :combine_jobs => :script_and_version,
+        :pips => pips,
+        :only_components => true,
+        :no_docker => true,
+        :no_log => true}
+    end
+
+    super
+  end
+
+  def compare
+    @breadcrumb_page_name = 'compare'
+
+    @rows = []          # each is {name: S, components: [...]}
+
+    if params['tab_pane'] == "Compare" or params['tab_pane'].nil?
+      # Build a table: x=pipeline y=component
+      @objects.each_with_index do |pi, pi_index|
+        pipeline_jobs(pi).each do |component|
+          # Find a cell with the same name as this component but no
+          # entry for this pipeline
+          target_row = nil
+          @rows.each_with_index do |row, row_index|
+            if row[:name] == component[:name] and !row[:components][pi_index]
+              target_row = row
+            end
+          end
+          if !target_row
+            target_row = {name: component[:name], components: []}
+            @rows << target_row
+          end
+          target_row[:components][pi_index] = component
+        end
+      end
+
+      @rows.each do |row|
+        # Build a "normal" pseudo-component for this row by picking the
+        # most common value for each attribute. If all values are
+        # equally common, there is no "normal".
+        normal = {}              # attr => most common value
+        highscore = {}           # attr => how common "normal" is
+        score = {}               # attr => { value => how common }
+        row[:components].each do |pj|
+          next if pj.nil?
+          pj.each do |k,v|
+            vstr = for_comparison v
+            score[k] ||= {}
+            score[k][vstr] = (score[k][vstr] || 0) + 1
+            highscore[k] ||= 0
+            if score[k][vstr] == highscore[k]
+              # tie for first place = no "normal"
+              normal.delete k
+            elsif score[k][vstr] == highscore[k] + 1
+              # more pipelines have v than anything else
+              highscore[k] = score[k][vstr]
+              normal[k] = vstr
+            end
+          end
+        end
+
+        # Add a hash in component[:is_normal]: { attr => is_the_value_normal? }
+        row[:components].each do |pj|
+          next if pj.nil?
+          pj[:is_normal] = {}
+          pj.each do |k,v|
+            pj[:is_normal][k] = (normal.has_key?(k) && normal[k] == for_comparison(v))
+          end
+        end
+      end
+    end
+
+    if params['tab_pane'] == "Graph"
+      @pipelines = @objects
+    end
+
+    @object = @objects.first
+
+    show
+  end
+
+  def show_pane_list
+    panes = %w(Components Log Graph Advanced)
+    if @object and @object.state.in? ['New', 'Ready']
+      panes = %w(Inputs) + panes - %w(Log)
+    end
+    if not @object.components.values.any? { |x| x[:job] rescue false }
+      panes -= ['Graph']
+    end
+    panes
+  end
+
+  def compare_pane_list
+    %w(Compare Graph)
+  end
+
+  protected
+  def for_comparison v
+    if v.is_a? Hash or v.is_a? Array
+      v.to_json
+    else
+      v.to_s
+    end
+  end
+
+  def load_filters_and_paging_params
+    params[:limit] = 20
+    super
+  end
+
+  def find_objects_by_uuid
+    @objects = model_class.where(uuid: params[:uuids])
+  end
+end
diff --git a/apps/workbench/app/controllers/pipeline_templates_controller.rb b/apps/workbench/app/controllers/pipeline_templates_controller.rb
new file mode 100644 (file)
index 0000000..2b2e9a4
--- /dev/null
@@ -0,0 +1,12 @@
+class PipelineTemplatesController < ApplicationController
+  include PipelineComponentsHelper
+
+  def show
+    @objects = PipelineInstance.where(pipeline_template_uuid: @object.uuid)
+    super
+  end
+
+  def show_pane_list
+    %w(Components Pipelines Advanced)
+  end
+end
diff --git a/apps/workbench/app/controllers/projects_controller.rb b/apps/workbench/app/controllers/projects_controller.rb
new file mode 100644 (file)
index 0000000..600af8d
--- /dev/null
@@ -0,0 +1,340 @@
+class ProjectsController < ApplicationController
+  before_filter :set_share_links, if: -> { defined? @object }
+
+  def model_class
+    Group
+  end
+
+  def find_object_by_uuid
+    if current_user and params[:uuid] == current_user.uuid
+      @object = current_user.dup
+      @object.uuid = current_user.uuid
+      class << @object
+        def name
+          'Home'
+        end
+        def description
+          ''
+        end
+        def attribute_editable? attr, *args
+          case attr
+          when 'description', 'name'
+            false
+          else
+            super
+          end
+        end
+      end
+    else
+      super
+    end
+  end
+
+  def set_share_links
+    @user_is_manager = false
+    @share_links = []
+    if @object.uuid != current_user.uuid
+      begin
+        @share_links = Link.permissions_for(@object)
+        @user_is_manager = true
+      rescue ArvadosApiClient::AccessForbiddenException,
+        ArvadosApiClient::NotFoundException
+      end
+    end
+  end
+
+  def index_pane_list
+    %w(Projects)
+  end
+
+  # Returning an array of hashes instead of an array of strings will allow
+  # us to tell the interface to get counts for each pane (using :filters).
+  # It also seems to me that something like these could be used to configure the contents of the panes.
+  def show_pane_list
+    pane_list = [
+      {
+        :name => 'Data_collections',
+        :filters => [%w(uuid is_a arvados#collection)]
+      },
+      {
+        :name => 'Jobs_and_pipelines',
+        :filters => [%w(uuid is_a) + [%w(arvados#job arvados#pipelineInstance)]]
+      },
+      {
+        :name => 'Pipeline_templates',
+        :filters => [%w(uuid is_a arvados#pipelineTemplate)]
+      },
+      {
+        :name => 'Subprojects',
+        :filters => [%w(uuid is_a arvados#group)]
+      },
+      { :name => 'Other_objects',
+        :filters => [%w(uuid is_a) + [%w(arvados#human arvados#specimen arvados#trait)]]
+      }
+    ]
+    pane_list << { :name => 'Sharing',
+                   :count => @share_links.count } if @user_is_manager
+    pane_list << { :name => 'Advanced' }
+  end
+
+  # Called via AJAX and returns Javascript that populates tab counts into tab titles.
+  # References #show_pane_list action which should return an array of hashes each with :name
+  # and then optionally a :filters to run or a straight up :count
+  #
+  # This action could easily be moved to the ApplicationController to genericize the tab_counts behaviour,
+  # but one or more new routes would have to be created, the js.erb would also have to be moved
+  def tab_counts
+    @tab_counts = {}
+    show_pane_list.each do |pane|
+      if pane.is_a?(Hash)
+        if pane[:count]
+          @tab_counts[pane[:name]] = pane[:count]
+        elsif pane[:filters]
+          @tab_counts[pane[:name]] = @object.contents(filters: pane[:filters]).items_available
+        end
+      end
+    end
+  end
+
+  def remove_item
+    params[:item_uuids] = [params[:item_uuid]]
+    remove_items
+    render template: 'projects/remove_items'
+  end
+
+  def remove_items
+    @removed_uuids = []
+    links = []
+    params[:item_uuids].collect { |uuid| ArvadosBase.find uuid }.each do |item|
+      if (item.class == Link and
+          item.link_class == 'name' and
+          item.tail_uuid == @object.uuid)
+        # Given uuid is a name link, linking an object to this
+        # project. First follow the link to find the item we're removing,
+        # then delete the link.
+        links << item
+        item = ArvadosBase.find item.head_uuid
+      else
+        # Given uuid is an object. Delete all names.
+        links += Link.where(tail_uuid: @object.uuid,
+                            head_uuid: item.uuid,
+                            link_class: 'name')
+      end
+      links.each do |link|
+        @removed_uuids << link.uuid
+        link.destroy
+      end
+      if item.owner_uuid == @object.uuid
+        # Object is owned by this project. Remove it from the project by
+        # changing owner to the current user.
+        begin
+          item.update_attributes owner_uuid: current_user.uuid
+          @removed_uuids << item.uuid
+        rescue ArvadosApiClient::ApiErrorResponseException => e
+          if e.message.include? 'collection_owner_uuid_name_unique'
+            rename_to = item.name + ' removed from ' +
+                        (@object.name ? @object.name : @object.uuid) +
+                        ' at ' + Time.now.to_s
+            updates = {}
+            updates[:name] = rename_to
+            updates[:owner_uuid] = current_user.uuid
+            item.update_attributes updates
+            @removed_uuids << item.uuid
+          else
+            raise
+          end
+        end
+      end
+    end
+  end
+
+  def destroy
+    while (objects = Link.filter([['owner_uuid','=',@object.uuid],
+                                  ['tail_uuid','=',@object.uuid]])).any?
+      objects.each do |object|
+        object.destroy
+      end
+    end
+    while (objects = @object.contents(include_linked: false)).any?
+      objects.each do |object|
+        object.update_attributes! owner_uuid: current_user.uuid
+      end
+    end
+    if ArvadosBase::resource_class_for_uuid(@object.owner_uuid) == Group
+      params[:return_to] ||= group_path(@object.owner_uuid)
+    else
+      params[:return_to] ||= projects_path
+    end
+    super
+  end
+
+  def find_objects_for_index
+    @objects = all_projects
+    super
+  end
+
+  def load_contents_objects kinds=[]
+    kind_filters = @filters.select do |attr,op,val|
+      op == 'is_a' and val.is_a? Array and val.count > 1
+    end
+    if /^created_at\b/ =~ @order[0] and kind_filters.count == 1
+      # If filtering on multiple types and sorting by date: Get the
+      # first page of each type, sort the entire set, truncate to one
+      # page, and use the last item on this page as a filter for
+      # retrieving the next page. Ideally the API would do this for
+      # us, but it doesn't (yet).
+
+      # To avoid losing items that have the same created_at as the
+      # last item on this page, we retrieve an overlapping page with a
+      # "created_at <= last_created_at" filter, then remove duplicates
+      # with a "uuid not in [...]" filter (see below).
+      nextpage_operator = /\bdesc$/i =~ @order[0] ? '<=' : '>='
+
+      @objects = []
+      @name_link_for = {}
+      kind_filters.each do |attr,op,val|
+        (val.is_a?(Array) ? val : [val]).each do |type|
+          objects = @object.contents(order: @order,
+                                     limit: @limit,
+                                     include_linked: true,
+                                     filters: (@filters - kind_filters + [['uuid', 'is_a', type]]),
+                                    )
+          objects.each do |object|
+            @name_link_for[object.andand.uuid] = objects.links_for(object, 'name').first
+          end
+          @objects += objects
+        end
+      end
+      @objects = @objects.to_a.sort_by(&:created_at)
+      @objects.reverse! if nextpage_operator == '<='
+      @objects = @objects[0..@limit-1]
+      @next_page_filters = @filters.reject do |attr,op,val|
+        (attr == 'created_at' and op == nextpage_operator) or
+          (attr == 'uuid' and op == 'not in')
+      end
+
+      if @objects.any?
+        last_created_at = @objects.last.created_at
+
+        last_uuids = []
+        @objects.each do |obj|
+          last_uuids << obj.uuid if obj.created_at.eql?(last_created_at)
+        end
+
+        @next_page_filters += [['created_at',
+                                nextpage_operator,
+                                last_created_at]]
+        @next_page_filters += [['uuid', 'not in', last_uuids]]
+        @next_page_href = url_for(partial: :contents_rows,
+                                  limit: @limit,
+                                  filters: @next_page_filters.to_json)
+      else
+        @next_page_href = nil
+      end
+    else
+      @objects = @object.contents(order: @order,
+                                  limit: @limit,
+                                  include_linked: true,
+                                  filters: @filters,
+                                  offset: @offset)
+      @next_page_href = next_page_href(partial: :contents_rows,
+                                       filters: @filters.to_json,
+                                       order: @order.to_json)
+    end
+
+    preload_links_for_objects(@objects.to_a)
+  end
+
+  def show
+    if !@object
+      return render_not_found("object not found")
+    end
+
+    if params[:partial]
+      load_contents_objects
+      respond_to do |f|
+        f.json {
+          render json: {
+            content: render_to_string(partial: 'show_contents_rows.html',
+                                      formats: [:html]),
+            next_page_href: @next_page_href
+          }
+        }
+      end
+    else
+      @objects = []
+      super
+    end
+  end
+
+  def create
+    @new_resource_attrs = (params['project'] || {}).merge(group_class: 'project')
+    @new_resource_attrs[:name] ||= 'New project'
+    super
+  end
+
+  def update
+    @updates = params['project']
+    super
+  end
+
+  helper_method :get_objects_and_names
+  def get_objects_and_names(objects=nil)
+    objects = @objects if objects.nil?
+    objects_and_names = []
+    objects.each do |object|
+      if objects.respond_to? :links_for and
+          !(name_links = objects.links_for(object, 'name')).empty?
+        name_links.each do |name_link|
+          objects_and_names << [object, name_link]
+        end
+      elsif @name_link_for.andand[object.uuid]
+        objects_and_names << [object, @name_link_for[object.uuid]]
+      elsif object.respond_to? :name
+        objects_and_names << [object, object]
+      else
+        objects_and_names << [object,
+                               Link.new(owner_uuid: @object.uuid,
+                                        tail_uuid: @object.uuid,
+                                        head_uuid: object.uuid,
+                                        link_class: "name",
+                                        name: "")]
+
+      end
+    end
+    objects_and_names
+  end
+
+  def share_with
+    if not params[:uuids].andand.any?
+      @errors = ["No user/group UUIDs specified to share with."]
+      return render_error(status: 422)
+    end
+    results = {"success" => [], "errors" => []}
+    params[:uuids].each do |shared_uuid|
+      begin
+        Link.create(tail_uuid: shared_uuid, link_class: "permission",
+                    name: "can_read", head_uuid: @object.uuid)
+      rescue ArvadosApiClient::ApiError => error
+        error_list = error.api_response.andand[:errors]
+        if error_list.andand.any?
+          results["errors"] += error_list.map { |e| "#{shared_uuid}: #{e}" }
+        else
+          error_code = error.api_status || "Bad status"
+          results["errors"] << "#{shared_uuid}: #{error_code} response"
+        end
+      else
+        results["success"] << shared_uuid
+      end
+    end
+    if results["errors"].empty?
+      results.delete("errors")
+      status = 200
+    else
+      status = 422
+    end
+    respond_to do |f|
+      f.json { render(json: results, status: status) }
+    end
+  end
+end
diff --git a/apps/workbench/app/controllers/repositories_controller.rb b/apps/workbench/app/controllers/repositories_controller.rb
new file mode 100644 (file)
index 0000000..b6b3295
--- /dev/null
@@ -0,0 +1,5 @@
+class RepositoriesController < ApplicationController
+  def index_pane_list
+    %w(recent help)
+  end
+end
diff --git a/apps/workbench/app/controllers/search_controller.rb b/apps/workbench/app/controllers/search_controller.rb
new file mode 100644 (file)
index 0000000..9e2ff1b
--- /dev/null
@@ -0,0 +1,28 @@
+class SearchController < ApplicationController
+  def find_objects_for_index
+    search_what = Group
+    if params[:project_uuid]
+      # Special case for "search all things in project":
+      @filters = @filters.select do |attr, operator, operand|
+        not (attr == 'owner_uuid' and operator == '=')
+      end
+      # Special case for project_uuid is a user uuid:
+      if ArvadosBase::resource_class_for_uuid(params[:project_uuid]) == User
+        search_what = User.find params[:project_uuid]
+      else
+        search_what = Group.find params[:project_uuid]
+      end
+    end
+    @objects = search_what.contents(limit: @limit,
+                                    offset: @offset,
+                                    filters: @filters,
+                                    include_linked: true)
+    super
+  end
+
+  def next_page_href with_params={}
+    super with_params.merge(last_object_class: @objects.last.class.to_s,
+                            project_uuid: params[:project_uuid],
+                            filters: @filters.to_json)
+  end
+end
diff --git a/apps/workbench/app/controllers/sessions_controller.rb b/apps/workbench/app/controllers/sessions_controller.rb
new file mode 100644 (file)
index 0000000..97c8d5a
--- /dev/null
@@ -0,0 +1,15 @@
+class SessionsController < ApplicationController
+  skip_around_filter :require_thread_api_token, :only => [:destroy, :index]
+  skip_around_filter :set_thread_api_token, :only => [:destroy, :index]
+  skip_before_filter :find_object_by_uuid, :only => [:destroy, :index]
+
+  def destroy
+    session.clear
+    redirect_to arvados_api_client.arvados_logout_url(return_to: root_url)
+  end
+
+  def index
+    redirect_to root_url if session[:arvados_api_token]
+    render_index
+  end
+end
diff --git a/apps/workbench/app/controllers/specimens_controller.rb b/apps/workbench/app/controllers/specimens_controller.rb
new file mode 100644 (file)
index 0000000..47dd8a2
--- /dev/null
@@ -0,0 +1,2 @@
+class SpecimensController < ApplicationController
+end
diff --git a/apps/workbench/app/controllers/traits_controller.rb b/apps/workbench/app/controllers/traits_controller.rb
new file mode 100644 (file)
index 0000000..38f33a3
--- /dev/null
@@ -0,0 +1,2 @@
+class TraitsController < ApplicationController
+end
diff --git a/apps/workbench/app/controllers/user_agreements_controller.rb b/apps/workbench/app/controllers/user_agreements_controller.rb
new file mode 100644 (file)
index 0000000..bec11f2
--- /dev/null
@@ -0,0 +1,25 @@
+class UserAgreementsController < ApplicationController
+  skip_before_filter :check_user_agreements
+  skip_before_filter :find_object_by_uuid
+  skip_before_filter :check_user_profile
+
+  def index
+    if unsigned_user_agreements.empty?
+      redirect_to(params[:return_to] || :back)
+    end
+  end
+
+  def model_class
+    Collection
+  end
+
+  def sign
+    params[:checked].each do |checked|
+      if (r = CollectionsHelper.match_uuid_with_optional_filepath(checked))
+        UserAgreement.sign uuid: r[1]
+      end
+    end
+    current_user.activate
+    redirect_to(params[:return_to] || :back)
+  end
+end
diff --git a/apps/workbench/app/controllers/users_controller.rb b/apps/workbench/app/controllers/users_controller.rb
new file mode 100644 (file)
index 0000000..43a8895
--- /dev/null
@@ -0,0 +1,353 @@
+class UsersController < ApplicationController
+  skip_around_filter :require_thread_api_token, only: :welcome
+  skip_before_filter :check_user_agreements, only: [:welcome, :inactive]
+  skip_before_filter :check_user_profile, only: [:welcome, :inactive, :profile]
+  skip_before_filter :find_object_by_uuid, only: [:welcome, :activity, :storage]
+  before_filter :ensure_current_user_is_admin, only: [:sudo, :unsetup, :setup]
+
+  def show
+    if params[:uuid] == current_user.uuid
+      respond_to do |f|
+        f.html do
+          redirect_to(params[:return_to] || project_path(params[:uuid]))
+        end
+      end
+    else
+      super
+    end
+  end
+
+  def welcome
+    if current_user
+      redirect_to (params[:return_to] || '/')
+    end
+  end
+
+  def inactive
+    if current_user.andand.is_invited
+      redirect_to (params[:return_to] || '/')
+    end
+  end
+
+  def profile
+    params[:offer_return_to] ||= params[:return_to]
+  end
+
+  def activity
+    @breadcrumb_page_name = nil
+    @users = User.limit(params[:limit])
+    @user_activity = {}
+    @activity = {
+      logins: {},
+      jobs: {},
+      pipeline_instances: {}
+    }
+    @total_activity = {}
+    @spans = [['This week', Time.now.beginning_of_week, Time.now],
+              ['Last week',
+               Time.now.beginning_of_week.advance(weeks:-1),
+               Time.now.beginning_of_week],
+              ['This month', Time.now.beginning_of_month, Time.now],
+              ['Last month',
+               1.month.ago.beginning_of_month,
+               Time.now.beginning_of_month]]
+    @spans.each do |span, threshold_start, threshold_end|
+      @activity[:logins][span] = Log.
+        filter([[:event_type, '=', 'login'],
+                [:object_kind, '=', 'arvados#user'],
+                [:created_at, '>=', threshold_start],
+                [:created_at, '<', threshold_end]])
+      @activity[:jobs][span] = Job.
+        filter([[:created_at, '>=', threshold_start],
+                [:created_at, '<', threshold_end]])
+      @activity[:pipeline_instances][span] = PipelineInstance.
+        filter([[:created_at, '>=', threshold_start],
+                [:created_at, '<', threshold_end]])
+      @activity.each do |type, act|
+        records = act[span]
+        @users.each do |u|
+          @user_activity[u.uuid] ||= {}
+          @user_activity[u.uuid][span + ' ' + type.to_s] ||= 0
+        end
+        records.each do |record|
+          @user_activity[record.modified_by_user_uuid] ||= {}
+          @user_activity[record.modified_by_user_uuid][span + ' ' + type.to_s] ||= 0
+          @user_activity[record.modified_by_user_uuid][span + ' ' + type.to_s] += 1
+          @total_activity[span + ' ' + type.to_s] ||= 0
+          @total_activity[span + ' ' + type.to_s] += 1
+        end
+      end
+    end
+    @users = @users.sort_by do |a|
+      [-@user_activity[a.uuid].values.inject(:+), a.full_name]
+    end
+    # Prepend a "Total" pseudo-user to the sorted list
+    @user_activity[nil] = @total_activity
+    @users = [OpenStruct.new(uuid: nil)] + @users
+  end
+
+  def storage
+    @breadcrumb_page_name = nil
+    @users = User.limit(params[:limit])
+    @user_storage = {}
+    total_storage = {}
+    @log_date = {}
+    @users.each do |u|
+      @user_storage[u.uuid] ||= {}
+      storage_log = Log.
+        filter([[:object_uuid, '=', u.uuid],
+                [:event_type, '=', 'user-storage-report']]).
+        order(:created_at => :desc).
+        limit(1)
+      storage_log.each do |log_entry|
+        # We expect this block to only execute once since we specified limit(1)
+        @user_storage[u.uuid] = log_entry['properties']
+        @log_date[u.uuid] = log_entry['event_at']
+      end
+      total_storage.merge!(@user_storage[u.uuid]) { |k,v1,v2| v1 + v2 }
+    end
+    @users = @users.sort_by { |u|
+      [-@user_storage[u.uuid].values.push(0).inject(:+), u.full_name]}
+    # Prepend a "Total" pseudo-user to the sorted list
+    @users = [OpenStruct.new(uuid: nil)] + @users
+    @user_storage[nil] = total_storage
+  end
+
+  def show_pane_list
+    if current_user.andand.is_admin
+      super | %w(Admin)
+    else
+      super
+    end
+  end
+
+  def index_pane_list
+    if current_user.andand.is_admin
+      super | %w(Activity)
+    else
+      super
+    end
+  end
+
+  def sudo
+    resp = arvados_api_client.api(ApiClientAuthorization, '', {
+                                    api_client_authorization: {
+                                      owner_uuid: @object.uuid
+                                    }
+                                  })
+    redirect_to root_url(api_token: resp[:api_token])
+  end
+
+  def home
+    @showallalerts = false
+    @my_ssh_keys = AuthorizedKey.where(authorized_user_uuid: current_user.uuid)
+    @my_tag_links = {}
+
+    @my_jobs = Job.
+      limit(10).
+      order('created_at desc').
+      where(created_by: current_user.uuid)
+
+    @my_collections = Collection.
+      limit(10).
+      order('created_at desc').
+      where(created_by: current_user.uuid)
+    collection_uuids = @my_collections.collect &:uuid
+
+    @persist_state = {}
+    collection_uuids.each do |uuid|
+      @persist_state[uuid] = 'cache'
+    end
+
+    Link.filter([['head_uuid', 'in', collection_uuids],
+                             ['link_class', 'in', ['tag', 'resources']]]).
+      each do |link|
+      case link.link_class
+      when 'tag'
+        (@my_tag_links[link.head_uuid] ||= []) << link
+      when 'resources'
+        if link.name == 'wants'
+          @persist_state[link.head_uuid] = 'persistent'
+        end
+      end
+    end
+
+    @my_pipelines = PipelineInstance.
+      limit(10).
+      order('created_at desc').
+      where(created_by: current_user.uuid)
+
+    respond_to do |f|
+      f.js { render template: 'users/home.js' }
+      f.html { render template: 'users/home' }
+    end
+  end
+
+  def unsetup
+    if current_user.andand.is_admin
+      @object.unsetup
+    end
+    show
+  end
+
+  def setup
+    respond_to do |format|
+      if current_user.andand.is_admin
+        setup_params = {}
+        setup_params[:send_notification_email] = "#{Rails.configuration.send_user_setup_notification_email}"
+        if params['user_uuid'] && params['user_uuid'].size>0
+          setup_params[:uuid] = params['user_uuid']
+        end
+        if params['email'] && params['email'].size>0
+          user = {email: params['email']}
+          setup_params[:user] = user
+        end
+        if params['openid_prefix'] && params['openid_prefix'].size>0
+          setup_params[:openid_prefix] = params['openid_prefix']
+        end
+        if params['repo_name'] && params['repo_name'].size>0
+          setup_params[:repo_name] = params['repo_name']
+        end
+        if params['vm_uuid'] && params['vm_uuid'].size>0
+          setup_params[:vm_uuid] = params['vm_uuid']
+        end
+
+        if User.setup setup_params
+          format.js
+        else
+          self.render_error status: 422
+        end
+      else
+        self.render_error status: 422
+      end
+    end
+  end
+
+  def setup_popup
+    @vms = VirtualMachine.all.results
+
+    @current_selections = find_current_links @object
+
+    respond_to do |format|
+      format.html
+      format.js
+    end
+  end
+
+  def manage_account
+    # repositories current user can read / write
+    repo_links = Link.
+      filter([['head_uuid', 'is_a', 'arvados#repository'],
+              ['tail_uuid', '=', current_user.uuid],
+              ['link_class', '=', 'permission'],
+             ])
+    @my_repositories = Repository.where uuid: repo_links.collect(&:head_uuid)
+    @repo_writable = {}
+    repo_links.each do |link|
+      if link.name.in? ['can_write', 'can_manage']
+        @repo_writable[link.head_uuid] = true
+      end
+    end
+
+    # virtual machines the current user can login into
+    @my_vm_logins = {}
+    Link.where(tail_uuid: current_user.uuid,
+               link_class: 'permission',
+               name: 'can_login').
+          each do |perm_link|
+            if perm_link.properties.andand[:username]
+              @my_vm_logins[perm_link.head_uuid] ||= []
+              @my_vm_logins[perm_link.head_uuid] << perm_link.properties[:username]
+            end
+          end
+    @my_virtual_machines = VirtualMachine.where(uuid: @my_vm_logins.keys)
+
+    # current user's ssh keys
+    @my_ssh_keys = AuthorizedKey.where(key_type: 'SSH', owner_uuid: current_user.uuid)
+
+    respond_to do |f|
+      f.html { render template: 'users/manage_account' }
+    end
+  end
+
+  def add_ssh_key_popup
+    respond_to do |format|
+      format.html
+      format.js
+    end
+  end
+
+  def add_ssh_key
+    respond_to do |format|
+      key_params = {'key_type' => 'SSH'}
+      key_params['authorized_user_uuid'] = current_user.uuid
+
+      if params['name'] && params['name'].size>0
+        key_params['name'] = params['name'].strip
+      end
+      if params['public_key'] && params['public_key'].size>0
+        key_params['public_key'] = params['public_key'].strip
+      end
+
+      if !key_params['name'] && params['public_key'].andand.size>0
+        split_key = key_params['public_key'].split
+        key_params['name'] = split_key[-1] if (split_key.size == 3)
+      end
+
+      new_key = AuthorizedKey.create! key_params
+      if new_key
+        format.js
+      else
+        self.render_error status: 422
+      end
+    end
+  end
+
+  protected
+
+  def find_current_links user
+    current_selections = {}
+
+    if !user
+      return current_selections
+    end
+
+    # oid login perm
+    oid_login_perms = Link.where(tail_uuid: user.email,
+                                   head_kind: 'arvados#user',
+                                   link_class: 'permission',
+                                   name: 'can_login')
+
+    if oid_login_perms.any?
+      prefix_properties = oid_login_perms.first.properties
+      current_selections[:identity_url_prefix] = prefix_properties[:identity_url_prefix]
+    end
+
+    # repo perm
+    repo_perms = Link.where(tail_uuid: user.uuid,
+                            head_kind: 'arvados#repository',
+                            link_class: 'permission',
+                            name: 'can_write')
+    if repo_perms.any?
+      repo_uuid = repo_perms.first.head_uuid
+      repos = Repository.where(head_uuid: repo_uuid)
+      if repos.any?
+        repo_name = repos.first.name
+        current_selections[:repo_name] = repo_name
+      end
+    end
+
+    # vm login perm
+    vm_login_perms = Link.where(tail_uuid: user.uuid,
+                              head_kind: 'arvados#virtualMachine',
+                              link_class: 'permission',
+                              name: 'can_login')
+    if vm_login_perms.any?
+      vm_uuid = vm_login_perms.first.head_uuid
+      current_selections[:vm_uuid] = vm_uuid
+    end
+
+    return current_selections
+  end
+
+end
diff --git a/apps/workbench/app/controllers/virtual_machines_controller.rb b/apps/workbench/app/controllers/virtual_machines_controller.rb
new file mode 100644 (file)
index 0000000..a62ba81
--- /dev/null
@@ -0,0 +1,22 @@
+class VirtualMachinesController < ApplicationController
+  # List virtual machines, annotating each with the login usernames the
+  # current user holds on it (taken from 'can_login' permission links).
+  def index
+    @objects ||= model_class.all
+    @vm_logins = {}
+    if @objects.andand.first
+      Link.where(tail_uuid: current_user.uuid,
+                 head_uuid: @objects.collect(&:uuid),
+                 link_class: 'permission',
+                 name: 'can_login').
+        each do |perm_link|
+        if perm_link.properties.andand[:username]
+          @vm_logins[perm_link.head_uuid] ||= []
+          @vm_logins[perm_link.head_uuid] << perm_link.properties[:username]
+        end
+      end
+      @objects.each do |vm|
+        # Machines with no matching permission link get an empty list.
+        vm.current_user_logins = @vm_logins[vm.uuid].andand.compact || []
+      end
+    end
+    super
+  end
+end
diff --git a/apps/workbench/app/controllers/websocket_controller.rb b/apps/workbench/app/controllers/websocket_controller.rb
new file mode 100644 (file)
index 0000000..a49c15f
--- /dev/null
@@ -0,0 +1,10 @@
+class WebsocketController < ApplicationController
+  # There is no object list to fetch for this page.
+  skip_before_filter :find_objects_for_index
+
+  def index
+  end
+
+  # NOTE(review): returns a String rather than a model class — presumably
+  # only used for display/naming purposes; confirm callers tolerate this.
+  def model_class
+    "Websocket"
+  end
+end
diff --git a/apps/workbench/app/helpers/application_helper.rb b/apps/workbench/app/helpers/application_helper.rb
new file mode 100644 (file)
index 0000000..f1502af
--- /dev/null
@@ -0,0 +1,422 @@
+module ApplicationHelper
+  # Expose the controller's current_user to views.
+  def current_user
+    controller.current_user
+  end
+
+  # Regex match for an Arvados uuid (5-char zone, 5-char type, 15-char
+  # identifier); returns the MatchData or nil.
+  def self.match_uuid(uuid)
+    /^([0-9a-z]{5})-([0-9a-z]{5})-([0-9a-z]{15})$/.match(uuid.to_s)
+  end
+
+  # API host, derived by stripping the scheme and the /arvados/v1 path from
+  # the configured arvados_v1_base URL.
+  def current_api_host
+    Rails.configuration.arvados_v1_base.gsub /https?:\/\/|\/arvados\/v1/,''
+  end
+
+  # Render textile markup to HTML; nil/false markup renders nothing.
+  def render_markup(markup)
+    raw RedCloth.new(markup.to_s).to_html(:refs_arvados, :textile) if markup
+  end
+
+  # Format an integer byte count with binary (1024-based) units; non-Fixnum
+  # input is returned html-escaped unchanged.
+  def human_readable_bytes_html(n)
+    return h(n) unless n.is_a? Fixnum
+    return "0 bytes" if (n == 0)
+
+    orders = {
+      1 => "bytes",
+      1024 => "KiB",
+      (1024*1024) => "MiB",
+      (1024*1024*1024) => "GiB",
+      (1024*1024*1024*1024) => "TiB"
+    }
+
+    orders.each do |k, v|
+      sig = (n.to_f/k)
+      if sig >=1 and sig < 1024
+        if v == 'bytes'
+          return "%i #{v}" % sig
+        else
+          return "%0.1f #{v}" % sig
+        end
+      end
+    end
+
+    # Fell through every bracket (e.g. >= 1024 TiB): show the raw number.
+    return h(n)
+    #raw = n.to_s
+    #cooked = ''
+    #while raw.length > 3
+    #  cooked = ',' + raw[-3..-1] + cooked
+    #  raw = raw[0..-4]
+    #end
+    #cooked = raw + cooked
+  end
+
+  # Model class corresponding to a uuid (or object), or nil if unrecognized.
+  def resource_class_for_uuid(attrvalue, opts={})
+    ArvadosBase::resource_class_for_uuid(attrvalue, opts)
+  end
+
+  ##
+  # Returns HTML that links to the Arvados object specified in +attrvalue+
+  # Provides various output control and styling options.
+  #
+  # +attrvalue+ an Arvados model object or uuid
+  #
+  # +opts+ a set of flags to control output:
+  #
+  # [:link_text] the link text to use (may include HTML), overrides everything else
+  #
+  # [:friendly_name] whether to use the "friendly" name in the link text (by
+  # calling #friendly_link_name on the object), otherwise use the uuid
+  #
+  # [:with_class_name] prefix the link text with the class name of the model
+  #
+  # [:no_tags] disable tags in the link text (default is to show tags).
+  # Currently tags are only shown for Collections.
+  #
+  # [:thumbnail] if the object is a collection, show an image thumbnail if the
+  # collection consists of a single image file.
+  #
+  # [:no_link] don't create a link, just return the link text
+  #
+  # +style_opts+ additional HTML properties for the anchor tag, passed to link_to
+  #
+  def link_to_if_arvados_object(attrvalue, opts={}, style_opts={})
+    if (resource_class = resource_class_for_uuid(attrvalue, opts))
+      if attrvalue.is_a? ArvadosBase
+        object = attrvalue
+        link_uuid = attrvalue.uuid
+      else
+        object = nil
+        link_uuid = attrvalue
+      end
+      link_name = opts[:link_text]
+      tags = ""
+      if !link_name
+        link_name = object.andand.default_name || resource_class.default_name
+
+        if opts[:friendly_name]
+          if attrvalue.respond_to? :friendly_link_name
+            link_name = attrvalue.friendly_link_name opts[:lookup]
+          else
+            begin
+              if resource_class.name == 'Collection'
+                link_name = collections_for_object(link_uuid).andand.first.andand.friendly_link_name
+              else
+                link_name = object_for_dataclass(resource_class, link_uuid).andand.friendly_link_name
+              end
+            rescue ArvadosApiClient::NotFoundException
+              # If that lookup failed, the link will too. So don't make one.
+              return attrvalue
+            end
+          end
+        end
+        if link_name.nil? or link_name.empty?
+          link_name = attrvalue
+        end
+        if opts[:with_class_name]
+          link_name = "#{resource_class.to_s}: #{link_name}"
+        end
+        if !opts[:no_tags] and resource_class == Collection
+          links_for_object(link_uuid).each do |tag|
+            if tag.link_class.in? ["tag", "identifier"]
+              tags += ' <span class="label label-info">'
+              tags += link_to tag.name, controller: "links", filters: [["link_class", "=", "tag"], ["name", "=", tag.name]].to_json
+              tags += '</span>'
+            end
+          end
+        end
+        if opts[:thumbnail] and resource_class == Collection
+          # add an image thumbnail if the collection consists of a single image file.
+          collections_for_object(link_uuid).each do |c|
+            if c.files.length == 1 and CollectionsHelper::is_image c.files.first[1]
+              link_name += " "
+              link_name += image_tag "#{url_for c}/#{CollectionsHelper::file_path c.files.first}", style: "height: 4em; width: auto"
+            end
+          end
+        end
+      end
+      style_opts[:class] = (style_opts[:class] || '') + ' nowrap'
+      if opts[:no_link]
+        raw(link_name)
+      else
+        (link_to raw(link_name), { controller: resource_class.to_s.tableize, action: 'show', id: ((opts[:name_link].andand.uuid) || link_uuid) }, style_opts) + raw(tags)
+      end
+    else
+      # just return attrvalue if it is not recognizable as an Arvados object or uuid.
+      if attrvalue.nil? or (attrvalue.is_a? String and attrvalue.empty?)
+        "(none)"
+      else
+        attrvalue
+      end
+    end
+  end
+
+  # Render an attribute as an x-editable in-place editor (with an edit
+  # button), or as plain text if the attribute is not editable. +htmloptions+
+  # are merged into the span tag; :btnplacement => :left puts the edit
+  # button before the value; :tiptitle overrides the button tooltip.
+  def render_editable_attribute(object, attr, attrvalue=nil, htmloptions={})
+    attrvalue = object.send(attr) if attrvalue.nil?
+    if not object.attribute_editable?(attr)
+      if attrvalue && attrvalue.length > 0
+        return render_attribute_as_textile( object, attr, attrvalue, false )
+      else
+        return (attr == 'name' and object.andand.default_name) ||
+                '(none)'
+      end
+    end
+
+    input_type = 'text'
+    case object.class.attribute_info[attr.to_sym].andand[:type]
+    when 'text'
+      input_type = 'textarea'
+    when 'datetime'
+      input_type = 'date'
+    else
+      input_type = 'text'
+    end
+
+    attrvalue = attrvalue.to_json if attrvalue.is_a? Hash or attrvalue.is_a? Array
+    rendervalue = render_attribute_as_textile( object, attr, attrvalue, false )
+
+    ajax_options = {
+      "data-pk" => {
+        id: object.uuid,
+        key: object.class.to_s.underscore
+      }
+    }
+    if object.uuid
+      ajax_options['data-url'] = url_for(action: "update", id: object.uuid, controller: object.class.to_s.pluralize.underscore)
+    else
+      # Unsaved object: the editor submits a create, carrying the object's
+      # current attributes as defaults.
+      ajax_options['data-url'] = url_for(action: "create", controller: object.class.to_s.pluralize.underscore)
+      ajax_options['data-pk'][:defaults] = object.attributes
+    end
+    ajax_options['data-pk'] = ajax_options['data-pk'].to_json
+    # Unique DOM id per rendered editor, seeded from the clock.
+    @unique_id ||= (Time.now.to_f*1000000).to_i
+    span_id = object.uuid.to_s + '-' + attr.to_s + '-' + (@unique_id += 1).to_s
+
+    span_tag = content_tag 'span', rendervalue, {
+      "data-emptytext" => '(none)',
+      "data-placement" => "bottom",
+      "data-type" => input_type,
+      "data-title" => "Edit #{attr.to_s.gsub '_', ' '}",
+      "data-name" => attr,
+      "data-object-uuid" => object.uuid,
+      "data-toggle" => "manual",
+      "data-value" => attrvalue,
+      "id" => span_id,
+      :class => "editable #{is_textile?( object, attr ) ? 'editable-textile' : ''}"
+    }.merge(htmloptions).merge(ajax_options)
+    edit_button = raw('<a href="#" class="btn btn-xs btn-default btn-nodecorate" data-toggle="x-editable tooltip" data-toggle-selector="#' + span_id + '" data-placement="top" title="' + (htmloptions[:tiptitle] || 'edit') + '"><i class="fa fa-fw fa-pencil"></i></a>')
+    if htmloptions[:btnplacement] == :left
+      edit_button + ' ' + span_tag
+    else
+      span_tag + ' ' + edit_button
+    end
+  end
+
+  # Render an editor for one pipeline component parameter. +value_info+ may
+  # be a bare value or a hash describing the parameter (:dataclass, :value,
+  # :default, :optional/:required, :output_of, :search_for, ...). Collection
+  # and File parameters get a chooser dialog; other editable values get an
+  # x-editable field; non-editable values render as a link/plain value.
+  def render_pipeline_component_attribute(object, attr, subattr, value_info, htmloptions={})
+    datatype = nil
+    required = true
+    attrvalue = value_info
+
+    if value_info.is_a? Hash
+      if value_info[:output_of]
+        return raw("<span class='label label-default'>#{value_info[:output_of]}</span>")
+      end
+      if value_info[:dataclass]
+        dataclass = value_info[:dataclass]
+      end
+      if value_info[:optional] != nil
+        required = (value_info[:optional] != "true")
+      end
+      if value_info[:required] != nil
+        required = value_info[:required]
+      end
+
+      # Pick a suitable attrvalue to show as the current value (i.e.,
+      # the one that would be used if we ran the pipeline right now).
+      if value_info[:value]
+        attrvalue = value_info[:value]
+      elsif value_info[:default]
+        attrvalue = value_info[:default]
+      else
+        attrvalue = ''
+      end
+      preconfigured_search_str = value_info[:search_for]
+    end
+
+    if not object.andand.attribute_editable?(attr)
+      return link_to_if_arvados_object attrvalue
+    end
+
+    if dataclass
+      begin
+        dataclass = dataclass.constantize
+      rescue NameError
+      end
+    else
+      dataclass = ArvadosBase.resource_class_for_uuid(attrvalue)
+    end
+
+    # Nested form field name, e.g. [components][foo][script_parameters][bar].
+    id = "#{object.uuid}-#{subattr.join('-')}"
+    dn = "[#{attr}]"
+    subattr.each do |a|
+      dn += "[#{a}]"
+    end
+    if value_info.is_a? Hash
+      dn += '[value]'
+    end
+
+    if (dataclass == Collection) or (dataclass == File)
+      selection_param = object.class.to_s.underscore + dn
+      display_value = attrvalue
+      if value_info.is_a?(Hash)
+        if (link = Link.find? value_info[:link_uuid])
+          display_value = link.name
+        elsif value_info[:link_name]
+          display_value = value_info[:link_name]
+        elsif value_info[:selection_name]
+          display_value = value_info[:selection_name]
+        end
+      end
+      if (attr == :components) and (subattr.size > 2)
+        chooser_title = "Choose a #{dataclass == Collection ? 'dataset' : 'file'} for #{object.component_input_title(subattr[0], subattr[2])}:"
+      else
+        chooser_title = "Choose a #{dataclass == Collection ? 'dataset' : 'file'}:"
+      end
+      modal_path = choose_collections_path \
+      ({ title: chooser_title,
+         filters: [['owner_uuid', '=', object.owner_uuid]].to_json,
+         action_name: 'OK',
+         action_href: pipeline_instance_path(id: object.uuid),
+         action_method: 'patch',
+         preconfigured_search_str: (preconfigured_search_str || ""),
+         action_data: {
+           merge: true,
+           use_preview_selection: dataclass == File ? true : nil,
+           selection_param: selection_param,
+           success: 'page-refresh'
+         }.to_json,
+        })
+      return content_tag('div', :class => 'input-group') do
+        html = text_field_tag(dn, display_value,
+                              :class =>
+                              "form-control #{'required' if required}")
+        html + content_tag('span', :class => 'input-group-btn') do
+          link_to('Choose',
+                  modal_path,
+                  { :class => "btn btn-primary",
+                    :remote => true,
+                    :method => 'get',
+                  })
+        end
+      end
+    end
+
+    if dataclass == 'number' or attrvalue.is_a? Fixnum or attrvalue.is_a? Float
+      datatype = 'number'
+    elsif attrvalue.is_a? String
+      datatype = 'text'
+    elsif attrvalue.is_a?(Array) or dataclass.andand.is_a?(Class)
+      # TODO: find a way to edit with x-editable
+      return attrvalue
+    end
+
+    # When datatype is a String or Fixnum, link_to the attrvalue
+    lt = link_to attrvalue, '#', {
+      "data-emptytext" => "none",
+      "data-placement" => "bottom",
+      "data-type" => datatype,
+      "data-url" => url_for(action: "update", id: object.uuid, controller: object.class.to_s.pluralize.underscore, merge: true),
+      "data-title" => "Set value for #{subattr[-1].to_s}",
+      "data-name" => dn,
+      "data-pk" => "{id: \"#{object.uuid}\", key: \"#{object.class.to_s.underscore}\"}",
+      "data-value" => attrvalue,
+      # "clear" button interferes with form-control's up/down arrows
+      "data-clear" => false,
+      :class => "editable #{'required' if required} form-control",
+      :id => id
+    }.merge(htmloptions)
+
+    lt
+  end
+
+  # Yield up to :show_max (default 3) items from +list+; if more remain,
+  # append a "more" button linking to +button_href+.
+  def render_arvados_object_list_start(list, button_text, button_href,
+                                       params={}, *rest, &block)
+    show_max = params.delete(:show_max) || 3
+    params[:class] ||= 'btn btn-xs btn-default'
+    list[0...show_max].each { |item| yield item }
+    unless list[show_max].nil?
+      link_to(h(button_text) +
+              raw(' &nbsp; <i class="fa fa-fw fa-arrow-circle-right"></i>'),
+              button_href, params, *rest)
+    end
+  end
+
+  # Render the controller-specific partial named +partial+, falling back to
+  # the shared application/ partial when the controller has none.
+  def render_controller_partial partial, opts
+    cname = opts.delete :controller_name
+    begin
+      render opts.merge(partial: "#{cname}/#{partial}")
+    rescue ActionView::MissingTemplate
+      render opts.merge(partial: "application/#{partial}")
+    end
+  end
+
+  # Font Awesome icon class for each Arvados resource class.
+  RESOURCE_CLASS_ICONS = {
+    "Collection" => "fa-archive",
+    "Group" => "fa-users",
+    "Human" => "fa-male",  # FIXME: Use a more inclusive icon.
+    "Job" => "fa-gears",
+    "KeepDisk" => "fa-hdd-o",
+    "KeepService" => "fa-exchange",
+    "Link" => "fa-arrows-h",
+    "Node" => "fa-cloud",
+    "PipelineInstance" => "fa-gears",
+    "PipelineTemplate" => "fa-gears",
+    "Repository" => "fa-code-fork",
+    "Specimen" => "fa-flask",
+    "Trait" => "fa-clipboard",
+    "User" => "fa-user",
+    "VirtualMachine" => "fa-terminal",
+  }
+  DEFAULT_ICON_CLASS = "fa-cube"
+
+  # Icon class for a resource class (name or Class), with fallback.
+  def fa_icon_class_for_class(resource_class, default=DEFAULT_ICON_CLASS)
+    RESOURCE_CLASS_ICONS.fetch(resource_class.to_s, default)
+  end
+
+  # Icon class for the resource class inferred from a uuid.
+  def fa_icon_class_for_uuid(uuid, default=DEFAULT_ICON_CLASS)
+    fa_icon_class_for_class(resource_class_for_uuid(uuid), default)
+  end
+
+  # Icon class for a model instance; Groups with a group_class are shown
+  # as folders rather than user groups.
+  def fa_icon_class_for_object(object, default=DEFAULT_ICON_CLASS)
+    case class_name = object.class.to_s
+    when "Group"
+      object.group_class ? 'fa-folder' : 'fa-users'
+    else
+      RESOURCE_CLASS_ICONS.fetch(class_name, default)
+    end
+  end
+
+  # URL of the chooser-dialog preview pane for +object+; only Collections
+  # have previews, anything else returns nil.
+  def chooser_preview_url_for object, use_preview_selection=false
+    case object.class.to_s
+    when 'Collection'
+      polymorphic_path(object, tab_pane: 'chooser_preview', use_preview_selection: use_preview_selection)
+    else
+      nil
+    end
+  end
+
+  # Render +attrvalue+ through textile if the attribute is marked textile
+  # on the object; with +truncate+, cut after the first paragraph.
+  def render_attribute_as_textile( object, attr, attrvalue, truncate )
+    if attrvalue && (is_textile? object, attr)
+      markup = render_markup attrvalue
+      markup = markup[0,markup.index('</p>')+4] if (truncate && markup.index('</p>'))
+      return markup
+    else
+      return attrvalue
+    end
+  end
+
+  # Emit a span that client-side JS rewrites into the viewer's local time.
+  # NOTE(review): the opts parameter is currently ignored — the span always
+  # uses the hardcoded 'noseconds' option; confirm whether callers rely on
+  # passing opts.
+  def render_localized_date(date, opts="")
+    raw("<span class='utc-date' data-utc-date='#{date}' data-utc-date-opts='noseconds'>#{date}</span>")
+  end
+
+private
+  # True when +attr+ is listed in the object's textile_attributes.
+  def is_textile?( object, attr )
+    is_textile = object.textile_attributes.andand.include?(attr)
+  end
+end
diff --git a/apps/workbench/app/helpers/arvados_api_client_helper.rb b/apps/workbench/app/helpers/arvados_api_client_helper.rb
new file mode 100644 (file)
index 0000000..b6c29a9
--- /dev/null
@@ -0,0 +1,13 @@
+module ArvadosApiClientHelper
+  def arvados_api_client
+    ArvadosApiClient.new_or_current
+  end
+end
+
+# For the benefit of themes that still expect $arvados_api_client to work:
+class ArvadosClientProxyHack
+  def method_missing *args
+    ArvadosApiClient.new_or_current.send *args
+  end
+end
+$arvados_api_client = ArvadosClientProxyHack.new
diff --git a/apps/workbench/app/helpers/collections_helper.rb b/apps/workbench/app/helpers/collections_helper.rb
new file mode 100644 (file)
index 0000000..23a440a
--- /dev/null
@@ -0,0 +1,56 @@
+# Helpers for parsing collection identifiers and rendering collection data.
+module CollectionsHelper
+  # Convert permission/provenance links into {source, target, type} hashes
+  # suitable for a d3 graph.
+  def d3ify_links(links)
+    links.collect do |x|
+      {source: x.tail_uuid, target: x.head_uuid, type: x.name}
+    end
+  end
+
+  ##
+  # Regex match for collection portable data hash, returns a regex match object with the
+  # hash in group 1, (optional) size in group 2, (optional) subsequent uuid
+  # fields in group 3, and (optional) file path within the collection as group
+  # 4
+  # returns nil for no match.
+  #
+  # +pdh+ the portable data hash string to match
+  #
+  def self.match(pdh)
+    /^([a-f0-9]{32})(\+\d+)(\+[^+]+)*?(\/.*)?$/.match(pdh.to_s)
+  end
+
+  ##
+  # Regex match for collection UUIDs, returns a regex match object with the
+  # uuid in group 1, empty groups 2 and 3 (for consistency with the match
+  # method above), and (optional) file path within the collection as group
+  # 4.
+  # returns nil for no match.
+  #
+  def self.match_uuid_with_optional_filepath(uuid_with_optional_file)
+    /^([0-9a-z]{5}-4zz18-[0-9a-z]{15})()()(\/.*)?$/.match(uuid_with_optional_file.to_s)
+  end
+
+  ##
+  # Regex match for common image file extensions, returns a regex match object
+  # with the matched extension in group 1; or nil for no match.
+  #
+  # +file+ the file string to match
+  #
+  def self.is_image file
+    /\.(jpg|jpeg|gif|png|svg)$/i.match(file)
+  end
+
+  ##
+  # Generates a relative file path than can be appended to the URL of a
+  # collection to get a file download link without adding a spurious ./ at the
+  # beginning for files in the default stream.
+  #
+  # +file+ an entry in the Collection.files list in the form [stream, name, size]
+  #
+  def self.file_path file
+    f0 = file[0]
+    # Normalize the stream name: '.' and './' prefixes mean the default
+    # stream and must not appear in the generated path.
+    f0 = '' if f0 == '.'
+    f0 = f0[2..-1] if f0[0..1] == './'
+    f0 += '/' if not f0.empty?
+    file_path = "#{f0}#{file[1]}"
+  end
+end
diff --git a/apps/workbench/app/helpers/jobs_helper.rb b/apps/workbench/app/helpers/jobs_helper.rb
new file mode 100644 (file)
index 0000000..06c3d0d
--- /dev/null
@@ -0,0 +1,22 @@
+module JobsHelper
+  # Collect the accumulated stderr log text for the given job uuids,
+  # oldest entry first, split into individual lines.
+  def stderr_log_history(job_uuids)
+    results = []
+
+    log_history = Log.where(event_type: 'stderr',
+                            object_uuid: job_uuids).order('id DESC')
+    if !log_history.results.empty?
+      # The query returns newest-first; reverse for chronological order.
+      reversed_results = log_history.results.reverse
+      reversed_results.each do |entry|
+        if entry.andand.properties
+          properties = entry.properties
+          text = properties[:text]
+          if text
+            results = results.concat text.split("\n")
+          end
+        end
+      end
+    end
+    return results
+  end
+
+end
diff --git a/apps/workbench/app/helpers/pipeline_components_helper.rb b/apps/workbench/app/helpers/pipeline_components_helper.rb
new file mode 100644 (file)
index 0000000..8f5dba1
--- /dev/null
@@ -0,0 +1,16 @@
+module PipelineComponentsHelper
+  # Render the pipeline components partial for +template_suffix+. If the
+  # partial raises, log the error; with fallback == :json, render the raw
+  # JSON view (with the error details) instead, otherwise render nothing.
+  def render_pipeline_components(template_suffix, fallback=nil, locals={})
+    begin
+      render(partial: "pipeline_instances/show_components_#{template_suffix}",
+             locals: locals)
+    rescue => e
+      logger.error "#{e.inspect}"
+      logger.error "#{e.backtrace.join("\n\t")}"
+      case fallback
+      when :json
+        render(partial: "pipeline_instances/show_components_json",
+               locals: {error_name: e.inspect, backtrace: e.backtrace.join("\n\t")})
+      end
+    end
+  end
+end
diff --git a/apps/workbench/app/helpers/pipeline_instances_helper.rb b/apps/workbench/app/helpers/pipeline_instances_helper.rb
new file mode 100644 (file)
index 0000000..b0d5216
--- /dev/null
@@ -0,0 +1,309 @@
+module PipelineInstancesHelper
+
+  def pipeline_jobs object=nil
+    object ||= @object
+    if object.components[:steps].is_a? Array
+      pipeline_jobs_oldschool object
+    elsif object.components.is_a? Hash
+      pipeline_jobs_newschool object
+    end
+  end
+
+  def render_pipeline_jobs
+    pipeline_jobs.collect do |pj|
+      render_pipeline_job pj
+    end
+  end
+
+  def render_pipeline_job pj
+    pj[:progress_bar] = render partial: 'job_progress', locals: {:j => pj[:job]}
+    pj[:output_link] = link_to_if_arvados_object pj[:output]
+    pj[:job_link] = link_to_if_arvados_object pj[:job][:uuid] if pj[:job]
+    pj
+  end
+
+  # Merge (started_at, finished_at) time range into the list of time ranges in
+  # timestamps (timestamps must be sorted and non-overlapping).
+  # return the updated timestamps list.
+  def merge_range timestamps, started_at, finished_at
+    # in the comments below, 'i' is the entry in the timestamps array and 'j'
+    # is the started_at, finished_at range which is passed in.
+    timestamps.each_index do |i|
+      if started_at
+        if started_at >= timestamps[i][0] and finished_at <= timestamps[i][1]
+          # 'j' started and ended during 'i'
+          return timestamps
+        end
+
+        if started_at < timestamps[i][0] and finished_at >= timestamps[i][0] and finished_at <= timestamps[i][1]
+          # 'j' started before 'i' and finished during 'i'
+          # re-merge range between when 'j' started and 'i' finished
+          finished_at = timestamps[i][1]
+          timestamps.delete_at i
+          return merge_range timestamps, started_at, finished_at
+        end
+
+        if started_at >= timestamps[i][0] and started_at <= timestamps[i][1]
+          # 'j' started during 'i' and finished sometime after
+          # move end time of 'i' back
+          # re-merge range between when 'i' started and 'j' finished
+          started_at = timestamps[i][0]
+          timestamps.delete_at i
+          return merge_range timestamps, started_at, finished_at
+        end
+
+        if finished_at < timestamps[i][0]
+          # 'j' finished before 'i' started, so insert before 'i'
+          timestamps.insert i, [started_at, finished_at]
+          return timestamps
+        end
+      end
+    end
+
+    timestamps << [started_at, finished_at]
+  end
+
+  # Accept a list of objects with [:started_at] and [:finshed_at] keys and
+  # merge overlapping ranges to compute the time spent running after periods of
+  # overlapping execution are factored out.
+  def determine_wallclock_runtime jobs
+    timestamps = []
+    jobs.each do |j|
+      insert_at = 0
+      started_at = j[:started_at]
+      finished_at = (if j[:finished_at] then j[:finished_at] else Time.now end)
+      if started_at
+        timestamps = merge_range timestamps, started_at, finished_at
+      end
+    end
+    timestamps.map { |t| t[1] - t[0] }.reduce(:+) || 0
+  end
+
+  protected
+
+  def pipeline_jobs_newschool object
+    ret = []
+    i = -1
+
+    jobuuids = object.components.values.map { |c|
+      c[:job][:uuid] if c.is_a?(Hash) and c[:job].is_a?(Hash)
+    }.compact
+    job = {}
+    Job.where(uuid: jobuuids).each do |j|
+      job[j[:uuid]] = j
+    end
+
+    object.components.each do |cname, c|
+      i += 1
+      pj = {index: i, name: cname}
+      if not c.is_a?(Hash)
+        ret << pj
+        next
+      end
+      if c[:job] and c[:job][:uuid] and job[c[:job][:uuid]]
+        pj[:job] = job[c[:job][:uuid]]
+      elsif c[:job].is_a?(Hash)
+        pj[:job] = c[:job]
+        if pj[:job][:started_at].is_a? String
+          pj[:job][:started_at] = Time.parse(pj[:job][:started_at])
+        end
+        if pj[:job][:finished_at].is_a? String
+          pj[:job][:finished_at] = Time.parse(pj[:job][:finished_at])
+        end
+        # If necessary, figure out the state based on the other fields.
+        pj[:job][:state] ||= if pj[:job][:cancelled_at]
+                               "Cancelled"
+                             elsif pj[:job][:success] == false
+                               "Failed"
+                             elsif pj[:job][:success] == true
+                               "Complete"
+                             elsif pj[:job][:running] == true
+                               "Running"
+                             else
+                               "Queued"
+                             end
+      else
+        pj[:job] = {}
+      end
+      pj[:percent_done] = 0
+      pj[:percent_running] = 0
+      if pj[:job][:success]
+        if pj[:job][:output]
+          pj[:progress] = 1.0
+          pj[:percent_done] = 100
+        else
+          pj[:progress] = 0.0
+        end
+      else
+        if pj[:job][:tasks_summary]
+          begin
+            ts = pj[:job][:tasks_summary]
+            denom = ts[:done].to_f + ts[:running].to_f + ts[:todo].to_f
+            pj[:progress] = (ts[:done].to_f + ts[:running].to_f/2) / denom
+            pj[:percent_done] = 100.0 * ts[:done].to_f / denom
+            pj[:percent_running] = 100.0 * ts[:running].to_f / denom
+            pj[:progress_detail] = "#{ts[:done]} done #{ts[:running]} run #{ts[:todo]} todo"
+          rescue
+            pj[:progress] = 0.5
+            pj[:percent_done] = 0.0
+            pj[:percent_running] = 100.0
+          end
+        else
+          pj[:progress] = 0.0
+        end
+      end
+
+      case pj[:job][:state]
+        when 'Complete'
+        pj[:result] = 'complete'
+        pj[:labeltype] = 'success'
+        pj[:complete] = true
+        pj[:progress] = 1.0
+      when 'Failed'
+        pj[:result] = 'failed'
+        pj[:labeltype] = 'danger'
+        pj[:failed] = true
+      when 'Cancelled'
+        pj[:result] = 'cancelled'
+        pj[:labeltype] = 'danger'
+        pj[:failed] = true
+      when 'Running'
+        pj[:result] = 'running'
+        pj[:labeltype] = 'primary'
+      when 'Queued'
+        pj[:result] = 'queued'
+        pj[:labeltype] = 'default'
+      else
+        pj[:result] = 'none'
+        pj[:labeltype] = 'default'
+      end
+
+      pj[:job_id] = pj[:job][:uuid]
+      pj[:script] = pj[:job][:script] || c[:script]
+      pj[:repository] = pj[:job][:script] || c[:repository]
+      pj[:script_parameters] = pj[:job][:script_parameters] || c[:script_parameters]
+      pj[:script_version] = pj[:job][:script_version] || c[:script_version]
+      pj[:nondeterministic] = pj[:job][:nondeterministic] || c[:nondeterministic]
+      pj[:output] = pj[:job][:output]
+      pj[:output_uuid] = c[:output_uuid]
+      pj[:finished_at] = pj[:job][:finished_at]
+      ret << pj
+    end
+    ret
+  end
+
+  def pipeline_jobs_oldschool object
+    ret = []
+    object.components[:steps].each_with_index do |step, i|
+      pj = {index: i, name: step[:name]}
+      if step[:complete] and step[:complete] != 0
+        if step[:output_data_locator]
+          pj[:progress] = 1.0
+        else
+          pj[:progress] = 0.0
+        end
+      else
+        if step[:progress] and
+            (re = step[:progress].match /^(\d+)\+(\d+)\/(\d+)$/)
+          pj[:progress] = (((re[1].to_f + re[2].to_f/2) / re[3].to_f) rescue 0.5)
+        else
+          pj[:progress] = 0.0
+        end
+        if step[:failed]
+          pj[:result] = 'failed'
+          pj[:failed] = true
+        end
+      end
+      if step[:warehousejob]
+        if step[:complete]
+          pj[:result] = 'complete'
+          pj[:complete] = true
+          pj[:progress] = 1.0
+        elsif step[:warehousejob][:finishtime]
+          pj[:result] = 'failed'
+          pj[:failed] = true
+        elsif step[:warehousejob][:starttime]
+          pj[:result] = 'running'
+        else
+          pj[:result] = 'queued'
+        end
+      end
+      pj[:progress_detail] = (step[:progress] rescue nil)
+      pj[:job_id] = (step[:warehousejob][:id] rescue nil)
+      pj[:job_link] = pj[:job_id]
+      pj[:script] = step[:function]
+      pj[:script_version] = (step[:warehousejob][:revision] rescue nil)
+      pj[:output] = step[:output_data_locator]
+      pj[:finished_at] = (Time.parse(step[:warehousejob][:finishtime]) rescue nil)
+      ret << pj
+    end
+    ret
+  end
+
+  MINUTE = 60
+  HOUR = 60 * MINUTE
+  DAY = 24 * HOUR
+
+  def render_runtime duration, use_words, round_to_min=true
+    days = 0
+    hours = 0
+    minutes = 0
+    seconds = 0
+
+    if duration >= DAY
+      days = (duration / DAY).floor
+      duration -= days * DAY
+    end
+
+    if duration >= HOUR
+      hours = (duration / HOUR).floor
+      duration -= hours * HOUR
+    end
+
+    if duration >= MINUTE
+      minutes = (duration / MINUTE).floor
+      duration -= minutes * MINUTE
+    end
+
+    seconds = duration.floor
+
+    if round_to_min and seconds >= 30
+      minutes += 1
+    end
+
+    if use_words
+      s = []
+      if days > 0 then
+        s << "#{days} day#{'s' if days != 1}"
+      end
+      if hours > 0 then
+        s << "#{hours} hour#{'s' if hours != 1}"
+      end
+      if minutes > 0 then
+        s << "#{minutes} minute#{'s' if minutes != 1}"
+      end
+      if not round_to_min or s.size == 0
+        s << "#{seconds} second#{'s' if seconds != 1}"
+      end
+      s = s * " "
+    else
+      s = ""
+      if days > 0
+        s += "#{days}<span class='time-label-divider'>d</span> "
+      end
+
+      if (hours > 0)
+        s += "#{hours}<span class='time-label-divider'>h</span>"
+      end
+
+      s += "#{minutes}<span class='time-label-divider'>m</span>"
+
+      if not round_to_min
+        s += "#{seconds}<span class='time-label-divider'>s</span>"
+      end
+    end
+
+    raw(s)
+  end
+
+end
diff --git a/apps/workbench/app/helpers/provenance_helper.rb b/apps/workbench/app/helpers/provenance_helper.rb
new file mode 100644 (file)
index 0000000..a4723a3
--- /dev/null
@@ -0,0 +1,332 @@
+module ProvenanceHelper
+
+  class GenerateGraph
+    def initialize(pdata, opts)
+      @pdata = pdata
+      @opts = opts
+      @visited = {}
+      @jobs = {}
+      @node_extra = {}
+    end
+
+    def self.collection_uuid(uuid)
+      Keep::Locator.parse(uuid).andand.strip_hints.andand.to_s
+    end
+
+    def url_for u
+      p = { :host => @opts[:request].host,
+        :port => @opts[:request].port,
+        :protocol => @opts[:request].protocol }
+      p.merge! u
+      Rails.application.routes.url_helpers.url_for (p)
+    end
+
+    def determine_fillcolor(n)
+      fillcolor = %w(666666 669966 666699 666666 996666)[n || 0] || '666666'
+      "style=\"filled\",color=\"#ffffff\",fillcolor=\"##{fillcolor}\",fontcolor=\"#ffffff\""
+    end
+
+    def describe_node(uuid, describe_opts={})
+      bgcolor = determine_fillcolor (describe_opts[:pip] || @opts[:pips].andand[uuid])
+
+      rsc = ArvadosBase::resource_class_for_uuid uuid
+
+      if GenerateGraph::collection_uuid(uuid) || rsc == Collection
+        if Collection.is_empty_blob_locator? uuid.to_s
+          # special case
+          return "\"#{uuid}\" [label=\"(empty collection)\"];\n"
+        end
+
+        href = url_for ({:controller => Collection.to_s.tableize,
+                          :action => :show,
+                          :id => uuid.to_s })
+
+        return "\"#{uuid}\" [label=\"#{encode_quotes(describe_opts[:label] || (@pdata[uuid] and @pdata[uuid][:name]) || uuid)}\",shape=box,href=\"#{href}\",#{bgcolor}];\n"
+      else
+        href = ""
+        if describe_opts[:href]
+          href = ",href=\"#{url_for ({:controller => describe_opts[:href][:controller],
+                            :action => :show,
+                            :id => describe_opts[:href][:id] })}\""
+        end
+        return "\"#{uuid}\" [label=\"#{encode_quotes(describe_opts[:label] || uuid)}\",#{bgcolor},shape=#{describe_opts[:shape] || 'box'}#{href}];\n"
+      end
+    end
+
+    def job_uuid(job)
+      d = Digest::MD5.hexdigest(job[:script_parameters].to_json)
+      if @opts[:combine_jobs] == :script_only
+        uuid = "#{job[:script]}_#{d}"
+      elsif @opts[:combine_jobs] == :script_and_version
+        uuid = "#{job[:script]}_#{job[:script_version]}_#{d}"
+      else
+        uuid = "#{job[:uuid]}"
+      end
+
+      @jobs[uuid] = [] unless @jobs[uuid]
+      @jobs[uuid] << job unless @jobs[uuid].include? job
+
+      uuid
+    end
+
+    def edge(tail, head, extra)
+      if @opts[:direction] == :bottom_up
+        gr = "\"#{encode_quotes head}\" -> \"#{encode_quotes tail}\""
+      else
+        gr = "\"#{encode_quotes tail}\" -> \"#{encode_quotes head}\""
+      end
+
+      if extra.length > 0
+        gr += " ["
+        extra.each do |k, v|
+          gr += "#{k}=\"#{encode_quotes v}\","
+        end
+        gr += "]"
+      end
+      gr += ";\n"
+      gr
+    end
+
+    def script_param_edges(uuid, sp)
+      gr = ""
+
+      sp.each do |k, v|
+        if @opts[:all_script_parameters]
+          if v.is_a? Array or v.is_a? Hash
+            encv = JSON.pretty_generate(v).gsub("\n", "\\l") + "\\l"
+          else
+            encv = v.to_json
+          end
+          gr += "\"#{encode_quotes encv}\" [shape=box];\n"
+          gr += edge(encv, uuid, {:label => k})
+        end
+      end
+      gr
+    end
+
+    def job_edges job, edge_opts={}
+      uuid = job_uuid(job)
+      gr = ""
+
+      ProvenanceHelper::find_collections job[:script_parameters] do |collection_hash, collection_uuid, key|
+        if collection_uuid
+          gr += describe_node(collection_uuid)
+          gr += edge(collection_uuid, uuid, {:label => key})
+        else
+          gr += describe_node(collection_hash)
+          gr += edge(collection_hash, uuid, {:label => key})
+        end
+      end
+
+      if job[:docker_image_locator] and !@opts[:no_docker]
+        gr += describe_node(job[:docker_image_locator], {label: (job[:runtime_constraints].andand[:docker_image] || job[:docker_image_locator])})
+        gr += edge(job[:docker_image_locator], uuid, {label: "docker_image"})
+      end
+
+      if @opts[:script_version_nodes]
+        gr += describe_node(job[:script_version], {:label => "git:#{job[:script_version]}"})
+        gr += edge(job[:script_version], uuid, {:label => "script_version"})
+      end
+
+      if job[:output] and !edge_opts[:no_output]
+        gr += describe_node(job[:output])
+        gr += edge(uuid, job[:output], {label: "output" })
+      end
+
+      if job[:log] and !edge_opts[:no_log]
+        gr += describe_node(job[:log])
+        gr += edge(uuid, job[:log], {label: "log"})
+      end
+
+      gr
+    end
+
+    def generate_provenance_edges(uuid)
+      gr = ""
+      m = GenerateGraph::collection_uuid(uuid)
+      uuid = m if m
+
+      if uuid.nil? or uuid.empty? or @visited[uuid]
+        return ""
+      end
+
+      if @pdata[uuid].nil?
+        return ""
+      else
+        @visited[uuid] = true
+      end
+
+      if uuid.start_with? "component_"
+        # Pipeline component inputs
+        job = @pdata[@pdata[uuid][:job].andand[:uuid]]
+
+        if job
+          gr += describe_node(job_uuid(job), {label: uuid[38..-1], pip: @opts[:pips].andand[job[:uuid]], shape: "oval",
+                                href: {controller: 'jobs', id: job[:uuid]}})
+          gr += job_edges job, {no_output: true, no_log: true}
+        end
+
+        # Pipeline component output
+        outuuid = @pdata[uuid][:output_uuid]
+        if outuuid
+          outcollection = @pdata[outuuid]
+          if outcollection
+            gr += edge(job_uuid(job), outcollection[:portable_data_hash], {label: "output"})
+            gr += describe_node(outcollection[:portable_data_hash], {label: outcollection[:name]})
+          end
+        elsif job and job[:output]
+          gr += describe_node(job[:output])
+          gr += edge(job_uuid(job), job[:output], {label: "output" })
+        end
+      else
+        rsc = ArvadosBase::resource_class_for_uuid uuid
+
+        if rsc == Job
+          job = @pdata[uuid]
+          gr += job_edges job if job
+        end
+      end
+
+      @pdata.each do |k, link|
+        if link[:head_uuid] == uuid.to_s and link[:link_class] == "provenance"
+          href = url_for ({:controller => Link.to_s.tableize,
+                            :action => :show,
+                            :id => link[:uuid] })
+
+          gr += describe_node(link[:tail_uuid])
+          gr += edge(link[:head_uuid], link[:tail_uuid], {:label => link[:name], :href => href})
+          gr += generate_provenance_edges(link[:tail_uuid])
+        end
+      end
+
+      gr
+    end
+
+    def describe_jobs
+      gr = ""
+      @jobs.each do |k, v|
+        href = url_for ({:controller => Job.to_s.tableize,
+                          :action => :index })
+
+        gr += "\"#{k}\" [href=\"#{href}?"
+
+        n = 0
+        v.each do |u|
+          gr += ";" unless gr.end_with? "?"
+          gr += "uuid%5b%5d=#{u[:uuid]}"
+          n |= @opts[:pips][u[:uuid]] if @opts[:pips] and @opts[:pips][u[:uuid]]
+        end
+
+        gr += "\",label=\""
+
+        label = "#{v[0][:script]}"
+
+        if label == "run-command" and v[0][:script_parameters][:command].is_a? Array
+          label = v[0][:script_parameters][:command].join(' ')
+        end
+
+        if not @opts[:combine_jobs]
+          label += "\\n#{v[0][:finished_at]}"
+        end
+
+        gr += encode_quotes label
+
+        gr += "\",#{determine_fillcolor n}];\n"
+      end
+      gr
+    end
+
+    def encode_quotes value
+      value.to_s.gsub("\"", "\\\"").gsub("\n", "\\n")
+    end
+  end
+
+  def self.create_provenance_graph(pdata, svgId, opts={})
+    if pdata.is_a? Array or pdata.is_a? ArvadosResourceList
+      p2 = {}
+      pdata.each do |k|
+        p2[k[:uuid]] = k if k[:uuid]
+      end
+      pdata = p2
+    end
+
+    unless pdata.is_a? Hash
+      raise "create_provenance_graph accepts Array or Hash for pdata only, pdata is #{pdata.class}"
+    end
+
+    gr = """strict digraph {
+node [fontsize=10,fontname=\"Helvetica,Arial,sans-serif\"];
+edge [fontsize=10,fontname=\"Helvetica,Arial,sans-serif\"];
+"""
+
+    if opts[:direction] == :bottom_up
+      gr += "edge [dir=back];"
+    end
+
+    begin
+      pdata = pdata.stringify_keys
+
+      g = GenerateGraph.new(pdata, opts)
+
+      pdata.each do |k, v|
+        if !opts[:only_components] or k.start_with? "component_"
+          gr += g.generate_provenance_edges(k)
+        else
+          #gr += describe_node(k)
+        end
+      end
+
+      if !opts[:only_components]
+        gr += g.describe_jobs
+      end
+
+    rescue => e
+      Rails.logger.warn "#{e.inspect}"
+      Rails.logger.warn "#{e.backtrace.join("\n\t")}"
+      raise
+    end
+
+    gr += "}"
+    svg = ""
+
+    require 'open3'
+
+    Open3.popen2("dot", "-Tsvg") do |stdin, stdout, wait_thr|
+      stdin.print(gr)
+      stdin.close
+      svg = stdout.read()
+      wait_thr.value
+      stdout.close()
+    end
+
+    svg = svg.sub(/<\?xml.*?\?>/m, "")
+    svg = svg.sub(/<!DOCTYPE.*?>/m, "")
+    svg = svg.sub(/<svg /, "<svg id=\"#{svgId}\" ")
+  end
+
+  # yields hash, uuid
+  # Position indicates whether it is a content hash or arvados uuid.
+  # One will hold a value, the other will always be nil.
+  def self.find_collections(sp, key=nil, &b)
+    case sp
+    when ArvadosBase
+      sp.class.columns.each do |c|
+        find_collections(sp[c.name.to_sym], nil, &b)
+      end
+    when Hash
+      sp.each do |k, v|
+        find_collections(v, key || k, &b)
+      end
+    when Array
+      sp.each do |v|
+        find_collections(v, key, &b)
+      end
+    when String
+      if m = /[a-f0-9]{32}\+\d+/.match(sp)
+        yield m[0], nil, key
+      elsif m = /[0-9a-z]{5}-4zz18-[0-9a-z]{15}/.match(sp)
+        yield nil, m[0], key
+      end
+    end
+  end
+end
diff --git a/apps/workbench/app/helpers/version_helper.rb b/apps/workbench/app/helpers/version_helper.rb
new file mode 100644 (file)
index 0000000..6cae78f
--- /dev/null
@@ -0,0 +1,30 @@
+module VersionHelper
+  # api_version returns the git commit hash for the API server's
+  # current version.  It is extracted from api_version_text, which
+  # returns the source_version provided by the discovery document and
+  # may have the word "-modified" appended to it (if the API server is
+  # running from a locally modified repository).
+
+  def api_version
+    api_version_text.sub(/[^[:xdigit:]].*/, '')
+  end
+
+  def api_version_text
+    arvados_api_client.discovery[:source_version]
+  end
+
+  # wb_version and wb_version_text provide the same strings for the
+  # code version that this Workbench is currently running.
+
+  def wb_version
+    Rails.configuration.source_version
+  end
+
+  def wb_version_text
+    wb_version + (Rails.configuration.local_modified or '')
+  end
+
+  def version_link_target version
+    "https://arvados.org/projects/arvados/repository/changes?rev=#{version}"
+  end
+end
diff --git a/apps/workbench/app/mailers/.gitkeep b/apps/workbench/app/mailers/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/workbench/app/mailers/issue_reporter.rb b/apps/workbench/app/mailers/issue_reporter.rb
new file mode 100644 (file)
index 0000000..212dd7d
--- /dev/null
@@ -0,0 +1,12 @@
+class IssueReporter < ActionMailer::Base
+  default from: Rails.configuration.issue_reporter_email_from
+  default to: Rails.configuration.issue_reporter_email_to
+
+  def send_report(user, params)
+    @user = user
+    @params = params
+    subject = 'Issue reported'
+    subject += " by #{@user.email}" if @user
+    mail(subject: subject)
+  end
+end
diff --git a/apps/workbench/app/models/.gitkeep b/apps/workbench/app/models/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/workbench/app/models/api_client_authorization.rb b/apps/workbench/app/models/api_client_authorization.rb
new file mode 100644 (file)
index 0000000..6d1558c
--- /dev/null
@@ -0,0 +1,8 @@
+class ApiClientAuthorization < ArvadosBase
+  def editable_attributes
+    %w(expires_at default_owner_uuid)
+  end
+  def self.creatable?
+    false
+  end
+end
diff --git a/apps/workbench/app/models/arvados_api_client.rb b/apps/workbench/app/models/arvados_api_client.rb
new file mode 100644 (file)
index 0000000..5b2311d
--- /dev/null
@@ -0,0 +1,253 @@
+require 'httpclient'
+require 'thread'
+
+class ArvadosApiClient
+  class ApiError < StandardError
+    attr_reader :api_response, :api_response_s, :api_status, :request_url
+
+    def initialize(request_url, errmsg)
+      @request_url = request_url
+      @api_response ||= {}
+      errors = @api_response[:errors]
+      if not errors.is_a?(Array)
+        @api_response[:errors] = [errors || errmsg]
+      end
+      super(errmsg)
+    end
+  end
+
+  class NoApiResponseException < ApiError
+    def initialize(request_url, exception)
+      @api_response_s = exception.to_s
+      super(request_url,
+            "#{exception.class.to_s} error connecting to API server")
+    end
+  end
+
+  class InvalidApiResponseException < ApiError
+    def initialize(request_url, api_response)
+      @api_status = api_response.status_code
+      @api_response_s = api_response.content
+      super(request_url, "Unparseable response from API server")
+    end
+  end
+
+  class ApiErrorResponseException < ApiError
+    def initialize(request_url, api_response)
+      @api_status = api_response.status_code
+      @api_response_s = api_response.content
+      @api_response = Oj.load(@api_response_s, :symbol_keys => true)
+      errors = @api_response[:errors]
+      if errors.respond_to?(:join)
+        errors = errors.join("\n\n")
+      else
+        errors = errors.to_s
+      end
+      super(request_url, "#{errors} [API: #{@api_status}]")
+    end
+  end
+
+  class AccessForbiddenException < ApiErrorResponseException; end
+  class NotFoundException < ApiErrorResponseException; end
+  class NotLoggedInException < ApiErrorResponseException; end
+
+  ERROR_CODE_CLASSES = {
+    401 => NotLoggedInException,
+    403 => AccessForbiddenException,
+    404 => NotFoundException,
+  }
+
+  @@profiling_enabled = Rails.configuration.profiling_enabled
+  @@discovery = nil
+
+  # An API client object suitable for handling API requests on behalf
+  # of the current thread.
+  def self.new_or_current
+    # If this thread doesn't have an API client yet, *or* this model
+    # has been reloaded since the existing client was created, create
+    # a new client. Otherwise, keep using the latest client created in
+    # the current thread.
+    unless Thread.current[:arvados_api_client].andand.class == self
+      Thread.current[:arvados_api_client] = new
+    end
+    Thread.current[:arvados_api_client]
+  end
+
+  def initialize *args
+    @api_client = nil
+    @client_mtx = Mutex.new
+  end
+
+  def api(resources_kind, action, data=nil, tokens={})
+
+    profile_checkpoint
+
+    if not @api_client
+      @client_mtx.synchronize do
+        @api_client = HTTPClient.new
+        if Rails.configuration.arvados_insecure_https
+          @api_client.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_NONE
+        else
+          # Use system CA certificates
+          @api_client.ssl_config.add_trust_ca('/etc/ssl/certs')
+        end
+      end
+    end
+
+    resources_kind = class_kind(resources_kind).pluralize if resources_kind.is_a? Class
+    url = "#{self.arvados_v1_base}/#{resources_kind}#{action}"
+
+    # Clean up /arvados/v1/../../discovery/v1 to /discovery/v1
+    url.sub! '/arvados/v1/../../', '/'
+
+    query = {
+      'api_token' => tokens[:arvados_api_token] || Thread.current[:arvados_api_token] || '',
+      'reader_tokens' => (tokens[:reader_tokens] || Thread.current[:reader_tokens] || []).to_json,
+    }
+    if !data.nil?
+      data.each do |k,v|
+        if v.is_a? String or v.nil?
+          query[k] = v
+        elsif v == true
+          query[k] = 1
+        elsif v == false
+          query[k] = 0
+        else
+          query[k] = JSON.dump(v)
+        end
+      end
+    else
+      query["_method"] = "GET"
+    end
+    if @@profiling_enabled
+      query["_profile"] = "true"
+    end
+
+    header = {"Accept" => "application/json"}
+
+    profile_checkpoint { "Prepare request #{url} #{query[:uuid]} #{query[:where]} #{query[:filters]} #{query[:order]}" }
+    msg = @client_mtx.synchronize do
+      begin
+        @api_client.post(url, query, header: header)
+      rescue => exception
+        raise NoApiResponseException.new(url, exception)
+      end
+    end
+    profile_checkpoint 'API transaction'
+
+    begin
+      resp = Oj.load(msg.content, :symbol_keys => true)
+    rescue Oj::ParseError
+      resp = nil
+    end
+    if not resp.is_a? Hash
+      raise InvalidApiResponseException.new(url, msg)
+    elsif msg.status_code != 200
+      error_class = ERROR_CODE_CLASSES.fetch(msg.status_code,
+                                             ApiErrorResponseException)
+      raise error_class.new(url, msg)
+    end
+
+    if resp[:_profile]
+      Rails.logger.info "API client: " \
+      "#{resp.delete(:_profile)[:request_time]} request_time"
+    end
+    profile_checkpoint 'Parse response'
+    resp
+  end
+
+  def self.patch_paging_vars(ary, items_available, offset, limit, links=nil)
+    if items_available
+      (class << ary; self; end).class_eval { attr_accessor :items_available }
+      ary.items_available = items_available
+    end
+    if offset
+      (class << ary; self; end).class_eval { attr_accessor :offset }
+      ary.offset = offset
+    end
+    if limit
+      (class << ary; self; end).class_eval { attr_accessor :limit }
+      ary.limit = limit
+    end
+    if links
+      (class << ary; self; end).class_eval { attr_accessor :links }
+      ary.links = links
+    end
+    ary
+  end
+
+  def unpack_api_response(j, kind=nil)
+    if j.is_a? Hash and j[:items].is_a? Array and j[:kind].match(/(_list|List)$/)
+      ary = j[:items].collect { |x| unpack_api_response x, x[:kind] }
+      links = ArvadosResourceList.new Link
+      links.results = (j[:links] || []).collect do |x|
+        unpack_api_response x, x[:kind]
+      end
+      self.class.patch_paging_vars(ary, j[:items_available], j[:offset], j[:limit], links)
+    elsif j.is_a? Hash and (kind || j[:kind])
+      oclass = self.kind_class(kind || j[:kind])
+      if oclass
+        j.keys.each do |k|
+          childkind = j["#{k.to_s}_kind".to_sym]
+          if childkind
+            j[k] = self.unpack_api_response(j[k], childkind)
+          end
+        end
+        oclass.new.private_reload(j)
+      else
+        j
+      end
+    else
+      j
+    end
+  end
+
+  def arvados_login_url(params={})
+    if Rails.configuration.respond_to? :arvados_login_base
+      uri = Rails.configuration.arvados_login_base
+    else
+      uri = self.arvados_v1_base.sub(%r{/arvados/v\d+.*}, '/login')
+    end
+    if params.size > 0
+      uri += '?' << params.collect { |k,v|
+        CGI.escape(k.to_s) + '=' + CGI.escape(v.to_s)
+      }.join('&')
+    end
+    uri
+  end
+
+  def arvados_logout_url(params={})
+    arvados_login_url(params).sub('/login','/logout')
+  end
+
+  def arvados_v1_base
+    Rails.configuration.arvados_v1_base
+  end
+
+  def discovery
+    @@discovery ||= api '../../discovery/v1/apis/arvados/v1/rest', ''
+  end
+
+  def kind_class(kind)
+    kind.match(/^arvados\#(.+?)(_list|List)?$/)[1].pluralize.classify.constantize rescue nil
+  end
+
+  def class_kind(resource_class)
+    resource_class.to_s.underscore
+  end
+
+  def self.class_kind(resource_class)
+    resource_class.to_s.underscore
+  end
+
+  protected
+  def profile_checkpoint label=nil
+    return if !@@profiling_enabled
+    label = yield if block_given?
+    t = Time.now
+    if label and @profile_t0
+      Rails.logger.info "API client: #{t - @profile_t0} #{label}"
+    end
+    @profile_t0 = t
+  end
+end
diff --git a/apps/workbench/app/models/arvados_base.rb b/apps/workbench/app/models/arvados_base.rb
new file mode 100644 (file)
index 0000000..bc5a9a3
--- /dev/null
@@ -0,0 +1,443 @@
+# Abstract base class for all Workbench models.  These models are not
+# backed by a local database table: attribute schemas come from the API
+# server's discovery document, and all persistence happens through
+# ArvadosApiClient calls rather than SQL.
+class ArvadosBase < ActiveRecord::Base
+  self.abstract_class = true
+  # Per-instance hash of attribute name -> sort key used when ordering
+  # attributes for display (a nil sort key hides the attribute).
+  attr_accessor :attribute_sortkey
+  # Extra parameters to send along with the create API call.
+  attr_accessor :create_params
+
+  def self.arvados_api_client
+    ArvadosApiClient.new_or_current
+  end
+
+  def arvados_api_client
+    ArvadosApiClient.new_or_current
+  end
+
+  # Map each 5-character uuid infix (e.g. "j7d0g") to its API "kind"
+  # string, built once from the discovery document's schemas and cached
+  # in a class variable.
+  def self.uuid_infix_object_kind
+    @@uuid_infix_object_kind ||=
+      begin
+        infix_kind = {}
+        arvados_api_client.discovery[:schemas].each do |name, schema|
+          if schema[:uuidPrefix]
+            infix_kind[schema[:uuidPrefix]] =
+              'arvados#' + name.to_s.camelcase(:lower)
+          end
+        end
+
+        # Recognize obsolete types.
+        infix_kind.
+          merge('mxsvm' => 'arvados#pipelineTemplate', # Pipeline
+                'uo14g' => 'arvados#pipelineInstance', # PipelineInvocation
+                'ldvyl' => 'arvados#group') # Project
+      end
+  end
+
+  def initialize raw_params={}, create_params={}
+    super self.class.permit_attribute_params(raw_params)
+    @create_params = create_params
+    # Default display ordering for common attributes; subclasses adjust
+    # individual entries in their own initializers.
+    @attribute_sortkey ||= {
+      'id' => nil,
+      'name' => '000',
+      'owner_uuid' => '002',
+      'event_type' => '100',
+      'link_class' => '100',
+      'group_class' => '100',
+      'tail_uuid' => '101',
+      'head_uuid' => '102',
+      'object_uuid' => '102',
+      'summary' => '104',
+      'description' => '104',
+      'properties' => '150',
+      'info' => '150',
+      'created_at' => '200',
+      'modified_at' => '201',
+      'modified_by_user_uuid' => '202',
+      'modified_by_client_uuid' => '203',
+      'uuid' => '999',
+    }
+  end
+
+  # Build the column list from the API discovery schema instead of a
+  # database table.  Lowercase schema types (boolean, integer, ...) map
+  # directly; capitalized types (Hash, Array) are stored as serialized
+  # text columns.
+  def self.columns
+    return @columns if @columns.andand.any?
+    @columns = []
+    @attribute_info ||= {}
+    schema = arvados_api_client.discovery[:schemas][self.to_s.to_sym]
+    return @columns if schema.nil?
+    schema[:properties].each do |k, coldef|
+      case k
+      when :etag, :kind
+        # Read-only metadata set by the API server; not real columns.
+        attr_reader k
+      else
+        if coldef[:type] == coldef[:type].downcase
+          # boolean, integer, etc.
+          @columns << column(k, coldef[:type].to_sym)
+        else
+          # Hash, Array
+          @columns << column(k, :text)
+          serialize k, coldef[:type].constantize
+        end
+        @attribute_info[k] = coldef
+      end
+    end
+    @columns
+  end
+
+  # Construct a synthetic ActiveRecord column object for one attribute.
+  def self.column(name, sql_type = nil, default = nil, null = true)
+    ActiveRecord::ConnectionAdapters::Column.new(name.to_s, default, sql_type.to_s, null)
+  end
+
+  # Discovery-document metadata for each attribute (populated as a side
+  # effect of self.columns).
+  def self.attribute_info
+    self.columns
+    @attribute_info
+  end
+
+  # Fetch one object by uuid.  When called on ArvadosBase itself, the
+  # concrete class is inferred from the uuid.  Results are cached per
+  # {class, uuid, request thread}; pass cache: false in opts to force a
+  # fresh API lookup.
+  def self.find(uuid, opts={})
+    if uuid.class != String or uuid.length < 27 then
+      raise 'argument to find() must be a uuid string. Acceptable formats: warehouse locator or string with format xxxxx-xxxxx-xxxxxxxxxxxxxxx'
+    end
+
+    if self == ArvadosBase
+      # Determine type from uuid and defer to the appropriate subclass.
+      return resource_class_for_uuid(uuid).find(uuid, opts)
+    end
+
+    # Only do one lookup on the API side per {class, uuid, workbench
+    # request} unless {cache: false} is given via opts.
+    cache_key = "request_#{Thread.current.object_id}_#{self.to_s}_#{uuid}"
+    if opts[:cache] == false
+      Rails.cache.write cache_key, arvados_api_client.api(self, '/' + uuid)
+    end
+    hash = Rails.cache.fetch cache_key do
+      arvados_api_client.api(self, '/' + uuid)
+    end
+    new.private_reload(hash)
+  end
+
+  # Like find, but returns nil instead of raising on any error.
+  def self.find?(*args)
+    find(*args) rescue nil
+  end
+
+  # The following class methods start a lazily-evaluated query, mirroring
+  # the ActiveRecord relation API but backed by ArvadosResourceList.
+  def self.order(*args)
+    ArvadosResourceList.new(self).order(*args)
+  end
+
+  def self.filter(*args)
+    ArvadosResourceList.new(self).filter(*args)
+  end
+
+  def self.where(*args)
+    ArvadosResourceList.new(self).where(*args)
+  end
+
+  def self.limit(*args)
+    ArvadosResourceList.new(self).limit(*args)
+  end
+
+  def self.select(*args)
+    ArvadosResourceList.new(self).select(*args)
+  end
+
+  def self.eager(*args)
+    ArvadosResourceList.new(self).eager(*args)
+  end
+
+  def self.all
+    ArvadosResourceList.new(self)
+  end
+
+  def self.permit_attribute_params raw_params
+    # strong_parameters does not provide security in Workbench: anyone
+    # who can get this far can just as well do a call directly to our
+    # database (Arvados) with the same credentials we use.
+    #
+    # The following permit! is necessary even with
+    # "ActionController::Parameters.permit_all_parameters = true",
+    # because permit_all does not permit nested attributes.
+    ActionController::Parameters.new(raw_params).permit!
+  end
+
+  # Build a new object, stashing create_params to be sent with the
+  # create API call when the object is saved.
+  def self.create raw_params={}, create_params={}
+    x = super(permit_attribute_params(raw_params))
+    x.create_params = create_params
+    x
+  end
+
+  def update_attributes raw_params={}
+    super(self.class.permit_attribute_params(raw_params))
+  end
+
+  # Persist this object via the API: PUT when an etag is present (the
+  # object came from the server), otherwise POST to create it.  Returns
+  # self on success, false when the response lacks an etag/uuid.
+  def save
+    obdata = {}
+    self.class.columns.each do |col|
+      # Send attributes that are non-nil or were explicitly changed.
+      unless self.send(col.name.to_sym).nil? and !self.changed.include?(col.name)
+          obdata[col.name.to_sym] = self.send(col.name.to_sym)
+      end
+    end
+    obdata.delete :id
+    postdata = { self.class.to_s.underscore => obdata }
+    if etag
+      postdata['_method'] = 'PUT'
+      obdata.delete :uuid
+      resp = arvados_api_client.api(self.class, '/' + uuid, postdata)
+    else
+      postdata.merge!(@create_params) if @create_params
+      resp = arvados_api_client.api(self.class, '', postdata)
+    end
+    return false if !resp[:etag] || !resp[:uuid]
+
+    # set read-only non-database attributes
+    @etag = resp[:etag]
+    @kind = resp[:kind]
+
+    # attributes can be modified during "save" -- we should update our copies
+    resp.keys.each do |attr|
+      if self.respond_to? "#{attr}=".to_sym
+        self.send(attr.to_s + '=', resp[attr.to_sym])
+      end
+    end
+
+    @new_record = false
+
+    self
+  end
+
+  # Save, raising on failure instead of returning false.
+  def save!
+    self.save or raise Exception.new("Save failed")
+  end
+
+  # Delete via the API.  Returns the API response on success, true when
+  # there is nothing to delete (no etag or uuid).
+  def destroy
+    if etag || uuid
+      postdata = { '_method' => 'DELETE' }
+      resp = arvados_api_client.api(self.class, '/' + uuid, postdata)
+      resp[:etag] && resp[:uuid] && resp
+    else
+      true
+    end
+  end
+
+  # Links with this object as their tail, optionally filtered by
+  # link_class / name (positional args) or any attributes (trailing
+  # hash).  Uses the cached all_links when available; otherwise queries
+  # the API.
+  def links(*args)
+    o = {}
+    o.merge!(args.pop) if args[-1].is_a? Hash
+    o[:link_class] ||= args.shift
+    o[:name] ||= args.shift
+    o[:tail_uuid] = self.uuid
+    if all_links
+      return all_links.select do |m|
+        ok = true
+        o.each do |k,v|
+          if !v.nil?
+            # Compare by uuid when either side responds to it, so model
+            # objects and plain uuid strings match each other.
+            test_v = m.send(k)
+            if (v.respond_to?(:uuid) ? v.uuid : v.to_s) != (test_v.respond_to?(:uuid) ? test_v.uuid : test_v.to_s)
+              ok = false
+            end
+          end
+        end
+        ok
+      end
+    end
+    @links = arvados_api_client.api Link, '', { _method: 'GET', where: o, eager: true }
+    @links = arvados_api_client.unpack_api_response(@links)
+  end
+
+  # Fetch (once) and cache every link whose tail is this object.
+  def all_links
+    return @all_links if @all_links
+    res = arvados_api_client.api Link, '', {
+      _method: 'GET',
+      where: {
+        tail_kind: self.kind,
+        tail_uuid: self.uuid
+      },
+      eager: true
+    }
+    @all_links = arvados_api_client.unpack_api_response(res)
+  end
+
+  def reload
+    private_reload(self.uuid)
+  end
+
+  # Replace this object's state from a response hash, or from a fresh
+  # API lookup when given a uuid string.  Attributes not declared in the
+  # schema are exposed through dynamically-defined singleton readers.
+  def private_reload(uuid_or_hash)
+    raise "No such object" if !uuid_or_hash
+    if uuid_or_hash.is_a? Hash
+      hash = uuid_or_hash
+    else
+      hash = arvados_api_client.api(self.class, '/' + uuid_or_hash)
+    end
+    hash.each do |k,v|
+      if self.respond_to?(k.to_s + '=')
+        self.send(k.to_s + '=', v)
+      else
+        # When ArvadosApiClient#schema starts telling us what to expect
+        # in API responses (not just the server side database
+        # columns), this sort of awfulness can be avoided:
+        self.instance_variable_set('@' + k.to_s, v)
+        if !self.respond_to? k
+          singleton = class << self; self end
+          singleton.send :define_method, k, lambda { instance_variable_get('@' + k.to_s) }
+        end
+      end
+    end
+    @all_links = nil
+    @new_record = false
+    self
+  end
+
+  # Objects appear in URLs by uuid.
+  def to_param
+    uuid
+  end
+
+  # A dup/clone is a new, unsaved object: drop identity fields.
+  def initialize_copy orig
+    super
+    forget_uuid!
+  end
+
+  # Attributes as [name, value] pairs, hidden-attribute entries removed,
+  # ordered by @attribute_sortkey (falling back to the name itself).
+  def attributes_for_display
+    self.attributes.reject { |k,v|
+      attribute_sortkey.has_key?(k) and !attribute_sortkey[k]
+    }.sort_by { |k,v|
+      attribute_sortkey[k] or k
+    }
+  end
+
+  def class_for_display
+    self.class.to_s.underscore.humanize
+  end
+
+  def self.class_for_display
+    self.to_s.underscore.humanize
+  end
+
+  # Array of strings that are names of attributes that should be rendered as textile.
+  def textile_attributes
+    []
+  end
+
+  def self.creatable?
+    current_user
+  end
+
+  def self.goes_in_projects?
+    false
+  end
+
+  # can this class of object be copied into a project?
+  # override to false on indivudal model classes for which this should not be true
+  def self.copies_to_projects?
+    self.goes_in_projects?
+  end
+
+  # True when the current user may modify this object: admins, owners,
+  # unsaved records, or users listed in writable_by (looked up via the
+  # owner when the object itself has no writable_by).
+  def editable?
+    (current_user and current_user.is_active and
+     (current_user.is_admin or
+      current_user.uuid == self.owner_uuid or
+      new_record? or
+      (respond_to?(:writable_by) ?
+       writable_by.include?(current_user.uuid) :
+       (ArvadosBase.find(owner_uuid).writable_by.include? current_user.uuid rescue false)))) or false
+  end
+
+  # Array of strings that are the names of attributes that can be edited
+  # with X-Editable.
+  def editable_attributes
+    self.class.columns.map(&:name) -
+      %w(created_at modified_at modified_by_user_uuid modified_by_client_uuid updated_at)
+  end
+
+  # Whether a single attribute is editable right now (or "ever", i.e.
+  # ignoring the object's current state).
+  def attribute_editable?(attr, ever=nil)
+    if not editable_attributes.include?(attr.to_s)
+      false
+    elsif not (current_user.andand.is_active)
+      false
+    elsif attr == 'uuid'
+      current_user.is_admin
+    elsif ever
+      true
+    else
+      editable?
+    end
+  end
+
+  # Infer the model class for a uuid (or manifest locator, or object).
+  # Falls back to the referring object's "<attr>_kind" attribute when
+  # opts names a referring "<attr>_uuid" attribute.
+  def self.resource_class_for_uuid(uuid, opts={})
+    if uuid.is_a? ArvadosBase
+      return uuid.class
+    end
+    unless uuid.is_a? String
+      return nil
+    end
+    if opts[:class].is_a? Class
+      return opts[:class]
+    end
+    # A content-address locator (md5 hash + hints) denotes a Collection.
+    if uuid.match /^[0-9a-f]{32}(\+[^,]+)*(,[0-9a-f]{32}(\+[^,]+)*)*$/
+      return Collection
+    end
+    resource_class = nil
+    uuid.match /^[0-9a-z]{5}-([0-9a-z]{5})-[0-9a-z]{15}$/ do |re|
+      resource_class ||= arvados_api_client.
+        kind_class(self.uuid_infix_object_kind[re[1]])
+    end
+    if opts[:referring_object] and
+        opts[:referring_attr] and
+        opts[:referring_attr].match /_uuid$/
+      resource_class ||= arvados_api_client.
+        kind_class(opts[:referring_object].
+                   attributes[opts[:referring_attr].
+                              sub(/_uuid$/, '_kind')])
+    end
+    resource_class
+  end
+
+  def resource_param_name
+    self.class.to_s.underscore
+  end
+
+  def friendly_link_name lookup=nil
+    (name if self.respond_to? :name) || default_name
+  end
+
+  def content_summary
+    self.class_for_display
+  end
+
+  def selection_label
+    friendly_link_name
+  end
+
+  def self.default_name
+    self.to_s.underscore.humanize
+  end
+
+  def controller
+    (self.class.to_s.pluralize + 'Controller').constantize
+  end
+
+  def controller_name
+    self.class.to_s.tableize
+  end
+
+  # Placeholder for name when name is missing or empty
+  def default_name
+    if self.respond_to? :name
+      "New #{class_for_display.downcase}"
+    else
+      uuid
+    end
+  end
+
+  # The owning object, or nil if it cannot be fetched.
+  def owner
+    ArvadosBase.find(owner_uuid) rescue nil
+  end
+
+  protected
+
+  # Clear identity fields so the object will be created (not updated) on
+  # the next save.
+  def forget_uuid!
+    self.uuid = nil
+    @etag = nil
+    self
+  end
+
+  # Current user, cached per thread; only looked up when an API token is
+  # present on the thread.
+  def self.current_user
+    Thread.current[:user] ||= User.current if Thread.current[:arvados_api_token]
+    Thread.current[:user]
+  end
+  def current_user
+    self.class.current_user
+  end
+end
diff --git a/apps/workbench/app/models/arvados_resource_list.rb b/apps/workbench/app/models/arvados_resource_list.rb
new file mode 100644 (file)
index 0000000..d989715
--- /dev/null
@@ -0,0 +1,218 @@
+class ArvadosResourceList
+  include ArvadosApiClientHelper
+  include Enumerable
+
+  attr_reader :resource_class
+
+  def initialize resource_class=nil
+    @resource_class = resource_class
+    @fetch_multiple_pages = true
+    @arvados_api_token = Thread.current[:arvados_api_token]
+    @reader_tokens = Thread.current[:reader_tokens]
+  end
+
+  def eager(bool=true)
+    @eager = bool
+    self
+  end
+
+  def limit(max_results)
+    if not max_results.nil? and not max_results.is_a? Integer
+      raise ArgumentError("argument to limit() must be an Integer or nil")
+    end
+    @limit = max_results
+    self
+  end
+
+  def offset(skip)
+    @offset = skip
+    self
+  end
+
+  def order(orderby_spec)
+    @orderby_spec = orderby_spec
+    self
+  end
+
+  def select(columns=nil)
+    # If no column arguments were given, invoke Enumerable#select.
+    if columns.nil?
+      super()
+    else
+      @select ||= []
+      @select += columns
+      self
+    end
+  end
+
+  def filter _filters
+    @filters ||= []
+    @filters += _filters
+    self
+  end
+
+  def where(cond)
+    @cond = cond.dup
+    @cond.keys.each do |uuid_key|
+      if @cond[uuid_key] and (@cond[uuid_key].is_a? Array or
+                             @cond[uuid_key].is_a? ArvadosBase)
+        # Coerce cond[uuid_key] to an array of uuid strings.  This
+        # allows caller the convenience of passing an array of real
+        # objects and uuids in cond[uuid_key].
+        if !@cond[uuid_key].is_a? Array
+          @cond[uuid_key] = [@cond[uuid_key]]
+        end
+        @cond[uuid_key] = @cond[uuid_key].collect do |item|
+          if item.is_a? ArvadosBase
+            item.uuid
+          else
+            item
+          end
+        end
+      end
+    end
+    @cond.keys.select { |x| x.match /_kind$/ }.each do |kind_key|
+      if @cond[kind_key].is_a? Class
+        @cond = @cond.merge({ kind_key => 'arvados#' + arvados_api_client.class_kind(@cond[kind_key]) })
+      end
+    end
+    self
+  end
+
+  def fetch_multiple_pages(f)
+    @fetch_multiple_pages = f
+    self
+  end
+
+  def results
+    if !@results
+      @results = []
+      self.each_page do |r|
+        @results.concat r
+      end
+    end
+    @results
+  end
+
+  def results=(r)
+    @results = r
+    @items_available = r.items_available if r.respond_to? :items_available
+    @result_limit = r.limit if r.respond_to? :limit
+    @result_offset = r.offset if r.respond_to? :offset
+    @results
+  end
+
+  def to_ary
+    results
+  end
+
+  def each(&block)
+    if not @results.nil?
+      @results.each &block
+    else
+      self.each_page do |items|
+        items.each do |i|
+          block.call i
+        end
+      end
+    end
+    self
+  end
+
+  def first
+    results.first
+  end
+
+  def last
+    results.last
+  end
+
+  def [](*x)
+    results.send('[]', *x)
+  end
+
+  def |(x)
+    if x.is_a? Hash
+      self.to_hash | x
+    else
+      results | x.to_ary
+    end
+  end
+
+  def to_hash
+    Hash[self.collect { |x| [x.uuid, x] }]
+  end
+
+  def empty?
+    self.first.nil?
+  end
+
+  def items_available
+    results
+    @items_available
+  end
+
+  def result_limit
+    results
+    @result_limit
+  end
+
+  def result_offset
+    results
+    @result_offset
+  end
+
+  # Obsolete method retained during api transition.
+  def links_for item_or_uuid, link_class=false
+    []
+  end
+
+  protected
+
+  def each_page
+    api_params = {
+      _method: 'GET'
+    }
+    api_params[:where] = @cond if @cond
+    api_params[:eager] = '1' if @eager
+    api_params[:select] = @select if @select
+    api_params[:order] = @orderby_spec if @orderby_spec
+    api_params[:filters] = @filters if @filters
+
+
+    item_count = 0
+    offset = @offset || 0
+    @result_limit = nil
+    @result_offset = nil
+
+    begin
+      api_params[:offset] = offset
+      api_params[:limit] = (@limit - item_count) if @limit
+
+      res = arvados_api_client.api(@resource_class, '', api_params,
+                                   arvados_api_token: @arvados_api_token,
+                                   reader_tokens: @reader_tokens)
+      items = arvados_api_client.unpack_api_response res
+
+      @items_available = items.items_available if items.respond_to?(:items_available)
+      @result_limit = items.limit if (@fetch_multiple_pages == false) and items.respond_to?(:limit)
+      @result_offset = items.offset if (@fetch_multiple_pages == false) and items.respond_to?(:offset)
+
+      break if items.nil? or not items.any?
+
+      item_count += items.size
+      if items.respond_to?(:offset)
+        offset = items.offset + items.size
+      else
+        offset = item_count
+      end
+
+      yield items
+
+      break if @limit and item_count >= @limit
+      break if items.respond_to? :items_available and offset >= items.items_available
+    end while @fetch_multiple_pages
+    self
+  end
+
+end
diff --git a/apps/workbench/app/models/authorized_key.rb b/apps/workbench/app/models/authorized_key.rb
new file mode 100644 (file)
index 0000000..2d804e1
--- /dev/null
@@ -0,0 +1,9 @@
+class AuthorizedKey < ArvadosBase
+  def attribute_editable?(attr, ever=nil)
+    if (attr.to_s == 'authorized_user_uuid') and (not ever)
+      current_user.andand.is_admin
+    else
+      super
+    end
+  end
+end
diff --git a/apps/workbench/app/models/collection.rb b/apps/workbench/app/models/collection.rb
new file mode 100644 (file)
index 0000000..686b816
--- /dev/null
@@ -0,0 +1,101 @@
+require "arvados/keep"
+
+# A content-addressed collection of files stored in Keep.
+class Collection < ArvadosBase
+  # md5 of the empty string: the locator of a zero-length blob.
+  MD5_EMPTY = 'd41d8cd98f00b204e9800998ecf8427e'
+
+  def default_name
+    if Collection.is_empty_blob_locator? self.uuid
+      "Empty Collection"
+    else
+      super
+    end
+  end
+
+  # Return true if the given string is the locator of a zero-length blob
+  def self.is_empty_blob_locator? locator
+    !!locator.to_s.match("^#{MD5_EMPTY}(\\+.*)?\$")
+  end
+
+  def self.goes_in_projects?
+    true
+  end
+
+  # Parsed manifest, re-parsed whenever manifest_text has changed.
+  def manifest
+    if @manifest.nil? or manifest_text_changed?
+      @manifest = Keep::Manifest.new(manifest_text || "")
+    end
+    @manifest
+  end
+
+  def files
+    # This method provides backwards compatibility for code that relied on
+    # the old files field in API results.  New code should use manifest
+    # methods directly.
+    manifest.files
+  end
+
+  # Human-readable size prefixed to the standard summary.
+  def content_summary
+    ApplicationController.helpers.human_readable_bytes_html(total_bytes) + " " + super
+  end
+
+  # Sum of the file sizes listed in the manifest.
+  def total_bytes
+    manifest.files.inject(0) { |sum, filespec| sum + filespec.last }
+  end
+
+  # Nested listing of the collection's files: a flat array where each
+  # directory entry is followed by its children, and a nil filename
+  # marks a directory itself.  Intermediate (empty) directories are
+  # synthesized so every path segment appears.
+  def files_tree
+    tree = manifest.files.group_by do |file_spec|
+      File.split(file_spec.first)
+    end
+    return [] if tree.empty?
+    # Fill in entries for empty directories.
+    tree.keys.map { |basedir, _| File.split(basedir) }.each do |splitdir|
+      until tree.include?(splitdir)
+        tree[splitdir] = []
+        splitdir = File.split(splitdir.first)
+      end
+    end
+    dir_to_tree = lambda do |dirname|
+      # First list subdirectories, with their files inside.
+      subnodes = tree.keys.select { |bd, td| (bd == dirname) and (td != '.') }
+        .sort.flat_map do |parts|
+        [parts + [nil]] + dir_to_tree.call(File.join(parts))
+      end
+      # Then extend that list with files in this directory.
+      subnodes + tree[File.split(dirname)]
+    end
+    dir_to_tree.call('.')
+  end
+
+  def editable_attributes
+    %w(name description manifest_text)
+  end
+
+  # Collections are created by uploading data, not via Workbench forms.
+  def self.creatable?
+    false
+  end
+
+  def provenance
+    arvados_api_client.api "collections/#{self.uuid}/", "provenance"
+  end
+
+  def used_by
+    arvados_api_client.api "collections/#{self.uuid}/", "used_by"
+  end
+
+  # Fall back to the portable data hash when the record has no uuid.
+  def uuid
+    if self[:uuid].nil?
+      return self[:portable_data_hash]
+    else
+      super
+    end
+  end
+
+  def friendly_link_name lookup=nil
+    name || portable_data_hash
+  end
+
+  def textile_attributes
+    [ 'description' ]
+  end
+
+end
diff --git a/apps/workbench/app/models/group.rb b/apps/workbench/app/models/group.rb
new file mode 100644 (file)
index 0000000..3f5da15
--- /dev/null
@@ -0,0 +1,35 @@
+class Group < ArvadosBase
+  def self.goes_in_projects?
+    true
+  end
+
+  def self.copies_to_projects?
+    false
+  end
+
+  def self.contents params={}
+    res = arvados_api_client.api self, "/contents", {
+      _method: 'GET'
+    }.merge(params)
+    ret = ArvadosResourceList.new
+    ret.results = arvados_api_client.unpack_api_response(res)
+    ret
+  end
+
+  def contents params={}
+    res = arvados_api_client.api self.class, "/#{self.uuid}/contents", {
+      _method: 'GET'
+    }.merge(params)
+    ret = ArvadosResourceList.new
+    ret.results = arvados_api_client.unpack_api_response(res)
+    ret
+  end
+
+  def class_for_display
+    group_class == 'project' ? 'Project' : super
+  end
+
+  def textile_attributes
+    [ 'description' ]
+  end
+end
diff --git a/apps/workbench/app/models/human.rb b/apps/workbench/app/models/human.rb
new file mode 100644 (file)
index 0000000..7c2d3e4
--- /dev/null
@@ -0,0 +1,5 @@
+# A human subject record.
+class Human < ArvadosBase
+  def self.goes_in_projects?
+    true
+  end
+end
diff --git a/apps/workbench/app/models/job.rb b/apps/workbench/app/models/job.rb
new file mode 100644 (file)
index 0000000..c59bb89
--- /dev/null
@@ -0,0 +1,48 @@
+class Job < ArvadosBase
+  def self.goes_in_projects?
+    true
+  end
+
+  def content_summary
+    "#{script} job"
+  end
+
+  def editable_attributes
+    %w(description)
+  end
+
+  def self.creatable?
+    false
+  end
+
+  def default_name
+    if script
+      x = "\"#{script}\" job"
+    else
+      x = super
+    end
+    if finished_at
+      x += " finished #{finished_at.strftime('%b %-d')}"
+    elsif started_at
+      x += " started #{started_at.strftime('%b %-d')}"
+    elsif created_at
+      x += " submitted #{created_at.strftime('%b %-d')}"
+    end
+  end
+
+  def cancel
+    arvados_api_client.api "jobs/#{self.uuid}/", "cancel", {}
+  end
+
+  def self.queue_size
+    arvados_api_client.api("jobs/", "queue_size", {"_method"=> "GET"})[:queue_size] rescue 0
+  end
+
+  def self.queue
+    arvados_api_client.unpack_api_response arvados_api_client.api("jobs/", "queue", {"_method"=> "GET"})
+  end
+
+  def textile_attributes
+    [ 'description' ]
+  end
+end
diff --git a/apps/workbench/app/models/job_task.rb b/apps/workbench/app/models/job_task.rb
new file mode 100644 (file)
index 0000000..15fc7fd
--- /dev/null
@@ -0,0 +1,2 @@
+# A unit of work within a Job; no Workbench-specific behavior yet.
+class JobTask < ArvadosBase
+end
diff --git a/apps/workbench/app/models/keep_disk.rb b/apps/workbench/app/models/keep_disk.rb
new file mode 100644 (file)
index 0000000..8ced4eb
--- /dev/null
@@ -0,0 +1,5 @@
+# A storage disk backing a Keep server.
+class KeepDisk < ArvadosBase
+  # Only admins may register new disks.
+  def self.creatable?
+    current_user and current_user.is_admin
+  end
+end
diff --git a/apps/workbench/app/models/keep_service.rb b/apps/workbench/app/models/keep_service.rb
new file mode 100644 (file)
index 0000000..f27e369
--- /dev/null
@@ -0,0 +1,5 @@
+# A Keep storage service endpoint.
+class KeepService < ArvadosBase
+  # Only admins may register new services.
+  def self.creatable?
+    current_user and current_user.is_admin
+  end
+end
diff --git a/apps/workbench/app/models/link.rb b/apps/workbench/app/models/link.rb
new file mode 100644 (file)
index 0000000..271fa0f
--- /dev/null
@@ -0,0 +1,21 @@
+class Link < ArvadosBase
+  attr_accessor :head
+  attr_accessor :tail
+  def self.by_tail(t, opts={})
+    where(opts.merge :tail_uuid => t.uuid)
+  end
+
+  def default_name
+    self.class.resource_class_for_uuid(head_uuid).default_name rescue super
+  end
+
+  def self.permissions_for(thing)
+    if thing.respond_to? :uuid
+      uuid = thing.uuid
+    else
+      uuid = thing
+    end
+    result = arvados_api_client.api("permissions", "/#{uuid}")
+    arvados_api_client.unpack_api_response(result)
+  end
+end
diff --git a/apps/workbench/app/models/log.rb b/apps/workbench/app/models/log.rb
new file mode 100644 (file)
index 0000000..39d585b
--- /dev/null
@@ -0,0 +1,8 @@
+# An audit/event log entry.
+class Log < ArvadosBase
+  # Holder for an associated object resolved by callers (not an API
+  # attribute).
+  attr_accessor :object
+  def self.creatable?
+    # Technically yes, but not worth offering: it will be empty, and
+    # you won't be able to edit it.
+    false
+  end
+end
diff --git a/apps/workbench/app/models/node.rb b/apps/workbench/app/models/node.rb
new file mode 100644 (file)
index 0000000..e66be83
--- /dev/null
@@ -0,0 +1,8 @@
+class Node < ArvadosBase
+  def self.creatable?
+    current_user and current_user.is_admin
+  end
+  def friendly_link_name lookup=nil
+    (hostname && !hostname.empty?) ? hostname : uuid
+  end
+end
diff --git a/apps/workbench/app/models/pipeline_instance.rb b/apps/workbench/app/models/pipeline_instance.rb
new file mode 100644 (file)
index 0000000..f575e20
--- /dev/null
@@ -0,0 +1,84 @@
+# One run of a pipeline template.
+class PipelineInstance < ArvadosBase
+  attr_accessor :pipeline_template
+
+  def self.goes_in_projects?
+    true
+  end
+
+  # Display name: the instance's own name, else the template's name
+  # (using the optional lookup cache to avoid an API call), else uuid.
+  def friendly_link_name lookup=nil
+    pipeline_name = self.name
+    if pipeline_name.nil? or pipeline_name.empty?
+      template = if lookup and lookup[self.pipeline_template_uuid]
+                   lookup[self.pipeline_template_uuid]
+                 else
+                   PipelineTemplate.where(uuid: self.pipeline_template_uuid).first
+                 end
+      if template
+        template.name
+      else
+        self.uuid
+      end
+    else
+      pipeline_name
+    end
+  end
+
+  def content_summary
+    begin
+      PipelineTemplate.find(pipeline_template_uuid).name
+    rescue
+      super
+    end
+  end
+
+  # Overwrite step parameters from new_params.  A parameter may be
+  # addressed as "<step index>/<name>", "<step name>/<name>", or just
+  # "<name>"; the value slot updated is :hash or :data_locator when the
+  # parameter already has one, otherwise :value.
+  def update_job_parameters(new_params)
+    self.components[:steps].each_with_index do |step, i|
+      step[:params].each do |param|
+        if new_params.has_key?(new_param_name = "#{i}/#{param[:name]}") or
+            new_params.has_key?(new_param_name = "#{step[:name]}/#{param[:name]}") or
+            new_params.has_key?(new_param_name = param[:name])
+          param_type = :value
+          %w(hash data_locator).collect(&:to_sym).each do |ptype|
+            param_type = ptype if param.has_key? ptype
+          end
+          param[param_type] = new_params[new_param_name]
+        end
+      end
+    end
+  end
+
+  def editable_attributes
+    %w(name description components)
+  end
+
+  # Components may only be edited before the pipeline starts running
+  # (state New or Ready), unless asking about editability-"ever".
+  def attribute_editable?(name, ever=nil)
+    if name.to_s == "components"
+      (ever or %w(New Ready).include?(state)) and super
+    else
+      super
+    end
+  end
+
+  # The components hash is large; hide it from the attribute listing.
+  def attributes_for_display
+    super.reject { |k,v| k == 'components' }
+  end
+
+  def self.creatable?
+    false
+  end
+
+  # Human-readable title for one script input, preferring an explicit
+  # :title in the component's script_parameters.
+  def component_input_title(component_name, input_name)
+    component = components[component_name]
+    return nil if component.nil?
+    param_info = component[:script_parameters].andand[input_name.to_sym]
+    if param_info.is_a?(Hash) and param_info[:title]
+      param_info[:title]
+    else
+      "\"#{input_name.to_s}\" parameter for #{component[:script]} script in #{component_name} component"
+    end
+  end
+
+  def textile_attributes
+    [ 'description' ]
+  end
+end
diff --git a/apps/workbench/app/models/pipeline_template.rb b/apps/workbench/app/models/pipeline_template.rb
new file mode 100644 (file)
index 0000000..6e79775
--- /dev/null
@@ -0,0 +1,13 @@
+# A reusable pipeline definition from which instances are run.
+class PipelineTemplate < ArvadosBase
+  def self.goes_in_projects?
+    true
+  end
+
+  # Templates are created via the API/CLI, not Workbench forms.
+  def self.creatable?
+    false
+  end
+
+  def textile_attributes
+    [ 'description' ]
+  end
+end
diff --git a/apps/workbench/app/models/repository.rb b/apps/workbench/app/models/repository.rb
new file mode 100644 (file)
index 0000000..bed7edc
--- /dev/null
@@ -0,0 +1,8 @@
+class Repository < ArvadosBase
+  def self.creatable?
+    current_user and current_user.is_admin
+  end
+  def attributes_for_display
+    super.reject { |x| x[0] == 'fetch_url' }
+  end
+end
diff --git a/apps/workbench/app/models/specimen.rb b/apps/workbench/app/models/specimen.rb
new file mode 100644 (file)
index 0000000..7c611e4
--- /dev/null
@@ -0,0 +1,5 @@
+# A biological specimen record.
+class Specimen < ArvadosBase
+  def self.goes_in_projects?
+    true
+  end
+end
diff --git a/apps/workbench/app/models/trait.rb b/apps/workbench/app/models/trait.rb
new file mode 100644 (file)
index 0000000..e7a6ceb
--- /dev/null
@@ -0,0 +1,5 @@
+# A trait record.
+class Trait < ArvadosBase
+  def self.goes_in_projects?
+    true
+  end
+end
diff --git a/apps/workbench/app/models/user.rb b/apps/workbench/app/models/user.rb
new file mode 100644 (file)
index 0000000..7aaa4fe
--- /dev/null
@@ -0,0 +1,63 @@
+# An Arvados user account.
+class User < ArvadosBase
+  def initialize(*args)
+    super(*args)
+    # Sort name fields near the top of attribute listings.
+    @attribute_sortkey['first_name'] = '050'
+    @attribute_sortkey['last_name'] = '051'
+  end
+
+  # The user identified by the current API token.
+  def self.current
+    res = arvados_api_client.api self, '/current'
+    arvados_api_client.unpack_api_response(res)
+  end
+
+  # The system (root) user, cached in a class variable.
+  def self.system
+    @@arvados_system_user ||= begin
+                                res = arvados_api_client.api self, '/system'
+                                arvados_api_client.unpack_api_response(res)
+                              end
+  end
+
+  def full_name
+    (self.first_name || "") + " " + (self.last_name || "")
+  end
+
+  # Activate this account on the API server and refresh local state.
+  def activate
+    self.private_reload(arvados_api_client.api(self.class,
+                                               "/#{self.uuid}/activate",
+                                               {}))
+  end
+
+  # Objects owned by this user (via the groups contents endpoint).
+  def contents params={}
+    Group.contents params.merge(uuid: self.uuid)
+  end
+
+  def attributes_for_display
+    super.reject { |k,v| %w(owner_uuid default_owner_uuid identity_url prefs).index k }
+  end
+
+  # System users (uuid ending in fifteen zeros) who are admins are not
+  # editable, unless asking about editability-"ever".
+  def attribute_editable?(attr, ever=nil)
+    (ever or not (self.uuid.andand.match(/000000000000000$/) and
+                  self.is_admin)) and super
+  end
+
+  def friendly_link_name lookup=nil
+    [self.first_name, self.last_name].compact.join ' '
+  end
+
+  # Undo account setup on the API server and refresh local state.
+  def unsetup
+    self.private_reload(arvados_api_client.api(self.class,
+                                               "/#{self.uuid}/unsetup",
+                                               {}))
+  end
+
+  # Run the server-side new-user setup with the given parameters.
+  def self.setup params
+    arvados_api_client.api(self, "/setup", params)
+  end
+
+  # Update the user's profile on the API server and refresh local state.
+  def update_profile params
+    self.private_reload(arvados_api_client.api(self.class,
+                                               "/#{self.uuid}/profile",
+                                               params))
+  end
+
+end
diff --git a/apps/workbench/app/models/user_agreement.rb b/apps/workbench/app/models/user_agreement.rb
new file mode 100644 (file)
index 0000000..d77038c
--- /dev/null
@@ -0,0 +1,10 @@
+# A user agreement document that users must sign.
+class UserAgreement < ArvadosBase
+  # Agreements already signed by the current user.
+  def self.signatures
+    res = arvados_api_client.api self, '/signatures'
+    arvados_api_client.unpack_api_response(res)
+  end
+  # Record the current user's signature.
+  def self.sign(params)
+    res = arvados_api_client.api self, '/sign', params
+    arvados_api_client.unpack_api_response(res)
+  end
+end
diff --git a/apps/workbench/app/models/virtual_machine.rb b/apps/workbench/app/models/virtual_machine.rb
new file mode 100644 (file)
index 0000000..3b44397
--- /dev/null
@@ -0,0 +1,21 @@
+# A shell VM users can log in to.
+class VirtualMachine < ArvadosBase
+  # Login names the current user has on this VM (filled in by callers;
+  # not an API attribute).
+  attr_accessor :current_user_logins
+  def self.creatable?
+    current_user.andand.is_admin
+  end
+  # Append the synthetic logins attribute to the display list.
+  def attributes_for_display
+    super.append ['current_user_logins', @current_user_logins]
+  end
+  def editable_attributes
+    super - %w(current_user_logins)
+  end
+  # Recursively deep-merge the synthetic current_user_logins column
+  # metadata into the schema-derived attribute info.  The lambda's
+  # (k,a,b) signature matches Hash#merge's conflict-block arity, and it
+  # passes itself as that block to recurse into nested hashes.
+  def self.attribute_info
+    merger = ->(k,a,b) { a.merge(b, &merger) }
+    merger [nil,
+            {current_user_logins: {column_heading: "logins", type: 'array'}},
+            super]
+  end
+  # Prefer the hostname for display, falling back to the uuid.
+  def friendly_link_name lookup=nil
+    (hostname && !hostname.empty?) ? hostname : uuid
+  end
+end
diff --git a/apps/workbench/app/views/api_client_authorizations/_show_help.html.erb b/apps/workbench/app/views/api_client_authorizations/_show_help.html.erb
new file mode 100644 (file)
index 0000000..1481721
--- /dev/null
@@ -0,0 +1,14 @@
+<pre>
+### Pasting the following lines at a shell prompt will allow Arvados SDKs
+### to authenticate to your account, <%= current_user.email %>
+
+read ARVADOS_API_TOKEN &lt;&lt;EOF
+<%= Thread.current[:arvados_api_token] %>
+EOF
+export ARVADOS_API_TOKEN ARVADOS_API_HOST=<%= current_api_host %>
+<% if Rails.configuration.arvados_insecure_https %>
+export ARVADOS_API_HOST_INSECURE=true
+<% else %>
+unset ARVADOS_API_HOST_INSECURE
+<% end %>
+</pre>
diff --git a/apps/workbench/app/views/application/404.html.erb b/apps/workbench/app/views/application/404.html.erb
new file mode 100644 (file)
index 0000000..8a04822
--- /dev/null
@@ -0,0 +1,26 @@
+<%
+   if (controller.andand.action_name == 'show') and params[:uuid]
+     class_name = controller.model_class.to_s.underscore
+     class_name_h = class_name.humanize(capitalize: false)
+     req_item = safe_join([class_name_h, " with UUID ",
+                           raw("<code>"), params[:uuid], raw("</code>")], "")
+     req_item_plain_text = safe_join([class_name_h, " with UUID ", params[:uuid]])
+   else
+     req_item = "page you requested"
+     req_item_plain_text = "page you requested"
+   end
+%>
+
+<h2>Not Found</h2>
+
+<p>The <%= req_item %> was not found.
+
+<% if class_name %>
+Perhaps you'd like to
+<%= link_to("browse all #{class_name_h.pluralize}", action: :index, controller: class_name.tableize) %>?
+<% end %>
+
+</p>
+
+<% error_message = "The #{req_item_plain_text} was not found." %>
+<%= render :partial => "report_error", :locals => {error_message: error_message, error_type: '404'} %>
diff --git a/apps/workbench/app/views/application/404.json.erb b/apps/workbench/app/views/application/404.json.erb
new file mode 100644 (file)
index 0000000..8371ff9
--- /dev/null
@@ -0,0 +1 @@
+{"errors":<%= raw @errors.to_json %>}
\ No newline at end of file
diff --git a/apps/workbench/app/views/application/_arvados_attr_value.html.erb b/apps/workbench/app/views/application/_arvados_attr_value.html.erb
new file mode 100644 (file)
index 0000000..3df892f
--- /dev/null
@@ -0,0 +1,22 @@
+<% if attrvalue.is_a? Array and attrvalue.collect(&:class).uniq.compact == [String] %>
+  <% attrvalue.each do |message| %>
+    <%= message %><br />
+  <% end %>
+<% else %>
+      <% if attr and obj.attribute_editable?(attr) and (!defined?(editable) || editable) %>
+        <% if resource_class_for_uuid(attrvalue, {referring_object: obj, referring_attr: attr}) %>
+          <%= link_to_if_arvados_object attrvalue, {referring_attr: attr, referring_object: obj, with_class_name: true, friendly_name: true} %>
+          <br>
+        <% end %>
+        <%= render_editable_attribute obj, attr %>
+      <% elsif attr == 'uuid' %>
+        <%= link_to_if_arvados_object attrvalue, {referring_attr: attr, referring_object: obj, with_class_name: false, friendly_name: false} %>
+      <% else %>
+        <%= link_to_if_arvados_object attrvalue, {referring_attr: attr, referring_object: obj, with_class_name: true, friendly_name: true, thumbnail: true} %>
+      <% end %>
+      <!--
+      <% if resource_class_for_uuid(attrvalue, {referring_object: obj, referring_attr: attr}) %>
+        <%= link_to_if_arvados_object(attrvalue, { referring_object: obj, link_text: raw('<span class="glyphicon glyphicon-hand-right"></span>'), referring_attr: attr })  %>
+      <% end %>
+      -->
+<% end %>
diff --git a/apps/workbench/app/views/application/_arvados_object.html.erb b/apps/workbench/app/views/application/_arvados_object.html.erb
new file mode 100644 (file)
index 0000000..b4bf70d
--- /dev/null
@@ -0,0 +1,36 @@
+<% content_for :arvados_object_table do %>
+
+<% end %>
+
+<% if content_for? :page_content %>
+<%= yield :page_content %>
+<% else %>
+<%= yield :arvados_object_table %>
+<% end %>
+
+<div>
+  <ul class="nav nav-tabs">
+    <% if content_for? :page_content %>
+    <li><a href="#arvados-object-table" data-toggle="tab">Table</a></li>
+    <% end %>
+    <li class="active"><a href="#arvados-object-json" data-toggle="tab">API response JSON</a></li>
+    <% if @object.andand.uuid %>
+    <li><a href="#arvados-object-curl" data-toggle="tab">curl update example</a></li>
+    <li><a href="#arvados-object-arv" data-toggle="tab">&ldquo;arv&rdquo; CLI examples</a></li>
+    <li><a href="#arvados-object-python" data-toggle="tab">Python example</a></li>
+    <% end %>
+  </ul>
+
+  <div class="tab-content">
+    <% if content_for? :page_content %>
+    <div id="arvados-object-table" class="tab-pane fade">
+      <%= yield :arvados_object_table %>
+    </div>
+    <% end %>
+    <div id="arvados-object-json" class="tab-pane fade in active">
+
+    </div>
+
+
+  </div>
+</div>
diff --git a/apps/workbench/app/views/application/_arvados_object_attr.html.erb b/apps/workbench/app/views/application/_arvados_object_attr.html.erb
new file mode 100644 (file)
index 0000000..b06dd8c
--- /dev/null
@@ -0,0 +1,17 @@
+<% object ||= @object %>
+<% if attrvalue.is_a? Hash then attrvalue.each do |infokey, infocontent| %>
+<tr class="info">
+  <td><%= attr %>[<%= infokey %>]</td>
+  <td>
+    <%= render partial: 'application/arvados_attr_value', locals: { obj: object, attr: nil, attrvalue: infocontent } %>
+  </td>
+</tr>
+<% end %>
+<% elsif attrvalue.is_a? String or attrvalue.respond_to? :to_s %>
+<tr class="<%= 'info' if %w(uuid owner_uuid created_at modified_at modified_by_user_uuid modified_by_client_uuid updated_at).include?(attr.to_s) %>">
+  <td><%= attr %></td>
+  <td>
+    <%= render partial: 'application/arvados_attr_value', locals: { obj: object, attr: attr, attrvalue: attrvalue } %>
+  </td>
+</tr>
+<% end %>
diff --git a/apps/workbench/app/views/application/_breadcrumb_page_name.html.erb b/apps/workbench/app/views/application/_breadcrumb_page_name.html.erb
new file mode 100644 (file)
index 0000000..8b13789
--- /dev/null
@@ -0,0 +1 @@
+
diff --git a/apps/workbench/app/views/application/_breadcrumbs.html.erb b/apps/workbench/app/views/application/_breadcrumbs.html.erb
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/workbench/app/views/application/_choose.html.erb b/apps/workbench/app/views/application/_choose.html.erb
new file mode 100644 (file)
index 0000000..4e1503b
--- /dev/null
@@ -0,0 +1,86 @@
+<div class="modal arv-choose modal-with-loading-spinner">
+  <div class="modal-dialog" style="width:80%">
+    <div class="modal-content">
+      <div class="modal-header">
+        <button type="button" class="close" data-dismiss="modal" aria-hidden="true">&times;</button>
+        <h4 class="modal-title"><%= params[:title] || "Choose #{@objects.resource_class.andand.class_for_display}" %></h4>
+      </div>
+
+      <div class="modal-body">
+        <% project_filters, chooser_filters = (params[:filters] || []).partition do |attr, op, val|
+             attr == "owner_uuid" and op == "="
+           end %>
+        <div class="input-group">
+          <% if params[:by_project].to_s != "false" %>
+            <% if project_filters.empty?
+                 selected_project_name = 'All projects'
+               else
+                 val = project_filters.last.last
+                 if val == current_user.uuid
+                   selected_project_name = "Home"
+                 else
+                   selected_project_name = Group.find(val).name rescue val
+                 end
+               end
+               %>
+            <div class="input-group-btn" data-filterable-target=".modal.arv-choose .selectable-container">
+              <button type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown">
+                <%= selected_project_name %> <span class="caret"></span>
+              </button>
+              <ul class="dropdown-menu" role="menu">
+                <li>
+                  <%= link_to '#', class: 'chooser-show-project' do %>
+                    All projects
+                  <% end %>
+                </li>
+                <li class="divider" />
+                <%= render partial: "projects_tree_menu", locals: {
+                      :project_link_to => Proc.new do |pnode, &block|
+                        link_to "#", {
+                          class: "chooser-show-project",
+                          data: {'project_uuid' => pnode[:object].uuid},
+                        }, &block
+                      end,
+                      :top_button => nil
+                    } %>
+              </ul>
+            </div>
+          <% end %>
+          <input type="text" value="<%=params[:preconfigured_search_str] || ''%>" class="form-control filterable-control focus-on-display" placeholder="Search" data-filterable-target=".modal.arv-choose .selectable-container"/>
+        </div>
+        <div style="height: 1em" />
+
+        <% preview_pane = (params[:preview_pane].to_s != "false")
+           pane_col_class = preview_pane ? "col-md-6" : "col-md-12" %>
+        <div class="row" style="height: 20em">
+          <div class="<%= pane_col_class %> arv-filterable-list selectable-container <%= 'multiple' if multiple %>"
+               style="height: 100%; overflow-y: scroll"
+               data-infinite-scroller="#choose-scroll"
+               id="choose-scroll"
+               data-infinite-content-params-from-chooser="<%= {filters: chooser_filters}.to_json %>"
+               <% if project_filters.any? %>
+                 data-infinite-content-params-from-project-dropdown="<%= {filters: project_filters, project_uuid: project_filters.last.last}.to_json %>"
+               <% end %>
+               <%
+                  action_data = JSON.parse params['action_data'] if params['action_data']
+                  use_preview_sel = action_data ? action_data['use_preview_selection'] : false
+                %>
+               data-infinite-content-href="<%= url_for partial: true,
+                                                       use_preview_selection: use_preview_sel %>">
+          </div>
+          <% if preview_pane %>
+            <div class="col-md-6 hidden-xs hidden-sm modal-dialog-preview-pane" style="height: 100%; overflow-y: scroll">
+            </div>
+          <% end %>
+        </div>
+
+        <div class="modal-footer">
+          <button class="btn btn-default" data-dismiss="modal" aria-hidden="true">Cancel</button>
+          <button class="btn btn-primary" aria-hidden="true" data-enable-if-selection disabled><%= raw(params[:action_name]) || 'Select' %></button>
+          <div class="modal-error hide" style="text-align: left; margin-top: 1em;">
+          </div>
+        </div>
+      </div>
+    </div>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/application/_choose.js.erb b/apps/workbench/app/views/application/_choose.js.erb
new file mode 100644 (file)
index 0000000..1722334
--- /dev/null
@@ -0,0 +1,27 @@
+<%
+=begin
+
+Parameters received from the caller/requestor of the modal are
+attached to the action button (.btn-primary) as follows:
+
+action_class -- string -- added as a pseudoclass to the action button.
+
+action_href -- string -- will be available at $(btn).attr('data-action-href')
+
+action_data -- json-encoded object -- will be at $(btn).data('action-data')
+
+action_data_form_params -- array -- for each X in this array, the
+value of params[X] during this "show chooser" request will be in
+$(btn).data('action-data-from-params')[X].
+
+=end
+%>
+
+$('body > .modal-container').html("<%= escape_javascript(render partial: 'choose.html', locals: {multiple: multiple}) %>");
+$('body > .modal-container .modal').modal('show');
+$('body > .modal-container .modal .modal-footer .btn-primary').
+    addClass('<%= j params[:action_class] %>').
+    attr('data-action-href', '<%= j params[:action_href] %>').
+    attr('data-method', '<%= j params[:action_method] %>').
+    data('action-data', <%= raw params[:action_data] %>).
+    data('action-data-from-params', <%= raw params.select { |k,v| k.in?(params[:action_data_from_params] || []) }.to_json %>);
diff --git a/apps/workbench/app/views/application/_content.html.erb b/apps/workbench/app/views/application/_content.html.erb
new file mode 100644 (file)
index 0000000..782a6af
--- /dev/null
@@ -0,0 +1,45 @@
+<% content_for :tab_panes do %>
+
+  <% comparable = controller.respond_to? :compare %>
+
+  <ul class="nav nav-tabs" data-tab-counts-url="<%= url_for(action: :tab_counts) rescue '' %>">
+    <% pane_list.each_with_index do |pane, i| %>
+      <% pane_name = (pane.is_a?(Hash) ? pane[:name] : pane) %>
+      <li class="<%= 'active' if i==0 %>">
+        <a href="#<%= pane_name %>"
+           id="<%= pane_name %>-tab"
+           data-toggle="tab"
+           data-tab-history=true
+           data-tab-history-update-url=true
+           >
+          <%= pane_name.gsub('_', ' ') %> <span id="<%= pane_name %>-count"></span>
+        </a>
+      </li>
+    <% end %>
+  </ul>
+
+  <div class="tab-content">
+    <% pane_list.each_with_index do |pane, i| %>
+      <% pane_name = (pane.is_a?(Hash) ? pane[:name] : pane) %>
+      <div id="<%= pane_name %>"
+           class="tab-pane fade <%= 'in active pane-loaded' if i==0 %> arv-log-event-listener arv-refresh-on-log-event arv-log-event-subscribe-to-pipeline-job-uuids"
+           <% if controller.action_name == "index" %>
+             data-object-kind="arvados#<%= ArvadosApiClient.class_kind controller.model_class %>"
+           <% else %>
+             data-object-uuid="<%= @object.uuid %>"
+           <% end %>
+           data-pane-content-url="<%= url_for(params.merge(tab_pane: pane_name)) %>"
+           style="margin-top:0.5em;"
+           >
+        <div class="pane-content">
+          <% if i == 0 %>
+            <%= render_pane pane_name, to_string: true %>
+          <% else %>
+            <div class="spinner spinner-32px spinner-h-center"></div>
+          <% end %>
+        </div>
+      </div>
+    <% end %>
+  </div>
+
+<% end %>
diff --git a/apps/workbench/app/views/application/_content_layout.html.erb b/apps/workbench/app/views/application/_content_layout.html.erb
new file mode 100644 (file)
index 0000000..ff597d9
--- /dev/null
@@ -0,0 +1,10 @@
+<div class="clearfix">
+  <%= content_for :content_top %>
+  <div class="pull-right">
+    <%= content_for :tab_line_buttons %>
+  </div>
+</div>
+
+<%= content_for :tab_panes %>
+
+<%= render :partial => 'loading_modal' %>
diff --git a/apps/workbench/app/views/application/_delete_object_button.html.erb b/apps/workbench/app/views/application/_delete_object_button.html.erb
new file mode 100644 (file)
index 0000000..6d6383e
--- /dev/null
@@ -0,0 +1,5 @@
+<% if object.editable? %>
+  <%= link_to({action: 'destroy', id: object.uuid}, method: :delete, remote: true, data: {confirm: "Really delete #{object.class_for_display.downcase} '#{object.friendly_link_name}'?"}) do %>
+    <i class="glyphicon glyphicon-trash"></i>
+  <% end %>
+<% end %>
diff --git a/apps/workbench/app/views/application/_index.html.erb b/apps/workbench/app/views/application/_index.html.erb
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/workbench/app/views/application/_job_progress.html.erb b/apps/workbench/app/views/application/_job_progress.html.erb
new file mode 100644 (file)
index 0000000..efe1819
--- /dev/null
@@ -0,0 +1,51 @@
+<% if (j.andand[:state] == "Running" or defined? scaleby) and (not defined? show_progress_bar or show_progress_bar) %>
+  <%
+    failed = j[:tasks_summary][:failed] || 0 rescue 0
+    done = j[:tasks_summary][:done] || 0 rescue 0
+    running = j[:tasks_summary][:running] || 0 rescue 0
+    todo = j[:tasks_summary][:todo] || 0 rescue 0
+
+    if done + running + failed + todo == 0
+      # No tasks were ever created for this job;
+      # render an empty progress bar.
+      done_percent = 0
+    else
+      percent_total_tasks = 100.0 / (done + running + failed + todo)
+      if defined? scaleby
+        percent_total_tasks *= scaleby
+      end
+      done_percent = (done+failed) * percent_total_tasks
+    end
+    %>
+
+  <% if not defined? scaleby %>
+    <div class="progress" style="margin-bottom: 0px">
+  <% end %>
+
+  <span class="progress-bar <%= if failed == 0 then 'progress-bar-success' else 'progress-bar-warning' end %>" style="width: <%= done_percent %>%;">
+  </span>
+
+  <% if not defined? scaleby %>
+  </div>
+  <% end %>
+
+<% else %>
+
+<% to_label = {
+     "Cancelled" => "danger",
+     "Complete" => "success",
+     "Running" => "info",
+     "Failed" => "danger",
+     "Queued" => "default",
+     nil => "default"
+   } %>
+
+  <span class="label label-<%= to_label[j.andand[:state]] %>">
+    <%= if defined? title
+          title
+        else
+          if j.andand[:state] then j[:state].downcase else "Not ready" end
+        end
+        %></span>
+
+<% end %>
diff --git a/apps/workbench/app/views/application/_loading.html.erb b/apps/workbench/app/views/application/_loading.html.erb
new file mode 100644 (file)
index 0000000..870abaf
--- /dev/null
@@ -0,0 +1,190 @@
+<div class="socket">
+  <div class="gel center-gel">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c1 r1">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c2 r1">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c3 r1">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c4 r1">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c5 r1">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c6 r1">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  
+  <div class="gel c7 r2">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  
+  <div class="gel c8 r2">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c9 r2">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c10 r2">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c11 r2">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c12 r2">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c13 r2">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c14 r2">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c15 r2">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c16 r2">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c17 r2">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c18 r2">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c19 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c20 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c21 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c22 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c23 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c24 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c25 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c26 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c28 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c29 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c30 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c31 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c32 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c33 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c34 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c35 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c36 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  <div class="gel c37 r3">
+    <div class="hex-brick h1"></div>
+    <div class="hex-brick h2"></div>
+    <div class="hex-brick h3"></div>
+  </div>
+  
+</div>
diff --git a/apps/workbench/app/views/application/_loading_modal.html.erb b/apps/workbench/app/views/application/_loading_modal.html.erb
new file mode 100644 (file)
index 0000000..b8f6888
--- /dev/null
@@ -0,0 +1,12 @@
+<div id="loading-modal" class="modal fade">
+  <div class="modal-dialog">
+       <div class="modal-content">
+         <div class="modal-header">
+           <h3>Refreshing...</h3>
+         </div>
+         <div class="modal-body">
+           <p>Content may have changed.</p>
+         </div>
+       </div>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/application/_name_and_description.html.erb b/apps/workbench/app/views/application/_name_and_description.html.erb
new file mode 100644 (file)
index 0000000..0144a4d
--- /dev/null
@@ -0,0 +1,12 @@
+<% if @object.respond_to? :name %>
+  <h2>
+    <%= render_editable_attribute @object, 'name', nil, { 'data-emptytext' => "New #{controller.model_class.to_s.underscore.gsub("_"," ")}" } %>
+  </h2>
+<% end %>
+
+<% if @object.respond_to? :description %>
+  <div class="arv-description-as-subtitle">
+    <%= render_editable_attribute @object, 'description', nil, { 'data-emptytext' => "(No description provided)", 'data-toggle' => 'manual' } %>
+  </div>
+<% end %>
+
diff --git a/apps/workbench/app/views/application/_paging.html.erb b/apps/workbench/app/views/application/_paging.html.erb
new file mode 100644 (file)
index 0000000..9c64c2b
--- /dev/null
@@ -0,0 +1,128 @@
+<% content_for :css do %>
+.index-paging {
+text-align: center;
+padding-left: 1em;
+padding-right: 1em;
+background-color: whitesmoke;
+}
+.paging-number {
+display: inline-block;
+min-width: 1.2em;
+}
+<% end %>
+
+<% results.fetch_multiple_pages(false) %>
+
+<% if results.respond_to? :result_offset and
+       results.respond_to? :result_limit and
+       results.respond_to? :items_available and
+       results.result_offset != nil and
+       results.result_limit != nil and
+       results.items_available != nil
+%>
+<div class="index-paging">
+  Displaying <%= results.result_offset+1 %> &ndash;
+  <%= if results.result_offset + results.result_limit > results.items_available
+        results.items_available
+      else
+        results.result_offset + results.result_limit
+      end %>
+ out of <%= results.items_available %>
+</div>
+
+<% if not (results.result_offset == 0 and results.items_available <= results.result_limit) %>
+
+<div class="index-paging">
+
+<% if results.result_offset > 0 %>
+  <% if results.result_offset > results.result_limit %>
+    <% prev_offset = results.result_offset - results.result_limit %>
+  <% else %>
+    <% prev_offset = 0 %>
+  <% end %>
+<% else %>
+  <% prev_offset = nil %>
+<% end %>
+
+<% this_offset = results.result_offset %>
+
+<% if (results.result_offset + results.result_limit) < results.items_available %>
+  <% next_offset = results.result_offset + results.result_limit %>
+<% else %>
+  <% next_offset = nil %>
+<% end %>
+
+<span class="pull-left">
+<% if results.result_offset > 0 %>
+  <%= link_to raw("<span class='glyphicon glyphicon-fast-backward'></span>"), {:id => object, :offset => 0, :limit => results.result_limit}  %>
+<% else %>
+  <span class='glyphicon glyphicon-fast-backward text-muted'></span>
+<% end %>
+
+<% if prev_offset %>
+  <%= link_to raw("<span class='glyphicon glyphicon-step-backward'></span>"), {:id => object, :offset => prev_offset, :limit => results.result_limit}  %>
+<% else %>
+<span class='glyphicon glyphicon-step-backward text-muted'></span>
+<% end %>
+</span>
+
+<% first = this_offset - (10 * results.result_limit) %>
+<% last = this_offset + (11 * results.result_limit) %>
+
+<% lastpage_offset = (results.items_available / results.result_limit) * results.result_limit %>
+
+<% if last > results.items_available %>
+  <% first -= (last - lastpage_offset) %>
+  <% last -= (last - results.items_available) %>
+<% end %>
+
+<% if first < 0 %>
+  <% d = -first %>
+  <% first += d %>
+  <% last += d %>
+<% end %>
+
+<% last = results.items_available if last > results.items_available %>
+
+<% i = first %>
+<% n = first / results.result_limit %>
+
+<% if first > 0 %>
+&hellip;
+<% end %>
+
+<% while i < last %>
+<% if i != this_offset %>
+  <%= link_to "#{n+1}", {:id => @object, :offset => i, :limit => results.result_limit}, class: 'paging-number' %>
+<% else %>
+  <span class="paging-number" style="font-weight: bold;"><%= n+1 %></span>
+<% end %>
+<% i += results.result_limit %>
+<% n += 1 %>
+<% end %>
+
+<% if last < results.items_available %>
+&hellip;
+<% end %>
+
+<span class="pull-right">
+<% if next_offset %>
+  <%= link_to raw("<span class='glyphicon glyphicon-step-forward'></span>"), {:id => @object, :offset => next_offset, :limit => results.result_limit}  %>
+<% else %>
+<span class='glyphicon glyphicon-forward text-muted'></span>
+<% end %>
+
+<% if (results.items_available - results.result_offset) >= results.result_limit %>
+  <%= link_to raw("<span class='glyphicon glyphicon-fast-forward'></span>"), {:id => @object, :offset => results.items_available - (results.items_available % results.result_limit),
+        :limit => results.result_limit}  %>
+<% else %>
+  <span class='glyphicon glyphicon-fast-forward text-muted'></span>
+<% end %>
+
+</span>
+
+</div>
+
+<% end %>
+
+<% end %>
diff --git a/apps/workbench/app/views/application/_pipeline_progress.html.erb b/apps/workbench/app/views/application/_pipeline_progress.html.erb
new file mode 100644 (file)
index 0000000..2ae03a0
--- /dev/null
@@ -0,0 +1,8 @@
+<% component_frac = 1.0 / p.components.length %>
+<div class="progress">
+  <% p.components.each do |k,c| %>
+    <% if c.is_a?(Hash) and c[:job] %>
+      <%= render partial: "job_progress", locals: {:j => c[:job], :scaleby => component_frac } %>
+    <% end %>
+  <% end %>
+</div>
diff --git a/apps/workbench/app/views/application/_pipeline_status_label.html.erb b/apps/workbench/app/views/application/_pipeline_status_label.html.erb
new file mode 100644 (file)
index 0000000..8872272
--- /dev/null
@@ -0,0 +1,15 @@
+<% if p.state == 'Complete' %>
+  <span class="label label-success">complete</span>
+<% elsif p.state == 'Failed' %>
+  <span class="label label-danger">failed</span>
+<% elsif p.state == 'RunningOnServer' || p.state == 'RunningOnClient' %>
+  <span class="label label-info">running</span>
+<% elsif p.state == 'Paused'  %>
+  <span class="label label-default">paused</span>
+<% else %>
+  <% if not p.components.values.any? { |c| c[:job] rescue false } %>
+    <span class="label label-default">not started</span>
+  <% else %>
+    <span class="label label-default">not running</span>
+  <% end %>
+<% end %>
diff --git a/apps/workbench/app/views/application/_projects_tree_menu.html.erb b/apps/workbench/app/views/application/_projects_tree_menu.html.erb
new file mode 100644 (file)
index 0000000..4de3c23
--- /dev/null
@@ -0,0 +1,25 @@
+<li>
+  <%= project_link_to.call({object: current_user, depth: 0}) do %>
+    <span style="padding-left: 0">Home</span>
+  <% end %>
+</li>
+<% my_project_tree.each do |pnode| %>
+  <% next if pnode[:object].class != Group %>
+  <li>
+    <%= project_link_to.call pnode do %>
+      <span style="padding-left: <%= pnode[:depth] %>em"></span><%= pnode[:object].name %>
+    <% end %>
+  </li>
+<% end %>
+<li class="divider" />
+<li role="presentation" class="dropdown-header">
+  Projects shared with me
+</li>
+<% shared_project_tree.each do |pnode| %>
+  <% next if pnode[:object].class != Group %>
+  <li>
+    <%= project_link_to.call pnode do %>
+      <span style="padding-left: <%= pnode[:depth]-1 %>em"></span><i class="fa fa-fw fa-share-alt" style="color:#aaa"></i> <%= pnode[:object].name %>
+    <% end %>
+  </li>
+<% end %>
diff --git a/apps/workbench/app/views/application/_report_error.html.erb b/apps/workbench/app/views/application/_report_error.html.erb
new file mode 100644 (file)
index 0000000..2e449b7
--- /dev/null
@@ -0,0 +1,21 @@
+<p>
+<br/><strong>If you suspect this is a bug, you can help us fix it by sending us a problem report:</strong><br/><br/>
+<% if error_type == 'api' %>
+  <%
+    api_request_url = api_error.andand.request_url ? api_error.request_url : ''
+    api_error_response = api_error.andand.api_response ? api_error.api_response : ''
+  %>
+  Send a problem report right here. <%= link_to report_issue_popup_path(popup_type: 'report', current_location: request.url, current_path: request.fullpath, action_method: 'post', api_error_request_url: api_request_url, api_error_response: api_error_response),
+        {class: 'btn btn-primary report-issue-modal-window', :remote => true, return_to: request.url} do %>
+        <i class="fa fa-fw fa-support"></i> Report problem
+  <% end %>
+<% else %>
+  Send a problem report right here. <%= link_to report_issue_popup_path(popup_type: 'report', current_location: request.url, current_path: request.fullpath, action_method: 'post', error_message: error_message),
+        {class: 'btn btn-primary report-issue-modal-window', :remote => true, return_to: request.url} do %>
+        <i class="fa fa-fw fa-support"></i> Report problem
+  <% end %>
+<% end %>
+<% support_email = Rails.configuration.support_email_address%>
+<br/><br/>
+  If you prefer, send email to: <a href="mailto:<%=support_email%>?subject=Workbench problem report&amp;body=Problem while viewing page <%=request.url%>"><%=support_email%></a>
+</p>
diff --git a/apps/workbench/app/views/application/_report_issue_popup.html.erb b/apps/workbench/app/views/application/_report_issue_popup.html.erb
new file mode 100644 (file)
index 0000000..315116b
--- /dev/null
@@ -0,0 +1,147 @@
+<%
+  generated_at = arvados_api_client.discovery[:generatedAt]
+  arvados_base = Rails.configuration.arvados_v1_base
+  support_email = Rails.configuration.support_email_address
+
+  api_version_link = link_to api_version_text, version_link_target(api_version)
+  wb_version_link = link_to wb_version_text, version_link_target(wb_version)
+
+  additional_info = {}
+  additional_info['Current location'] = params[:current_location]
+  additional_info['User UUID'] = current_user.uuid if current_user
+
+  additional_info_str = additional_info.map {|k,v| "#{k}=#{v}"}.join("\n")
+
+  additional_info['api_version'] = api_version_text
+  additional_info['generated_at'] = generated_at
+  additional_info['workbench_version'] = wb_version_text
+  additional_info['arvados_base'] = arvados_base
+  additional_info['support_email'] = support_email
+  additional_info['error_message'] = params[:error_message] if params[:error_message]
+  additional_info['api_error_request_url'] = params[:api_error_request_url] if params[:api_error_request_url]
+  additional_info['api_error_response'] = params[:api_error_response] if params[:api_error_response]
+%>
+
+<div class="modal">
+ <div class="modal-dialog modal-with-loading-spinner">
+  <div class="modal-content">
+
+    <%= form_tag report_issue_path, {name: 'report-issue-form', method: 'post',
+        class: 'form-horizontal', remote: true} do %>
+
+      <%
+        title = 'Version / debugging info'
+        title = 'Report a problem' if params[:popup_type] == 'report'
+      %>
+
+      <div class="modal-header">
+        <button type="button" class="close" data-dismiss="modal" aria-hidden="true">&times;</button>
+        <div>
+          <div class="col-sm-8"> <h4 class="modal-title"><%=title%></h4> </div>
+          <div class="spinner spinner-32px spinner-h-center col-sm-1" hidden="true"></div>
+        </div>
+        <br/>
+      </div>
+
+      <div class="modal-body" style="height: 25em; overflow-y: scroll">
+        <div class="form-group">
+          <label for="support_email" class="col-sm-4 control-label"> Support email </label>
+          <div class="col-sm-8">
+            <p class="form-control-static" name="support_email"><a href="mailto:<%=support_email%>?subject=Workbench problem report&amp;body=Problem while viewing page <%=params[:current_location]%>"><%=support_email%></a></p>
+          </div>
+        </div>
+
+        <div class="form-group">
+          <label for="current_page" class="col-sm-4 control-label"> Current page </label>
+          <div class="col-sm-8">
+            <p class="form-control-static text-overflow-ellipsis" name="current_page"><%=params[:current_path]%></p>
+          </div>
+        </div>
+
+        <% if params[:popup_type] == 'report' %>
+          <div class="form-group">
+            <label for="report_text_label" class="col-sm-4 control-label"> Describe the problem </label>
+            <div class="col-sm-8">
+              <textarea class="form-control" rows="4" id="report_issue_text" name="report_issue_text" placeholder="Describe the problem"></textarea>
+            </div>
+            <input type="hidden" name="report_additional_info" value="<%=additional_info.to_json%>">
+          </div>
+        <% end %>
+
+        <div class="form-group">
+          <label for="wb_version" class="col-sm-4 control-label"> Workbench version </label>
+          <div class="col-sm-8">
+            <p class="form-control-static" name="wb_version"><%= wb_version_link %></p>
+          </div>
+        </div>
+
+        <div class="form-group">
+          <label for="server_version" class="col-sm-4 control-label"> API version </label>
+          <div class="col-sm-8">
+            <p class="form-control-static" name="server_version"><%= api_version_link %></p>
+          </div>
+        </div>
+
+        <div class="form-group">
+          <label for="generated_at" class="col-sm-4 control-label"> API startup time </label>
+          <div class="col-sm-8">
+            <p class="form-control-static" name="generated_at"><%=generated_at%></p>
+          </div>
+        </div>
+
+        <div class="form-group">
+          <label for="arvados_base" class="col-sm-4 control-label"> API address </label>
+          <div class="col-sm-8">
+            <p class="form-control-static" name="arvados_base"><%=arvados_base%></p>
+          </div>
+        </div>
+
+        <% if current_user %>
+          <div class="form-group">
+            <label for="user_uuid" class="col-sm-4 control-label"> User UUID </label>
+            <div class="col-sm-8">
+              <p class="form-control-static" name="user_uuid"><%=current_user.uuid%></p>
+            </div>
+          </div>
+        <% end %>
+
+        <% if params[:error_message] %>
+          <div class="form-group">
+            <label for="error_message" class="col-sm-4 control-label"> Error message </label>
+            <div class="col-sm-8">
+              <p class="form-control-static text-overflow-ellipsis" name="error_message"><%=params[:error_message]%></p>
+            </div>
+          </div>
+        <% end %>
+
+        <% if params[:api_error_request_url] %>
+          <div class="form-group">
+            <label for="api_error_url" class="col-sm-4 control-label"> API error request URL </label>
+            <div class="col-sm-8">
+              <p class="form-control-static text-overflow-ellipsis" name="api_error_url"><%=params[:api_error_request_url]%></p>
+            </div>
+          </div>
+        <% end %>
+
+        <% if params[:api_error_response] %>
+          <div class="form-group">
+            <label for="api_error_response" class="col-sm-4 control-label"> API error response </label>
+            <div class="col-sm-8">
+              <p class="form-control-static text-overflow-ellipsis" name="api_error_response"><%=params[:api_error_response]%></p>
+            </div>
+          </div>
+        <% end %>
+      </div>
+
+      <div class="modal-footer">
+        <% if params[:popup_type] == 'report' %>
+          <button class="btn btn-default report-issue-cancel" id="report-issue-cancel" data-dismiss="modal" aria-hidden="true">Cancel</button>
+          <button type="submit" id="report-issue-submit" class="btn btn-primary report-issue-submit" autofocus>Send problem report</button>
+        <% else %>
+          <button class="btn btn-default" data-dismiss="modal" aria-hidden="true">Close</button>
+        <% end %>
+      </div>
+    <% end #form %>
+  </div>
+ </div>
+</div>
diff --git a/apps/workbench/app/views/application/_selection_checkbox.html.erb b/apps/workbench/app/views/application/_selection_checkbox.html.erb
new file mode 100644 (file)
index 0000000..a234e9f
--- /dev/null
@@ -0,0 +1,19 @@
+<%if object and object.uuid and (object.class.goes_in_projects? or (object.is_a?(Link) and ArvadosBase::resource_class_for_uuid(object.head_uuid).to_s == 'Collection')) %>
+  <% fn = if defined? friendly_name and not friendly_name.nil?
+            friendly_name
+          else
+            link_to_if_arvados_object object, {no_link: true}
+          end
+     %>
+  <% # This 'fn' string may contain embedded HTML which is already marked html_safe.
+     # Since we are putting it into a tag attribute, we need to copy into an
+     # unsafe string so that rails will escape it for us.
+     fn = String.new fn %>
+<%= check_box_tag 'uuids[]', object.uuid, false, {
+      :class => 'persistent-selection',
+      :friendly_type => object.class.name,
+      :friendly_name => fn,
+      :href => "#{url_for controller: object.class.name.tableize, action: 'show', id: object.uuid }",
+      :title => "Click to add this item to your selection list"
+} %>
+<% end %>
diff --git a/apps/workbench/app/views/application/_show_advanced.html.erb b/apps/workbench/app/views/application/_show_advanced.html.erb
new file mode 100644 (file)
index 0000000..70dd96b
--- /dev/null
@@ -0,0 +1,23 @@
+<div class="panel-group" id="arv-adv-accordion">
+  <% ['API response',
+      'Metadata',
+      'Python example',
+      'CLI example',
+      'curl example'].each do |section| %>
+    <% section_id = section.gsub(" ","_").downcase %>
+    <div class="panel panel-default">
+      <div class="panel-heading">
+        <h4 class="panel-title">
+          <a data-toggle="collapse" data-parent="#arv-adv-accordion" href="#advanced_<%=section_id%>">
+            <%= section %>
+          </a>
+        </h4>
+      </div>
+      <div id="advanced_<%=section_id%>" class="panel-collapse collapse <%#= 'in' if section == 'API response'%>">
+        <div class="panel-body">
+          <%= render partial: "show_advanced_#{section_id}", locals: {object: @object} %>
+        </div>
+      </div>
+    </div>
+  <% end %>
+</div>
diff --git a/apps/workbench/app/views/application/_show_advanced_api_response.html.erb b/apps/workbench/app/views/application/_show_advanced_api_response.html.erb
new file mode 100644 (file)
index 0000000..9ef124a
--- /dev/null
@@ -0,0 +1,3 @@
+<pre>
+<%= JSON.pretty_generate(object.attributes.reject { |k,v| k == 'id' }) rescue nil %>
+</pre>
diff --git a/apps/workbench/app/views/application/_show_advanced_cli_example.html.erb b/apps/workbench/app/views/application/_show_advanced_cli_example.html.erb
new file mode 100644 (file)
index 0000000..cb29b66
--- /dev/null
@@ -0,0 +1,12 @@
+An example arv command to get a <%= object.class.to_s.underscore %> using its uuid:
+<pre>
+arv <%= object.class.to_s.underscore %> get \
+ --uuid <%= object.uuid %>
+</pre>
+
+An example arv command to update the "<%= object.attributes.keys[-3] %>" attribute for the current <%= object.class.to_s.underscore %>:
+<pre>
+arv <%= object.class.to_s.underscore %> update \
+ --uuid <%= object.uuid %> \
+ --<%= object.class.to_s.underscore.gsub '_', '-' %> '<%= JSON.generate({object.attributes.keys[-3] => object.attributes.values[-3]}).gsub("'","'\\''") %>'
+</pre>
diff --git a/apps/workbench/app/views/application/_show_advanced_curl_example.html.erb b/apps/workbench/app/views/application/_show_advanced_curl_example.html.erb
new file mode 100644 (file)
index 0000000..d6b9834
--- /dev/null
@@ -0,0 +1,10 @@
+An example curl command to update the "<%= object.attributes.keys[-3] %>" attribute for the current <%= object.class.to_s.underscore %>:
+<pre>
+curl -X PUT \
+ -H "Authorization: OAuth2 $ARVADOS_API_TOKEN" \
+ --data-urlencode <%= object.class.to_s.underscore %>@/dev/stdin \
+ https://$ARVADOS_API_HOST/arvados/v1/<%= object.class.to_s.pluralize.underscore %>/<%= object.uuid %> \
+ &lt;&lt;EOF
+<%= JSON.pretty_generate({object.attributes.keys[-3] => object.attributes.values[-3]}) %>
+EOF
+</pre>
diff --git a/apps/workbench/app/views/application/_show_advanced_metadata.html.erb b/apps/workbench/app/views/application/_show_advanced_metadata.html.erb
new file mode 100644 (file)
index 0000000..c036b36
--- /dev/null
@@ -0,0 +1,56 @@
+<% outgoing = Link.where(tail_uuid: @object.uuid) %>
+<% incoming = Link.where(head_uuid: @object.uuid) %>
+
+<%
+  preload_uuids = []
+  preload_head_uuids = []
+  outgoing.results.each do |link|
+    preload_uuids << link.uuid
+    preload_uuids << link.head_uuid
+    preload_head_uuids << link.head_uuid
+  end
+  preload_collections_for_objects preload_uuids
+  preload_links_for_objects preload_head_uuids
+%>
+
+<% if (outgoing | incoming).any? %>
+<table class="table topalign">
+  <colgroup>
+    <col width="20%" />
+    <col width="10%" />
+    <col width="10%" />
+    <col width="20%" />
+    <col width="20%" />
+    <col width="20%" />
+  </colgroup>
+  <thead>
+    <tr>
+      <th></th>
+      <th>link_class</th>
+      <th>name</th>
+      <th>tail</th>
+      <th>head</th>
+      <th>properties</th>
+    </tr>
+  </thead>
+  <tbody>
+    <% (outgoing | incoming).each do |link| %>
+      <tr>
+        <td>
+          <%= render partial: 'show_object_button', locals: { object: link, size: 'xs' } %>
+          <span class="arvados-uuid"><%= link.uuid %></span>
+        </td>
+        <td><%= link.link_class %></td>
+        <td><%= link.name %></td>
+        <td><%= link.tail_uuid == object.uuid ? 'this' : (render partial: 'application/arvados_attr_value', locals: { obj: link, attr: "tail_uuid", attrvalue: link.tail_uuid, editable: false }) %></td>
+        <td><%= link.head_uuid == object.uuid ? 'this' : (render partial: 'application/arvados_attr_value', locals: { obj: link, attr: "head_uuid", attrvalue: link.head_uuid, editable: false }) %></td>
+        <td><%= render partial: 'application/arvados_attr_value', locals: { obj: link, attr: "properties", attrvalue: link.properties, editable: false } %></td>
+      </tr>
+    <% end %>
+  </tbody>
+</table>
+<% else %>
+<span class="deemphasize">
+  (No metadata links found)
+</span>
+<% end %>
diff --git a/apps/workbench/app/views/application/_show_advanced_python_example.html.erb b/apps/workbench/app/views/application/_show_advanced_python_example.html.erb
new file mode 100644 (file)
index 0000000..f7cb7a1
--- /dev/null
@@ -0,0 +1,6 @@
+An example python command to get a <%= object.class.to_s.underscore %> using its uuid:
+<pre>
+import arvados
+
+x = arvados.api().<%= object.class.to_s.pluralize.underscore %>().get(uuid='<%= object.uuid %>').execute()
+</pre>
diff --git a/apps/workbench/app/views/application/_show_api.html.erb b/apps/workbench/app/views/application/_show_api.html.erb
new file mode 100644 (file)
index 0000000..759db9e
--- /dev/null
@@ -0,0 +1,42 @@
+<% if @object.andand.uuid %>
+
+<div class="panel panel-default">
+  <div class="panel-heading">curl</div>
+  <div class="panel-body">
+  <pre>
+curl -X PUT \
+ -H "Authorization: OAuth2 $ARVADOS_API_TOKEN" \
+ --data-urlencode <%= @object.class.to_s.underscore %>@/dev/stdin \
+ https://$ARVADOS_API_HOST/arvados/v1/<%= @object.class.to_s.pluralize.underscore %>/<%= @object.uuid %> \
+ &lt;&lt;EOF
+<%= JSON.pretty_generate({@object.attributes.keys[-3] => @object.attributes.values[-3]}) %>
+EOF
+  </pre>
+  </div>
+</div>
+
+<div class="panel panel-default">
+  <div class="panel-heading"><b>arv</b> command line tool</div>
+  <div class="panel-body">
+  <pre>
+arv <%= @object.class.to_s.underscore %> get \
+ --uuid <%= @object.uuid %>
+
+arv <%= @object.class.to_s.underscore %> update \
+ --uuid <%= @object.uuid %> \
+ --<%= @object.class.to_s.underscore.gsub '_', '-' %> '<%= JSON.generate({@object.attributes.keys[-3] => @object.attributes.values[-3]}).gsub("'","'\\''") %>'
+      </pre>
+  </div>
+</div>
+
+<div class="panel panel-default">
+  <div class="panel-heading"><b>Python</b> SDK</div>
+  <div class="panel-body">
+    <pre>
+import arvados
+
+x = arvados.api().<%= @object.class.to_s.pluralize.underscore %>().get(uuid='<%= @object.uuid %>').execute()
+      </pre>
+  </div>
+</div>
+<% end %>
diff --git a/apps/workbench/app/views/application/_show_attributes.html.erb b/apps/workbench/app/views/application/_show_attributes.html.erb
new file mode 100644 (file)
index 0000000..965ec5a
--- /dev/null
@@ -0,0 +1,13 @@
+<%= form_for @object do |f| %>
+<table class="table topalign">
+  <thead>
+  </thead>
+  <tbody>
+    <% @object.attributes_for_display.each do |attr, attrvalue| %>
+    <%= render partial: 'application/arvados_object_attr', locals: { attr: attr, attrvalue: attrvalue } %>
+    <% end %>
+  </tbody>
+</table>
+
+<% end %>
+
diff --git a/apps/workbench/app/views/application/_show_object_button.html.erb b/apps/workbench/app/views/application/_show_object_button.html.erb
new file mode 100644 (file)
index 0000000..03a5325
--- /dev/null
@@ -0,0 +1,15 @@
+<% htmloptions = {class: ''}.merge(htmloptions || {})
+   htmloptions[:class] += " btn-#{size}" rescue nil
+   link_text = 'Show' unless defined?(link_text) and link_text
+ %>
+<%= link_to_if_arvados_object object, {
+      link_text: raw('<i class="fa fa-fw ' + fa_icon_class_for_object(object) + '"></i> ' + link_text),
+      name_link: (defined?(name_link) && name_link && name_link.uuid) ? name_link : nil
+    }, {
+      data: {
+        toggle: 'tooltip',
+        placement: 'top'
+      },
+      title: 'show ' + object.class_for_display.downcase,
+      class: 'btn btn-default ' + htmloptions[:class],
+    } %>
diff --git a/apps/workbench/app/views/application/_show_object_description_cell.html.erb b/apps/workbench/app/views/application/_show_object_description_cell.html.erb
new file mode 100644 (file)
index 0000000..7ad3f0e
--- /dev/null
@@ -0,0 +1,2 @@
+<%= object.content_summary %>
+
diff --git a/apps/workbench/app/views/application/_show_recent.html.erb b/apps/workbench/app/views/application/_show_recent.html.erb
new file mode 100644 (file)
index 0000000..57a5b74
--- /dev/null
@@ -0,0 +1,71 @@
+<% if objects.empty? %>
+<br/>
+<p style="text-align: center">
+  No <%= controller.controller_name.humanize.downcase %> to display.
+</p>
+
+<% else %>
+
+<% attr_blacklist = ' created_at modified_at modified_by_user_uuid modified_by_client_uuid updated_at owner_uuid group_class properties' %>
+
+<%= render partial: "paging", locals: {results: objects, object: @object} %>
+
+<%= form_tag do |f| %>
+
+<table class="table table-condensed arv-index">
+  <thead>
+    <tr>
+      <% if objects.first and objects.first.class.goes_in_projects? %>
+        <th></th>
+      <% end %>
+      <th></th>
+      <% objects.first.attributes_for_display.each do |attr, attrvalue| %>
+      <% next if attr_blacklist.index(" "+attr) %>
+      <th class="arv-attr-<%= attr %>">
+        <%= controller.model_class.attribute_info[attr.to_sym].andand[:column_heading] or attr.sub /_uuid/, '' %>
+      </th>
+      <% end %>
+      <th>
+        <!-- a column for delete buttons -->
+      </th>
+    </tr>
+  </thead>
+
+  <tbody>
+    <% objects.each do |object| %>
+    <tr data-object-uuid="<%= object.uuid %>">
+      <% if objects.first.class.goes_in_projects? %>
+        <td>
+          <%= render :partial => "selection_checkbox", :locals => {:object => object} %>
+        </td>
+      <% end %>
+      <td>
+        <%= render :partial => "show_object_button", :locals => {object: object, size: 'xs'} %>
+      </td>
+
+      <% object.attributes_for_display.each do |attr, attrvalue| %>
+      <% next if attr_blacklist.index(" "+attr) %>
+      <td class="arv-object-<%= object.class.to_s %> arv-attr-<%= attr %>">
+        <% if attr == 'uuid' %>
+          <span class="arvados-uuid"><%= attrvalue %></span>
+        <% else %>
+          <%= link_to_if_arvados_object attrvalue, {referring_attr: attr, referring_object: object, with_class_name: true, friendly_name: true} %>
+        <% end %>
+      </td>
+      <% end %>
+      <td>
+        <%= render partial: 'delete_object_button', locals: {object:object} %>
+      </td>
+    </tr>
+    <% end %>
+  </tbody>
+
+  <tfoot>
+  </tfoot>
+</table>
+
+<% end %>
+
+<%= render partial: "paging", locals: {results: objects, object: @object} %>
+
+<% end %>
diff --git a/apps/workbench/app/views/application/_svg_div.html.erb b/apps/workbench/app/views/application/_svg_div.html.erb
new file mode 100644 (file)
index 0000000..76bedba
--- /dev/null
@@ -0,0 +1,37 @@
+<%= content_for :css do %>
+/* Need separate style for each instance of svg div because javascript will manipulate the properties. */
+#<%= divId %> {
+ padding-left: 3px;
+ overflow: auto;
+ border: solid;
+ border-width: 1px;
+ border-color: gray;
+ position: absolute;
+ left: 25px;
+ right: 25px;
+}
+path:hover {
+stroke-width: 5;
+}
+path {
+stroke-linecap: round;
+}
+<% end %>
+
+<%= content_for :js do %>
+    $(window).on('load', function() {
+      $(window).on('load resize scroll', function () { graph_zoom("<%= divId %>","<%=svgId %>", 1) } );
+    });
+<% end %>
+
+<div id="_<%= divId %>_container">
+  <div style="text-align: right">
+    <a style="cursor: pointer"><span class="glyphicon glyphicon-zoom-out" onclick="graph_zoom('<%= divId %>', '<%= svgId %>', .9)"></span></a>
+    <a style="cursor: pointer"><span class="glyphicon glyphicon-zoom-in" onclick="graph_zoom('<%= divId %>', '<%= svgId %>', 1./.9)"></span></a>
+  </div>
+
+  <div id="<%= divId %>" class="smart-scroll">
+    <span id="_<%= divId %>_center" style="padding-left: 0px"></span>
+    <%= raw(svg) %>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/application/_tab_line_buttons.html.erb b/apps/workbench/app/views/application/_tab_line_buttons.html.erb
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/workbench/app/views/application/_title_and_buttons.html.erb b/apps/workbench/app/views/application/_title_and_buttons.html.erb
new file mode 100644 (file)
index 0000000..4a40510
--- /dev/null
@@ -0,0 +1,66 @@
+<% object_class = @object.class_for_display.downcase %>
+<% content_for :page_title do %>
+  <%= (@object.respond_to?(:properties) and !@object.properties.nil? ? @object.properties[:page_title] : nil) ||
+      @name_link.andand.name ||
+      @object.friendly_link_name %>
+<% end %>
+
+<% content_for :content_top do %>
+  <% if !['Group','User', 'Collection'].include? @object.class.to_s # projects and collections handle it themselves %>
+    <%= render partial: 'name_and_description' %>
+  <% end %>
+<% end %>
+
+<% if @object.class.goes_in_projects? && @object.uuid != current_user.uuid # Not the "Home" project %>
+  <% content_for :tab_line_buttons do %>
+    <% if @object.class.copies_to_projects? %>
+      <%= link_to(
+          choose_projects_path(
+           title: "Copy this #{object_class} to:",
+           action_name: 'Copy',
+           action_href: actions_path,
+           action_method: 'post',
+           action_data: {
+             copy_selections_into_project: true,
+             selection: @name_link.andand.uuid || @object.uuid,
+             selection_param: 'uuid',
+             success: 'redirect-to-created-object'
+           }.to_json),
+          { class: "btn btn-sm btn-primary", remote: true, method: 'get',
+            title: "Make a copy of this #{object_class}" }) do %>
+        <i class="fa fa-fw fa-copy"></i> Copy to project...
+      <% end %>
+    <% end %>
+    <% if (ArvadosBase.find(@object.owner_uuid).writable_by.include?(current_user.uuid) rescue nil) %>
+      <%= link_to(
+          choose_projects_path(
+           title: "Move this #{object_class} to:",
+           action_name: 'Move',
+           action_href: actions_path,
+           action_method: 'post',
+           action_data: {
+             move_selections_into_project: true,
+             selection: @name_link.andand.uuid || @object.uuid,
+             selection_param: 'uuid',
+             success: 'redirect-to-created-object'
+           }.to_json),
+          { class: "btn btn-sm btn-primary force-cache-reload", remote: true, method: 'get',
+            title: "Move this #{object_class} to a different project"}) do %>
+        <i class="fa fa-fw fa-truck"></i> Move <%=object_class%>...
+      <% end %>
+    <% end %>
+  <% end %>
+<% end %>
+
+<%
+  # Display any flash messages in an alert. If there is any entry with "error" key, alert-danger is used.
+  flash_msg = ''
+  flash_msg_is_error = false
+  flash.each do |msg|
+    flash_msg_is_error ||= (msg[0]=='error')
+    flash_msg += ('<p class="contain-align-left">' + msg[1] + '</p>')
+  end
+  if flash_msg != ''
+%>
+<div class="flash-message alert <%= flash_msg_is_error ? 'alert-danger' : 'alert-warning' %>"><%=flash_msg.html_safe%></div>
+<% end %>
diff --git a/apps/workbench/app/views/application/api_error.html.erb b/apps/workbench/app/views/application/api_error.html.erb
new file mode 100644 (file)
index 0000000..f8edf7d
--- /dev/null
@@ -0,0 +1,25 @@
+<h2>Oh... fiddlesticks.</h2>
+
+<p>An error occurred when Workbench sent a request to the Arvados API server.  Try reloading this page.  If the problem is temporary, your request might go through next time.
+
+<% if not api_error %>
+</p>
+<% else %>
+If that doesn't work, the information below can help system administrators track down the problem.
+</p>
+
+<dl>
+  <dt>API request URL</dt>
+  <dd><code><%= api_error.request_url %></code></dd>
+
+  <% if api_error.api_response.empty? %>
+  <dt>Invalid API response</dt>
+  <dd><%= api_error.api_response_s %></dd>
+  <% else %>
+  <dt>API response</dt>
+  <dd><pre><%= Oj.dump(api_error.api_response, indent: 2) %></pre></dd>
+  <% end %>
+</dl>
+<% end %>
+
+<%= render :partial => "report_error", :locals => {api_error: api_error, error_type: 'api'} %>
diff --git a/apps/workbench/app/views/application/api_error.json.erb b/apps/workbench/app/views/application/api_error.json.erb
new file mode 100644 (file)
index 0000000..8371ff9
--- /dev/null
@@ -0,0 +1 @@
+{"errors":<%= raw @errors.to_json %>}
\ No newline at end of file
diff --git a/apps/workbench/app/views/application/destroy.js.erb b/apps/workbench/app/views/application/destroy.js.erb
new file mode 100644 (file)
index 0000000..5309a8b
--- /dev/null
@@ -0,0 +1,4 @@
+$(document).trigger('count-change');
+$('[data-object-uuid=<%= @object.uuid %>]').hide('slow', function() {
+    $(this).remove();
+});
diff --git a/apps/workbench/app/views/application/error.html.erb b/apps/workbench/app/views/application/error.html.erb
new file mode 100644 (file)
index 0000000..97180bf
--- /dev/null
@@ -0,0 +1,9 @@
+<h2>Oh... fiddlesticks.</h2>
+
+<p>Sorry, I had some trouble handling your request.</p>
+
+<ul>
+<% if @errors.is_a? Array then @errors.each do |error| %>
+<li><%= error %></li>
+<% end end %>
+</ul>
diff --git a/apps/workbench/app/views/application/error.json.erb b/apps/workbench/app/views/application/error.json.erb
new file mode 100644 (file)
index 0000000..8371ff9
--- /dev/null
@@ -0,0 +1 @@
+{"errors":<%= raw @errors.to_json %>}
\ No newline at end of file
diff --git a/apps/workbench/app/views/application/index.html.erb b/apps/workbench/app/views/application/index.html.erb
new file mode 100644 (file)
index 0000000..3e2a608
--- /dev/null
@@ -0,0 +1,30 @@
+<% content_for :page_title do %>
+<%= controller.controller_name.humanize.capitalize %>
+<% end %>
+
+<% content_for :tab_line_buttons do %>
+
+  <% if controller.model_class.creatable? %>
+
+    <% if controller.model_class.name == 'User' %>
+      <%= link_to setup_user_popup_path,
+        {class: 'btn btn-sm btn-primary', :remote => true, 'data-toggle' =>  "modal",
+          'data-target' => '#user-setup-modal-window', return_to: request.url} do %>
+        <i class="fa fa-fw fa-plus"></i> Add a new user
+      <% end %>
+      <div id="user-setup-modal-window" class="modal fade" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true"></div>
+    <% elsif controller.controller_name == 'manage_account' %>
+      <%# No add button is needed %>
+    <% else %>
+      <%= button_to({action: 'create'}, {class: 'btn btn-sm btn-primary'}) do %>
+        <i class="fa fa-fw fa-plus"></i>
+        Add a new
+        <%= controller.controller_name.singularize.humanize.downcase %>
+      <% end %>
+    <% end %>
+
+  <% end %>
+
+<% end %>
+
+<%= render partial: 'content', layout: 'content_layout', locals: {pane_list: controller.index_pane_list }%>
diff --git a/apps/workbench/app/views/application/report_issue_popup.js.erb b/apps/workbench/app/views/application/report_issue_popup.js.erb
new file mode 100644 (file)
index 0000000..73830ee
--- /dev/null
@@ -0,0 +1,12 @@
+$("#report-issue-modal-window").html("<%= escape_javascript(render partial: 'report_issue_popup') %>");
+$("#report-issue-modal-window .modal").modal('show');
+
+// Disable the submit button on modal loading
+$submit = $('#report-issue-submit');
+$submit.prop('disabled', true);
+
+// capture events to enable submit button when applicable
+$('#report_issue_text').bind('input propertychange', function() {
+  var problem_desc = document.forms["report-issue-form"]["report_issue_text"].value;
+  $submit.prop('disabled', (problem_desc === null) || (problem_desc === ""));
+});
diff --git a/apps/workbench/app/views/application/show.html.erb b/apps/workbench/app/views/application/show.html.erb
new file mode 100644 (file)
index 0000000..9b1b002
--- /dev/null
@@ -0,0 +1,2 @@
+<%= render partial: 'title_and_buttons' %>
+<%= render partial: 'content', layout: 'content_layout', locals: {pane_list: controller.show_pane_list }%>
diff --git a/apps/workbench/app/views/authorized_keys/create.js.erb b/apps/workbench/app/views/authorized_keys/create.js.erb
new file mode 100644 (file)
index 0000000..092bc2b
--- /dev/null
@@ -0,0 +1 @@
+;
diff --git a/apps/workbench/app/views/authorized_keys/edit.html.erb b/apps/workbench/app/views/authorized_keys/edit.html.erb
new file mode 100644 (file)
index 0000000..23997ae
--- /dev/null
@@ -0,0 +1 @@
+<%= render partial: 'form' %>
diff --git a/apps/workbench/app/views/collections/_choose.js.erb b/apps/workbench/app/views/collections/_choose.js.erb
new file mode 120000 (symlink)
index 0000000..8420a7f
--- /dev/null
@@ -0,0 +1 @@
+../application/_choose.js.erb
\ No newline at end of file
diff --git a/apps/workbench/app/views/collections/_choose_rows.html.erb b/apps/workbench/app/views/collections/_choose_rows.html.erb
new file mode 100644 (file)
index 0000000..17274dd
--- /dev/null
@@ -0,0 +1,24 @@
+<% @objects.each do |object| %>
+    <div class="row filterable selectable <%= 'use-preview-selection' if params['use_preview_selection']%>" data-object-uuid="<%= object.uuid %>"
+         data-preview-href="<%= chooser_preview_url_for object, params['use_preview_selection'] %>"
+         style="margin-left: 1em; border-bottom-style: solid; border-bottom-width: 1px; border-bottom-color: #DDDDDD">
+      <i class="fa fa-fw fa-archive"></i>
+      <% if object.respond_to? :name %>
+        <% if not (object.name.nil? or object.name.empty?) %>
+          <%= object.name %>
+        <% elsif object.is_a? Collection and object.files.length > 0 %>
+          <%= object.files[0][1] %>
+          <%= "+ #{object.files.length-1} more" if object.files.length > 1 %>
+        <% else %>
+          <%= object.uuid %>
+        <% end %>
+      <% else %>
+        <%= object.uuid %>
+      <% end %>
+      <% links_for_object(object).each do |tag| %>
+        <% if tag.link_class == 'tag' %>
+          <span class="label label-info"><%= tag.name %></span>
+        <% end %>
+      <% end %>
+    </div>
+<% end %>
diff --git a/apps/workbench/app/views/collections/_index_tbody.html.erb b/apps/workbench/app/views/collections/_index_tbody.html.erb
new file mode 100644 (file)
index 0000000..5d2fe2c
--- /dev/null
@@ -0,0 +1,52 @@
+<% @objects.each do |c| %>
+
+<tr class="collection" data-object-uuid="<%= c.uuid %>">
+  <td>
+    <%=
+       friendly_name = c.friendly_link_name
+       @collection_info[c.uuid][:tag_links].each do |tag_link|
+         friendly_name += raw(" <span class='label label-info'>#{tag_link.name}</span>")
+       end
+       render partial: "selection_checkbox", locals: {
+         object: c,
+         friendly_name: friendly_name
+       }
+    %>
+
+    <%= render :partial => "show_object_button", :locals => {object: c, size: 'xs'} %>
+  </td>
+  <td>
+    <%= c.uuid %>
+  </td>
+  <td>
+    <% i = 0 %>
+    <% while i < 3 and i < c.files.length %>
+      <% file = c.files[i] %>
+      <% file_path = "#{file[0]}/#{file[1]}" %>
+      <%= link_to file[1], {controller: 'collections', action: 'show_file', uuid: c.uuid, file: file_path, size: file[2], disposition: 'inline'}, {title: 'View in browser'} %><br />
+      <% i += 1 %>
+    <% end %>
+    <% if i < c.files.length %>
+      &vellip;
+    <% end %>
+  </td>
+  <td>
+    <%= c.created_at.to_s if c.created_at %>
+  </td>
+  <td class="add-tag-button">
+    <a class="btn btn-xs btn-info add-tag-button pull-right" data-remote-href="<%= url_for(controller: 'links', action: 'create') %>" data-remote-method="post"><i class="glyphicon glyphicon-plus"></i>&nbsp;Add</a>
+    <span class="removable-tag-container">
+    <% if @collection_info[c.uuid] %>
+      <% @collection_info[c.uuid][:tag_links].each do |tag_link| %>
+        <span class="label label-info removable-tag" data-tag-link-uuid="<%= tag_link.uuid %>"><%= tag_link.name %>
+          <% if tag_link.owner_uuid == current_user.uuid %>
+          &nbsp;<a title="Delete tag"><i class="glyphicon glyphicon-trash"></i></a>
+          <% end %>
+        </span>&nbsp;
+      <% end %>
+    <% end %>
+    </span>
+  </td>
+</tr>
+
+<% end %>
diff --git a/apps/workbench/app/views/collections/_sharing_button.html.erb b/apps/workbench/app/views/collections/_sharing_button.html.erb
new file mode 100644 (file)
index 0000000..4050e3a
--- /dev/null
@@ -0,0 +1,17 @@
+<% button_attrs = {
+     class: 'btn btn-xs btn-info',
+     remote: true,
+     method: :post,
+   } %>
+<% if @search_sharing.nil? %>
+  <p>Your API token is not authorized to manage collection sharing links.</p>
+<% elsif @search_sharing.empty? %>
+  <%= button_to("Create sharing link", {action: "share"}, button_attrs) %>
+<% else %>
+  <div>
+    <% button_attrs[:class] += " pull-right" %>
+    <%= button_to("Unshare", {action: "unshare"}, button_attrs) %>
+    Shared at:
+    <div class="smaller-text" style="clear: both; word-break: break-all"><%= link_to download_link, download_link %></div>
+  </div>
+<% end %>
diff --git a/apps/workbench/app/views/collections/_show_chooser_preview.html.erb b/apps/workbench/app/views/collections/_show_chooser_preview.html.erb
new file mode 100644 (file)
index 0000000..cb91f85
--- /dev/null
@@ -0,0 +1,2 @@
+<%= render partial: "show_source_summary" %>
+<%= render partial: "show_files", locals: {no_checkboxes: true, use_preview_selection: params['use_preview_selection']} %>
diff --git a/apps/workbench/app/views/collections/_show_files.html.erb b/apps/workbench/app/views/collections/_show_files.html.erb
new file mode 100644 (file)
index 0000000..603dc34
--- /dev/null
@@ -0,0 +1,119 @@
+<script>
+function select_all_files() {
+  $("#collection_files :checkbox").filter(":visible").prop("checked", true).trigger("change");
+}
+
+function unselect_all_files() {
+  $("#collection_files :checkbox").filter(":visible").prop("checked", false).trigger("change");
+}
+</script>
+
+<%
+  preview_selectable_container = ''
+  preview_selectable = ''
+  padding_left = '1em'
+  if !params['use_preview_selection'].nil? and params['use_preview_selection'] == 'true'
+    preview_selectable_container = 'preview-selectable-container selectable-container'
+    preview_selectable = 'preview-selectable selectable'
+    padding_left = '0em'
+  end
+%>
+
+<div class="selection-action-container" style="padding-left: <%=padding_left%>">
+  <% if !defined? no_checkboxes or !no_checkboxes %>
+  <div class="row">
+    <div class="pull-left">
+      <div class="btn-group btn-group-sm">
+        <button type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown">Selection... <span class="caret"></span></button>
+        <ul class="dropdown-menu" role="menu">
+          <li><%= link_to "Create new collection with selected files", '#',
+                  method: :post,
+                  'data-href' => combine_selected_path(
+                    action_data: {current_project_uuid: @object.owner_uuid}.to_json
+                  ),
+                  'data-selection-param-name' => 'selection[]',
+                  'data-selection-action' => 'combine-collections',
+                  'data-toggle' => 'dropdown'
+            %></li>
+        </ul>
+      </div>
+      <div class="btn-group btn-group-sm">
+       <button id="select-all" type="button" class="btn btn-default" onClick="select_all_files()">Select all</button>
+       <button id="unselect-all" type="button" class="btn btn-default" onClick="unselect_all_files()">Unselect all</button>
+      </div>
+    </div>
+    <div class="pull-right">
+      <input class="form-control filterable-control" data-filterable-target="ul#collection_files" id="file_regex" name="file_regex" placeholder="filename regex" type="text"/>
+    </div>
+  </div>
+  <p/>
+  <% end %>
+
+<% file_tree = @object.andand.files_tree %>
+<% if file_tree.nil? or file_tree.empty? %>
+  <p>This collection is empty.</p>
+<% else %>
+  <ul id="collection_files" class="collection_files <%=preview_selectable_container%>">
+  <% dirstack = [file_tree.first.first] %>
+  <% file_tree.take(10000).each_with_index do |(dirname, filename, size), index| %>
+    <% file_path = CollectionsHelper::file_path([dirname, filename]) %>
+    <% while dirstack.any? and (dirstack.last != dirname) %>
+      <% dirstack.pop %></ul></li>
+    <% end %>
+    <li>
+    <% if size.nil?  # This is a subdirectory. %>
+      <% dirstack.push(File.join(dirname, filename)) %>
+      <div class="collection_files_row">
+       <div class="collection_files_name"><i class="fa fa-fw fa-folder-open"></i> <%= filename %></div>
+      </div>
+      <ul class="collection_files">
+    <% else %>
+      <% link_params = {controller: 'collections', action: 'show_file',
+                        uuid: @object.portable_data_hash, file: file_path, size: size} %>
+       <div class="collection_files_row filterable <%=preview_selectable%>" href="<%=@object.uuid%>/<%=file_path%>">
+        <div class="collection_files_buttons pull-right">
+          <%= raw(human_readable_bytes_html(size)) %>
+          <% disable_search = (Rails.configuration.filename_suffixes_with_view_icon.include? file_path.split('.')[-1]) ? false : true %>
+          <%= link_to(raw('<i class="fa fa-search"></i>'),
+                      link_params.merge(disposition: 'inline'),
+                      {title: "View #{file_path}", class: "btn btn-info btn-sm", disabled: disable_search}) %>
+          <%= link_to(raw('<i class="fa fa-download"></i>'),
+                      link_params.merge(disposition: 'attachment'),
+                      {title: "Download #{file_path}", class: "btn btn-info btn-sm"}) %>
+        </div>
+
+        <div class="collection_files_name">
+          <% if !defined? no_checkboxes or !no_checkboxes %>
+          <%= check_box_tag 'uuids[]', "#{@object.uuid}/#{file_path}", false, {
+                :class => "persistent-selection",
+                :friendly_type => "File",
+                :friendly_name => "#{@object.uuid}/#{file_path}",
+                :href => url_for(controller: 'collections', action: 'show_file',
+                                 uuid: @object.portable_data_hash, file: file_path),
+                :title => "Include #{file_path} in your selections",
+              } %>
+          <span>&nbsp;</span>
+          <% end %>
+      <% if CollectionsHelper::is_image(filename) %>
+          <i class="fa fa-fw fa-bar-chart-o"></i> <%= filename %></div>
+        <div class="collection_files_inline">
+          <%= link_to(image_tag("#{url_for @object}/#{file_path}"),
+                      link_params.merge(disposition: 'inline'),
+                      {title: file_path}) %>
+        </div>
+       </div>
+      <% else %>
+          <i class="fa fa-fw fa-file" href="<%=@object.uuid%>/<%=file_path%>" ></i> <%= filename %></div>
+       </div>
+      <% end %>
+      </li>
+    <% end  # if file or directory %>
+  <% end  # file_tree.each %>
+  <%= raw(dirstack.map { |_| "</ul>" }.join("</li>")) %>
+<% end  # if file_tree %>
+
+<% content_for :footer_html do %>
+<div id="collection-sharing-modal-window" class="modal fade" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true"></div>
+<% end %>
+
+</div>
diff --git a/apps/workbench/app/views/collections/_show_provenance_graph.html.erb b/apps/workbench/app/views/collections/_show_provenance_graph.html.erb
new file mode 100644 (file)
index 0000000..977265a
--- /dev/null
@@ -0,0 +1,4 @@
+<%= render partial: 'application/svg_div', locals: {
+    divId: "provenance_graph_div", 
+    svgId: "provenance_svg", 
+    svg: @prov_svg } %>
diff --git a/apps/workbench/app/views/collections/_show_recent.html.erb b/apps/workbench/app/views/collections/_show_recent.html.erb
new file mode 100644 (file)
index 0000000..39651cc
--- /dev/null
@@ -0,0 +1,61 @@
+<div class="selection-action-container" style="padding-left: 1em">
+  <div class="row">
+    <div class="pull-left">
+      <div class="btn-group btn-group-sm">
+        <button type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown">Selection... <span class="caret"></span></button>
+        <ul class="dropdown-menu" role="menu">
+          <li><%= link_to "Create new collection with selected collections", '#',
+                  method: :post,
+                  'data-href' => combine_selected_path,
+                  'data-selection-param-name' => 'selection[]',
+                  'data-selection-action' => 'combine-collections',
+                  'data-toggle' => 'dropdown'
+            %></li>
+        </ul>
+      </div>
+    </div>
+  </div>
+  <p/>
+
+<%= render partial: "paging", locals: {results: @objects, object: @object} %>
+
+<div style="padding-right: 1em">
+
+<%= form_tag do |f| %>
+
+<table id="collections-index" class="topalign table table-condensed table-fixedlayout"> <!-- table-fixed-header-row -->
+  <colgroup>
+    <col width="10%" />
+    <col width="10%" />
+    <col width="40%" />
+    <col width="10%" />
+    <col width="30%" />
+  </colgroup>
+  <thead>
+    <tr class="contain-align-left">
+      <th></th>
+      <th>uuid</th>
+      <th>contents</th>
+      <th>created at</th>
+      <th>tags</th>
+    </tr>
+  </thead>
+  <tbody>
+    <%= render partial: 'index_tbody' %>
+  </tbody>
+</table>
+
+<% end %>
+
+</div>
+
+<%= render partial: "paging", locals: {results: @objects, object: @object} %>
+
+<% content_for :footer_js do %>
+$(document).on('click', 'form[data-remote] input[type=submit]', function() {
+  $('table#collections-index tbody').fadeTo(200, 0.3);
+  return true;
+});
+<% end %>
+
+</div>
diff --git a/apps/workbench/app/views/collections/_show_source_summary.html.erb b/apps/workbench/app/views/collections/_show_source_summary.html.erb
new file mode 100644 (file)
index 0000000..592e802
--- /dev/null
@@ -0,0 +1,28 @@
+<% if not (@output_of.andand.any? or @log_of.andand.any?) %>
+  <p><i>No source information available.</i></p>
+<% end %>
+
+<% if @output_of.andand.any? %>
+  <p><i>This collection was the output of:</i><br />
+    <% pipelines = PipelineInstance.filter([["components", "like", "%#{@object.uuid}%"]]).each do |pipeline| %>
+      <% pipeline.components.each do |cname, c| %>
+        <% if c[:output_uuid] == @object.uuid %>
+          <b><%= cname %></b> component of <b><%= link_to_if_arvados_object(pipeline, friendly_name: true) %></b>
+          <% if c.andand[:job].andand[:finished_at] %>
+            finished at <%= render_localized_date(c[:job][:finished_at]) %>
+          <% end %>
+          <br>
+        <% end %>
+      <% end %>
+    <% end %>
+  </p>
+<% end %>
+
+<% if @log_of.andand.any? %>
+  <p><i>This collection contains log messages from:</i><br />
+    <%= render_arvados_object_list_start(@log_of, 'Show all jobs',
+                                         jobs_path(filters: [['log', '=', @object.uuid]].to_json)) do |job| %>
+      <%= link_to_if_arvados_object(job, friendly_name: true) %><br />
+    <% end %>
+  </p>
+<% end %>
diff --git a/apps/workbench/app/views/collections/_show_upload.html.erb b/apps/workbench/app/views/collections/_show_upload.html.erb
new file mode 100644 (file)
index 0000000..40bef52
--- /dev/null
@@ -0,0 +1,66 @@
+<div class="arv-log-refresh-control"
+     data-load-throttle="86486400000" <%# 1001 nights (in milliseconds) %>
+     ></div>
+<div ng-cloak ng-controller="UploadToCollection" arv-uuid="<%= @object.uuid %>">
+  <div class="panel panel-primary">
+    <div class="panel-body">
+      <div class="row">
+        <div class="col-sm-4">
+          <input type="file" multiple id="file_selector" ng-model="incoming" onchange="angular.element(this).scope().addFilesToQueue(this.files); $(this).val('');">
+          <div class="btn-group btn-group-sm" role="group" style="margin-top: 1.5em">
+            <button type="button" class="btn btn-default" ng-click="stop()" ng-disabled="uploader.state !== 'Running'"><i class="fa fa-fw fa-pause"></i> Pause</button>
+            <button type="button" class="btn btn-primary" ng-click="go()" ng-disabled="uploader.state === 'Running' || countInStates(['Paused', 'Queued']) === 0"><i class="fa fa-fw fa-play"></i> Start</button>
+          </div>
+        </div>
+        <div class="col-sm-8">
+          <div ng-show="uploader.state === 'Running'"
+               class="alert alert-info"
+               ><i class="fa fa-gear"></i>
+            Upload in progress.
+            <span ng-show="countInStates(['Done']) > 0">
+              {{countInStates(['Done'])}} file{{countInStates(['Done'])>1?'s':''}} finished.
+            </span>
+          </div>
+          <div ng-show="uploader.state === 'Idle' && uploader.stateReason"
+               class="alert alert-success"
+               ><i class="fa fa-fw fa-flag-checkered"></i> &nbsp; {{uploader.stateReason}}
+          </div>
+          <div ng-show="uploader.state === 'Failed'"
+               class="alert alert-danger"
+               ><i class="fa fa-fw fa-warning"></i> &nbsp; {{uploader.stateReason}}
+          </div>
+          <div ng-show="uploader.state === 'Stopped'"
+               class="alert alert-info"
+               ><i class="fa fa-fw fa-info"></i> &nbsp; Paused. Click the Start button to resume uploading.
+          </div>
+        </div>
+      </div>
+    </div>
+  </div>
+  <div ng-repeat="upload in uploadQueue" class="row" ng-class="{lighten: upload.state==='Done'}">
+    <div class="col-sm-1">
+      <button class="btn btn-xs btn-default"
+              ng-show="upload.state!=='Done'"
+              ng-click="removeFileFromQueue($index)"
+              title="cancel"><i class="fa fa-fw fa-times"></i></button>
+      <span class="label label-success label-info"
+            ng-show="upload.state==='Done'">finished</span>
+    </div>
+    <div class="col-sm-4 nowrap" style="overflow-x:hidden;text-overflow:ellipsis">
+      <span title="{{upload.file.name}}">
+        {{upload.file.name}}
+      </span>
+    </div>
+    <div class="col-sm-1" style="text-align: right">
+      {{upload.file.size/1024 | number:0}}&nbsp;KiB
+    </div>
+    <div class="col-sm-2">
+      <div class="progress">
+        <span class="progress-bar" style="width: {{upload.progress}}%"></span>
+      </div>
+    </div>
+    <div class="col-sm-4" ng-class="{lighten: upload.state !== 'Uploading'}">
+      {{upload.statistics}}
+    </div>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/collections/_show_used_by.html.erb b/apps/workbench/app/views/collections/_show_used_by.html.erb
new file mode 100644 (file)
index 0000000..a26b100
--- /dev/null
@@ -0,0 +1,5 @@
+<%= render partial: 'application/svg_div', locals: {
+    divId: "used_by_graph", 
+    svgId: "used_by_svg", 
+    svg: @used_by_svg } %>
+
diff --git a/apps/workbench/app/views/collections/graph.html.erb b/apps/workbench/app/views/collections/graph.html.erb
new file mode 100644 (file)
index 0000000..91598b7
--- /dev/null
@@ -0,0 +1,191 @@
+<%#= render :partial => 'nav' %>
+<table class="table table-bordered">
+  <tbody>
+    <tr>
+      <td class="d3">
+      </td>
+    </tr>
+  </tbody>
+</table>
+
+<% content_for :head do %>
+<%= javascript_include_tag '/d3.v3.min.js' %>
+
+    <style type="text/css">
+
+path.link {
+  fill: none;
+  stroke: #666;
+  stroke-width: 1.5px;
+}
+
+path.link.derived_from {
+  stroke: green;
+  stroke-dasharray: 0,4 1;
+}
+
+path.link.can_write {
+  stroke: green;
+}
+
+path.link.member_of {
+  stroke: blue;
+  stroke-dasharray: 0,4 1;
+}
+
+path.link.created {
+  stroke: red;
+}
+
+circle.node {
+  fill: #ccc;
+  stroke: #333;
+  stroke-width: 1.5px;
+}
+
+edgetext {
+  font: 12px sans-serif;
+  pointer-events: none;
+    text-align: center;
+}
+
+text {
+  font: 12px sans-serif;
+  pointer-events: none;
+}
+
+text.shadow {
+  stroke: #fff;
+  stroke-width: 3px;
+  stroke-opacity: .8;
+}
+
+    </style>
+<% end %>
+
+<% content_for :js do %>
+
+jQuery(function($){
+
+    var links = <%= raw d3ify_links(@links).to_json %>;
+
+    var nodes = {};
+
+    // Compute the distinct nodes from the links.
+    links.forEach(function(link) {
+       link.source = nodes[link.source] || (nodes[link.source] = {name: link.source});
+       link.target = nodes[link.target] || (nodes[link.target] = {name: link.target});
+    });
+
+    var fill_for = {'ldvyl': 'green',
+                   'j58dm': 'red',
+                   '4zz18': 'blue'};
+    jQuery.each(nodes, function(i, node) {
+       var m = node.name.match(/-([a-z0-9]{5})-/)
+       if (m)
+           node.fill = fill_for[m[1]] || '#ccc';
+       else if (node.name.match(/^[0-9a-f]{32}/))
+           node.fill = fill_for['4zz18'];
+       else
+           node.fill = '#ccc';
+    });
+
+    var w = 960,
+    h = 600;
+
+    var force = d3.layout.force()
+       .nodes(d3.values(nodes))
+       .links(links)
+       .size([w, h])
+       .linkDistance(150)
+       .charge(-300)
+       .on("tick", tick)
+       .start();
+
+    var svg = d3.select("td.d3").append("svg:svg")
+       .attr("width", w)
+       .attr("height", h);
+
+    // Per-type markers, as they don't inherit styles.
+    svg.append("svg:defs").selectAll("marker")
+       .data(["member_of", "owner", "derived_from"])
+       .enter().append("svg:marker")
+       .attr("id", String)
+       .attr("viewBox", "0 -5 10 10")
+       .attr("refX", 15)
+       .attr("refY", -1.5)
+       .attr("markerWidth", 6)
+       .attr("markerHeight", 6)
+       .attr("orient", "auto")
+       .append("svg:path")
+       .attr("d", "M0,-5L10,0L0,5");
+
+    var path = svg.append("svg:g").selectAll("path")
+       .data(force.links())
+       .enter().append("svg:path")
+       .attr("class", function(d) { return "link " + d.type; })
+       .attr("marker-end", function(d) { return "url(#" + d.type + ")"; });
+
+    var circle = svg.append("svg:g").selectAll("circle")
+       .data(force.nodes())
+       .enter().append("svg:circle")
+       .attr("r", 6)
+       .style("fill", function(d) { return d.fill; })
+       .call(force.drag);
+
+    var text = svg.append("svg:g").selectAll("g")
+       .data(force.nodes())
+       .enter().append("svg:g");
+
+    // A copy of the text with a thick white stroke for legibility.
+    text.append("svg:text")
+       .attr("x", 8)
+       .attr("y", ".31em")
+       .attr("class", "shadow")
+       .text(function(d) { return d.name.replace(/^([0-9a-z]{5}-){2}/,''); });
+
+    text.append("svg:text")
+       .attr("x", 8)
+       .attr("y", ".31em")
+       .text(function(d) { return d.name.replace(/^([0-9a-z]{5}-){2}/,''); });
+
+    var edgetext = svg.append("svg:g").selectAll("g")
+       .data(force.links())
+       .enter().append("svg:g");
+
+    edgetext
+       .append("svg:text")
+       .attr("x","-5em")
+       .attr("y","-0.2em")
+       .text(function(d) { return d.type; });
+
+    // Use elliptical arc path segments to doubly-encode directionality.
+    function tick() {
+       path.attr("d", function(d) {
+           var dx = d.target.x - d.source.x,
+            dy = d.target.y - d.source.y,
+            // dr = Math.sqrt(dx * dx + dy * dy);
+            dr = 0;
+           return "M" + d.source.x + "," + d.source.y + "A" + dr + "," + dr + " 0 0,1 " + d.target.x + "," + d.target.y;
+       });
+
+       circle.attr("transform", function(d) {
+           return "translate(" + d.x + "," + d.y + ")";
+       });
+
+       text.attr("transform", function(d) {
+           return "translate(" + d.x + "," + d.y + ")";
+       });
+
+       edgetext.attr("transform", function(d) {
+           return "translate(" +
+               (d.source.x + d.target.x)/2 + "," +
+               (d.source.y + d.target.y)/2 +
+               ")rotate(" +
+               (Math.atan2(d.target.y - d.source.y, d.target.x - d.source.x) * 180 / Math.PI) +
+               ")";
+       });
+    }
+
+})(jQuery);
+<% end %>
diff --git a/apps/workbench/app/views/collections/hash_matches.html.erb b/apps/workbench/app/views/collections/hash_matches.html.erb
new file mode 100644 (file)
index 0000000..7c4abb0
--- /dev/null
@@ -0,0 +1,23 @@
+<div class="row">
+  <div class="col-md-10 col-md-offset-1">
+    <div class="panel panel-info">
+      <div class="panel-heading">
+        <h3 class="panel-title"><%= params["uuid"] %></h3>
+      </div>
+      <div class="panel-body">
+        <p><i>The following collections have this content:</i></p>
+        <% @same_pdh.sort { |a,b| b.created_at <=> a.created_at }.each do |c| %>
+          <div class="row">
+            <div class="col-md-8">
+              <% owner = object_for_dataclass(Group, c.owner_uuid) || object_for_dataclass(User, c.owner_uuid) %>
+              <%= link_to_if_arvados_object owner, {:friendly_name => true} %> / <%= link_to_if_arvados_object c, {:friendly_name => true} %><br>
+            </div>
+            <div class="col-md-4">
+              <%= render_localized_date c.created_at %>
+            </div>
+          </div>
+        <% end %>
+      </div>
+    </div>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/collections/index.html.erb b/apps/workbench/app/views/collections/index.html.erb
new file mode 100644 (file)
index 0000000..061b05b
--- /dev/null
@@ -0,0 +1,14 @@
+<% content_for :tab_line_buttons do %>
+ <%= form_tag collections_path, method: 'get', remote: true, class: 'form-search' do %>
+ <div class="input-group">
+   <%= text_field_tag :search, params[:search], class: 'form-control', placeholder: 'Search collections' %>
+   <span class="input-group-btn">
+     <%= button_tag(class: 'btn btn-info') do %>
+     <span class="glyphicon glyphicon-search"></span>
+     <% end %>
+   </span>
+ </div>
+ <% end %>
+<% end %>
+
+<%= render file: 'application/index.html.erb', locals: local_assigns %>
diff --git a/apps/workbench/app/views/collections/index.js.erb b/apps/workbench/app/views/collections/index.js.erb
new file mode 100644 (file)
index 0000000..44839f4
--- /dev/null
@@ -0,0 +1,4 @@
+if(history.replaceState)
+    history.replaceState(null, null, "<%= escape_javascript(@request_url) %>");
+$('table#collections-index tbody').html("<%= escape_javascript(render partial: 'index_tbody') %>");
+$('table#collections-index tbody').fadeTo(200, 1.0);
diff --git a/apps/workbench/app/views/collections/sharing_popup.js.erb b/apps/workbench/app/views/collections/sharing_popup.js.erb
new file mode 100644 (file)
index 0000000..d2e0d9f
--- /dev/null
@@ -0,0 +1 @@
+$("#sharing-button").html("<%= escape_javascript(render partial: 'sharing_button') %>");
diff --git a/apps/workbench/app/views/collections/show.html.erb b/apps/workbench/app/views/collections/show.html.erb
new file mode 100644 (file)
index 0000000..c3e0b7c
--- /dev/null
@@ -0,0 +1,104 @@
+<div class="row row-fill-height">
+  <div class="col-md-6">
+    <div class="panel panel-info">
+      <div class="panel-heading">
+       <h3 class="panel-title">
+          <% if @name_link.nil? and @object.uuid.match /[0-9a-f]{32}/ %>
+            Content hash <%= @object.portable_data_hash %>
+          <% else %>
+           <%= if @object.respond_to? :name
+                  render_editable_attribute @object, :name
+                else
+                  @name_link.andand.name || @object.uuid
+                end %>
+            <% end %>
+       </h3>
+      </div>
+      <div class="panel-body">
+        <div class="arv-description-as-subtitle">
+          <%= render_editable_attribute @object, 'description', nil, { 'data-emptytext' => "(No description provided)", 'data-toggle' => 'manual' } %>
+        </div>
+        <img src="/favicon.ico" class="pull-right" alt="" style="opacity: 0.3"/>
+       <p><i>Content hash:</i><br />
+         <span class="arvados-uuid"><%= link_to @object.portable_data_hash, collection_path(@object.portable_data_hash) %></span>
+        </p>
+        <%= render partial: "show_source_summary" %>
+      </div>
+    </div>
+  </div>
+  <div class="col-md-3">
+    <div class="panel panel-default">
+      <div class="panel-heading">
+       <h3 class="panel-title">
+         Activity
+       </h3>
+      </div>
+      <div class="panel-body smaller-text">
+        <!--
+       <input type="text" class="form-control" placeholder="Search"/>
+        -->
+       <div style="height:0.5em;"></div>
+        <% name_or_object = @name_link.andand.uuid ? @name_link : @object %>
+        <% if name_or_object.created_at and not @logs.andand.any? %>
+          <p>
+            Created: <%= name_or_object.created_at.to_s(:long) if name_or_object.created_at %>
+          </p>
+          <p>
+            Last modified: <%= name_or_object.modified_at.to_s(:long) if name_or_object.modified_at %> by <%= link_to_if_arvados_object name_or_object.modified_by_user_uuid, friendly_name: true %>
+          </p>
+        <% else %>
+          <%= render_arvados_object_list_start(@logs, 'Show all activity',
+                logs_path(filters: [['object_uuid','=',name_or_object.uuid]].to_json)) do |log| %>
+          <p>
+          <%= time_ago_in_words(log.event_at) rescue 'unknown time' %> ago: <%= log.summary %>
+            <% if log.object_uuid %>
+            <%= link_to_if_arvados_object log.object_uuid, link_text: raw('<i class="fa fa-hand-o-right"></i>') %>
+            <% end %>
+          </p>
+          <% end %>
+        <% end %>
+      </div>
+    </div>
+  </div>
+  <div class="col-md-3">
+    <div class="panel panel-default">
+      <div class="panel-heading">
+       <h3 class="panel-title">
+         Sharing and permissions
+       </h3>
+      </div>
+      <div class="panel-body">
+        <!--
+       <input type="text" class="form-control" placeholder="Search"/>
+        -->
+
+        <div id="sharing-button">
+          <%= render partial: 'sharing_button' %>
+        </div>
+
+       <div style="height:0.5em;"></div>
+        <% if @projects.andand.any? %>
+          <p>Included in projects:<br />
+          <%= render_arvados_object_list_start(@projects, 'Show all projects',
+                links_path(filters: [['head_uuid', '=', @object.uuid],
+                                     ['link_class', '=', 'name']].to_json)) do |project| %>
+            <%= link_to_if_arvados_object(project, friendly_name: true) %><br />
+          <% end %>
+          </p>
+        <% end %>
+        <% if @permissions.andand.any? %>
+          <p>Readable by:<br />
+          <%= render_arvados_object_list_start(@permissions, 'Show all permissions',
+                links_path(filters: [['head_uuid', '=', @object.uuid],
+                                    ['link_class', '=', 'permission']].to_json)) do |link| %>
+          <%= link_to_if_arvados_object(link.tail_uuid, friendly_name: true) %><br />
+          <% end %>
+          </p>
+        <% end %>
+
+      </div>
+    </div>
+  </div>
+</div>
+
+<%= render file: 'application/show.html.erb', locals: local_assigns %>
diff --git a/apps/workbench/app/views/collections/show_file_links.html.erb b/apps/workbench/app/views/collections/show_file_links.html.erb
new file mode 100644 (file)
index 0000000..a829d8f
--- /dev/null
@@ -0,0 +1,82 @@
+<!DOCTYPE html>
+<html>
+<% coll_name = "Collection #{@object.uuid}" %>
+<% link_opts = {controller: 'collections', action: 'show_file',
+                uuid: @object.uuid, reader_token: params[:reader_token]} %>
+<head>
+  <meta charset="utf-8">
+  <title>
+    <%= coll_name %> / <%= Rails.configuration.site_name %>
+  </title>
+  <meta name="description" content="">
+  <meta name="author" content="">
+  <meta name="robots" content="NOINDEX">
+  <style type="text/css">
+body {
+  margin: 1.5em;
+}
+pre {
+  background-color: #D9EDF7;
+  border-radius: .25em;
+  padding: .75em;
+  overflow: auto;
+}
+.footer {
+  font-size: 82%;
+}
+.footer h2 {
+  font-size: 1.2em;
+}
+  </style>
+</head>
+<body>
+
+<h1><%= coll_name %></h1>
+
+<p>This collection of data files is being shared with you through
+Arvados.  You can download individual files listed below.  To download
+the entire collection with wget, try:</p>
+
+<pre>$ wget --mirror --no-parent --no-host --cut-dirs=3 <%=
+         url_for(link_opts.merge(action: 'show_file_links', only_path: false,
+                                 trailing_slash: true))
+       %></pre>
+
+<h2>File Listing</h2>
+
+<% file_tree = @object.andand.files_tree %>
+<% if file_tree.andand.any? %>
+  <ul id="collection_files" class="collection_files">
+  <% dirstack = [file_tree.first.first] %>
+  <% file_tree.take(10000).each_with_index do |(dirname, filename, size), index| %>
+    <% file_path = CollectionsHelper::file_path([dirname, filename]) %>
+    <% while dirstack.any? and (dirstack.last != dirname) %>
+      <% dirstack.pop %></ul></li>
+    <% end %>
+    <li>
+    <% if size.nil?  # This is a subdirectory. %>
+      <% dirstack.push(File.join(dirname, filename)) %>
+      <%= filename %>
+      <ul class="collection_files">
+    <% else %>
+      <%= link_to(filename,
+                  link_opts.merge(file: file_path),
+                  {title: "Download #{file_path}"}) %>
+      </li>
+    <% end %>
+  <% end %>
+  <%= raw(dirstack.map { |_| "</ul>" }.join("</li>")) %>
+<% else %>
+  <p>No files in this collection.</p>
+<% end %>
+
+<div class="footer">
+<h2>About Arvados</h2>
+
+<p>Arvados is a free and open source software bioinformatics platform.
+To learn more, visit arvados.org.
+Arvados is not responsible for the files listed on this page.</p>
+</div>
+
+</body>
+</html>
diff --git a/apps/workbench/app/views/groups/_choose_rows.html.erb b/apps/workbench/app/views/groups/_choose_rows.html.erb
new file mode 100644 (file)
index 0000000..fca0415
--- /dev/null
@@ -0,0 +1,9 @@
+<% icon_class = fa_icon_class_for_class(Group) %>
+<% @objects.each do |object| %>
+  <div class="row filterable selectable" data-object-uuid="<%= object.uuid %>">
+    <div class="col-sm-12" style="overflow-x:hidden">
+      <i class="fa fa-fw <%= icon_class %>"></i>
+      <%= object.name %>
+    </div>
+  </div>
+<% end %>
diff --git a/apps/workbench/app/views/groups/_show_recent.html.erb b/apps/workbench/app/views/groups/_show_recent.html.erb
new file mode 100644 (file)
index 0000000..c709e89
--- /dev/null
@@ -0,0 +1,42 @@
+<%= render partial: "paging", locals: {results: @groups, object: @object} %>
+
+<table class="table table-hover">
+  <thead>
+    <tr class="contain-align-left">
+      <th>
+       Group
+      </th><th>
+       Owner
+      </th><th>
+       Incoming permissions
+      </th><th>
+       Outgoing permissions
+      </th><th>
+       <!-- column for delete buttons -->
+      </th>
+    </tr>
+  </thead>
+  <tbody>
+
+    <% @groups.sort_by { |g| g[:created_at] }.reverse.each do |g| %>
+
+    <tr>
+      <td>
+        <%= link_to_if_arvados_object g, friendly_name: true %>
+      </td><td>
+        <%= link_to_if_arvados_object g.owner_uuid, friendly_name: true %>
+      </td><td>
+        <%= @links_to.select { |x| x.head_uuid == g.uuid }.collect(&:tail_uuid).uniq.count %>
+      </td><td>
+        <%= @links_from.select { |x| x.tail_uuid == g.uuid }.collect(&:head_uuid).uniq.count %>
+      </td><td>
+        <%= render partial: 'delete_object_button', locals: {object:g} %>
+      </td>
+    </tr>
+
+    <% end %>
+
+  </tbody>
+</table>
+
+<%= render partial: "paging", locals: {results: @groups, object: @object} %>
diff --git a/apps/workbench/app/views/issue_reporter/send_report.text.erb b/apps/workbench/app/views/issue_reporter/send_report.text.erb
new file mode 100644 (file)
index 0000000..28841b0
--- /dev/null
@@ -0,0 +1,12 @@
+<% if @user %>
+Issue reported by user <%=@user.email%>
+<% else %>
+Issue reported
+<% end %>
+
+Details of the report:
+<% if @params['report_additional_info'] %>
+<%  map_to_s = JSON.parse(@params['report_additional_info']).map {|k,v| "#{k}=#{v}"}.join("\n") %>
+<%= map_to_s %>
+<% end %>
+Report text=<%=@params['report_issue_text'] %>
diff --git a/apps/workbench/app/views/jobs/_show_details.html.erb b/apps/workbench/app/views/jobs/_show_details.html.erb
new file mode 100644 (file)
index 0000000..7b6b176
--- /dev/null
@@ -0,0 +1 @@
+<%= render partial: 'application/show_attributes' %>
diff --git a/apps/workbench/app/views/jobs/_show_job_buttons.html.erb b/apps/workbench/app/views/jobs/_show_job_buttons.html.erb
new file mode 100644 (file)
index 0000000..644da77
--- /dev/null
@@ -0,0 +1,29 @@
+<% if @object.state != "Running" %>
+    <%= form_tag '/jobs', style: "display:inline; padding-left: 1em" do |f| %>
+      <% [:script, :script_version, :repository, :supplied_script_version, :nondeterministic].each do |d| %>
+        <%= hidden_field :job, d, :value => @object[d] %>
+      <% end %>
+      <% [:script_parameters, :runtime_constraints].each do |d| %>
+        <%= hidden_field :job, d, :value => JSON.dump(@object[d]) %>
+      <% end %>
+      <%= button_tag ({class: 'btn btn-sm btn-primary', id: "re-run-same-job-button",
+                       title: 'Re-run job using the same script version as this run'}) do %>
+        <i class="fa fa-fw fa-gear"></i> Re-run same version
+      <% end %>
+    <% end %>
+  <% if @object.respond_to? :supplied_script_version and !@object.supplied_script_version.nil? and !@object.supplied_script_version.empty? and @object.script_version != @object.supplied_script_version%>
+      <%= form_tag '/jobs', style: "display:inline" do |f| %>
+      <% [:script, :repository, :supplied_script_version, :nondeterministic].each do |d| %>
+        <%= hidden_field :job, d, :value => @object[d] %>
+      <% end %>
+      <%= hidden_field :job, :script_version, :value => @object[:supplied_script_version] %>
+      <% [:script_parameters, :runtime_constraints].each do |d| %>
+        <%= hidden_field :job, d, :value => JSON.dump(@object[d]) %>
+      <% end %>
+      <%= button_tag ({class: 'btn btn-sm btn-primary', id: "re-run-latest-job-button",
+                       title: 'Re-run job using the latest script version'}) do%>
+        <i class="fa fa-fw fa-gear"></i> Re-run latest version
+      <% end %>
+    <% end %>
+  <% end %>
+<% end %>
diff --git a/apps/workbench/app/views/jobs/_show_log.html.erb b/apps/workbench/app/views/jobs/_show_log.html.erb
new file mode 100644 (file)
index 0000000..315c8c1
--- /dev/null
@@ -0,0 +1,256 @@
+<% if !@object.log %>
+
+<div id="log_graph_div"
+     class="arv-log-event-listener"
+     data-object-uuid="<%= @object.uuid %>"></div>
+
+<div id="event_log_div"
+     class="arv-log-event-listener arv-log-event-handler-append-logs arv-job-log-window"
+     data-object-uuid="<%= @object.uuid %>"
+     ></div>
+
+<%# Applying a long throttle suppresses the auto-refresh of this
+    partial that would normally be triggered by arv-log-event. %>
+<div class="arv-log-refresh-control"
+     data-load-throttle="86486400000" <%# 1001 nights %>
+     ></div>
+
+<% else %>
+
+<script>
+(function() {
+var pagesize = 1000;
+var logViewer = new List('log-viewer', {
+  valueNames: [ 'id', 'timestamp', 'taskid', 'message', 'type'],
+  page: pagesize
+});
+
+logViewer.page_offset = 0;
+logViewer.on("updated", function() { updatePaging(".log-viewer-paging", logViewer, pagesize) } );
+$(".log-viewer-page-up").on("click", function() { prevPage(logViewer, pagesize, ".log-viewer-paging"); return false; });
+$(".log-viewer-page-down").on("click", function() { nextPage(logViewer, pagesize, ".log-viewer-paging"); return false; });
+
+var taskState = newTaskState();
+
+var makeFilter = function() {
+  var pass = [];
+  $(".toggle-filter, .radio-filter").each(function(i, e) {
+    if (e.checked) {
+      pass.push(e.id.substr(5));
+    }
+  });
+
+  return (function(item) {
+    var v = false;
+    if (item.values().taskid !== "") {
+      for (a in pass) {
+        if (pass[a] == "all-tasks") { v = true; }
+        else if (pass[a] == "successful-tasks" && taskState[item.values().taskid].outcome == "success") { v = true; }
+        else if (pass[a] == "failed-tasks" && taskState[item.values().taskid].outcome == "failure") { v = true; }
+      }
+    } else {
+      v = true;
+    }
+    for (a in pass) {
+      if (pass[a] == item.values().type) { return v; }
+    }
+    return false;
+  });
+}
+
+<% if @object.log and !@object.log.empty? %>
+  <% logcollection = Collection.find @object.log %>
+  <% if logcollection %>
+    log_size = <%= logcollection.files[0][2] %>;
+    log_maxbytes = <%= Rails.configuration.log_viewer_max_bytes %>;
+    logcollection_url = '<%=j url_for logcollection %>/<%=j logcollection.files[0][1] %>';
+    $("#log-viewer-download-url").attr('href', logcollection_url);
+    $("#log-viewer-download-pane").show();
+    if (log_size > log_maxbytes) {
+      range_header = { 'Range': 'bytes=0-' + log_maxbytes };
+    } else {
+      range_header = null;
+    }
+    $.ajax(logcollection_url, { headers: range_header }).
+        done(function(data, status, jqxhr) {
+            logViewer.filter();
+            addToLogViewer(logViewer, data.split("\n"), taskState);
+            logViewer.filter(makeFilter());
+            content_range_hdr = jqxhr.getResponseHeader('Content-Range');
+            var v = content_range_hdr && content_range_hdr.match(/bytes \d+-(\d+)\/(.+)/);
+            short_log = v && (v[2] == '*' || parseInt(v[1]) + 1 < v[2]);
+            if (jqxhr.status == 206 && short_log) {
+              $("#log-viewer-overview").html(
+                '<p>Showing only ' + data.length + ' bytes of this log.' +
+                ' Timing information is unavailable since' +
+                ' the full log was not retrieved.</p>'
+              );
+            } else {
+              generateJobOverview("#log-viewer-overview", logViewer, taskState);
+            }
+            $("#log-viewer .spinner").detach();
+        }).
+        fail(function(jqxhr, status, error) {
+            $("#log-viewer .spinner").detach();
+        });
+  <% end %>
+<% else %>
+  <%# Live log loading not implemented yet. %>
+<% end %>
+
+$(".toggle-filter, .radio-filter").on("change", function() {
+  logViewer.filter(makeFilter());
+});
+
+$("#filter-all").on("click", function() {
+  $(".toggle-filter").each(function(i, f) { f.checked = true; });
+  logViewer.filter(makeFilter());
+});
+
+$("#filter-none").on("click", function() {
+  $(".toggle-filter").each(function(i, f) { f.checked = false; });
+  logViewer.filter(makeFilter());
+});
+
+$("#sort-by-time").on("change", function() {
+  logViewer.sort("id", {sortFunction: sortById});
+});
+
+$("#sort-by-task").on("change", function() {
+  logViewer.sort("taskid", {sortFunction: sortByTask});
+});
+
+$("#sort-by-node").on("change", function() {
+  logViewer.sort("node", {sortFunction: sortByNode});
+});
+
+$("#set-show-failed-only").on("click", function() {
+  $("#sort-by-task").prop("checked", true);
+  $("#show-failed-tasks").prop("checked", true);
+  $("#show-crunch").prop("checked", false);
+  $("#show-task-dispatch").prop("checked", true);
+  $("#show-task-print").prop("checked", true);
+  $("#show-crunchstat").prop("checked", false);
+  logViewer.filter(makeFilter());
+  logViewer.sort("taskid", {sortFunction: sortByTask});
+});
+
+})();
+
+</script>
+
+<div id="log-viewer">
+
+  <h3>Summary</h3>
+  <p id="log-viewer-overview">
+    <% if !logcollection %>
+      The collection containing the job log was not found.
+    <% end %>
+  </p>
+
+  <p id="log-viewer-download-pane" style="display:none">
+    <a id="log-viewer-download-url" href="">Download the full log</a>
+  </p>
+
+  <div class="h3">Log
+
+    <span class="pull-right">
+      <% if @object.andand.tasks_summary.andand[:failed] and @object.tasks_summary[:failed] > 0 %>
+        <button id="set-show-failed-only" class="btn btn-danger">
+          Show failed task diagnostics only
+        </button>
+      <% end %>
+
+      <button id="filter-all" class="btn">
+        Select all
+      </button>
+      <button id="filter-none" class="btn">
+        Select none
+      </button>
+    </span>
+  </div>
+
+  <input class="search pull-right" style="margin-top: 1em" placeholder="Search" />
+
+  <div>
+    <div class="radio-inline log-viewer-button" style="margin-left: 10px">
+      <label><input id="sort-by-time" type="radio" name="sort-radio" checked> Sort by time</label>
+    </div>
+    <div class="radio-inline log-viewer-button">
+      <label><input id="sort-by-node" type="radio" name="sort-radio" > Sort by node</label>
+    </div>
+
+    <div class="radio-inline log-viewer-button">
+      <label><input id="sort-by-task" type="radio" name="sort-radio" > Sort by task</label>
+    </div>
+  </div>
+
+  <div>
+    <div class="radio-inline log-viewer-button" style="margin-left: 10px">
+      <label><input id="show-all-tasks" type="radio" name="show-tasks-group" checked="true" class="radio-filter"> Show all tasks</label>
+    </div>
+    <div class="radio-inline log-viewer-button">
+      <label><input id="show-successful-tasks" type="radio" name="show-tasks-group" class="radio-filter"> Only successful tasks</label>
+    </div>
+    <div class="radio-inline log-viewer-button">
+      <label><input id="show-failed-tasks" type="radio" name="show-tasks-group" class="radio-filter"> Only failed tasks</label>
+    </div>
+  </div>
+
+  <div>
+    <div class="checkbox-inline log-viewer-button" style="margin-left: 10px">
+      <label><input id="show-crunch" type="checkbox" checked="true" class="toggle-filter"> Show crunch diagnostics</label>
+    </div>
+    <div class="checkbox-inline log-viewer-button">
+      <label><input id="show-task-dispatch" type="checkbox" checked="true" class="toggle-filter"> Show task dispatch</label>
+    </div>
+    <div class="checkbox-inline log-viewer-button">
+      <label><input id="show-task-print" type="checkbox" checked="true" class="toggle-filter"> Show task diagnostics</label>
+    </div>
+    <div class="checkbox-inline log-viewer-button">
+      <label><input id="show-crunchstat" type="checkbox" checked="true" class="toggle-filter"> Show compute usage</label>
+    </div>
+
+  </div>
+
+  <div class="smart-scroll" data-smart-scroll-padding-bottom="50" style="margin-bottom: 0px">
+    <table class="log-viewer-table">
+      <thead>
+        <tr>
+          <th class="id" data-sort="id"></th>
+          <th class="timestamp" data-sort="timestamp">Timestamp</th>
+          <th class="node"  data-sort="node">Node</th>
+          <th class="slot"  data-sort="slot">Slot</th>
+          <th class="type" data-sort="type">Log type</th>
+          <th class="taskid"  data-sort="taskid">Task</th>
+          <th class="message" data-sort="message">Message</th>
+        </tr>
+      </thead>
+      <tbody class="list">
+        <tr>
+          <td class="id"></td>
+          <td class="timestamp"></td>
+          <td class="node"></td>
+          <td class="slot"></td>
+          <td class="type"></td>
+          <td class="taskid"></td>
+          <td class="message"></td>
+        </tr>
+      </tbody>
+    </table>
+
+    <% if @object.log and logcollection %>
+      <div class="spinner spinner-32px"></div>
+    <% end %>
+
+  </div>
+
+  <div class="log-viewer-paging-div" style="margin-bottom: -15px">
+    <a href="#" class="log-viewer-page-up"><span class='glyphicon glyphicon-arrow-up'></span></a>
+    <span class="log-viewer-paging"></span>
+    <a href="#" class="log-viewer-page-down"><span class='glyphicon glyphicon-arrow-down'></span></a>
+  </div>
+
+</div>
+
+<% end %>
diff --git a/apps/workbench/app/views/jobs/_show_object_description_cell.html.erb b/apps/workbench/app/views/jobs/_show_object_description_cell.html.erb
new file mode 100644 (file)
index 0000000..6788fc1
--- /dev/null
@@ -0,0 +1,15 @@
+<div class="nowrap">
+  <div class="row">
+    <div class="col-sm-2 inline-progress-container">
+      <%= render partial: 'job_progress', locals: {j: object} %>
+    </div>
+    <div class="col-sm-10">
+      <%= object.script %>
+      <span class="deemphasize">
+        job
+        using <%= object.script_version %> commit
+        from <%= object.repository %> repository
+      </span>
+    </div>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/jobs/_show_provenance.html.erb b/apps/workbench/app/views/jobs/_show_provenance.html.erb
new file mode 100644 (file)
index 0000000..253af56
--- /dev/null
@@ -0,0 +1,4 @@
+<%= render partial: 'application/svg_div', locals: {
+      divId: "provenance_graph", 
+      svgId: "provenance_svg", 
+      svg: @svg } %>
diff --git a/apps/workbench/app/views/jobs/_show_recent.html.erb b/apps/workbench/app/views/jobs/_show_recent.html.erb
new file mode 100644 (file)
index 0000000..d12ebb6
--- /dev/null
@@ -0,0 +1,120 @@
+<% content_for :css do %>
+  table.topalign>tbody>tr>td {
+  vertical-align: top;
+  }
+  table.topalign>thead>tr>td {
+  vertical-align: bottom;
+  }
+<% end %>
+
+<%= render partial: "paging", locals: {results: objects, object: @object} %>
+
+<table class="topalign table">
+  <thead>
+    <tr class="contain-align-left">
+      <th>
+      </th><th>
+       status
+      </th><th>
+       uuid
+      </th><th>
+       script
+      </th><th>
+       version
+      </th><th>
+       output
+      </th>
+    </tr>
+  </thead>
+  <tbody>
+
+    <% @objects.sort_by { |j| j[:created_at] }.reverse.each do |j| %>
+
+    <tr class="cell-noborder">
+      <td>
+        <i class="icon-plus-sign expand-collapse-row" data-id="<%= j.uuid %>" style="cursor: pointer"></i>
+      </td>
+      <td>
+        <div class="inline-progress-container">
+          <%= render partial: 'job_progress', locals: {:j => j} %>
+        </div>
+      </td>
+      <td>
+        <%= link_to_if_arvados_object j %>
+      </td>
+      <td>
+        <%= j.script %>
+      </td>
+      <td>
+        <%= j.script_version.andand[0..8] %>
+      </td>
+      <td>
+        <%= link_to_if_arvados_object j.output %>
+      </td>
+    </tr>
+    <tr class="cell-noborder" id="<%= j.uuid %>" style="display:none">
+      <td colspan="7"><table class="table table-justforlayout"><tr>
+      <td style="border-left: 1px solid black">
+        <table class="table table-condensed">
+          <tr>
+            <td>
+              queued
+            </td>
+            <td>
+             &#x2709;&nbsp;<span title="<%= j.created_at %>"><%= raw distance_of_time_in_words(Time.now, j.created_at).sub('about ','~').sub(' ','&nbsp;') + '&nbsp;ago' if j.created_at %></span>
+            </td>
+            <td>
+             <%= raw('for&nbsp;' + distance_of_time_in_words(j.started_at, j.created_at).sub('about ','~').sub(' ','&nbsp;')) if j.created_at and j.started_at %>
+            </td>
+          </tr>
+          <% if j.started_at.is_a? Time %>
+          <tr>
+            <td>
+              started
+            </td>
+            <td>
+             &#x2708;&nbsp;<span title="<%= j.created_at %>"><%= raw distance_of_time_in_words(j.started_at, Time.now).sub('about ','~').sub(' ','&nbsp;') + '&nbsp;ago' if j.started_at %></span>
+            </td>
+            <td>
+              <% if j.finished_at.is_a? Time %>
+             <%= raw('ran&nbsp;' + distance_of_time_in_words(j.finished_at, j.started_at).sub('about ','~').sub(' ','&nbsp;')) %>
+              <% elsif j.state == "Running" %>
+              <span class="badge badge-success" title="tasks finished">&#x2714;&nbsp;<%= j.tasks_summary[:done] %></span>
+              <span class="badge badge-info" title="tasks running">&#x2708;&nbsp;<%= j.tasks_summary[:running] %></span>
+              <span class="badge" title="tasks todo">&#x2709;&nbsp;<%= j.tasks_summary[:todo] %></span>
+              <% if j.tasks_summary[:failed] %>
+              <span class="badge badge-warning" title="task failures">&#x2716;&nbsp;<%= j.tasks_summary[:failed] %></span>
+              <% end %>
+              <% end %>
+            </td>
+          </tr>
+          <% end %>
+        </table>
+      </td><td>
+        <table class="table table-condensed">
+          <tr><td colspan="2">
+              <%= j.script %> <%= j.script_version %>
+          </td></tr>
+          <% j.script_parameters.sort.each do |k,v| %>
+          <tr>
+            <td><%= k %></td><td><%= link_to_if_arvados_object v %></td>
+          </tr>
+          <% end %>
+          <tr>
+            <td>output</td><td><%= link_to_if_arvados_object j.output %></td>
+          </tr>
+        </table>
+      </td><td>
+        <table class="table table-condensed">
+        <% j.runtime_constraints.sort.each do |k,v| %>
+        <tr><td><%= v %></td><td><%= k %></td></tr>
+        <% end %>
+        </table>
+      </td>
+      </tr></table></td>
+    </tr>
+
+    <% end %>
+
+  </tbody>
+</table>
diff --git a/apps/workbench/app/views/jobs/_show_status.html.erb b/apps/workbench/app/views/jobs/_show_status.html.erb
new file mode 100644 (file)
index 0000000..8075209
--- /dev/null
@@ -0,0 +1,36 @@
+<div class="arv-log-refresh-control"
+     data-load-throttle="15000"
+     ></div>
+<%=
+   pj = {}
+   pj[:job] = @object
+   pj[:name] = @object[:name] || "this job"
+   pj[:progress_bar] = render(partial: "job_progress",
+                              locals: {:j => @object })
+   tasks = JobTask.filter([['job_uuid', '=', @object.uuid]]).results
+   render(partial: 'pipeline_instances/running_component',
+          locals: { tasks: tasks, pj: pj, i: 0, expanded: true})
+%>
+
+<div class="panel panel-default">
+  <div class="panel-heading">
+    <span class="panel-title">Used in pipelines</span>
+  </div>
+  <div class="panel-body">
+    <% pi = PipelineInstance.order("created_at desc").filter([["components", "like", "%#{@object.uuid}%"]]) %>
+
+    <% pi.each do |pipeline| %>
+      <% pipeline.components.each do |k, v| %>
+        <% if v[:job] and v[:job][:uuid] == @object.uuid %>
+          <div>
+            <b><%= k %></b>
+            component of
+            <%= link_to_if_arvados_object pipeline, friendly_name: true %>
+            created at
+            <%= render_localized_date(pipeline.created_at) %>.
+          </div>
+        <% end %>
+      <% end %>
+    <% end %>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/jobs/show.html.erb b/apps/workbench/app/views/jobs/show.html.erb
new file mode 100644 (file)
index 0000000..566014e
--- /dev/null
@@ -0,0 +1,11 @@
+<% content_for :tab_line_buttons do %>
+  <div class="pane-loaded arv-log-event-listener arv-refresh-on-state-change"
+       data-pane-content-url="<%= url_for(params.merge(tab_pane: "job_buttons")) %>"
+       data-object-uuid="<%= @object.uuid %>"
+       style="display: inline">
+    <%= render partial: 'show_job_buttons', locals: {object: @object}%>
+  </div>
+<% end %>
+
+<%= render partial: 'title_and_buttons' %>
+<%= render partial: 'content', layout: 'content_layout', locals: {pane_list: controller.show_pane_list }%>
diff --git a/apps/workbench/app/views/keep_disks/_content_layout.html.erb b/apps/workbench/app/views/keep_disks/_content_layout.html.erb
new file mode 100644 (file)
index 0000000..2cf3291
--- /dev/null
@@ -0,0 +1,20 @@
+<% unless @histogram_pretty_date.nil? %>
+  <% content_for :tab_panes do %>
+  <script type="text/javascript">
+    $(document).ready(function(){
+      $.renderHistogram(<%= raw @cache_age_histogram.to_json %>);
+    });
+  </script>
+  <div class='graph'>
+    <h3>Cache Age vs. Disk Utilization</h3>
+    <h4>circa <%= @histogram_pretty_date %></h4>
+    <div id='cache-age-vs-disk-histogram'>
+    </div>
+  </div>
+  <% end %>
+<% end %>
+<%= content_for :content_top %>
+<div class="pull-right">
+  <%= content_for :tab_line_buttons %>
+</div>
+<%= content_for :tab_panes %>
diff --git a/apps/workbench/app/views/layouts/application.html.erb b/apps/workbench/app/views/layouts/application.html.erb
new file mode 100644 (file)
index 0000000..cdc47c1
--- /dev/null
@@ -0,0 +1,54 @@
+<!DOCTYPE html>
+<html ng-app="Workbench">
+<head>
+  <meta charset="utf-8">
+  <title>
+    <% if content_for? :page_title %>
+    <%= yield :page_title %> / <%= Rails.configuration.site_name %>
+    <% else %>
+    <%= Rails.configuration.site_name %>
+    <% end %>
+  </title>
+  <meta name="viewport" content="width=device-width, initial-scale=1.0">
+  <link rel="icon" href="/favicon.ico" type="image/x-icon">
+  <link rel="shortcut icon" href="/favicon.ico" type="image/x-icon">
+  <meta name="description" content="">
+  <meta name="author" content="">
+  <% if current_user and $arvados_api_client.discovery[:websocketUrl] %>
+  <meta name="arv-websocket-url" content="<%=$arvados_api_client.discovery[:websocketUrl]%>?api_token=<%=Thread.current[:arvados_api_token]%>">
+  <% end %>
+  <meta name="robots" content="NOINDEX, NOFOLLOW">
+  <%= stylesheet_link_tag    "application", :media => "all" %>
+  <%= javascript_include_tag "application" %>
+  <%= csrf_meta_tags %>
+  <%= yield :head %>
+  <%= javascript_tag do %>
+    angular.module('Arvados').value('arvadosApiToken', '<%=Thread.current[:arvados_api_token]%>');
+    angular.module('Arvados').value('arvadosDiscoveryUri', '<%= Rails.configuration.arvados_v1_base.sub '/arvados/v1', '/discovery/v1/apis/arvados/v1/rest' %>');
+  <%= yield :js %>
+  <% end %>
+  <style>
+    <%= yield :css %>
+    body {
+    min-height: 100%;
+    height: 100%;
+    }
+
+    @media (max-width: 979px) { body { padding-top: 0; } }
+
+    @media (max-width: 767px) {
+    .breadcrumbs {
+    display: none;
+    }
+    }
+  </style>
+  <link href="//netdna.bootstrapcdn.com/font-awesome/4.1.0/css/font-awesome.css" rel="stylesheet">
+  <%= piwik_tracking_tag if (PiwikAnalytics.configuration.url != 'localhost' rescue false) %>
+</head>
+<body>
+<%= render template: 'layouts/body' %>
+<%= javascript_tag do %>
+<%= yield :footer_js %>
+<% end %>
+</body>
+</html>
diff --git a/apps/workbench/app/views/layouts/body.html.erb b/apps/workbench/app/views/layouts/body.html.erb
new file mode 100644 (file)
index 0000000..5cfa2ca
--- /dev/null
@@ -0,0 +1,197 @@
+  <div id="wrapper" class="container-fluid">
+    <nav class="navbar navbar-default navbar-fixed-top" role="navigation">
+      <div class="navbar-header">
+        <button type="button" class="navbar-toggle" data-toggle="collapse" data-target=".navbar-collapse">
+          <span class="sr-only">Toggle navigation</span>
+          <span class="icon-bar"></span>
+          <span class="icon-bar"></span>
+          <span class="icon-bar"></span>
+        </button>
+        <a class="navbar-brand" href="/" data-push=true><%= Rails.configuration.site_name.downcase rescue Rails.application.class.parent_name %></a>
+      </div>
+
+      <div class="collapse navbar-collapse">
+        <ul class="nav navbar-nav navbar-right">
+
+          <li>
+            <a><i class="rotating loading glyphicon glyphicon-refresh"></i></a>
+          </li>
+
+          <% if current_user %>
+            <% if current_user.is_active %>
+            <li>
+              <form class="navbar-form" role="search"
+                         data-search-modal=
+                         "<%= url_for(
+                          action: 'choose',
+                          controller: 'search',
+                          title: 'Search',
+                          action_name: 'Show',
+                          action_href: url_for(controller: :actions, action: :show),
+                          action_method: 'get',
+                          action_data: {selection_param: 'uuid', success: 'redirect-to-created-object', copy_from_search_box: true}.to_json)
+                         %>">
+                <div class="input-group" style="width: 220px">
+                  <input type="text" class="form-control" placeholder="search">
+                  <a class="input-group-addon"><span class="glyphicon glyphicon-search"></span></a>
+                </div>
+              </form>
+            </li>
+            <% end %>
+
+            <li class="dropdown notification-menu">
+              <a href="#" class="dropdown-toggle" data-toggle="dropdown" id="notifications-menu">
+                <span class="badge badge-alert notification-count"><%= @notification_count %></span>
+                <%= current_user.email %> <span class="caret"></span>
+              </a>
+              <ul class="dropdown-menu" role="menu">
+                <% if current_user.is_active %>
+                <li role="menuitem"><a href="/manage_account" role="menuitem"><i class="fa fa-key fa-fw"></i> Manage account</a></li>
+                <% if Rails.configuration.user_profile_form_fields %>
+                  <li role="menuitem"><a href="/users/<%=current_user.uuid%>/profile" role="menuitem"><i class="fa fa-key fa-fw"></i> Manage profile</a></li>
+                <% end %>
+                <% end %>
+                <li role="menuitem"><a href="<%= logout_path %>" role="menuitem"><i class="fa fa-sign-out fa-fw"></i> Log out</a></li>
+                <% if current_user.is_active and
+                      (@notifications || []).length > 0 %>
+                  <li role="presentation" class="divider"></li>
+                  <% @notifications.each_with_index do |n, i| %>
+                    <% if i > 0 %><li class="divider"></li><% end %>
+                    <li class="notification"><%= n.call(self) %></li>
+                  <% end %>
+                <% end %>
+              </ul>
+            </li>
+
+            <% if current_user.is_admin %>
+              <li class="dropdown">
+                <a href="#" class="dropdown-toggle" data-toggle="dropdown" id="system-menu">
+                  <span class="fa fa-lg fa-gear"></span>
+                </a>
+                <ul class="dropdown-menu" role="menu">
+                  <li role="presentation" class="dropdown-header">
+                    Settings
+                  </li>
+                  <li role="menuitem"><a href="/repositories">
+                      <i class="fa fa-lg fa-code-fork fa-fw"></i> Repositories
+                  </a></li>
+                  <li role="menuitem"><a href="/virtual_machines">
+                      <i class="fa fa-lg fa-terminal fa-fw"></i> Virtual machines
+                  </a></li>
+                  <li role="menuitem"><a href="/authorized_keys">
+                      <i class="fa fa-lg fa-key fa-fw"></i> SSH keys
+                  </a></li>
+                  <li role="menuitem"><a href="/api_client_authorizations">
+                      <i class="fa fa-lg fa-ticket fa-fw"></i> API tokens
+                  </a></li>
+                  <li role="menuitem"><a href="/links">
+                      <i class="fa fa-lg fa-arrows-h fa-fw"></i> Links
+                  </a></li>
+                  <li role="menuitem"><a href="/users">
+                      <i class="fa fa-lg fa-user fa-fw"></i> Users
+                  </a></li>
+                  <li role="menuitem"><a href="/groups">
+                      <i class="fa fa-lg fa-users fa-fw"></i> Groups
+                  </a></li>
+                  <li role="menuitem"><a href="/nodes">
+                      <i class="fa fa-lg fa-cloud fa-fw"></i> Compute nodes
+                  </a></li>
+                  <li role="menuitem"><a href="/keep_services">
+                      <i class="fa fa-lg fa-exchange fa-fw"></i> Keep services
+                  </a></li>
+                  <li role="menuitem"><a href="/keep_disks">
+                      <i class="fa fa-lg fa-hdd-o fa-fw"></i> Keep disks
+                  </a></li>
+                </ul>
+              </li>
+            <% end %>
+          <% else %>
+            <li><a href="<%= arvados_api_client.arvados_login_url(return_to: root_url) %>">Log in</a></li>
+          <% end %>
+
+          <li class="dropdown help-menu">
+            <a href="#" class="dropdown-toggle" data-toggle="dropdown" id="arv-help">
+              <span class="fa fa-lg fa-question-circle"></span>
+            </a>
+            <ul class="dropdown-menu">
+              <li><%= link_to raw('<i class="fa fa-book fa-fw"></i> Tutorials and User guide'), "#{Rails.configuration.arvados_docsite}/user", target: "_blank" %></li>
+              <li><%= link_to raw('<i class="fa fa-book fa-fw"></i> API Reference'), "#{Rails.configuration.arvados_docsite}/api", target: "_blank" %></li>
+              <li><%= link_to raw('<i class="fa fa-book fa-fw"></i> SDK Reference'), "#{Rails.configuration.arvados_docsite}/sdk", target: "_blank" %></li>
+              <li role="presentation" class="divider"></li>
+              <li> <%= link_to report_issue_popup_path(popup_type: 'version', current_location: request.url, current_path: request.fullpath, action_method: 'post'),
+                      {class: 'report-issue-modal-window', remote: true, return_to: request.url} do %>
+                       <i class="fa fa-fw fa-support"></i> Show version / debugging info ...
+                      <% end %>
+              </li>
+              <li> <%= link_to report_issue_popup_path(popup_type: 'report', current_location: request.url, current_path: request.fullpath, action_method: 'post'),
+                      {class: 'report-issue-modal-window', remote: true, return_to: request.url} do %>
+                       <i class="fa fa-fw fa-support"></i> Report a problem ...
+                      <% end %>
+              </li>
+            </ul>
+          </li>
+        </ul>
+      </div><!-- /.navbar-collapse -->
+    </nav>
+
+    <% if current_user.andand.is_active %>
+      <nav class="navbar navbar-default breadcrumbs" role="navigation">
+        <ul class="nav navbar-nav navbar-left">
+          <li>
+            <a href="/">
+              <i class="fa fa-lg fa-fw fa-dashboard"></i>
+              Dashboard
+            </a>
+          </li>
+          <li class="dropdown">
+            <a href="#" class="dropdown-toggle" data-toggle="dropdown" id="projects-menu">
+              Projects
+              <span class="caret"></span>
+            </a>
+            <ul class="dropdown-menu" style="min-width: 20em" role="menu">
+              <li>
+                <%= link_to projects_path(options: {ensure_unique_name: true}), method: :post, class: 'btn btn-xs btn-default pull-right' do %>
+                  <i class="fa fa-plus"></i> Add a new project
+                <% end %>
+              </li>
+              <%= render partial: "projects_tree_menu", locals: {
+                  :project_link_to => Proc.new do |pnode, &block|
+                    link_to(project_path(pnode[:object].uuid),
+                      data: { 'object-uuid' => pnode[:object].uuid,
+                              'name' => 'name' },
+                      &block)
+                  end,
+              } %>
+            </ul>
+          </li>
+          <% if @name_link or @object %>
+            <li class="nav-separator">
+              <i class="fa fa-lg fa-angle-double-right"></i>
+            </li>
+            <li>
+              <%= link_to project_path(current_user.uuid) do %>
+                Home
+              <% end %>
+            </li>
+            <% project_breadcrumbs.each do |p| %>
+              <li class="nav-separator">
+                <i class="fa fa-lg fa-angle-double-right"></i>
+              </li>
+              <li>
+                <%= link_to(p.name, project_path(p.uuid), data: {object_uuid: p.uuid, name: 'name'}) %>
+              </li>
+            <% end %>
+          <% end %>
+        </ul>
+      </nav>
+    <% end %>
+
+    <div id="page-wrapper">
+      <%= yield %>
+    </div>
+  </div>
+
+  <%= yield :footer_html %>
+
+<div class="modal-container"></div>
+<div id="report-issue-modal-window"></div>
diff --git a/apps/workbench/app/views/links/_breadcrumb_page_name.html.erb b/apps/workbench/app/views/links/_breadcrumb_page_name.html.erb
new file mode 100644 (file)
index 0000000..8c35905
--- /dev/null
@@ -0,0 +1,8 @@
+<% if @object %>
+(<%= @object.link_class %>)
+<%= @object.name %>:
+<%= @object.tail_kind.andand.sub 'arvados#', '' %>
+&rarr;
+<%= @object.head_kind.andand.sub 'arvados#', '' %>
+<% end %>
+
diff --git a/apps/workbench/app/views/notifications/_collections_notification.html.erb b/apps/workbench/app/views/notifications/_collections_notification.html.erb
new file mode 100644 (file)
index 0000000..4ef0d31
--- /dev/null
@@ -0,0 +1,7 @@
+  <%= image_tag "dax.png", class: "dax" %>
+  <p>
+    Hi, I noticed you haven't uploaded a new collection yet. 
+    <%= link_to "Click here to learn how to upload data to Arvados Keep.", 
+       "#{Rails.configuration.arvados_docsite}/user/tutorials/tutorial-keep.html", 
+       style: "font-weight: bold", target: "_blank" %>
+  </p>
diff --git a/apps/workbench/app/views/notifications/_jobs_notification.html.erb b/apps/workbench/app/views/notifications/_jobs_notification.html.erb
new file mode 100644 (file)
index 0000000..18ebd02
--- /dev/null
@@ -0,0 +1,8 @@
+  <p><%= image_tag "dax.png", class: "dax" %>
+    Hi, I noticed you haven't run a job yet. 
+    <%= link_to "Click here to learn how to run an Arvados Crunch job.", 
+       "#{Rails.configuration.arvados_docsite}/user/tutorials/tutorial-job1.html", 
+       style: "font-weight: bold",
+       target: "_blank" %>
+  </p>
+
diff --git a/apps/workbench/app/views/notifications/_pipelines_notification.html.erb b/apps/workbench/app/views/notifications/_pipelines_notification.html.erb
new file mode 100644 (file)
index 0000000..781b907
--- /dev/null
@@ -0,0 +1,7 @@
+  <p><%= image_tag "dax.png", class: "dax" %>
+    Hi, I noticed you haven't run a pipeline yet.  
+    <%= link_to "Click here to learn how to run an Arvados Crunch pipeline.", 
+       "#{Rails.configuration.arvados_docsite}/user/tutorials/tutorial-pipeline-workbench.html",
+       style: "font-weight: bold",
+       target: "_blank" %>
+  </p>
diff --git a/apps/workbench/app/views/notifications/_ssh_key_notification.html.erb b/apps/workbench/app/views/notifications/_ssh_key_notification.html.erb
new file mode 100644 (file)
index 0000000..989db3d
--- /dev/null
@@ -0,0 +1,6 @@
+   <%= image_tag "dax.png", class: "dax" %>
+    <div>
+      Hi, I noticed that you have not yet set up an SSH public key for use with Arvados.
+      <%= link_to "Click here to set up an SSH public key for use with Arvados.",
+      "/manage_account", style: "font-weight: bold" %>
+    </div>
diff --git a/apps/workbench/app/views/pipeline_instances/_component_labels.html.erb b/apps/workbench/app/views/pipeline_instances/_component_labels.html.erb
new file mode 100644 (file)
index 0000000..d2d824b
--- /dev/null
@@ -0,0 +1,5 @@
+<% pipeline_jobs(object).each do |pj| %>
+  <span class="label label-<%= pj[:labeltype] %>">
+    <%= pj[:name] %>
+  </span>&nbsp;
+<% end %>
diff --git a/apps/workbench/app/views/pipeline_instances/_running_component.html.erb b/apps/workbench/app/views/pipeline_instances/_running_component.html.erb
new file mode 100644 (file)
index 0000000..c916ee9
--- /dev/null
@@ -0,0 +1,174 @@
+<% current_job = pj[:job] if pj[:job] != {} and pj[:job][:uuid] %>
+<div class="panel panel-default">
+  <div class="panel-heading">
+    <div class="container-fluid">
+      <div class="row-fluid">
+        <%# column offset 0 %>
+        <div class="col-md-3" style="word-break:break-all;">
+          <h4 class="panel-title">
+            <a data-toggle="collapse" href="#collapse<%= i %>">
+              <%= pj[:name] %> <span class="caret"></span>
+            </a>
+          </h4>
+        </div>
+
+        <%# column offset 3 %>
+        <div class="col-md-2 pipeline-instance-spacing">
+          <%= pj[:progress_bar] %>
+        </div>
+
+        <% if current_job %>
+          <%# column offset 5 %>
+          <% if current_job[:state] != "Queued" %>
+          <div class="col-md-3">
+            <% if current_job[:started_at] %>
+              <% walltime = ((if current_job[:finished_at] then current_job[:finished_at] else Time.now() end) - current_job[:started_at]) %>
+              <% cputime = tasks.map { |task|
+                   if task.started_at and task.job_uuid == current_job[:uuid]
+                     (if task.finished_at then task.finished_at else Time.now() end) - task.started_at
+                   else
+                     0
+                   end
+                 }.reduce(:+) || 0 %>
+              <%= render_runtime(walltime, false, false) %>
+              <% if cputime > 0 %> / <%= render_runtime(cputime, false, false) %> (<%= (cputime/walltime).round(1) %>&Cross;)<% end %>
+            <% end %>
+          </div>
+          <% end %>
+
+          <% if current_job[:state] == "Queued" %>
+            <%# column offset 5 %>
+            <div class="col-md-6">
+              <% queuetime = Time.now - current_job[:created_at] %>
+              Queued for <%= render_runtime(queuetime, true) %>.
+              <% begin %>
+                <% if current_job[:queue_position] == 0 %>
+                  This job is next in the queue to run.
+                <% elsif current_job[:queue_position] == 1 %>
+                  There is 1 job in the queue ahead of this one.
+                <% elsif current_job[:queue_position] %>
+                  There are <%= current_job[:queue_position] %> jobs in the queue ahead of this one.
+                <% end %>
+              <% rescue %>
+              <% end %>
+            </div>
+          <% elsif current_job[:state] == "Running" %>
+            <%# column offset 8 %>
+            <div class="col-md-3">
+              <span class="task-summary-status">
+                <%= current_job[:tasks_summary][:done] %>&nbsp;<%= "task".pluralize(current_job[:tasks_summary][:done]) %> done,
+                <%= current_job[:tasks_summary][:failed] %>&nbsp;failed,
+                <%= current_job[:tasks_summary][:running] %>&nbsp;running,
+                <%= current_job[:tasks_summary][:todo] %>&nbsp;pending
+              </span>
+            </div>
+          <% elsif current_job[:state].in? ["Complete", "Failed", "Cancelled"] %>
+            <%# column offset 8 %>
+            <div class="col-md-4 text-overflow-ellipsis">
+              <% if pj[:output_uuid] %>
+                <%= link_to_if_arvados_object pj[:output_uuid], friendly_name: true %>
+              <% elsif current_job[:output] %>
+                <%= link_to_if_arvados_object current_job[:output], link_text: "Output of #{pj[:name]}" %>
+              <% else %>
+                No output.
+              <% end %>
+            </div>
+          <% end %>
+
+          <% if current_job[:state].in? ["Queued", "Running"] %>
+            <%# column offset 11 %>
+            <div class="col-md-1 pipeline-instance-spacing">
+              <%= form_tag "/jobs/#{current_job[:uuid]}/cancel", remote: true, style: "display:inline; padding-left: 1em" do |f| %>
+                <%= hidden_field_tag :return_to, url_for(@object) %>
+                <%= button_tag "Cancel", {class: 'btn btn-xs btn-danger', id: "cancel-job-button"} %>
+              <% end %>
+            </div>
+          <% end %>
+        <% end %>
+      </div>
+    </div>
+  </div>
+
+  <div id="collapse<%= i %>" class="panel-collapse collapse <%= if expanded then 'in' end %>">
+    <div class="panel-body">
+      <div class="container">
+        <% current_component = (if current_job then current_job else pj end) %>
+        <div class="row">
+          <div class="col-md-6">
+            <table>
+              <% [:script, :repository, :script_version, :supplied_script_version, :nondeterministic].each do |k| %>
+                <tr>
+                  <td style="padding-right: 1em">
+                    <%= k.to_s %>:
+                  </td>
+                  <td>
+                    <% if current_component[k].nil? %>
+                      (none)
+                    <% else %>
+                      <%= current_component[k] %>
+                    <% end %>
+                  </td>
+                </tr>
+              <% end %>
+              <% if current_component[:runtime_constraints].andand[:docker_image] and current_component[:docker_image_locator] %>
+                <tr>
+                  <td style="padding-right: 1em">
+                    docker_image:
+                  </td>
+                  <td>
+                    <%= current_component[:runtime_constraints][:docker_image] %>
+                  </td>
+                </tr>
+                <tr>
+                  <td style="padding-right: 1em">
+                    docker_image_locator:
+                  </td>
+                  <td>
+                    <%= link_to_if_arvados_object current_component[:docker_image_locator], friendly_name: true %>
+                  </td>
+                </tr>
+              <% else %>
+                <tr>
+                  <td style="padding-right: 1em">
+                    docker_image:
+                  </td>
+                  <td>
+                    Not run in Docker
+                  </td>
+                </tr>
+              <% end %>
+            </table>
+          </div>
+          <div class="col-md-5">
+            <table>
+              <% [:uuid, :modified_by_user_uuid, :priority, :created_at, :started_at, :finished_at].each do |k| %>
+                <tr>
+                  <td style="padding-right: 1em">
+                    <%= k.to_s %>:
+                  </td>
+                  <td>
+                    <% if k == :uuid %>
+                      <%= link_to_if_arvados_object current_component[k], link_text: current_component[k] %>
+                    <% elsif k.to_s.end_with? 'uuid' %>
+                      <%= link_to_if_arvados_object current_component[k], friendly_name: true %>
+                    <% elsif k.to_s.end_with? '_at' %>
+                      <%= render_localized_date(current_component[k]) %>
+                    <% else %>
+                      <%= current_component[k] %>
+                    <% end %>
+                  </td>
+                </tr>
+              <% end %>
+            </table>
+          </div>
+        </div>
+        <div class="row">
+          <div class="col-md-6">
+            <p>script_parameters:</p>
+            <pre><%= JSON.pretty_generate(current_component[:script_parameters]) rescue nil %></pre>
+          </div>
+        </div>
+      </div>
+    </div>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/pipeline_instances/_show_compare.html.erb b/apps/workbench/app/views/pipeline_instances/_show_compare.html.erb
new file mode 100644 (file)
index 0000000..567bc31
--- /dev/null
@@ -0,0 +1,66 @@
+<% pi_span = [(10.0/[@objects.count,1].max).floor,1].max %>
+
+<div class="headrow pipeline-compare-headrow">
+  <div class="row">
+  <div class="col-sm-2">
+    <%# label %>
+  </div>
+  <% @objects.each do |object| %>
+  <div class="col-sm-<%= pi_span %>" style="overflow-x: hidden; text-overflow: ellipsis;">
+    <%= render :partial => "show_object_button", :locals => {object: object, size: 'sm' } %>
+    <%= object.name || "unnamed #{object.class_for_display.downcase}" %>
+    <br />
+    <span class="deemphasize">Template:</span> <%= link_to_if_arvados_object object.pipeline_template_uuid, friendly_name: true %>
+  </div>
+  <% end %>
+  </div>
+</div>
+
+<% @rows.each do |row| %>
+<div class="row pipeline-compare-row">
+  <div class="col-sm-2">
+    <%= row[:name] %>
+  </div>
+  <% @objects.each_with_index do |_, x| %>
+    <div class="col-sm-<%= pi_span %>">
+      <div class="row">
+        <div class="col-sm-12">
+
+        <% if row[:components][x] %>
+          <% pj = render_pipeline_job row[:components][x] %>
+
+          <%= link_to_if_arvados_object pj[:job_id], {friendly_name: true, with_class_name: true}, {class: 'deemphasize'} %>
+          <br />
+
+          <% %w(script script_version script_parameters output).each do |key| %>
+              <% unless key=='output' and pj[:result] != 'complete' %>
+              <% val = pj[key.to_sym] || pj[:job].andand[key.to_sym] %>
+              <% link_name = case
+                 when !val
+                   val = ''
+                 when key == 'script_version' && val.match(/^[0-9a-f]{7,}$/)
+                   val = val[0..7] # TODO: leave val alone, make link_to handle git commits
+                 when key == 'output'
+                   val.sub! /\+K.*$/, ''
+                   val[0..12]
+                 when key == 'script_parameters'
+                   val = val.keys.sort.join(', ')
+                 end
+                 %>
+              <span class="deemphasize"><%= key %>:</span>&nbsp;<span class="<%= 'notnormal' if !pj[:is_normal][key.to_sym] %>"><%= link_to_if_arvados_object val, {friendly_name: true, link_text: link_name} %></span>
+              <% end %>
+            <br />
+          <% end %>
+          <% else %>
+          None
+        <% end %>
+        </div>
+      </div>
+    </div>
+  <% end %>
+</div>
+<div class="row" style="padding: .5em">
+</div>
+<% end %>
+
+
diff --git a/apps/workbench/app/views/pipeline_instances/_show_components.html.erb b/apps/workbench/app/views/pipeline_instances/_show_components.html.erb
new file mode 100644 (file)
index 0000000..7735997
--- /dev/null
@@ -0,0 +1,19 @@
+<% if !@object.state.in? ['New', 'Ready'] %>
+
+  <%
+     job_uuids = @object.components.map { |k,j| j.is_a? Hash and j[:job].andand[:uuid] }.compact
+     throttle = @object.state.start_with?('Running') ? 5000 : 15000
+     %>
+  <div class="arv-log-refresh-control"
+       data-load-throttle="<%= throttle %>"
+       data-object-uuids="<%= @object.uuid %> <%= job_uuids.join(' ') %>"
+       ></div>
+
+  <%= render_pipeline_components("running", :json) %>
+
+<% else %>
+  <%# state is either New or Ready %>
+  <p><i>Here are all of the pipeline's components (jobs that will need to run in order to complete the pipeline). If you know what you're doing (or you're experimenting) you can modify these parameters before starting the pipeline. Usually, you only need to edit the settings presented on the "Inputs" tab above.</i></p>
+
+  <%= render_pipeline_components("editable", :json, editable: true) %>
+<% end %>
diff --git a/apps/workbench/app/views/pipeline_instances/_show_components_editable.html.erb b/apps/workbench/app/views/pipeline_instances/_show_components_editable.html.erb
new file mode 100644 (file)
index 0000000..f6c9e85
--- /dev/null
@@ -0,0 +1,48 @@
+<table class="table pipeline-components-table" style="margin-top: -.1em">
+  <colgroup>
+    <col style="width: 20%" />
+    <col style="width: 20%" />
+    <col style="width: 20%" />
+    <col style="width: 40%" />
+  </colgroup>
+
+  <thead>
+    <tr>
+      <th>
+        component
+      </th><th>
+        script
+      </th><th>
+        parameter
+      </th><th>
+        value
+      </th>
+    </tr>
+  </thead>
+  <tbody>
+    <% @object.components.each do |k, component| %>
+      <% next if !component %>
+      <tr>
+        <td><%= k %></td>
+
+        <td><%= component[:script] %></td>
+
+        <td>script version</td>
+
+        <td>
+          <%= render_pipeline_component_attribute (editable && @object), :components, [k, :script_version], component[:script_version] %>
+        </td>
+      </tr>
+
+      <% component[:script_parameters].andand.each do |p, tv| %>
+        <tr>
+          <td style="border-top: none"></td>
+          <td style="border-top: none"></td>
+
+          <td class="property-edit-row"><%= p %></td>
+          <td class="property-edit-row"><%= render_pipeline_component_attribute (editable && @object), :components, [k, :script_parameters, p.to_sym], tv %></td>
+        </tr>
+      <% end %>
+    <% end %>
+  </tbody>
+</table>
diff --git a/apps/workbench/app/views/pipeline_instances/_show_components_json.html.erb b/apps/workbench/app/views/pipeline_instances/_show_components_json.html.erb
new file mode 100644 (file)
index 0000000..9d1edbf
--- /dev/null
@@ -0,0 +1,30 @@
+<p>The components of this pipeline are in a format that Workbench does not recognize.</p>
+
+<p>Error encountered: <b><%= error_name %></b></p>
+
+    <div id="components-accordion" class="panel panel-default">
+      <div class="panel-heading">
+        <h4 class="panel-title">
+          <a data-toggle="collapse" data-parent="#components-accordion" href="#components-json">
+            Show components JSON
+          </a>
+        </h4>
+      </div>
+      <div id="components-json" class="panel-collapse collapse">
+        <div class="panel-body">
+          <pre><%= Oj.dump(@object.components, indent: 2) %></pre>
+        </div>
+      </div>
+      <div class="panel-heading">
+        <h4 class="panel-title">
+          <a data-toggle="collapse" data-parent="#components-accordion" href="#components-backtrace">
+            Show backtrace
+          </a>
+        </h4>
+      </div>
+      <div id="components-backtrace" class="panel-collapse collapse">
+        <div class="panel-body">
+          <pre><%= backtrace %></pre>
+        </div>
+      </div>
+    </div>
diff --git a/apps/workbench/app/views/pipeline_instances/_show_components_running.html.erb b/apps/workbench/app/views/pipeline_instances/_show_components_running.html.erb
new file mode 100644 (file)
index 0000000..d99ac23
--- /dev/null
@@ -0,0 +1,86 @@
+<%# Summary %>
+
+<div class="pull-right" style="padding-left: 1em">
+  Current state: <span class="badge badge-info" data-pipeline-state="<%= @object.state %>">
+    <% if @object.state == "RunningOnServer" %>
+      Active
+    <% else %>
+      <%= @object.state %>
+    <% end %>
+  </span>&nbsp;
+</div>
+
+<% pipeline_jobs = render_pipeline_jobs %>
+<% job_uuids = pipeline_jobs.map { |j| j[:job].andand[:uuid] }.compact %>
+
+<% if @object.state == 'Paused' %>
+  <p>
+    This pipeline is paused.  Jobs that are
+    already running will continue to run, but no new jobs will be submitted.
+  </p>
+<% end %>
+
+<% tasks = JobTask.filter([['job_uuid', 'in', job_uuids]]).results %>
+<% runningtime = determine_wallclock_runtime(pipeline_jobs.map {|j| j[:job]}.compact) %>
+
+<p>
+  <% if @object.started_at %>
+    This pipeline started at <%= render_localized_date(@object.started_at) %>.
+    It
+    <% if @object.state == 'Complete' %>
+      completed in
+    <% elsif @object.state == 'Failed' %>
+      failed after
+    <% else %>
+      has been active for
+    <% end %>
+
+    <% walltime = if @object.finished_at then
+                    @object.finished_at - @object.started_at
+                  else
+                    Time.now - @object.started_at
+                  end %>
+
+    <%= if walltime > runningtime
+          render_runtime(walltime, true, false)
+        else
+          render_runtime(runningtime, true, false)
+        end %><% if @object.finished_at %> at <%= render_localized_date(@object.finished_at) %><% end %>.
+    <% else %>
+      This pipeline is <%= if @object.state.start_with? 'Running' then 'active' else @object.state.downcase end %>.
+        <% walltime = 0%>
+    <% end %>
+
+  <% if @object.state == 'Failed' %>
+    Check the Log tab for more detail about why this pipeline failed.
+  <% end %>
+</p>
+
+<p>
+    This pipeline
+    <% if @object.state.start_with? 'Running' %>
+      has run
+    <% else %>
+      ran
+    <% end %>
+    for
+    <% cputime = tasks.map { |task|
+         if task.started_at
+           (if task.finished_at then task.finished_at else Time.now() end) - task.started_at
+           else
+         0
+       end
+       }.reduce(:+) || 0 %>
+    <%= render_runtime(runningtime, true, false) %><% if (walltime - runningtime) > 0 %>
+      (<%= render_runtime(walltime - runningtime, true, false) %> queued)<% end %><% if cputime == 0 %>.<% else %>
+      and used
+    <%= render_runtime(cputime, true, false) %>
+    of CPU time (<%= (cputime/runningtime).round(1) %>&Cross; scaling).
+    <% end %>
+</p>
+
+<%# Components %>
+
+<% pipeline_jobs.each_with_index do |pj, i| %>
+  <%= render partial: 'running_component', locals: {tasks: tasks, pj: pj, i: i, expanded: false} %>
+<% end %>
diff --git a/apps/workbench/app/views/pipeline_instances/_show_graph.html.erb b/apps/workbench/app/views/pipeline_instances/_show_graph.html.erb
new file mode 100644 (file)
index 0000000..5034b28
--- /dev/null
@@ -0,0 +1,15 @@
+<% if @pipelines.count > 1 %>
+  <div style="text-align: center; padding-top: 0.5em">
+    <span class="pipeline_color_legend" style="background: #aaffaa"><%= link_to_if_arvados_object @pipelines[0], friendly_name: true %></span>
+    <span class="pipeline_color_legend" style="background: #aaaaff"><%= link_to_if_arvados_object @pipelines[1], friendly_name: true %></span>
+    <% if @pipelines.count > 2 %>
+    <span class="pipeline_color_legend" style="background: #ffaaaa"><%= link_to_if_arvados_object @pipelines[2], friendly_name: true %></span>
+    <% end %>
+    <span class="pipeline_color_legend" style="background: #aaaaaa">Common to <%= @pipelines.count > 2 ? 'multiple' : 'both' %> pipelines</span>
+  </div>
+<% end %>
+
+<%= render partial: 'application/svg_div', locals: {
+      divId: "provenance_graph", 
+      svgId: "provenance_svg", 
+      svg: @prov_svg } %>
diff --git a/apps/workbench/app/views/pipeline_instances/_show_inputs.html.erb b/apps/workbench/app/views/pipeline_instances/_show_inputs.html.erb
new file mode 100644 (file)
index 0000000..65d458b
--- /dev/null
@@ -0,0 +1,49 @@
+<% n_inputs = 0 %>
+
+<% content_for :pi_input_form do %>
+<form role="form" style="width:60%">
+  <div class="form-group">
+    <% @object.components.each do |cname, component| %>
+      <% next if !component %>
+      <% component[:script_parameters].andand.each do |pname, pvalue_spec| %>
+        <% if pvalue_spec.is_a? Hash %>
+          <% if pvalue_spec[:description] or
+                pvalue_spec[:required] or pvalue_spec[:optional] == false %>
+            <% n_inputs += 1 %>
+            <label for="<%= "#{cname}-#{pname}" %>">
+              <%= @object.component_input_title(cname, pname) %>
+            </label>
+            <div>
+              <p class="form-control-static">
+                <%= render_pipeline_component_attribute @object, :components, [cname, :script_parameters, pname.to_sym], pvalue_spec %>
+              </p>
+            </div>
+            <p class="help-block">
+              <%= pvalue_spec[:description] %>
+            </p>
+          <% end %>
+        <% end %>
+      <% end %>
+    <% end %>
+  </div>
+</form>
+<% end %>
+
+<% if n_inputs == 0 %>
+  <p>This pipeline does not need any further inputs specified. You can start it by clicking the "Run" button whenever you're ready. (It's not too late to change existing settings, though.)</p>
+<% else %>
+  <p><i>Provide <%= n_inputs > 1 ? 'values' : 'a value' %> for the following <%= n_inputs > 1 ? 'parameters' : 'parameter' %>, then click the "Run" button to start the pipeline.</i></p>
+  <%= content_for :pi_input_form %>
+
+  <%= link_to(url_for('pipeline_instance[state]' => 'RunningOnServer'),
+      class: 'btn btn-primary run-pipeline-button',
+      method: :patch
+      ) do %>
+    Run <i class="fa fa-fw fa-play"></i>
+  <% end %>
+
+<% end %>
+
+<div style="margin-top: 1em;">
+  <p>Click the "Components" tab above to see a full list of pipeline settings.</p>
+</div>
diff --git a/apps/workbench/app/views/pipeline_instances/_show_log.html.erb b/apps/workbench/app/views/pipeline_instances/_show_log.html.erb
new file mode 100644 (file)
index 0000000..bb756a0
--- /dev/null
@@ -0,0 +1,12 @@
+<% log_uuids = [@object.uuid] + pipeline_jobs(@object).collect{|x|x[:job].andand[:uuid]}.compact %>
+<% log_history = stderr_log_history(log_uuids) %>
+<div id="event_log_div"
+     class="arv-log-event-listener arv-log-event-handler-append-logs arv-log-event-subscribe-to-pipeline-job-uuids arv-job-log-window"
+     data-object-uuids="<%= log_uuids.join(' ') %>"
+     ><%= log_history.join("\n") %></div>
+
+<%# Applying a long throttle suppresses the auto-refresh of this
+    partial that would normally be triggered by arv-log-event. %>
+<div class="arv-log-refresh-control"
+     data-load-throttle="86486400000" <%# 1001 nights %>
+     ></div>
diff --git a/apps/workbench/app/views/pipeline_instances/_show_object_description_cell.html.erb b/apps/workbench/app/views/pipeline_instances/_show_object_description_cell.html.erb
new file mode 100644 (file)
index 0000000..38f51a3
--- /dev/null
@@ -0,0 +1,4 @@
+<div class="nowrap">
+  <%= object.content_summary %><br />
+  <%= render partial: 'pipeline_instances/component_labels', locals: {object: object} %>
+</div>
diff --git a/apps/workbench/app/views/pipeline_instances/_show_recent.html.erb b/apps/workbench/app/views/pipeline_instances/_show_recent.html.erb
new file mode 100644 (file)
index 0000000..7d1fd39
--- /dev/null
@@ -0,0 +1,37 @@
+<%= form_tag({}, {id: "comparedInstances"}) do |f| %>
+
+<table class="table table-condensed table-fixedlayout arv-recent-pipeline-instances">
+  <colgroup>
+    <col width="5%" />
+    <col width="15%" />
+    <col width="25%" />
+    <col width="20%" />
+    <col width="15%" />
+    <col width="15%" />
+    <col width="5%" />
+  </colgroup>
+  <thead>
+    <tr class="contain-align-left">
+      <th>
+      </th><th>
+       Status
+      </th><th>
+       Instance
+      </th><th>
+       Template
+      </th><th>
+       Owner
+      </th><th>
+       Created at
+      </th><th>
+      </th>
+    </tr>
+  </thead>
+
+  <tbody data-infinite-scroller="#recent-pipeline-instances" id="recent-pipeline-instances"
+         data-infinite-content-href="<%= url_for partial: :recent_rows %>" >
+  </tbody>
+
+</table>
+
+<% end %>
diff --git a/apps/workbench/app/views/pipeline_instances/_show_recent_rows.html.erb b/apps/workbench/app/views/pipeline_instances/_show_recent_rows.html.erb
new file mode 100644 (file)
index 0000000..f369c55
--- /dev/null
@@ -0,0 +1,32 @@
+<% @objects.sort_by { |ob| ob.created_at }.reverse.each do |ob| %>
+    <tr data-object-uuid="<%= ob.uuid %>" data-kind="<%= ob.kind %>" >
+      <td>
+        <%= check_box_tag 'uuids[]', ob.uuid, false, :class => 'persistent-selection' %>
+      </td><td>
+        <%= render partial: 'pipeline_status_label', locals: {:p => ob} %>
+      </td><td colspan="1">
+        <%= link_to_if_arvados_object ob, friendly_name: true %>
+      </td><td>
+        <%= link_to_if_arvados_object ob.pipeline_template_uuid, friendly_name: true %>
+      </td><td>
+        <%= link_to_if_arvados_object ob.owner_uuid, friendly_name: true %>
+      </td><td>
+        <%= ob.created_at.to_s %>
+      </td><td>
+        <%= render partial: 'delete_object_button', locals: {object:ob} %>
+      </td>
+    </tr>
+    <tr data-object-uuid="<%= ob.uuid %>">
+      <td style="border-top: 0;" colspan="2">
+      </td>
+      <td style="border-top: 0; opacity: 0.5;" colspan="6">
+        <% ob.components.each do |cname, c| %>
+          <% if c.is_a?(Hash) and c[:job] %>
+            <%= render partial: "job_progress", locals: {:j => c[:job], :title => cname.to_s, :show_progress_bar => false } %>
+          <% else %>
+            <span class="label label-default"><%= cname.to_s %></span>
+          <% end %>
+        <% end %>
+      </td>
+    </tr>
+<% end %>
diff --git a/apps/workbench/app/views/pipeline_instances/_show_tab_buttons.html.erb b/apps/workbench/app/views/pipeline_instances/_show_tab_buttons.html.erb
new file mode 100644 (file)
index 0000000..38a7c91
--- /dev/null
@@ -0,0 +1,45 @@
+  <% if @object.state.in? ['Complete', 'Failed', 'Cancelled', 'Paused'] %>
+
+  <%= link_to(copy_pipeline_instance_path('id' => @object.uuid, 'script' => "use_latest", "components" => "use_latest", "pipeline_instance[state]" => "RunningOnServer"),
+      class: 'btn btn-primary',
+      title: 'Re-run with latest options',
+      #data: {toggle: :tooltip, placement: :top}, title: 'Re-run',
+      method: :post,
+      ) do %>
+    <i class="fa fa-fw fa-play"></i> Re-run with latest
+  <% end %>
+
+  <%= link_to raw('<i class="fa fa-fw fa-cogs"></i> Re-run options...'),
+      "#",
+      {class: 'btn btn-primary', 'data-toggle' =>  "modal",
+        'data-target' => '#clone-and-edit-modal-window',
+        title: 'Re-run with options'}  %>
+  <% end %>
+
+  <% if @object.state.in? ['New', 'Ready'] %>
+    <%= link_to(url_for('pipeline_instance[state]' => 'RunningOnServer'),
+        class: 'btn btn-primary run-pipeline-button',
+        title: 'Run this pipeline',
+        method: :patch
+        ) do %>
+      <i class="fa fa-fw fa-play"></i> Run
+    <% end %>
+  <% else %>
+    <% if @object.state.in? ['RunningOnClient', 'RunningOnServer'] %>
+      <%= link_to(url_for('pipeline_instance[state]' => 'Paused'),
+          class: 'btn btn-primary run-pipeline-button',
+          title: 'Pause this pipeline',
+          method: :patch
+          ) do %>
+        <i class="fa fa-fw fa-pause"></i> Pause
+      <% end %>
+    <% elsif @object.state == 'Paused' %>
+      <%= link_to(url_for('pipeline_instance[state]' => 'RunningOnServer'),
+          class: 'btn btn-primary run-pipeline-button',
+          title: 'Resume this pipeline',
+          method: :patch
+          ) do %>
+        <i class="fa fa-fw fa-play"></i> Resume
+      <% end %>
+    <% end %>
+  <% end %>
diff --git a/apps/workbench/app/views/pipeline_instances/compare.html.erb b/apps/workbench/app/views/pipeline_instances/compare.html.erb
new file mode 100644 (file)
index 0000000..99caf9b
--- /dev/null
@@ -0,0 +1,15 @@
+<% if (o = Group.find?(@objects.first.owner_uuid)) %>
+  <% content_for :breadcrumbs do %>
+    <li class="nav-separator"><span class="glyphicon glyphicon-arrow-right"></span></li>
+    <li>
+      <%= link_to(o.name, project_path(o.uuid)) %>
+    </li>
+    <li class="nav-separator">
+      <span class="glyphicon glyphicon-arrow-right"></span>
+    </li>
+    <li>
+      <%= link_to '#' do %>compare pipelines<% end %>
+    </li>
+  <% end %>
+<% end %>
+<%= render partial: 'content', layout: 'content_layout', locals: {pane_list: controller.compare_pane_list }  %>
diff --git a/apps/workbench/app/views/pipeline_instances/index.html.erb b/apps/workbench/app/views/pipeline_instances/index.html.erb
new file mode 100644 (file)
index 0000000..1181b3e
--- /dev/null
@@ -0,0 +1,17 @@
+<% content_for :tab_line_buttons do %>
+  <div class="input-group">
+    <input type="text" class="form-control filterable-control recent-pipeline-instances-filterable-control"
+           placeholder="Search pipeline instances"
+           data-filterable-target="#recent-pipeline-instances"
+           <%# Just for the double-load test in FilterableInfiniteScrollTest: %>
+           value="<%= params[:search] %>"
+           />
+  </div>
+
+  <%= form_tag({action: 'compare', controller: params[:controller], method: 'get'}, {method: 'get', id: 'compare', class: 'pull-right small-form-margin'}) do |f| %>
+    <%= submit_tag 'Compare 2 or 3 selected', {class: 'btn btn-primary', disabled: true} %>
+  <% end rescue nil %>
+
+<% end %>
+
+<%= render file: 'application/index.html.erb', locals: local_assigns %>
diff --git a/apps/workbench/app/views/pipeline_instances/show.html.erb b/apps/workbench/app/views/pipeline_instances/show.html.erb
new file mode 100644 (file)
index 0000000..860e809
--- /dev/null
@@ -0,0 +1,73 @@
+<% template = PipelineTemplate.find?(@object.pipeline_template_uuid) %>
+<%= content_for :content_top do %>
+  <div class="row">
+    <div class="col-sm-6">
+      <%= render partial: 'name_and_description' %>
+    </div>
+    <% if template %>
+      <div class="alert alert-info col-sm-6">
+        This pipeline was created from the template <%= link_to_if_arvados_object template, friendly_name: true %><br />
+        <% if template.modified_at && (template.modified_at > @object.created_at) %>
+        Note: This template has been modified since this instance was created.
+        <% end %>
+      </div>
+    <% end %>
+  </div>
+<% end %>
+
+<% content_for :tab_line_buttons do %>
+
+  <div id="pipeline-instance-tab-buttons"
+       class="pane-loaded arv-log-event-listener arv-refresh-on-state-change"
+       data-pane-content-url="<%= url_for(params.merge(tab_pane: "tab_buttons")) %>"
+       data-object-uuid="<%= @object.uuid %>"
+       >
+    <%= render partial: 'show_tab_buttons', locals: {object: @object}%>
+  </div>
+
+<% end %>
+
+<%= render partial: 'content', layout: 'content_layout', locals: {pane_list: controller.show_pane_list }%>
+
+<div id="clone-and-edit-modal-window" class="modal fade" role="dialog"
+     aria-labelledby="myModalLabel" aria-hidden="true">
+  <div class="modal-dialog">
+    <div class="modal-content">
+
+    <%= form_tag copy_pipeline_instance_path do |f| %>
+
+      <div class="modal-header">
+        <button type="button" class="close" onClick="reset_form()" data-dismiss="modal" aria-hidden="true">&times;</button>
+        <div>
+          <div class="col-sm-6"> <h4 class="modal-title">Re-run pipeline</h4> </div>
+        </div>
+        <br/>
+      </div>
+
+      <div class="modal-body">
+              <%= radio_button_tag(:script, "use_latest", true) %>
+              <%= label_tag(:script_use_latest, "Use latest script versions") %>
+              <br>
+              <%= radio_button_tag(:script, "use_same") %>
+              <%= label_tag(:script_use_same, "Use same script versions as this run") %>
+              <br>
+              <% if template %>
+              <br>
+              <%= radio_button_tag(:components, "use_latest", true) %>
+              <%= label_tag(:components_use_latest, "Update components against template") %>
+              <br>
+              <%= radio_button_tag(:components, "use_same") %>
+              <%= label_tag(:components_use_same, "Use same components as this run") %>
+              <% end %>
+      </div>
+
+      <div class="modal-footer">
+        <button type="submit" class="btn btn-primary" name="pipeline_instance[state]" value="New">Copy and edit inputs</button>
+        <button type="submit" class="btn btn-primary" name="pipeline_instance[state]" value="RunningOnServer">Run now</button>
+        <button class="btn btn-default" onClick="reset_form()" data-dismiss="modal" aria-hidden="true">Cancel</button>
+      </div>
+
+    </div>
+    <% end %>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/pipeline_instances/show.js.erb b/apps/workbench/app/views/pipeline_instances/show.js.erb
new file mode 100644 (file)
index 0000000..8ab1e8f
--- /dev/null
@@ -0,0 +1,15 @@
+<% self.formats = [:html] %>
+var new_content = "<%= escape_javascript(render template: 'pipeline_instances/show') %>";
+var selected_tab_hrefs = [];
+if ($('div#page-wrapper').html() != new_content) {
+    $('.nav-tabs li.active a').each(function() {
+        selected_tab_hrefs.push($(this).attr('href'));
+    });
+
+    $('div#page-wrapper').html(new_content);
+
+    // Show the same tabs that were active before we rewrote page-wrapper
+    $.each(selected_tab_hrefs, function(i, href) {
+        $('.nav-tabs li a[href="' + href + '"]').tab('show');
+    });
+}
diff --git a/apps/workbench/app/views/pipeline_templates/_choose.js.erb b/apps/workbench/app/views/pipeline_templates/_choose.js.erb
new file mode 120000 (symlink)
index 0000000..8420a7f
--- /dev/null
@@ -0,0 +1 @@
+../application/_choose.js.erb
\ No newline at end of file
diff --git a/apps/workbench/app/views/pipeline_templates/_choose_rows.html.erb b/apps/workbench/app/views/pipeline_templates/_choose_rows.html.erb
new file mode 100644 (file)
index 0000000..9b96b47
--- /dev/null
@@ -0,0 +1,8 @@
+<% @objects.each do |object| %>
+  <div class="row filterable selectable" data-object-uuid="<%= object.uuid %>" data-preview-href="<%= url_for object %>?tab_pane=chooser_preview">
+    <div class="col-sm-12" style="overflow-x:hidden">
+      <i class="fa fa-fw fa-gear"></i>
+      <%= object.name %>
+    </div>
+  </div>
+<% end %>
diff --git a/apps/workbench/app/views/pipeline_templates/_show_attributes.html.erb b/apps/workbench/app/views/pipeline_templates/_show_attributes.html.erb
new file mode 100644 (file)
index 0000000..cc95b9d
--- /dev/null
@@ -0,0 +1,15 @@
+<%= content_for :content_top do %>
+  <h2>Template '<%= @object.name %>'</h2>
+<% end %>
+
+<table class="table topalign">
+  <thead>
+  </thead>
+  <tbody>
+    <% @object.attributes_for_display.each do |attr, attrvalue| %>
+      <% if attr != 'components' %>
+        <%= render partial: 'application/arvados_object_attr', locals: { attr: attr, attrvalue: attrvalue } %>
+      <% end %>
+    <% end %>
+  </tbody>
+</table>
diff --git a/apps/workbench/app/views/pipeline_templates/_show_chooser_preview.html.erb b/apps/workbench/app/views/pipeline_templates/_show_chooser_preview.html.erb
new file mode 100644 (file)
index 0000000..f8c65ba
--- /dev/null
@@ -0,0 +1,4 @@
+<div class="col-sm-11 col-sm-push-1 arv-description-in-table">
+  <%= @object.description %>
+</div>
+<%= render partial: 'show_components' %>
diff --git a/apps/workbench/app/views/pipeline_templates/_show_components.html.erb b/apps/workbench/app/views/pipeline_templates/_show_components.html.erb
new file mode 100644 (file)
index 0000000..cd03a5c
--- /dev/null
@@ -0,0 +1 @@
+<%= render_pipeline_components("editable", :json, editable: false) %>
diff --git a/apps/workbench/app/views/pipeline_templates/_show_pipelines.html.erb b/apps/workbench/app/views/pipeline_templates/_show_pipelines.html.erb
new file mode 100644 (file)
index 0000000..8ff42a7
--- /dev/null
@@ -0,0 +1,2 @@
+
+  <%= render partial: 'pipeline_instances/show_recent' %>
diff --git a/apps/workbench/app/views/pipeline_templates/_show_recent.html.erb b/apps/workbench/app/views/pipeline_templates/_show_recent.html.erb
new file mode 100644 (file)
index 0000000..a1749a7
--- /dev/null
@@ -0,0 +1,68 @@
+<%= render partial: "paging", locals: {results: @objects, object: @object} %>
+
+<table class="table table-condensed arv-index">
+  <colgroup>
+    <col width="8%" />
+    <col width="10%" />
+    <col width="22%" />
+    <col width="45%" />
+    <col width="15%" />
+  </colgroup>
+  <thead>
+    <tr class="contain-align-left">
+      <th>
+      </th><th>
+      </th><th>
+        name
+      </th><th>
+        description/components
+      </th><th>
+        owner
+      </th>
+    </tr>
+  </thead>
+  <tbody>
+
+    <% @objects.sort_by { |ob| ob[:created_at] }.reverse.each do |ob| %>
+
+    <tr>
+      <td>
+        <%= button_to(choose_projects_path(id: "run-pipeline-button",
+                                     title: 'Choose project',
+                                     editable: true,
+                                     action_name: 'Choose',
+                                     action_href: pipeline_instances_path,
+                                     action_method: 'post',
+                                     action_data: {selection_param: 'pipeline_instance[owner_uuid]',
+                                                   'pipeline_instance[pipeline_template_uuid]' => ob.uuid,
+                                                   'pipeline_instance[description]' => "Created at #{Time.now.localtime}" + (ob.name.andand.size.andand>0 ? " using the pipeline template *#{ob.name}*" : ""),
+                                                   'success' => 'redirect-to-created-object'
+                                                  }.to_json),
+                { class: "btn btn-default btn-xs", title: "Run #{ob.name}", remote: true, method: :get }
+            ) do %>
+               <i class="fa fa-fw fa-play"></i> Run
+              <% end %>
+      </td>
+      <td>
+        <%= render :partial => "show_object_button", :locals => {object: ob, size: 'xs'} %>
+      </td><td>
+        <%= render_editable_attribute ob, 'name' %>
+      </td><td>
+        <% if ob.respond_to?(:description) and ob.description %>
+          <%= render_attribute_as_textile(ob, "description", ob.description, false) %>
+          <br />
+        <% end %>
+        <% ob.components.collect { |k,v| k.to_s }.each do |k| %>
+          <span class="label label-default"><%= k %></span>
+        <% end %>
+      </td><td>
+        <%= link_to_if_arvados_object ob.owner_uuid, friendly_name: true %>
+      </td>
+    </tr>
+
+    <% end %>
+
+  </tbody>
+</table>
+
+<%= render partial: "paging", locals: {results: @objects, object: @object} %>
diff --git a/apps/workbench/app/views/pipeline_templates/show.html.erb b/apps/workbench/app/views/pipeline_templates/show.html.erb
new file mode 100644 (file)
index 0000000..0faa48f
--- /dev/null
@@ -0,0 +1,19 @@
+<% content_for :tab_line_buttons do %>
+  <%= link_to(choose_projects_path(id: "run-pipeline-button",
+                                     title: 'Choose project',
+                                     editable: true,
+                                     action_name: 'Choose',
+                                     action_href: pipeline_instances_path,
+                                     action_method: 'post',
+                                     action_data: {selection_param: 'pipeline_instance[owner_uuid]',
+                                                   'pipeline_instance[pipeline_template_uuid]' => @object.uuid,
+                                                   'pipeline_instance[description]' => "Created at #{Time.now.localtime}" + (@object.name.andand.size.andand>0 ? " using the pipeline template *#{@object.name}*" : ""),
+                                                   'success' => 'redirect-to-created-object'
+                                                  }.to_json),
+                { class: "btn btn-primary btn-sm", remote: true, title: 'Run this pipeline' }
+               ) do %>
+                   <i class="fa fa-gear"></i> Run this pipeline
+                 <% end %>
+<% end %>
+
+<%= render file: 'application/show.html.erb', locals: local_assigns %>
diff --git a/apps/workbench/app/views/projects/_choose.html.erb b/apps/workbench/app/views/projects/_choose.html.erb
new file mode 100644 (file)
index 0000000..c0759ed
--- /dev/null
@@ -0,0 +1,49 @@
+<div class="modal modal-with-loading-spinner">
+  <div class="modal-dialog">
+    <div class="modal-content">
+
+      <div class="modal-header">
+        <button type="button" class="close" onClick="reset_form()" data-dismiss="modal" aria-hidden="true">&times;</button>
+        <div>
+          <div class="col-sm-6"> <h4 class="modal-title"><%= params[:title] || 'Choose project' %></h4> </div>
+          <div class="spinner spinner-32px spinner-h-center col-sm-1" hidden="true"></div>
+        </div>
+        <br/>
+      </div>
+
+      <div class="modal-body">
+        <div class="selectable-container" style="height: 15em; overflow-y: scroll">
+          <% [my_project_tree, shared_project_tree].each do |tree| %>
+            <% tree.each do |projectnode| %>
+              <% if projectnode[:object].is_a? String %>
+                <div class="row" style="padding-left: <%= 1 + projectnode[:depth] %>em; margin-right: 0px">
+                  <i class="fa fa-fw fa-share-alt"></i>
+                  <%= projectnode[:object] %>
+                </div>
+              <% else
+                 row_selectable = !params[:editable] || projectnode[:object].editable?
+                 if projectnode[:object].uuid == current_user.uuid
+                   row_name = "Home"
+                   row_selectable = true
+                 else
+                   row_name = projectnode[:object].friendly_link_name || 'New project'
+                 end %>
+                <div class="<%= 'selectable project' if row_selectable %> row"
+                     style="padding-left: <%= 1 + projectnode[:depth] %>em; margin-right: 0px" data-object-uuid="<%= projectnode[:object].uuid %>">
+                  <i class="fa fa-fw fa-folder-o"></i> <%= row_name %>
+                </div>
+              <% end %>
+            <% end %>
+          <% end %>
+        </div>
+      </div>
+
+      <div class="modal-footer">
+        <button class="btn btn-default" data-dismiss="modal" aria-hidden="true">Cancel</button>
+        <button class="btn btn-primary" aria-hidden="true" data-enable-if-selection disabled><%= params[:action_name] || 'Select' %></button>
+        <div class="modal-error hide" style="text-align: left; margin-top: 1em;">
+        </div>
+      </div>
+    </div>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/projects/_choose.js.erb b/apps/workbench/app/views/projects/_choose.js.erb
new file mode 120000 (symlink)
index 0000000..8420a7f
--- /dev/null
@@ -0,0 +1 @@
+../application/_choose.js.erb
\ No newline at end of file
diff --git a/apps/workbench/app/views/projects/_compute_node_status.html.erb b/apps/workbench/app/views/projects/_compute_node_status.html.erb
new file mode 100644 (file)
index 0000000..527dc64
--- /dev/null
@@ -0,0 +1,61 @@
+<h4>Queue</h4>
+<% queue = Job.queue %>
+<% if queue.any? %>
+
+<% queue.each do |j| %>
+  <div class="row">
+    <div class="col-md-3 text-overflow-ellipsis">
+      <%= link_to_if_arvados_object j, friendly_name: true %>
+    </div>
+    <div class="col-md-4">
+      <%= render_localized_date(j[:created_at]) %>
+    </div>
+    <div class="col-md-3">
+      <%= render_runtime(Time.now - j[:created_at], false) %>
+    </div>
+    <div class="col-md-2">
+      <%= j[:priority] %>
+    </div>
+  </div>
+<% end %>
+  <div class="row">
+    <div class="col-md-3">
+      <b>Job</b>
+    </div>
+    <div class="col-md-4">
+      <b>Submitted</b>
+    </div>
+    <div class="col-md-3">
+      <b>Queued</b>
+    </div>
+    <div class="col-md-2">
+      <b>Priority</b>
+    </div>
+  </div>
+  <% if Job.queue_size > queue.size %>
+    <i>Note: some items in the queue are not visible to you.</i>
+  <% end %>
+  <div>    
+  </div>
+<% else %>
+  There are currently no jobs in your queue.
+<% end %>
+
+<h4>Node status</h4>
+<div class="compute-summary-nodelist">
+    <% nodes.sort_by { |n| n.hostname || "" }.each do |n| %>
+      <% if n.crunch_worker_state.in? ["busy", "idle"] and (Time.now - n[:last_ping_at]) < 3600 %>
+        <div class="compute-summary">
+          <a data-toggle="collapse" href="#detail_<%= n.hostname %>" class="compute-summary-head label label-<%= if n.crunch_worker_state == 'busy' then 'primary' else 'default' end %>">
+            <%= n.hostname %>
+          </a>
+          <div id="detail_<%= n.hostname %>" class="collapse compute-detail">
+            state: <%= n.crunch_worker_state %><br>
+            <% [:total_cpu_cores, :total_ram_mb, :total_scratch_mb].each do |i| %>
+              <%= i.to_s.gsub '_', ' ' %>: <%= n.info[i] %><br>
+            <% end %>
+          </div>
+        </div>
+      <% end %>
+    <% end %>
+</div>
diff --git a/apps/workbench/app/views/projects/_compute_node_summary.html.erb b/apps/workbench/app/views/projects/_compute_node_summary.html.erb
new file mode 100644 (file)
index 0000000..7d6a4fb
--- /dev/null
@@ -0,0 +1,19 @@
+<div class="compute-summary-numbers">
+    <table>
+      <colgroup>
+        <col width="25%">
+        <col width="25%">
+        <col width="25%">
+      </colgroup>
+      <tr>
+        <td><%= Job.queue_size %></td>
+        <td><%= nodes.select {|n| n.crunch_worker_state == "busy" }.size %></td>
+        <td><%= nodes.select {|n| n.crunch_worker_state == "idle" }.size %></td>
+      </tr>
+      <tr>
+        <th>Queued jobs</th>
+        <th>Busy nodes</th>
+        <th>Idle nodes</th>
+      </tr>
+    </table>
+</div>
diff --git a/apps/workbench/app/views/projects/_index_jobs_and_pipelines.html.erb b/apps/workbench/app/views/projects/_index_jobs_and_pipelines.html.erb
new file mode 100644 (file)
index 0000000..fb9a305
--- /dev/null
@@ -0,0 +1,26 @@
+<div>
+  <% any = false %>
+  <% recent_jobs_and_pipelines[0..9].each do |object| %>
+    <% any = true %>
+    <div class="row" style="height: 4.5em">
+      <div class="col-sm-4">
+        <%= render :partial => "show_object_button", :locals => {object: object, size: 'xs'} %>
+        <% if object.respond_to?(:name) %>
+          <%= render_editable_attribute object, 'name', nil, {tiptitle: 'rename'} %>
+        <% else %>
+          <%= object.class_for_display %> <%= object.uuid %>
+        <% end %>
+      </div>
+      <div class="col-sm-8 arv-description-in-table">
+        <%= render_controller_partial(
+            'show_object_description_cell.html',
+            controller_name: object.controller_name,
+            locals: {object: object})
+            %>
+      </div>
+    </div>
+  <% end %>
+  <% if not any %>
+    <span class="deemphasize">No jobs or pipelines to display.</span>
+  <% end %>
+</div>
diff --git a/apps/workbench/app/views/projects/_index_projects.html.erb b/apps/workbench/app/views/projects/_index_projects.html.erb
new file mode 100644 (file)
index 0000000..bf92f34
--- /dev/null
@@ -0,0 +1,32 @@
+<div class="container-fluid arv-project-list">
+  <% tree.each do |projectnode| %>
+    <% rowtype = projectnode[:object].class %>
+    <% next if rowtype != Group and !show_root_node %>
+    <div class="<%= 'project' if rowtype.in?([Group,User]) %> row">
+      <div class="col-md-4" style="padding-left: <%= projectnode[:depth] - (show_root_node ? 0 : 1) %>em;">
+        <% if show_root_node and rowtype == String %>
+          <i class="fa fa-fw fa-share-alt"></i>
+          <%= projectnode[:object] %>
+        <% elsif show_root_node and rowtype == User %>
+          <% if projectnode[:object].uuid == current_user.andand.uuid %>
+            <i class="fa fa-fw fa-folder-o"></i>
+            <%= link_to project_path(id: projectnode[:object].uuid) do %>
+              Home
+            <% end %>
+          <% else %>
+            <i class="fa fa-fw fa-folder-o"></i>
+            <%= projectnode[:object].friendly_link_name %>
+          <% end %>
+        <% elsif rowtype == Group %>
+          <i class="fa fa-fw fa-folder-o"></i>
+          <%= link_to projectnode[:object] do %>
+            <%= projectnode[:object].friendly_link_name %>
+          <% end %>
+        <% end %>
+      </div>
+      <% if projectnode[:object].respond_to?(:description) and not projectnode[:object].description.blank? %>
+        <div class="col-md-8 small"><%= render_attribute_as_textile(projectnode[:object], "description", projectnode[:object].description, true) %></div>
+      <% end %>
+    </div>
+  <% end %>
+</div>
diff --git a/apps/workbench/app/views/projects/_show_contents_rows.html.erb b/apps/workbench/app/views/projects/_show_contents_rows.html.erb
new file mode 100644 (file)
index 0000000..e1996a7
--- /dev/null
@@ -0,0 +1,40 @@
+<% get_objects_and_names.each do |object, name_link| %>
+  <% name_object = (object.respond_to?(:name) || !name_link) ? object : name_link %>
+  <tr class="filterable"
+      data-object-uuid="<%= name_object.uuid %>"
+      data-kind="<%= object.kind %>"
+      data-object-created-at="<%= object.created_at %>"
+      >
+    <td>
+      <div style="width:1em; display:inline-block;">
+        <%= render partial: 'selection_checkbox', locals: {object: name_object, friendly_name: ((name_object.name rescue '') || '')} %>
+      </div>
+    </td>
+
+    <td>
+      <% if @object.editable? %>
+        <%= link_to({action: 'remove_item', id: @object.uuid, item_uuid: ((name_link && name_link.uuid) || object.uuid)}, method: :delete, remote: true, data: {confirm: "Remove #{object.class_for_display.downcase} #{name_object.name rescue object.uuid} from this project?", toggle: 'tooltip', placement: 'top'}, class: 'btn btn-sm btn-default btn-nodecorate', title: 'remove') do %>
+          <i class="fa fa-fw fa-trash-o"></i>
+        <% end %>
+      <% else %>
+        <i class="fa fa-fw"></i><%# placeholder %>
+      <% end %>
+    </td>
+
+    <td>
+      <%= render :partial => "show_object_button", :locals => {object: object, size: 'sm', name_link: name_link} %>
+    </td>
+
+    <td>
+      <%= render_editable_attribute (name_link || object), 'name', nil, {tiptitle: 'rename'} %>
+    </td>
+
+    <td class="arv-description-in-table">
+      <%= render_controller_partial(
+          'show_object_description_cell.html',
+          controller_name: object.controller_name,
+          locals: {object: object})
+          %>
+    </td>
+  </tr>
+<% end %>
diff --git a/apps/workbench/app/views/projects/_show_dashboard.html.erb b/apps/workbench/app/views/projects/_show_dashboard.html.erb
new file mode 100644 (file)
index 0000000..1fbe505
--- /dev/null
@@ -0,0 +1,178 @@
+
+  <div class="row">
+    <div class="col-md-6">
+      <div class="panel panel-default" style="min-height: 10.5em">
+        <div class="panel-heading"><span class="panel-title">Active pipelines</span>
+          <span class="pull-right">
+    <%= link_to(
+          choose_pipeline_templates_path(
+            title: 'Choose a pipeline to run:',
+            action_name: 'Next: choose inputs <i class="fa fa-fw fa-arrow-circle-right"></i>',
+            action_href: pipeline_instances_path,
+            action_method: 'post',
+            action_data: {'selection_param' => 'pipeline_instance[pipeline_template_uuid]', 'pipeline_instance[owner_uuid]' => current_user.uuid, 'success' => 'redirect-to-created-object'}.to_json),
+          { class: "btn btn-primary btn-xs", remote: true }) do %>
+      <i class="fa fa-fw fa-gear"></i> Run a pipeline...
+    <% end %>
+    </span>
+        </div>
+
+        <% _running_pipelines = running_pipelines %>
+        <% _finished_pipelines = finished_pipelines(8) %>
+        <% lookup = preload_objects_for_dataclass PipelineTemplate, (_running_pipelines.map(&:pipeline_template_uuid) + _finished_pipelines.map(&:pipeline_template_uuid)) %>
+
+        <div class="panel-body">
+          <% if _running_pipelines.empty? %>
+            No pipelines are currently running.
+          <% else %>
+          <% _running_pipelines.each do |p| %>
+            <div class="dashboard-panel-info-row">
+              <div class="clearfix">
+                <%= link_to_if_arvados_object p, {friendly_name: true, lookup: lookup} %>
+
+                <div class="pull-right" style="width: 40%">
+                  <div class="progress" style="margin-bottom: 0px">
+                    <% p.components.each do |k, v| %>
+                      <% if v.is_a? Hash and v[:job] %>
+                        <%= render partial: 'job_progress', locals: {:j => v[:job], :scaleby => (1.0/p.components.size)} %>
+                      <% end %>
+                    <% end %>
+                  </div>
+                </div>
+              </div>
+
+              <%
+                running = p.components.select { |k, c| c.is_a? Hash and c[:job].andand[:state] == "Running" }
+                queued = p.components.select { |k, c| c.is_a? Hash and c[:job].andand[:state] == "Queued" }
+                %>
+
+              <div class="clearfix">
+                Started at <%= render_localized_date(p[:started_at] || p[:created_at], "noseconds") %>.
+                <% pipeline_time = Time.now - (p[:started_at] || p[:created_at]) %>
+                Active for <%= render_runtime(pipeline_time, false) %>.
+
+                <div class="pull-right">
+                  <% running.each do |k,v| %>
+                    <%= render partial: 'job_progress', locals: {:j => v[:job], :show_progress_bar => false, :title => k} %>
+                  <% end %>
+                  <% queued.each do |k,v| %>
+                    <%= render partial: 'job_progress', locals: {:j => v[:job], :show_progress_bar => false, :title => k} %>
+                  <% end %>
+                </div>
+              </div>
+            </div>
+          <% end %>
+          <% end %>
+          </div>
+      </div>
+
+      <div class="panel panel-default">
+        <div class="panel-heading"><span class="panel-title">Recently finished pipelines</span>
+          <span class="pull-right">
+            <%= link_to pipeline_instances_path, class: 'btn btn-default btn-xs' do %>
+              All pipelines <i class="fa fa-fw fa-arrow-circle-right"></i>
+            <% end %>
+          </span>
+        </div>
+        <div class="panel-body">
+          <% _finished_pipelines.each do |p| %>
+            <div class="dashboard-panel-info-row">
+              <div class="row">
+                <div class="col-md-6 text-overflow-ellipsis">
+                  <%= link_to_if_arvados_object p, {friendly_name: true, lookup: lookup} %>
+                </div>
+                <div class="col-md-2">
+                  <%= render partial: "pipeline_status_label", locals: {p: p}%>
+                </div>
+                <div class="col-md-4">
+                  <%= render_localized_date(p[:finished_at] || p[:modified_at], "noseconds") %>
+                </div>
+              </div>
+              <div class="row">
+                <div class="col-md-12">
+                  <% if p[:started_at] and p[:finished_at] %>
+                    <% pipeline_time = p[:finished_at] - p[:started_at] %>
+                    Active for <%= render_runtime(pipeline_time, false) %>
+                  <% end %>
+
+                  <span class="pull-right text-overflow-ellipsis" style="max-width: 100%">
+                    <% outputs = [] %>
+                    <% p.components.each do |k, c| %>
+                      <% outputs << c[:output_uuid] if c[:output_uuid] %>
+                    <% end %>
+                    <% if outputs.size == 0 %>
+                      No output.
+                    <% elsif outputs.size == 1 %>
+                      <i class="fa fa-fw fa-archive"></i> <%= link_to_if_arvados_object outputs[0], friendly_name: true %>
+                    <% else %>
+                      <a href="#<%= p[:uuid] %>-outputs" data-toggle="collapse">Outputs <span class="caret"></span></a>
+                    <% end %>
+                  </span>
+                </div>
+              </div>
+
+              <div class="row collapse" id="<%= p[:uuid] %>-outputs" >
+                <div class="col-md-12">
+                  <div class="pull-right" style="max-width: 100%">
+                    <% outputs.each do |out| %>
+                      <div class="text-overflow-ellipsis">
+                        <i class="fa fa-fw fa-archive"></i> <%= link_to_if_arvados_object out, friendly_name: true %>
+                      </div>
+                    <% end %>
+                  </div>
+                </div>
+              </div>
+            </div>
+          <% end %>
+        </div>
+      </div>
+    </div>
+
+    <div class="col-md-6">
+      <% nodes = Node.all %>
+      <div class="panel panel-default" style="min-height: 10.5em">
+        <div class="panel-heading"><span class="panel-title">Compute and job status</span>
+          <span class="pull-right">
+            <%= link_to jobs_path, class: 'btn btn-default btn-xs' do %>
+              All jobs <i class="fa fa-fw fa-arrow-circle-right"></i>
+            <% end %>
+          </span>
+        </div>
+        <div class="panel-body">
+          <div>
+            <%= render partial: 'compute_node_summary', locals: {nodes: nodes} %>
+            <div style="text-align: center">
+              <a data-toggle="collapse" href="#compute_node_status">Details <span class="caret"></span></a>
+            </div>
+          </div>
+          <div id="compute_node_status" class="collapse">
+            <%= render partial: 'compute_node_status', locals: {nodes: nodes} %>
+          </div>
+        </div>
+      </div>
+      <div class="panel panel-default">
+        <div class="panel-heading"><span class="panel-title">Recent collections</span>
+          <span class="pull-right">
+            <%= link_to collections_path, class: 'btn btn-default btn-xs' do %>
+              All collections <i class="fa fa-fw fa-arrow-circle-right"></i>
+            <% end %>
+          </span>
+        </div>
+        <div class="panel-body">
+          <% r = recent_collections(8) %>
+          <% r[:collections].each do |p| %>
+            <div class="dashboard-panel-info-row">
+            <div>
+              <i class="fa fa-fw fa-folder-o"></i><%= link_to_if_arvados_object r[:owners][p[:owner_uuid]], friendly_name: true %>/
+              <span class="pull-right"><%= render_localized_date(p[:modified_at], "noseconds") %></span>
+            </div>
+            <div class="text-overflow-ellipsis" style="margin-left: 1em; width: 100%"><%= link_to_if_arvados_object p, {friendly_name: true, no_tags: true} %>
+            </div>
+            </div>
+          <% end %>
+        </div>
+      </div>
+    </div>
+  </div>
+
+</div>
diff --git a/apps/workbench/app/views/projects/_show_data_collections.html.erb b/apps/workbench/app/views/projects/_show_data_collections.html.erb
new file mode 100644 (file)
index 0000000..991e9b1
--- /dev/null
@@ -0,0 +1,4 @@
+<%= render_pane 'tab_contents', to_string: true, locals: {
+    filters: [['uuid', 'is_a', "arvados#collection"]],
+    sortable_columns: { 'name' => 'collections.name', 'description' => 'collections.description' }
+    }.merge(local_assigns) %>
diff --git a/apps/workbench/app/views/projects/_show_featured.html.erb b/apps/workbench/app/views/projects/_show_featured.html.erb
new file mode 100644 (file)
index 0000000..6547b5e
--- /dev/null
@@ -0,0 +1,18 @@
+<div class="row">
+  <% @objects[0..3].each do |object| %>
+  <div class="card arvados-object">
+    <div class="card-top blue">
+      <a href="#">
+        <img src="/favicon.ico" alt=""/>
+      </a>
+    </div>
+    <div class="card-info">
+      <span class="title"><%= @objects.name_for(object) || object.class_for_display %></span>
+      <div class="desc"><%= object.respond_to?(:description) ? object.description : object.uuid %></div>
+    </div>
+    <div class="card-bottom">
+      <%= render :partial => "show_object_button", :locals => {object: object, htmloptions: {class: 'btn-default btn-block'}} %>
+    </div>
+  </div>
+  <% end %>
+</div>
diff --git a/apps/workbench/app/views/projects/_show_jobs_and_pipelines.html.erb b/apps/workbench/app/views/projects/_show_jobs_and_pipelines.html.erb
new file mode 100644 (file)
index 0000000..3637ef4
--- /dev/null
@@ -0,0 +1,5 @@
+<%= render_pane 'tab_contents', to_string: true, locals: {
+        limit: 50,
+           filters: [['uuid', 'is_a', ["arvados#job", "arvados#pipelineInstance"]]],
+           sortable_columns: { 'name' => 'jobs.script, pipeline_instances.name', 'description' => 'jobs.description, pipeline_instances.description' }
+    }.merge(local_assigns) %>
diff --git a/apps/workbench/app/views/projects/_show_other_objects.html.erb b/apps/workbench/app/views/projects/_show_other_objects.html.erb
new file mode 100644 (file)
index 0000000..114ee5b
--- /dev/null
@@ -0,0 +1,4 @@
+<%= render_pane 'tab_contents', to_string: true, locals: {
+    filters: [['uuid', 'is_a', ["arvados#human", "arvados#specimen", "arvados#trait"]]],
+       sortable_columns: { 'name' => 'humans.uuid, specimens.uuid, traits.name' }
+    }.merge(local_assigns) %>
diff --git a/apps/workbench/app/views/projects/_show_pipeline_templates.html.erb b/apps/workbench/app/views/projects/_show_pipeline_templates.html.erb
new file mode 100644 (file)
index 0000000..402ce26
--- /dev/null
@@ -0,0 +1,4 @@
+<%= render_pane 'tab_contents', to_string: true, locals: {
+    filters: [['uuid', 'is_a', ["arvados#pipelineTemplate"]]],
+       sortable_columns: { 'name' => 'pipeline_templates.name', 'description' => 'pipeline_templates.description' }
+    }.merge(local_assigns) %>
diff --git a/apps/workbench/app/views/projects/_show_sharing.html.erb b/apps/workbench/app/views/projects/_show_sharing.html.erb
new file mode 100644 (file)
index 0000000..480f401
--- /dev/null
@@ -0,0 +1,114 @@
+<%
+   uuid_map = {}
+   if @share_links
+     [User, Group].each do |type|
+       type
+         .filter([['uuid','in',@share_links.collect(&:tail_uuid)]])
+         .each do |o|
+         uuid_map[o.uuid] = o
+       end
+     end
+   end
+   perm_name_desc_map = {}
+   perm_desc_name_map = {}
+   perms_json = []
+   ['Read', 'Write', 'Manage'].each do |link_desc|
+     link_name = "can_#{link_desc.downcase}"
+     perm_name_desc_map[link_name] = link_desc
+     perm_desc_name_map[link_desc] = link_name
+     perms_json << {value: link_name, text: link_desc}
+   end
+   perms_json = perms_json.to_json
+   choose_filters = {
+     "groups" => [["group_class", "=", "role"]],
+   }
+   choose_filters.default = []
+   owner_icon = fa_icon_class_for_uuid(@object.owner_uuid)
+   if owner_icon == "fa-users"
+     owner_icon = "fa-folder"
+     owner_type = "parent project"
+   else
+     owner_type = "owning user"
+   end
+%>
+
+<div class="pull-right">
+  <% ["users", "groups"].each do |share_class| %>
+
+  <%= link_to(send("choose_#{share_class}_path",
+      title: "Share with #{share_class}",
+      by_project: false,
+      preview_pane: false,
+      multiple: true,
+      filters: choose_filters[share_class].to_json,
+      action_method: 'post',
+      action_href: share_with_project_path,
+      action_name: 'Add',
+      action_data: {selection_param: 'uuids[]', success: 'tab-refresh'}.to_json),
+      class: "btn btn-primary btn-sm", remote: true) do %>
+  <i class="fa fa-fw fa-plus"></i> Share with <%= share_class %>&hellip;
+  <% end %>
+
+  <% end %>
+</div>
+
+<p>Permissions for this project are inherited from the <%= owner_type %>
+  <i class="fa fa-fw <%= owner_icon %>"></i>
+  <%= link_to_if_arvados_object @object.owner_uuid, friendly_name: true %>.
+</p>
+
+<table id="project_sharing" class="topalign table" style="clear: both; margin-top: 1em;">
+  <tr>
+    <th>User/Group Name</th>
+    <th>Email Address</th>
+    <th colspan="2">Project Access</th>
+  </tr>
+
+  <% @share_links.andand.each do |link|
+       shared_with = uuid_map[link.tail_uuid]
+       if shared_with.nil?
+         link_name = link.tail_uuid
+       elsif shared_with.respond_to?(:full_name)
+         link_name = shared_with.full_name
+       else
+         link_name = shared_with.name
+       end
+       if shared_with && shared_with.respond_to?(:email)
+         email = shared_with.email
+       end
+  %>
+  <tr data-object-uuid="<%= link.uuid %>">
+    <td>
+      <i class="fa fa-fw <%= fa_icon_class_for_uuid(link.tail_uuid) %>"></i>
+      <%= link_to_if_arvados_object(link.tail_uuid, link_text: link_name) %>
+    </td>
+    <td>
+      <%= email %>
+    </td>
+    <td><%= link_to perm_name_desc_map[link.name], '#', {
+      "data-emptytext" => "Read",
+      "data-placement" => "bottom",
+      "data-type" => "select",
+      "data-url" => url_for(action: "update", id: link.uuid, controller: "links", merge: true),
+      "data-title" => "Set #{link_name}'s access level",
+      "data-name" => "[name]",
+      "data-pk" => {id: link.tail_uuid, key: "link"}.to_json,
+      "data-value" => link.name,
+      "data-clear" => false,
+      "data-source" => perms_json,
+      "data-tpl" => "<select id=\"share_change_level\"></select>",
+      "class" => "editable form-control",
+      } %>
+    </td>
+    <td>
+      <%= link_to(
+          {action: 'destroy', id: link.uuid, controller: "links"},
+          {title: 'Revoke', class: 'btn btn-default btn-nodecorate', method: :delete,
+           data: {confirm: "Revoke #{link_name}'s access to this project?",
+                  remote: true}}) do %>
+      <i class="fa fa-fw fa-trash-o"></i>
+      <% end %>
+    </td>
+  </tr>
+  <% end %>
+</table>
diff --git a/apps/workbench/app/views/projects/_show_subprojects.html.erb b/apps/workbench/app/views/projects/_show_subprojects.html.erb
new file mode 100644 (file)
index 0000000..7d65639
--- /dev/null
@@ -0,0 +1,4 @@
+<%= render_pane 'tab_contents', to_string: true, locals: {
+    filters: [['uuid', 'is_a', ["arvados#group"]]],
+       sortable_columns: { 'name' => 'groups.name', 'description' => 'groups.description' }
+    }.merge(local_assigns) %>
diff --git a/apps/workbench/app/views/projects/_show_tab_contents.html.erb b/apps/workbench/app/views/projects/_show_tab_contents.html.erb
new file mode 100644 (file)
index 0000000..fe9595a
--- /dev/null
@@ -0,0 +1,103 @@
+<% sortable_columns = {} if local_assigns[:sortable_columns].nil? %>
+<div class="selection-action-container">
+  <div class="row">
+    <div class="col-sm-5">
+      <div class="btn-group btn-group-sm">
+        <button type="button" class="btn btn-default dropdown-toggle" data-toggle="dropdown">Selection <span class="caret"></span></button>
+        <ul class="dropdown-menu" role="menu">
+          <li><%= link_to "Create new collection with selected collections", '#',
+                  'data-href' => combine_selected_path(
+                    action_data: {current_project_uuid: @object.uuid}.to_json
+                  ),
+                  'id' => 'combine_selections_button',
+                  method: :post,
+                  'data-selection-param-name' => 'selection[]',
+                  'data-selection-action' => 'combine-project-contents',
+                  'data-toggle' => 'dropdown'
+            %></li>
+          <li><%= link_to "Compare selected", 'action',
+                  'data-href' => compare_pipeline_instances_path,
+                  'data-selection-param-name' => 'uuids[]',
+                  'data-selection-action' => 'compare'
+            %></li>
+          <li><%= link_to "Copy selected...", '#',
+                  'data-href' => choose_projects_path(
+                    title: 'Copy selected items to...',
+                    editable: true,
+                    action_name: 'Copy',
+                    action_href: actions_path,
+                    action_method: 'post',
+                    action_data_from_params: ['selection'],
+                    action_data: {
+                      copy_selections_into_project: true,
+                      selection_param: 'uuid',
+                      success: 'page-refresh'}.to_json),
+                  'data-remote' => true,
+                  'data-selection-param-name' => 'selection[]',
+                  'data-selection-action' => 'copy'
+            %></li>
+          <% if @object.editable? %>
+          <li><%= link_to "Move selected...", '#',
+                  'data-href' => choose_projects_path(
+                    title: 'Move selected items to...',
+                    editable: true,
+                    action_name: 'Move',
+                    action_href: actions_path,
+                    action_method: 'post',
+                    action_data_from_params: ['selection'],
+                    action_data: {
+                      move_selections_into_project: true,
+                      selection_param: 'uuid',
+                      success: 'page-refresh'}.to_json),
+                  'data-remote' => true,
+                  'data-selection-param-name' => 'selection[]',
+                  'data-selection-action' => 'move'
+            %></li>
+          <li><%= link_to "Remove selected", '#',
+                  method: :delete,
+                  'data-href' => url_for(action: :remove_items),
+                  'data-selection-param-name' => 'item_uuids[]',
+                  'data-selection-action' => 'remove',
+                  'data-remote' => true,
+                  'data-toggle' => 'dropdown'
+            %></li>
+          <% end %>
+        </ul>
+      </div>
+    </div>
+    <div class="col-sm-4 pull-right">
+      <input type="text" class="form-control filterable-control" placeholder="Search project contents" data-filterable-target="table.arv-index.arv-project-<%= tab_pane %> tbody"/>
+    </div>
+  </div>
+
+  <table class="table table-condensed arv-index arv-project-<%= tab_pane %>">
+    <colgroup>
+      <col width="0*" style="max-width: fit-content;" />
+      <col width="0*" style="max-width: fit-content;" />
+      <col width="0*" style="max-width: fit-content;" />
+      <col width="60%" style="width: 60%;" />
+      <col width="40%" style="width: 40%;" />
+    </colgroup>
+    <tbody data-infinite-scroller="#<%= tab_pane %>-scroll" data-infinite-content-href="<%= url_for partial: :contents_rows %>" data-infinite-content-params-projecttab="<%= local_assigns.select{|k| [:order, :limit, :filters].include? k }.to_json %>" data-infinite-content-params-attr="projecttab">
+    </tbody>
+    <thead>
+      <tr>
+        <th></th>
+        <th></th>
+        <th></th>
+        <% sort_order = sortable_columns['name'].gsub(/\s/,'') if sortable_columns['name'] %>
+        <th <% if !sort_order.nil? %>
+              data-sort-order='<%= sort_order %>'
+            <% end %> >
+          name
+        </th>
+        <% sort_order = sortable_columns['description'].gsub(/\s/,'') if sortable_columns['description'] %>
+        <th <% if !sort_order.nil? %>
+              data-sort-order='<%= sort_order %>'
+            <% end %> >
+          description
+        </th>
+      </tr>
+    </thead>
+  </table>
+</div>
diff --git a/apps/workbench/app/views/projects/index.html.erb b/apps/workbench/app/views/projects/index.html.erb
new file mode 100644 (file)
index 0000000..dc70da8
--- /dev/null
@@ -0,0 +1,7 @@
+<div class="pane-loaded arv-log-event-listener arv-refresh-on-log-event"
+     data-pane-content-url="<%= root_url tab_pane: "dashboard" %>"
+     data-object-uuid="all"
+     data-load-throttle="15000"
+     >
+  <%= render partial: 'show_dashboard' %>
+</div>
diff --git a/apps/workbench/app/views/projects/remove_items.js.erb b/apps/workbench/app/views/projects/remove_items.js.erb
new file mode 100644 (file)
index 0000000..664bd51
--- /dev/null
@@ -0,0 +1,6 @@
+$(document).trigger('count-change');
+<% @removed_uuids.each do |uuid| %>
+       $('[data-object-uuid=<%= uuid %>]').hide('slow', function() {
+           $(this).remove();
+       });
+<% end %>
diff --git a/apps/workbench/app/views/projects/show.html.erb b/apps/workbench/app/views/projects/show.html.erb
new file mode 100644 (file)
index 0000000..0cab117
--- /dev/null
@@ -0,0 +1,51 @@
+<% if @object.uuid != current_user.uuid # Not the "Home" project %>
+<% content_for :content_top do %>
+  <%= render partial: 'name_and_description' %>
+<% end %>
+<% end %>
+
+<% content_for :tab_line_buttons do %>
+  <% if @object.editable? %>
+    <div class="btn-group btn-group-sm">
+      <button type="button" class="btn btn-primary dropdown-toggle" data-toggle="dropdown"><i class="fa fa-fw fa-plus"></i> Add data <span class="caret"></span></button>
+      <ul class="dropdown-menu pull-right" role="menu">
+        <li>
+          <%= link_to(
+                choose_collections_path(
+                  title: 'Choose a collection to copy into this project:',
+                  multiple: true,
+                  action_name: 'Copy',
+                  action_href: actions_path(id: @object.uuid),
+                  action_method: 'post',
+                  action_data: {selection_param: 'selection[]', copy_selections_into_project: @object.uuid, success: 'page-refresh'}.to_json),
+                { remote: true, data: {'event-after-select' => 'page-refresh', 'toggle' => 'dropdown'} }) do %>
+            <i class="fa fa-fw fa-clipboard"></i> Copy data from another project
+          <% end %>
+        </li>
+        <li>
+          <%= link_to(collections_path(options: {ensure_unique_name: true}, collection: {manifest_text: "", name: "New collection", owner_uuid: @object.uuid}, redirect_to_anchor: 'Upload'), {
+              method: 'post',
+              data: {toggle: 'dropdown'}}) do %>
+            <i class="fa fa-fw fa-upload"></i> Upload files from my computer
+          <% end %>
+        </li>
+      </ul>
+    </div>
+    <%= link_to(
+          choose_pipeline_templates_path(
+            title: 'Choose a pipeline to run:',
+            action_name: 'Next: choose inputs <i class="fa fa-fw fa-arrow-circle-right"></i>',
+            action_href: pipeline_instances_path,
+            action_method: 'post',
+            action_data: {'selection_param' => 'pipeline_instance[pipeline_template_uuid]', 'pipeline_instance[owner_uuid]' => @object.uuid, 'success' => 'redirect-to-created-object'}.to_json),
+          { class: "btn btn-primary btn-sm", remote: true, title: "Run a pipeline in this project" }) do %>
+      <i class="fa fa-fw fa-gear"></i> Run a pipeline...
+    <% end %>
+    <%= link_to projects_path({'project[owner_uuid]' => @object.uuid, 'options' => {'ensure_unique_name' => true}}), method: :post, title: "Add a subproject to this project", class: 'btn btn-sm btn-primary' do %>
+      <i class="fa fa-fw fa-plus"></i>
+      Add a subproject
+    <% end %>
+  <% end %>
+<% end %>
+
+<%= render file: 'application/show.html.erb', locals: local_assigns %>
diff --git a/apps/workbench/app/views/projects/tab_counts.js.erb b/apps/workbench/app/views/projects/tab_counts.js.erb
new file mode 100644 (file)
index 0000000..6502e2e
--- /dev/null
@@ -0,0 +1,3 @@
+<% @tab_counts.each do |pane_name, tab_count| %>
+  $('span#<%= pane_name %>-count').html('(<%= tab_count %>)');
+<% end %>
\ No newline at end of file
diff --git a/apps/workbench/app/views/repositories/_show_help.html.erb b/apps/workbench/app/views/repositories/_show_help.html.erb
new file mode 100644 (file)
index 0000000..50ec880
--- /dev/null
@@ -0,0 +1,27 @@
+<% if (example = @objects.select(&:push_url).first) %>
+
+<p>
+Sample git quick start:
+</p>
+
+<pre>
+git clone <%= example.push_url %> <%= example.name unless example.push_url.match(/:(\S+)\.git$/).andand[1] == example.name %>
+cd <%= example.name %>
+# edit files
+git add the/files/you/changed
+git commit
+git push
+</pre>
+
+<% end %>
+
+<p>
+  See also:
+  <%= link_to raw('Arvados Docs &rarr; User Guide &rarr; SSH access'),
+  "#{Rails.configuration.arvados_docsite}/user/getting_started/ssh-access-unix.html",
+      target: "_blank"%> and 
+  <%= link_to raw('Arvados Docs &rarr; User Guide &rarr; Writing a Crunch
+  Script'),
+  "#{Rails.configuration.arvados_docsite}/user/tutorials/tutorial-firstscript.html",
+  target: "_blank"%>.
+</p>
diff --git a/apps/workbench/app/views/search/_choose_rows.html.erb b/apps/workbench/app/views/search/_choose_rows.html.erb
new file mode 100644 (file)
index 0000000..61f9300
--- /dev/null
@@ -0,0 +1,26 @@
+<% current_class = params[:last_object_class] %>
+<% @objects.each do |object| %>
+  <% icon_class = fa_icon_class_for_class(object.class) %>
+  <% if object.class.to_s != current_class %>
+    <% current_class = object.class.to_s %>
+    <div class="row class-separator" data-section-heading="true" data-section-name="<%= object.class.to_s %>">
+      <div class="col-sm-12">
+        <%= object.class_for_display.pluralize.downcase %>
+      </div>
+    </div>
+  <% end %>
+  <div class="row filterable selectable" data-section-name="<%= object.class.to_s %>" data-object-uuid="<%= object.uuid %>" data-preview-href="<%= chooser_preview_url_for object %>">
+    <div class="col-sm-12" style="overflow-x:hidden; white-space: nowrap">
+      <i class="fa fa-fw <%= icon_class %>"></i>
+      <% if (name_link = @objects.links_for(object, 'name').first) %>
+        <%= name_link.name %>
+        <span style="display:none"><%= object.uuid %></span>
+      <% elsif object.respond_to?(:name) and object.name and object.name.length > 0 %>
+        <%= object.name %>
+        <span style="display:none"><%= object.uuid %></span>
+      <% else %>
+        <span class="arvados-uuid"><%= object.uuid %></span>
+      <% end %>
+    </div>
+  </div>
+<% end %>
diff --git a/apps/workbench/app/views/sessions/index.html.erb b/apps/workbench/app/views/sessions/index.html.erb
new file mode 100644 (file)
index 0000000..bb0e943
--- /dev/null
@@ -0,0 +1 @@
+<p>You have logged out.</p>
diff --git a/apps/workbench/app/views/user_agreements/index.html.erb b/apps/workbench/app/views/user_agreements/index.html.erb
new file mode 100644 (file)
index 0000000..4c80caf
--- /dev/null
@@ -0,0 +1,41 @@
+<% content_for :breadcrumbs do raw '<!-- -->' end %>
+
+<% n_files = unsigned_user_agreements.collect(&:files).flatten(1).count %>
+<% content_for :page_title do %>
+<% if n_files == 1 %>
+<%= unsigned_user_agreements.first.files.first[1].sub(/\.[a-z]{3,4}$/,'') %>
+<% else %>
+User agreements
+<% end %>
+<% end %>
+
+<%= form_for(unsigned_user_agreements.first, {url: {action: 'sign', controller: 'user_agreements'}, method: :post}) do |f| %>
+<%= hidden_field_tag :return_to, request.url %>
+<div id="open_user_agreement">
+  <div class="alert alert-info">
+    <strong>Please check <%= n_files > 1 ? 'each' : 'the' %> box below</strong> to indicate that you have read and accepted the user agreement<%= 's' if n_files > 1 %>.
+  </div>
+  <% if n_files == 1 and (Rails.configuration.show_user_agreement_inline rescue false) %>
+  <% ua = unsigned_user_agreements.first; file = ua.files.first %>
+  <object data="<%= url_for(controller: 'collections', action: 'show_file', uuid: ua.uuid, file: "#{file[0]}/#{file[1]}") %>" type="<%= Rack::Mime::MIME_TYPES[file[1].match(/\.\w+$/)[0]] rescue '' %>" width="100%" height="400px">
+  </object>
+  <% end %>
+  <div>
+    <% unsigned_user_agreements.each do |ua| %>
+    <% ua.files.each do |file| %>
+    <div class="checkbox">
+      <%= f.label 'checked[]' do %>
+      <%= check_box_tag 'checked[]', "#{ua.uuid}/#{file[0]}/#{file[1]}", false %>
+      Accept <%= file[1].sub(/\.[a-z]{3,4}$/,'') %>
+      <%= link_to 'View agreement', {controller: 'collections', action: 'show_file', uuid: ua.uuid, file: "#{file[0]}/#{file[1]}"}, {target: '_blank', class: 'btn btn-xs btn-info'} %>
+      <% end %>
+    </div>
+    <% end %>
+    <% end %>
+  </div>
+  <div style="height: 1em"></div>
+  <div>
+    <%= f.submit 'Continue', {class: 'btn btn-primary'} %>
+  </div>
+</div>
+<% end %>
diff --git a/apps/workbench/app/views/users/_add_ssh_key_popup.html.erb b/apps/workbench/app/views/users/_add_ssh_key_popup.html.erb
new file mode 100644 (file)
index 0000000..98f54ef
--- /dev/null
@@ -0,0 +1,38 @@
+<div class="modal-dialog modal-with-loading-spinner">
+  <div class="modal-content">
+
+    <%= form_tag add_ssh_key_path, {method: 'get', id: 'add_new_key_form', name: 'add_new_key_form', class: 'form-search, new_authorized_key', remote: true} do %>
+
+      <div class="modal-header">
+        <button type="button" class="close" onClick="reset_form()" data-dismiss="modal" aria-hidden="true">&times;</button>
+        <div>
+          <div class="col-sm-6"> <h4 class="modal-title">Add SSH Key</h4> </div>
+          <div class="spinner spinner-32px spinner-h-center col-sm-1" hidden="true"></div>
+        </div>
+        <br/>
+      </div>
+
+      <div class="modal-body">
+        <div> <%= link_to "Click here to learn about SSH keys in Arvados.",
+                  "#{Rails.configuration.arvados_docsite}/user/getting_started/ssh-access-unix.html",
+                  style: "font-weight: bold",
+                  target: "_blank" %>
+        </div>
+        <div class="form-group">
+          <label for="public_key">Public Key</label>
+          <textarea class="form-control" id="public_key" rows="4" name="public_key" type="text"/>
+        </div>
+        <div class="form-group">
+          <label for="name">Name</label>
+          <input class="form-control" id="name" maxlength="250" name="name" type="text"/>
+        </div>
+      </div>
+
+      <div class="modal-footer">
+        <button type="submit" class="btn btn-primary" autofocus>Submit</button>
+        <button class="btn btn-default" onClick="reset_form()" data-dismiss="modal" aria-hidden="true">Cancel</button>
+      </div>
+
+    <% end #form %>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/users/_choose_rows.html.erb b/apps/workbench/app/views/users/_choose_rows.html.erb
new file mode 100644 (file)
index 0000000..f32d4b9
--- /dev/null
@@ -0,0 +1,9 @@
+<% icon_class = fa_icon_class_for_class(User) %>
+<% @objects.each do |object| %>
+  <div class="row filterable selectable" data-object-uuid="<%= object.uuid %>">
+    <div class="col-sm-12" style="overflow-x:hidden">
+      <i class="fa fa-fw <%= icon_class %>"></i>
+      <%= object.full_name %> &lt;<%= object.email %>&gt;
+    </div>
+  </div>
+<% end %>
diff --git a/apps/workbench/app/views/users/_home.html.erb b/apps/workbench/app/views/users/_home.html.erb
new file mode 100644 (file)
index 0000000..0c5739c
--- /dev/null
@@ -0,0 +1,34 @@
+<% content_for :breadcrumbs do raw '<!-- -->' end %>
+<% content_for :css do %>
+      .dash-list {
+        padding: 9px 0;
+      }
+      .dash-list>ul>li>a>span {
+      min-width: 1.5em;
+      margin-left: auto;
+      margin-right: auto;
+      }
+      .centerme {
+      margin-left: auto;
+      margin-right: auto;
+      text-align: center;
+      }
+      .bigfatnumber {
+      font-size: 4em;
+      font-weight: bold;
+      }
+      .dax {
+      max-width: 10%;
+      margin-right: 1em;
+      float: left
+      }
+      .daxalert {
+      overflow: hidden;
+      }
+<% end %>
+
+<div id="home-tables">
+
+    <%= render :partial => 'tables' %>
+
+</div>
diff --git a/apps/workbench/app/views/users/_manage_account.html.erb b/apps/workbench/app/views/users/_manage_account.html.erb
new file mode 100644 (file)
index 0000000..5024fce
--- /dev/null
@@ -0,0 +1,52 @@
+<div class="col-sm-6">
+  <div class="panel-group" id="arv-adv-accordion">
+    <% ['Virtual Machines',
+       'Repositories'].each do |section| %>
+      <% section_id = section.gsub(" ","_").downcase %>
+      <div class="panel panel-default">
+        <div class="panel-heading">
+          <h4 class="panel-title">
+            <a data-parent="#arv-adv-accordion" href="#manage_<%=section_id%>">
+              <%= section %>
+            </a>
+          </h4>
+        </div>
+        <div id="manage_<%=section_id%>">
+          <div class="panel-body">
+            <%= render partial: "manage_#{section_id}" %>
+          </div>
+        </div>
+      </div>
+    <% end %>
+  </div>
+</div>
+<div class="col-sm-6">
+  <div class="panel-group" id="arv-adv-accordion">
+    <% ['SSH Keys',
+      'Current Token'].each do |section| %>
+      <% section_id = section.gsub(" ","_").downcase %>
+      <div class="panel panel-default">
+        <div class="panel-heading">
+          <% if section_id == 'ssh_keys' %>
+            <div class="pull-right">
+              <%= link_to raw('<i class="fa fa-plus"></i>' " Add new SSH key"), add_ssh_key_popup_url,
+                           {class: 'btn btn-xs btn-primary', :remote => true, 'data-toggle' =>  "modal",
+                            'data-target' => '#add-ssh-key-modal-window'}  %>
+            </div>
+          <% end %>
+          <h4 class="panel-title">
+            <a data-parent="#arv-adv-accordion" href="#manage_<%=section_id%>">
+              <%= section %>
+            </a>
+          </h4>
+        </div>
+        <div id="manage_<%=section_id%>">
+          <div class="panel-body">
+            <%= render partial: "manage_#{section_id}" %>
+          </div>
+        </div>
+      </div>
+    <% end %>
+  </div>
+  <div id="add-ssh-key-modal-window" class="modal fade" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true"></div>
+</div>
diff --git a/apps/workbench/app/views/users/_manage_current_token.html.erb b/apps/workbench/app/views/users/_manage_current_token.html.erb
new file mode 100644 (file)
index 0000000..71c6bd2
--- /dev/null
@@ -0,0 +1,13 @@
+<p>The Arvados API token is a secret key that enables the Arvados SDKs to access Arvados with the proper permissions. For more information see <%= link_to raw('Getting an API token'), "#{Rails.configuration.arvados_docsite}/user/reference/api-tokens.html", target: "_blank"%>.</p>
+<p>Paste the following lines at a shell prompt to set up the necessary environment for Arvados SDKs to authenticate to your account, <b><%= current_user.email %></b></p>
+
+<pre>
+HISTIGNORE=$HISTIGNORE:'export ARVADOS_API_TOKEN=*'
+export ARVADOS_API_TOKEN=<%= Thread.current[:arvados_api_token] %>
+export ARVADOS_API_HOST=<%= current_api_host %>
+<% if Rails.configuration.arvados_insecure_https %>
+export ARVADOS_API_HOST_INSECURE=true
+<% else %>
+unset ARVADOS_API_HOST_INSECURE
+<% end %>
+</pre>
diff --git a/apps/workbench/app/views/users/_manage_repositories.html.erb b/apps/workbench/app/views/users/_manage_repositories.html.erb
new file mode 100644 (file)
index 0000000..d20498f
--- /dev/null
@@ -0,0 +1,41 @@
+<div>
+  <p>
+    For more information see <%= link_to raw('Writing a pipeline'),
+    "#{Rails.configuration.arvados_docsite}/user/tutorials/tutorial-firstscript.html", target: "_blank"%>.
+  </p>
+
+  <% if !@my_repositories.any? %>
+    You do not seem to have access to any repositories. If you would like to request access, please contact your system admin.
+  <% else %>
+    <table class="table repositories-table">
+      <colgroup>
+        <col style="width: 30%" />
+        <col style="width: 10%" />
+        <col style="width: 60%" />
+      </colgroup>
+      <thead>
+        <tr>
+          <th> Name </th>
+          <th> Read/Write </th>
+          <th> URL </th>
+        </tr>
+      </thead>
+      <tbody>
+        <% @my_repositories.andand.each do |repo| %>
+          <% writable = @repo_writable[repo.uuid] %>
+          <tr>
+            <td style="word-break:break-all;">
+              <%= repo[:name] %>
+            </td>
+            <td>
+              <%= writable ? 'writable' : 'read-only' %>
+            </td>
+            <td style="word-break:break-all;">
+              <code><%= writable ? repo[:push_url] : repo[:fetch_url] %></code>
+            </td>
+          </tr>
+        <% end %>
+      </tbody>
+    </table>
+  <% end %>
+</div>
diff --git a/apps/workbench/app/views/users/_manage_ssh_keys.html.erb b/apps/workbench/app/views/users/_manage_ssh_keys.html.erb
new file mode 100644 (file)
index 0000000..1ea8f0b
--- /dev/null
@@ -0,0 +1,55 @@
+<div>
+  <% if !@my_ssh_keys.any? %>
+     <p> You have not yet set up an SSH public key for use with Arvados. </p>
+     <p>  <%= link_to "Click here",
+                  "#{Rails.configuration.arvados_docsite}/user/getting_started/ssh-access-unix.html",
+                  style: "font-weight: bold",
+                  target: "_blank" %>  to learn about SSH keys in Arvados.
+     </p>
+     <p> When you have an SSH key you would like to use, add it using the <b>Add</b> button. </p>
+  <% else %>
+    <table class="table manage-ssh-keys-table">
+      <colgroup>
+        <col style="width: 35%" />
+        <col style="width: 55%" />
+        <col style="width: 10%" />
+      </colgroup>
+      <thead>
+        <tr>
+          <th> Name </th>
+          <th> Key Fingerprint </th>
+          <th> </th>
+        </tr>
+      </thead>
+      <tbody>
+        <% @my_ssh_keys.andand.each do |key| %>
+          <tr style="word-break:break-all;">
+            <td>
+              <%= key[:name] %>
+            </td>
+            <td style="word-break:break-all;">
+              <% if key[:public_key] && key[:public_key].size > 0 %>
+                <div>
+                  <span title="<%=key[:public_key]%>"> <%=
+                    begin
+                      SSHKey.fingerprint key[:public_key]
+                    rescue
+                      "INVALID KEY: " + key[:public_key]
+                    end
+                   %> </span>
+                </div>
+              <% else %>
+                  <%= key[:public_key] %>
+              <% end %>
+            </td>
+            <td>
+              <%= link_to(authorized_key_path(id: key[:uuid]), method: :delete, class: 'btn btn-sm', data: {confirm: "Really delete key?"}) do %>
+                  <i class="fa fa-fw fa-trash-o"></i>
+              <% end %>
+            </td>
+          </tr>
+        <% end %>
+      </tbody>
+    </table>
+  <% end %>
+</div>
diff --git a/apps/workbench/app/views/users/_manage_virtual_machines.html.erb b/apps/workbench/app/views/users/_manage_virtual_machines.html.erb
new file mode 100644 (file)
index 0000000..c6190ec
--- /dev/null
@@ -0,0 +1,52 @@
+<div>
+  <p>
+    For more information see <%= link_to raw('Arvados Docs &rarr; User Guide &rarr; SSH access'),
+  "#{Rails.configuration.arvados_docsite}/user/getting_started/ssh-access-unix.html",
+  target: "_blank"%>.  A sample <i>~/.ssh/config</i> entry is provided below.
+  </p>
+
+  <% if !@my_virtual_machines.any? %>
+    You do not seem to have access to any virtual machines. If you would like to request access, please contact your system admin.
+  <% else %>
+    <table class="table virtual-machines-table">
+      <colgroup>
+        <col style="width: 25%" />
+        <col style="width: 25%" />
+        <col style="width: 50%" />
+      </colgroup>
+      <thead>
+        <tr>
+          <th> Host name </th>
+          <th> Login name </th>
+          <th> Command line </th>
+        </tr>
+      </thead>
+      <tbody>
+        <% @my_virtual_machines.andand.each do |vm| %>
+          <tr>
+            <td style="word-break:break-all;">
+              <%= vm[:hostname] %>
+            </td>
+            <td style="word-break:break-all;">
+              <%= @my_vm_logins[vm[:uuid]].andand.compact.andand.join(", ") %>
+            </td>
+            <td style="word-break:break-all;">
+              <% if @my_vm_logins[vm[:uuid]] %>
+                <% @my_vm_logins[vm[:uuid]].each do |login| %>
+                  <code>ssh&nbsp;<%= login %>@<%= vm[:hostname] %>.arvados</code>
+                <% end %>
+              <% end %>
+            </td>
+          </tr>
+        <% end %>
+      </tbody>
+    </table>
+
+    <p><i>~/.ssh/config:</i></p>
+    <pre>Host *.arvados
+      TCPKeepAlive yes
+      ServerAliveInterval 60
+      ProxyCommand ssh -p2222 turnout@switchyard.<%= current_api_host || 'xyzzy.arvadosapi.com' %> -x -a $SSH_PROXY_FLAGS %h
+    </pre>
+  <% end %>
+</div>
diff --git a/apps/workbench/app/views/users/_setup_popup.html.erb b/apps/workbench/app/views/users/_setup_popup.html.erb
new file mode 100644 (file)
index 0000000..e9429cf
--- /dev/null
@@ -0,0 +1,73 @@
+<div class="modal-dialog modal-with-loading-spinner">
+  <div class="modal-content">
+
+    <%= form_tag setup_user_path, {id: 'setup_form', name: 'setup_form', method: 'get',
+        class: 'form-search', remote: true} do %>
+
+    <div class="modal-header">
+      <button type="button" class="close" onClick="reset_form()" data-dismiss="modal" aria-hidden="true">&times;</button>
+      <div>
+        <div class="col-sm-6"> <h4 class="modal-title">Setup User</h4> </div>
+        <div class="spinner spinner-32px spinner-h-center col-sm-1" hidden="true"></div>
+      </div>
+      <br/>
+    </div>
+
+    <div class="modal-body">
+      <% if @object%>
+        <% uuid = @object.uuid %>
+        <% email = @object.email %>
+      <% end %>
+      <% disable_email = uuid != nil %>
+      <% identity_url_prefix = @current_selections[:identity_url_prefix] %>
+      <% disable_url_prefix = identity_url_prefix != nil %>
+      <% selected_repo = @current_selections[:repo_name] %>
+      <% selected_vm = @current_selections[:vm_uuid] %>
+
+      <input id="user_uuid" maxlength="250" name="user_uuid" type="hidden" value="<%=uuid%>">
+      <div class="form-group">
+       <label for="email">Email</label>
+        <% if disable_email %>
+        <input class="form-control" id="email" maxlength="250" name="email" type="text" value="<%=email%>" disabled>
+        <% else %>
+        <input class="form-control" id="email" maxlength="250" name="email" type="text">
+        <% end %>
+      </div>
+      <div class="form-group">
+        <label for="openid_prefix">Identity URL Prefix</label>
+        <% if disable_url_prefix %>
+        <input class="form-control" id="openid_prefix" maxlength="250" name="openid_prefix" type="text"
+               value="<%=identity_url_prefix%>" disabled=true>
+        <% else %>
+        <input class="form-control" id="openid_prefix" maxlength="250" name="openid_prefix" type="text"
+               value="<%= Rails.configuration.default_openid_prefix %>">
+        <% end %>
+      </div>
+      <div class="form-group">
+        <label for="repo_name">Repository Name and Shell Login</label>
+        <input class="form-control" id="repo_name" maxlength="250" name="repo_name" type="text" value="<%=selected_repo%>">
+      </div>
+      <div class="form-group">
+        <label for="vm_uuid">Virtual Machine</label>
+        <select class="form-control" name="vm_uuid">
+          <option value="" <%= 'selected' unless selected_vm %>>
+           Choose One:
+         </option>
+          <% @vms.each do |vm| %>
+            <option value="<%=vm.uuid%>"
+                   <%= 'selected' if selected_vm == vm.uuid %>>
+             <%= vm.hostname %>
+           </option>
+          <% end %>
+        </select>
+      </div>
+    </div>
+
+    <div class="modal-footer">
+      <button type="submit" id="register" class="btn btn-primary" autofocus>Submit</button>
+      <button class="btn btn-default" onClick="reset_form()" data-dismiss="modal" aria-hidden="true">Cancel</button>
+    </div>
+
+    <% end #form %>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/users/_show_activity.html.erb b/apps/workbench/app/views/users/_show_activity.html.erb
new file mode 100644 (file)
index 0000000..9f714be
--- /dev/null
@@ -0,0 +1,4 @@
+<p>
+  As an admin user, you can <%= link_to "view recent user activity", activity_users_url %> and <%= link_to "view user storage activity", storage_users_url %>.
+</p>
+
diff --git a/apps/workbench/app/views/users/_show_admin.html.erb b/apps/workbench/app/views/users/_show_admin.html.erb
new file mode 100644 (file)
index 0000000..a34d7e6
--- /dev/null
@@ -0,0 +1,108 @@
+<div class="row">
+  <div class="col-md-6">
+    <p>
+      As an admin, you can log in as this user. When you&rsquo;ve
+      finished, you will need to log out and log in again with your
+      own account.
+    </p>
+
+    <blockquote>
+      <%= button_to "Log in as #{@object.full_name}", sudo_user_url(id: @object.uuid), class: 'btn btn-primary' %>
+    </blockquote>
+
+    <p>
+      As an admin, you can setup this user. Please input a VM and
+      repository for the user. If you had previously provided any of
+      these items, they are pre-filled for you and you can leave them
+      as is if you would like to reuse them.
+    </p>
+
+    <blockquote>
+      <%= link_to "Setup #{@object.full_name}", setup_popup_user_url(id: @object.uuid),  {class: 'btn btn-primary', :remote => true, 'data-toggle' =>  "modal", 'data-target' => '#user-setup-modal-window'}  %>
+    </blockquote>
+
+    <p>
+      As an admin, you can deactivate and reset this user. This will
+      remove all repository/VM permissions for the user. If you
+      "setup" the user again, the user will have to sign the user
+      agreement again.
+    </p>
+
+    <blockquote>
+      <%= button_to "Deactivate #{@object.full_name}", unsetup_user_url(id: @object.uuid), class: 'btn btn-primary', data: {confirm: "Are you sure you want to deactivate #{@object.full_name}?"} %>
+    </blockquote>
+  </div>
+  <div class="col-md-6">
+    <div class="panel panel-default">
+      <div class="panel-heading">
+        Group memberships
+      </div>
+      <div class="panel-body">
+        <div class="alert alert-info">
+          <b>Tip:</b> in most cases, you want <i>both permissions at once</i> for a given group.
+          <br/>
+          The user&rarr;group permission is can_manage.
+          <br/>
+          The group&rarr;user permission is can_read.
+        </div>
+        <form>
+          <% permitted_group_perms = {}
+             Link.filter([
+             ['tail_uuid', '=', @object.uuid],
+             ['head_uuid', 'is_a', 'arvados#group'],
+             ['link_class', '=', 'permission'],
+             ]).each do |perm|
+               permitted_group_perms[perm.head_uuid] = perm.uuid
+             end %>
+          <% member_group_perms = {}
+             Link.permissions_for(@object).each do |perm|
+               member_group_perms[perm.tail_uuid] = perm.uuid
+             end %>
+          <% Group.order(['name']).where(group_class: 'role').each do |group| %>
+            <div>
+              <label class="checkbox-inline" data-toggle-permission="true" data-permission-tail="<%= @object.uuid %>" data-permission-name="can_manage">
+                <%= check_box_tag(
+                    'group_uuids[]',
+                    group.uuid,
+                    permitted_group_perms[group.uuid],
+                    disabled: (group.owner_uuid == @object.uuid),
+                    data: {
+                      permission_head: group.uuid,
+                      permission_uuid: permitted_group_perms[group.uuid]}) %>
+                <small>user&rarr;group</small>
+              </label>
+              <label class="checkbox-inline" data-toggle-permission="true" data-permission-head="<%= @object.uuid %>" data-permission-name="can_read">
+                <%= check_box_tag(
+                    'group_uuids[]',
+                    group.uuid,
+                    member_group_perms[group.uuid],
+                    disabled: (group.owner_uuid == @object.uuid),
+                    data: {
+                      permission_tail: group.uuid,
+                      permission_uuid: member_group_perms[group.uuid]}) %>
+                <small>group&rarr;user</small>
+              </label>
+              <label class="checkbox-inline">
+                <%= group.name || '(unnamed)' %> <span class="deemphasize">(owned by <%= User.find?(group.owner_uuid).andand.full_name %>)</span>
+              </label>
+            </div>
+          <% end.empty? and begin %>
+            <div>
+              (No groups defined.)
+            </div>
+          <% end %>
+        </form>
+      </div>
+      <div class="panel-footer">
+        To manage these groups (roles), use:
+        <ul>
+          <li><code>arv group create \<br/>--group '{"group_class":"role","name":"New group"}'</code></li>
+          <li><code>arv group list \<br/>--filters '[["group_class","=","role"]]' \<br/>--select '["uuid","name"]'</code></li>
+          <li><code>arv edit <i>uuid</i></code></li>
+        </ul>
+      </div>
+    </div>
+  </div>
+</div>
+
+<div id="user-setup-modal-window" class="modal fade" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true"></div>
diff --git a/apps/workbench/app/views/users/_tables.html.erb b/apps/workbench/app/views/users/_tables.html.erb
new file mode 100644 (file)
index 0000000..b0f5753
--- /dev/null
@@ -0,0 +1,266 @@
+<% if current_user.andand.is_active %>
+  <div>
+    <strong>Recent jobs</strong>
+    <%= link_to '(refresh)', {format: :js}, {class: 'refresh', remote: true} %>
+    <%= link_to raw("Show all jobs &rarr;"), jobs_path, class: 'pull-right' %>
+    <% if not current_user.andand.is_active or @my_jobs.empty? %>
+      <p>(None)</p>
+    <% else %>
+      <table class="table table-bordered table-condensed table-fixedlayout">
+        <colgroup>
+          <col width="20%" />
+          <col width="20%" />
+          <col width="20%" />
+          <col width="13%" />
+          <col width="13%" />
+          <col width="20%" />
+        </colgroup>
+
+        <tr>
+          <th>Script</th>
+          <th>Output</th>
+          <th>Log</th>
+          <th>Created at</th>
+          <th>Status</th>
+        </tr>
+
+        <%# Preload collections, logs, and pipeline instance objects %>
+        <%
+          collection_uuids = []
+          log_uuids = []
+          @my_jobs[0..6].each do |j|
+            collection_uuids << j.output
+            log_uuids << j.log
+          end
+
+          @my_collections[0..6].each do |c|
+            collection_uuids << c.uuid
+          end
+
+          preload_collections_for_objects collection_uuids
+          preload_log_collections_for_objects log_uuids
+
+          pi_uuids = []
+          @my_pipelines[0..6].each do |p|
+            pi_uuids << p.uuid
+          end
+          resource_class = resource_class_for_uuid(pi_uuids.first, friendly_name: true)
+          preload_objects_for_dataclass resource_class, pi_uuids
+        %>
+
+        <% @my_jobs[0..6].each do |j| %>
+          <tr data-object-uuid="<%= j.uuid %>">
+            <td>
+              <small>
+                <%= link_to((j.script.andand[0..31] || j.uuid), job_path(j.uuid)) %>
+              </small>
+            </td>
+
+            <td>
+              <small>
+                <% if j.state == "Complete" and j.output %>
+                  <a href="<%= collection_path(j.output) %>">
+                    <% collections = collections_for_object(j.output) %>
+                      <% if collections && !collections.empty? %>
+                      <% c = collections.first %>
+                      <% c.files.each do |file| %>
+                        <%= file[0] == '.' ? file[1] : "#{file[0]}/#{file[1]}" %>
+                      <% end %>
+                     <% end %>
+                  </a>
+              <% end %>
+            </small>
+          </td>
+
+<td>
+  <small>
+    <% if j.log %>
+      <% log_collections = log_collections_for_object(j.log) %>
+      <% if log_collections && !log_collections.empty? %>
+        <% c = log_collections.first %>
+        <% c.files.each do |file| %>
+          <a href="<%= collection_path(j.log) %>/<%= file[1] %>?disposition=inline&size=<%= file[2] %>">Log</a>
+        <% end %>
+      <% end %>
+    <% elsif j.respond_to? :log_buffer and j.log_buffer.is_a? String %>
+      <% buf = j.log_buffer.strip.split("\n").last %>
+      <span title="<%= buf %>"><%= buf %></span>
+    <% end %>
+  </small>
+</td>
+
+<td>
+  <small>
+    <%= j.created_at.to_s if j.created_at %>
+  </small>
+</td>
+
+<td>
+  <div class="inline-progress-container">
+  <%= render partial: 'job_progress', locals: {:j => j} %>
+  </div>
+</td>
+
+</tr>
+<% end %>
+</table>
+<% end %>
+</div>
+
+<div>
+  <strong>Recent pipeline instances</strong>
+  <%= link_to '(refresh)', {format: :js}, {class: 'refresh', remote: true} %>
+  <%= link_to raw("Show all pipeline instances &rarr;"), pipeline_instances_path, class: 'pull-right' %>
+  <% if not current_user.andand.is_active or @my_pipelines.empty? %>
+    <p>(None)</p>
+  <% else %>
+    <table class="table table-bordered table-condensed table-fixedlayout">
+      <colgroup>
+        <col width="30%" />
+        <col width="30%" />
+        <col width="13%" />
+        <col width="13%" />
+        <col width="20%" />
+      </colgroup>
+
+      <tr>
+        <th>Instance</th>
+        <th>Template</th>
+        <th>Created at</th>
+        <th>Status</th>
+        <th>Progress</th>
+      </tr>
+
+      <% @my_pipelines[0..6].each do |p| %>
+        <tr data-object-uuid="<%= p.uuid %>">
+          <td>
+            <small>
+              <%= link_to_if_arvados_object p.uuid, friendly_name: true %>
+            </small>
+          </td>
+
+          <td>
+            <small>
+              <%= link_to_if_arvados_object p.pipeline_template_uuid, friendly_name: true %>
+            </small>
+          </td>
+
+          <td>
+            <small>
+              <%= (p.created_at.to_s) if p.created_at %>
+            </small>
+          </td>
+
+          <td>
+            <%= render partial: 'pipeline_status_label', locals: {:p => p} %>
+          </td>
+
+          <td>
+            <div class="inline-progress-container">
+              <%= render partial: 'pipeline_progress', locals: {:p => p} %>
+            </div>
+          </td>
+        </tr>
+      <% end %>
+    </table>
+  <% end %>
+</div>
+
+<div>
+  <strong>Recent collections</strong>
+  <%= link_to '(refresh)', {format: :js}, {class: 'refresh', remote: true} %>
+  <%= link_to raw("Show all collections &rarr;"), collections_path, class: 'pull-right' %>
+  <div class="pull-right" style="padding-right: 1em; width: 30%;">
+    <%= form_tag collections_path,
+          method: 'get',
+          class: 'form-search small-form-margin' do %>
+    <div class="input-group input-group-sm">
+      <%= text_field_tag :search, params[:search], class: 'form-control', placeholder: 'Search' %>
+      <span class="input-group-btn">
+        <%= button_tag(class: 'btn btn-info') do %>
+        <span class="glyphicon glyphicon-search"></span>
+        <% end %>
+      </span>
+    </div>
+    <% end %>
+  </div>
+  <% if not current_user.andand.is_active or @my_collections.empty? %>
+    <p>(None)</p>
+  <% else %>
+    <table class="table table-bordered table-condensed table-fixedlayout">
+      <colgroup>
+        <col width="46%" />
+        <col width="32%" />
+        <col width="10%" />
+        <col width="12%" />
+      </colgroup>
+
+      <tr>
+        <th>Contents</th>
+        <th>Tags</th>
+        <th>Age</th>
+        <th>Storage</th>
+      </tr>
+
+      <% @my_collections[0..6].each do |c| %>
+        <tr data-object-uuid="<%= c.uuid %>">
+          <td>
+            <small>
+              <a href="<%= collection_path(c.uuid) %>">
+                <% c.files.each do |file| %>
+                  <%= file[0] == '.' ? file[1] : "#{file[0]}/#{file[1]}" %>
+                <% end %>
+              </a>
+            </small>
+          </td>
+          <td>
+            <% if @my_tag_links[c.uuid] %>
+            <small>
+              <%= @my_tag_links[c.uuid].collect(&:name).join(", ") %>
+            </small>
+            <% end %>
+          </td>
+          <td>
+            <small>
+              <%= c.created_at.to_s if c.created_at %>
+            </small>
+          </td>
+          <td>
+            <%= render partial: 'collections/toggle_persist', locals: { uuid: c.uuid, current_state: @persist_state[c.uuid] } %>
+          </td>
+        </tr>
+      <% end %>
+    </table>
+  <% end %>
+</div>
+
+<% else %>
+
+  <div class="row-fluid">
+    <div class="col-sm-4">
+      <%= image_tag "dax.png", style: "max-width:100%" %>
+    </div>
+    <div class="col-sm-8">
+      <h2>Welcome to Arvados, <%= current_user.first_name %>!</h2>
+      <div class="well">
+        <p>
+          Your account must be activated by an Arvados administrator.  If this
+          is your first time accessing Arvados and would like to request
+          access, or you believe you are seeing the page in error, please
+          <%= link_to "contact us", Rails.configuration.activation_contact_link %>.
+          You should receive an email at the address you used to log in when
+          your account is activated.  In the mean time, you can
+          <%= link_to "learn more about Arvados", "https://arvados.org/projects/arvados/wiki/Introduction_to_Arvados" %>,
+          and <%= link_to "read the Arvados user guide", "http://doc.arvados.org/user" %>.
+        </p>
+        <p style="padding-bottom: 1em">
+          <%= link_to raw('Contact us &#x2709;'),
+              Rails.configuration.activation_contact_link, class: "pull-right btn btn-primary" %></p>
+      </div>
+    </div>
+  </div>
+<% end %>
+
+<% content_for :js do %>
+setInterval(function(){$('a.refresh:eq(0)').click()}, 60000);
+<% end %>
diff --git a/apps/workbench/app/views/users/activity.html.erb b/apps/workbench/app/views/users/activity.html.erb
new file mode 100644 (file)
index 0000000..9fd93ff
--- /dev/null
@@ -0,0 +1,72 @@
+<% content_for :css do %>
+table#users-activity-table th {
+    overflow-x: hidden;
+}
+table#users-activity-table .cell-for-span-This-month,
+table#users-activity-table .cell-for-span-Last-month {
+    background: #eee;
+}
+<% end %>
+<table class="table table-condensed arv-index" id="users-activity-table">
+  <colgroup>
+    <col width="28%" />
+  </colgroup>
+  <% @spans.each do |_| %>
+  <colgroup>
+    <% 3.times do %>
+    <col width="<%= (72 / @spans.count / 3).floor %>%" />
+    <% end %>
+  </colgroup>
+  <% end %>
+
+  <tr>
+    <th rowspan="2">User</th>
+    <% @spans.each do |span, start_at, end_at| %>
+    <th colspan="3" class="cell-for-span-<%= span.gsub ' ','-' %>">
+      <%= span %>
+      <br />
+      <%= start_at.strftime('%b %-d') %>
+      -
+      <%= (end_at-1.second).strftime('%b %-d') %>
+    </th>
+    <% end %>
+  </tr>
+  <tr>
+    <% @spans.each do |span, _| %>
+    <th class="cell-for-span-<%= span.gsub ' ','-' %>">Logins</th>
+    <th class="cell-for-span-<%= span.gsub ' ','-' %>">Jobs</th>
+    <th class="cell-for-span-<%= span.gsub ' ','-' %>">Pipelines</th>
+    <% end %>
+  </tr>
+
+  <% @users.each do |user| %>
+  <tr>
+    <td>
+      <small>
+        <% if user.uuid %>
+        <%= link_to_if_arvados_object user, friendly_name: true %>
+        <% else %>
+        <b>Total</b>
+        <% end %>
+      </small>
+    </td>
+
+    <% @spans.each do |span, _| %>
+    <% ['logins', 'jobs', 'pipeline_instances'].each do |type| %>
+    <td class="cell-for-span-<%= span.gsub ' ','-' %>">
+      <small>
+        <%= @user_activity[user.uuid][span + " " + type].to_s %>
+      </small>
+    </td>
+    <% end %>
+    <% end %>
+  </tr>
+  <% end %>
+</table>
+
+<% content_for :footer_js do %>
+$('#users-activity-table td small').each(function(){
+  if ($(this).html().trim() == '0')
+    $(this).css('opacity', '0.3');
+});
+<% end %>
diff --git a/apps/workbench/app/views/users/add_ssh_key.js.erb b/apps/workbench/app/views/users/add_ssh_key.js.erb
new file mode 100644 (file)
index 0000000..6117b98
--- /dev/null
@@ -0,0 +1,2 @@
+$("#add-ssh-key-modal-window").modal("hide");
+document.location.reload();
diff --git a/apps/workbench/app/views/users/add_ssh_key_popup.js.erb b/apps/workbench/app/views/users/add_ssh_key_popup.js.erb
new file mode 100644 (file)
index 0000000..b04d37f
--- /dev/null
@@ -0,0 +1,8 @@
+$("#add-ssh-key-modal-window").html("<%= escape_javascript(render partial: 'add_ssh_key_popup') %>");
+
+// reset form input fields, for the next time around
+function reset_form() {
+  $('#name').val("");
+  $('#public_key').val("");
+  $('select').val('')
+}
diff --git a/apps/workbench/app/views/users/home.html.erb b/apps/workbench/app/views/users/home.html.erb
new file mode 100644 (file)
index 0000000..9fb2c27
--- /dev/null
@@ -0,0 +1 @@
+<%= render :partial => 'home' %>
diff --git a/apps/workbench/app/views/users/home.js.erb b/apps/workbench/app/views/users/home.js.erb
new file mode 100644 (file)
index 0000000..d845186
--- /dev/null
@@ -0,0 +1,3 @@
+var new_content = "<%= escape_javascript(render partial: 'tables') %>";
+if ($('div#home-tables').html() != new_content)
+   $('div#home-tables').html(new_content);
diff --git a/apps/workbench/app/views/users/inactive.html.erb b/apps/workbench/app/views/users/inactive.html.erb
new file mode 100644 (file)
index 0000000..832b580
--- /dev/null
@@ -0,0 +1,26 @@
+<% content_for :breadcrumbs do raw '<!-- -->' end %>
+
+<div class="row">
+  <div class="col-sm-8 col-sm-push-4" style="margin-top: 1em">
+    <div class="well clearfix">
+      <%= image_tag "dax.png", style: "width: 147px; height: 197px; max-width: 25%; margin-right: 2em", class: 'pull-left' %>
+
+      <h3>Hi! You're logged in, but...</h3>
+
+      <p>
+
+        Your account is inactive.
+
+      </p><p>
+
+        An administrator must activate your account before you can get
+        any further.
+
+      </p><p>
+
+        <%= link_to 'Retry', (params[:return_to] || '/'), class: 'btn btn-primary' %>
+
+      </p>
+    </div>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/users/manage_account.html.erb b/apps/workbench/app/views/users/manage_account.html.erb
new file mode 100644 (file)
index 0000000..0751866
--- /dev/null
@@ -0,0 +1 @@
+<%= render :partial => 'manage_account' %>
diff --git a/apps/workbench/app/views/users/profile.html.erb b/apps/workbench/app/views/users/profile.html.erb
new file mode 100644 (file)
index 0000000..aab8843
--- /dev/null
@@ -0,0 +1,95 @@
+<%
+    profile_config = Rails.configuration.user_profile_form_fields
+    current_user_profile = current_user.prefs[:profile]
+    show_save_button = false
+
+    profile_message = Rails.configuration.user_profile_form_message
+%>
+
+<div>
+    <div class="panel panel-default">
+        <div class="panel-heading">
+          <h4 class="panel-title">
+            Profile
+          </h4>
+        </div>
+        <div class="panel-body">
+          <% if !missing_required_profile? && params[:offer_return_to] %>
+            <div class="alert alert-success">
+              <p>Thank you for filling in your profile. <%= link_to 'Back to work!', params[:offer_return_to], class: 'btn btn-sm btn-primary' %></p>
+            </div>
+          <% else %>
+            <div class="alert alert-info">
+              <p><%=raw(profile_message)%></p>
+            </div>
+          <% end %>
+
+            <%= form_for current_user, html: {id: 'save_profile_form', name: 'save_profile_form', class: 'form-horizontal'} do %>
+              <%= hidden_field_tag :offer_return_to, params[:offer_return_to] %>
+              <%= hidden_field_tag :return_to, profile_user_path(current_user.uuid, offer_return_to: params[:offer_return_to]) %>
+              <div class="form-group">
+                  <label for="email" class="col-sm-3 control-label"> Email </label>
+                  <div class="col-sm-8">
+                    <p class="form-control-static" id="email" name="email"><%=current_user.email%></p>
+                  </div>
+              </div>
+              <div class="form-group">
+                  <label for="first_name" class="col-sm-3 control-label"> First name </label>
+                  <div class="col-sm-8">
+                    <p class="form-control-static" id="first_name" name="first_name"><%=current_user.first_name%></p>
+                  </div>
+              </div>
+              <div class="form-group">
+                  <label for="last_name" class="col-sm-3 control-label"> Last name </label>
+                  <div class="col-sm-8">
+                    <p class="form-control-static" id="last_name" name="last_name"><%=current_user.last_name%></p>
+                  </div>
+              </div>
+              <div class="form-group">
+                  <label for="identity_url" class="col-sm-3 control-label"> Identity URL </label>
+                  <div class="col-sm-8">
+                    <p class="form-control-static" id="identity_url" name="identity_url"><%=current_user.andand.identity_url%></p>
+                  </div>
+              </div>
+
+              <% profile_config.kind_of?(Array) && profile_config.andand.each do |entry| %>
+                <% if entry['key'] %>
+                  <%
+                      show_save_button = true
+                      label = entry['required'] ? '* ' : ''
+                      label += entry['form_field_title']
+                      value = current_user_profile[entry['key'].to_sym] if current_user_profile
+                  %>
+                  <div class="form-group">
+                    <label for="<%=entry['key']%>"
+                           class="col-sm-3 control-label"
+                           style=<%="color:red" if entry['required']&&(!value||value.empty?)%>> <%=label%>
+                    </label>
+                    <% if entry['type'] == 'select' %>
+                      <div class="col-sm-8">
+                        <select class="form-control" name="user[prefs][:profile][:<%=entry['key']%>]">
+                          <% entry['options'].each do |option| %>
+                            <option value="<%=option%>" <%='selected' if option==value%>><%=option%></option>
+                          <% end %>
+                        </select>
+                      </div>
+                    <% else %>
+                      <div class="col-sm-8">
+                        <input type="text" class="form-control" name="user[prefs][:profile][:<%=entry['key']%>]" placeholder="<%=entry['form_field_description']%>" value="<%=value%>" ></input>
+                      </div>
+                    <% end %>
+                  </div>
+                <% end %>
+              <% end %>
+
+              <% if show_save_button %>
+                <div class="form-group">
+                  <div class="col-sm-offset-3 col-sm-8">
+                    <button type="submit" class="btn btn-primary">Save profile</button>
+                  </div>
+                </div>
+              <% end %>
+            <% end %>
+        </div>
+    </div>
+</div>
diff --git a/apps/workbench/app/views/users/setup.js.erb b/apps/workbench/app/views/users/setup.js.erb
new file mode 100644 (file)
index 0000000..bce71b4
--- /dev/null
@@ -0,0 +1,2 @@
+$("#user-setup-modal-window").modal("hide");
+document.location.reload();
diff --git a/apps/workbench/app/views/users/setup_popup.js.erb b/apps/workbench/app/views/users/setup_popup.js.erb
new file mode 100644 (file)
index 0000000..b1aa796
--- /dev/null
@@ -0,0 +1,44 @@
+$("#user-setup-modal-window").html("<%= escape_javascript(render partial: 'setup_popup') %>");
+
+// disable the submit button on load
+var $input = $('input:text'),
+$register = $('#register');
+
+var email_disabled = document.forms["setup_form"]["email"].disabled;
+var email_value = document.forms["setup_form"]["email"].value;
+var prefix_value = document.forms["setup_form"]["openid_prefix"].value;
+if ((email_disabled == false) && (email_value == null || email_value == "" ||
+        prefix_value == null || prefix_value == "")) {
+  $register.prop('disabled', true);
+}
+
+// capture events to enable submit button when applicable
+$input.on('keyup paste mouseleave', function() {
+  var trigger = false;
+
+  var email_disabled = document.forms["setup_form"]["email"].disabled;
+  var email_value = document.forms["setup_form"]["email"].value;
+  var prefix_value = document.forms["setup_form"]["openid_prefix"].value;
+
+  var emailRegExp = /^([\w-\.]+@([\w-]+\.)+[\w-]{2,4})?$/;
+  var validEmail = false;
+
+  if (emailRegExp.test(email_value )) {
+    validEmail = true;
+  }
+
+  if ((email_disabled == false) && (!validEmail || email_value == null ||
+            email_value == "" || prefix_value == null || prefix_value == "")){
+    trigger = true;
+  }
+
+  $register.prop('disabled', trigger);
+});
+
+// reset form input fields, for the next time around
+function reset_form() {
+  $('#email').val("");
+  $('#openid_prefix').val("");
+  $('#repo_name').val("");
+  $('select').val('')
+}
diff --git a/apps/workbench/app/views/users/storage.html.erb b/apps/workbench/app/views/users/storage.html.erb
new file mode 100644 (file)
index 0000000..7533aa0
--- /dev/null
@@ -0,0 +1,66 @@
+<% content_for :css do %>
+table#users-storage-table th {
+    overflow-x: hidden;
+    text-align: center;
+}
+table#users-storage-table .byte-value {
+    text-align: right;
+}
+<% end %>
+<table class="table table-condensed arv-index" id="users-storage-table">
+  <colgroup>
+    <col />
+  </colgroup>
+
+  <tr>
+    <th rowspan="2">User</th>
+    <th colspan="2">
+      Collections Read Size
+    </th>
+    <th colspan="2">
+      Collections Persisted Storage
+    </th>
+    <th rowspan="2">Measured At</th>
+  </tr>
+  <tr>
+    <% 2.times do %>
+    <th class="byte-value">
+      Total (unweighted)
+    </th>
+    <th class="byte-value">
+      Shared (weighted)
+    </th>
+    <% end %>
+  </tr>
+
+  <% @users.each do |user| %>
+  <tr>
+    <td>
+      <% if user.uuid %>
+      <small>
+        <%= link_to_if_arvados_object user, friendly_name: true %>
+      </small>
+      <% else %>
+      <b>Total</b>
+      <% end %>
+    </td>
+    <% [:read_collections_total_bytes, :read_collections_weighted_bytes, :persisted_collections_total_bytes, :persisted_collections_weighted_bytes].each do |key| %>
+    <td class="byte-value">
+      <%= human_readable_bytes_html(@user_storage[user.uuid].fetch(key,0).floor) %>
+    </td>
+    <% end %>
+    <% if @log_date.key?(user.uuid) %>
+    <td class="date" title="<%= @log_date[user.uuid] %>">
+      <%= @log_date[user.uuid].strftime('%F') %>
+    </td>
+    <% end %>
+  </tr>
+  <% end %>
+</table>
+
+<% content_for :footer_js do %>
+$('#users-storage-table td small').each(function(){
+  if ($(this).html().trim() == '0')
+    $(this).css('opacity', '0.3');
+});
+<% end %>
diff --git a/apps/workbench/app/views/users/welcome.html.erb b/apps/workbench/app/views/users/welcome.html.erb
new file mode 100644 (file)
index 0000000..a810a8d
--- /dev/null
@@ -0,0 +1,41 @@
+<% content_for :breadcrumbs do raw '<!-- -->' end %>
+
+<div class="row">
+  <div class="col-sm-8 col-sm-push-4" style="margin-top: 1em">
+    <div class="well clearfix">
+      <%= image_tag "dax.png", style: "width: 147px; height: 197px; max-width: 25%; margin-right: 2em", class: 'pull-left' %>
+
+      <h3>Please log in.</h3>
+
+      <p>
+
+        The "Log in" button below will show you a Google sign-in page.
+        After you assure Google that you want to log in here with your
+        Google account, you will be redirected back here to
+        <%= Rails.configuration.site_name %>.
+
+      </p><p>
+
+        If you have never used <%= Rails.configuration.site_name %>
+        before, logging in for the first time will automatically
+        create a new account.
+
+      </p><p>
+
+        <i><%= Rails.configuration.site_name %> uses your name and
+          email address only for identification, and does not retrieve
+          any other personal information from Google.</i>
+
+      </p>
+        <%# Todo: add list of external authentications providers to
+            discovery document, then generate the option list here. Right
+            now, don't provide 'auth_provider' to get the default one. %>
+        <div class="pull-right">
+          <%= link_to arvados_api_client.arvados_login_url(return_to: request.url), class: "btn btn-primary" do %>
+          Log in to <%= Rails.configuration.site_name %>
+          <i class="fa fa-fw fa-arrow-circle-right"></i>
+          <% end %>
+        </div>
+    </div>
+  </div>
+</div>
diff --git a/apps/workbench/app/views/virtual_machines/_show_help.html.erb b/apps/workbench/app/views/virtual_machines/_show_help.html.erb
new file mode 100644 (file)
index 0000000..50f8c43
--- /dev/null
@@ -0,0 +1,26 @@
+<p>
+Sample <code>~/.ssh/config</code> section:
+</p>
+
+<pre>
+Host *.arvados
+  ProxyCommand ssh -p2222 turnout@switchyard.<%= current_api_host || 'xyzzy.arvadosapi.com' %> -x -a $SSH_PROXY_FLAGS %h
+<% if @objects.first.andand.current_user_logins.andand.first %>
+  User <%= @objects.first.current_user_logins.andand.first %>
+<% end %>
+</pre>
+
+<p>
+Sample login command:
+</p>
+
+<pre>
+ssh <%= @objects.first.andand.hostname.andand.sub('.'+current_api_host,'') or 'vm-hostname' %>.arvados
+</pre>
+
+<p>
+  See also:
+  <%= link_to raw('Arvados Docs &rarr; User Guide &rarr; SSH access'),
+  "#{Rails.configuration.arvados_docsite}/user/getting_started/ssh-access-unix.html",
+  target: "_blank"%>.
+</p>
diff --git a/apps/workbench/app/views/websocket/index.html.erb b/apps/workbench/app/views/websocket/index.html.erb
new file mode 100644 (file)
index 0000000..d805371
--- /dev/null
@@ -0,0 +1,34 @@
+<% content_for :page_title do %>
+  Event bus debugging page
+<% end %>
+<h1>Event bus debugging page</h1>
+
+<form>
+<textarea style="width:100%; height: 10em" id="websocket-message-content"></textarea>
+<button type="button" id="send-to-websocket">Send</button>
+</form>
+
+<br>
+
+<p id="PutStuffHere"></p>
+
+<script>
+$(function() {
+putStuffThere = function (content) {
+  $("#PutStuffHere").append(content + "<br>");
+};
+
+var dispatcher = new WebSocket('<%= arvados_api_client.discovery[:websocketUrl] %>?api_token=<%= Thread.current[:arvados_api_token] %>');
+dispatcher.onmessage = function(event) {
+  //putStuffThere(JSON.parse(event.data));
+  putStuffThere(event.data);
+};
+
+sendStuff = function () {
+  dispatcher.send($("#websocket-message-content").val());
+};
+
+$("#send-to-websocket").click(sendStuff);
+});
+
+</script>
diff --git a/apps/workbench/config.ru b/apps/workbench/config.ru
new file mode 100644 (file)
index 0000000..c0b3696
--- /dev/null
@@ -0,0 +1,4 @@
+# This file is used by Rack-based servers to start the application.
+
+require ::File.expand_path('../config/environment',  __FILE__)
+run ArvadosWorkbench::Application
diff --git a/apps/workbench/config/application.default.yml b/apps/workbench/config/application.default.yml
new file mode 100644 (file)
index 0000000..dd30641
--- /dev/null
@@ -0,0 +1,192 @@
+# Do not use this file for site configuration. Create application.yml
+# instead (see application.yml.example).
+
+# Below is a sample setting for diagnostics testing.
+# Configure workbench URL as "arvados_workbench_url"
+# Configure test user tokens as "user_tokens".
+#   At this time the tests need an "active" user token.
+# Also, configure the pipelines to be executed as "pipelines_to_test".
+# For each of the pipelines identified by the name of your choice
+#     ("pipeline_1" and "pipeline_2" in this sample), provide the following:
+#   template_uuid: is the uuid of the template to be executed
+#   input_paths: an array of inputs for the pipeline. Use either a collection's "uuid"
+#     or a file's "uuid/file_name" path in this array. If the pipeline does not require
+#     any inputs, this can be omitted. 
+#   max_wait_seconds: max time in seconds to wait for the pipeline run to complete.
+#     Default value of 30 seconds is used when this value is not provided.
+diagnostics:
+  arvados_workbench_url: https://localhost:3031
+  user_tokens:
+    active: eu33jurqntstmwo05h1jr3eblmi961e802703y6657s8zb14r
+  pipelines_to_test:
+    pipeline_1:
+      template_uuid: zzzzz-p5p6p-rxj8d71854j9idn
+      input_paths: [zzzzz-4zz18-nz98douzhaa3jh2]
+      max_wait_seconds: 10
+    pipeline_2:
+      template_uuid: zzzzz-p5p6p-1xbobfobk94ppbv
+      input_paths: [zzzzz-4zz18-nz98douzhaa3jh2, zzzzz-4zz18-gpw9o5wpcti3nib]
+
+# Below is a sample setting for performance testing.
+# Configure workbench URL as "arvados_workbench_url"
+# Configure test user token as "user_token".
+performance:
+  arvados_workbench_url: https://localhost:3031
+  user_token: eu33jurqntstmwo05h1jr3eblmi961e802703y6657s8zb14r
+
+development:
+  cache_classes: false
+  eager_load: true
+  consider_all_requests_local: true
+  action_controller.perform_caching: false
+  action_mailer.raise_delivery_errors: false
+  active_support.deprecation: :log
+  action_dispatch.best_standards_support: :builtin
+  assets.debug: true
+  profiling_enabled: true
+  site_name: Arvados Workbench (dev)
+  local_modified: "<%= '-modified' if `git status -s` != '' %>"
+
+  # API server configuration
+  arvados_login_base: ~
+  arvados_v1_base: ~
+  arvados_insecure_https: ~
+
+production:
+  force_ssl: true
+  cache_classes: true
+  eager_load: true
+  consider_all_requests_local: false
+  action_controller.perform_caching: true
+  serve_static_assets: false
+  assets.compile: false
+  assets.digest: true
+  i18n.fallbacks: true
+  active_support.deprecation: :notify
+  profiling_enabled: false
+
+  arvados_insecure_https: false
+
+  data_import_dir: /data/arvados-workbench-upload/data
+  data_export_dir: /data/arvados-workbench-download/data
+
+  # API server configuration
+  arvados_login_base: ~
+  arvados_v1_base: ~
+  arvados_insecure_https: ~
+
+  site_name: Arvados Workbench
+
+test:
+  cache_classes: true
+  eager_load: false
+  serve_static_assets: true
+  static_cache_control: public, max-age=3600
+  consider_all_requests_local: true
+  action_controller.perform_caching: false
+  action_dispatch.show_exceptions: false
+  action_controller.allow_forgery_protection: false
+  action_mailer.delivery_method: :test
+  active_support.deprecation: :stderr
+  profiling_enabled: false
+  secret_token: <%= rand(2**256).to_s(36) %>
+  secret_key_base: <%= rand(2**256).to_s(36) %>
+
+  # When you run the Workbench's integration tests, it starts the API
+  # server as a dependency.  These settings should match the API
+  # server's Rails defaults.  If you adjust those, change these
+  # settings in application.yml to match.
+  arvados_login_base: https://localhost:3000/login
+  arvados_v1_base: https://localhost:3000/arvados/v1
+  arvados_insecure_https: true
+
+  site_name: Workbench:test
+
+  # Enable user profile with one required field
+  user_profile_form_fields:
+    - key: organization
+      type: text
+      form_field_title: Institution
+      form_field_description: Your organization
+      required: true
+    - key: role
+      type: select
+      form_field_title: Your role
+      form_field_description: Choose the category that best describes your role in your organization.
+      options:
+        - Bio-informatician
+        - Computational biologist
+        - Biologist or geneticist
+        - Software developer
+        - IT
+        - Other
+
+common:
+  assets.js_compressor: false
+  assets.css_compressor: false
+  data_import_dir: /tmp/arvados-workbench-upload
+  data_export_dir: /tmp/arvados-workbench-download
+  arvados_login_base: https://arvados.local/login
+  arvados_v1_base: https://arvados.local/arvados/v1
+  arvados_insecure_https: true
+  activation_contact_link: mailto:info@arvados.org
+  arvados_docsite: http://doc.arvados.org
+  arvados_theme: default
+  show_user_agreement_inline: false
+  secret_token: ~
+  secret_key_base: false
+  default_openid_prefix: https://www.google.com/accounts/o8/id
+  send_user_setup_notification_email: true
+
+  # Set user_profile_form_fields to enable and configure the user profile page.
+  # Default is set to false. A commented setting with full description is provided below.
+  user_profile_form_fields: false
+
+  # Below is a sample setting of user_profile_form_fields config parameter.
+  # This configuration parameter should be set to either false (to disable) or
+  # to an array as shown below. 
+  # Configure the list of input fields to be displayed in the profile page
+  # using the attribute "key" for each of the input fields.
+  # This sample shows configuration with one required and one optional form fields.
+  # For each of these input fields:
+  #   You can specify "type" as "text" or "select".
+  #   List the "options" to be displayed for each of the "select" menu.
+  #   Set "required" as "true" for any of these fields to make them required.
+  # If any of the required fields are missing in the user's profile, the user will be
+  # redirected to the profile page before they can access any Workbench features.
+  #user_profile_form_fields:
+  #  - key: organization
+  #    type: text
+  #    form_field_title: Institution/Company
+  #    form_field_description: Your organization
+  #    required: true
+  #  - key: role
+  #    type: select
+  #    form_field_title: Your role
+  #    form_field_description: Choose the category that best describes your role in your organization.
+  #    options:
+  #      - Bio-informatician
+  #      - Computational biologist
+  #      - Biologist or geneticist
+  #      - Software developer
+  #      - IT
+  #      - Other
+
+  # Use "user_profile_form_message" to configure the message you want to display on
+  # the profile page.
+  user_profile_form_message: Welcome to Arvados. All <span style="color:red">required fields</span> must be completed before you can proceed.
+
+  # source_version
+  source_version: "<%= `git log -n 1 --format=%h`.strip %>"
+  local_modified: false
+
+  # report notification to and from addresses
+  issue_reporter_email_from: arvados@example.com
+  issue_reporter_email_to: arvados@example.com
+  support_email_address: arvados@example.com
+
+  # filename suffixes for which view icon would be shown in collection show page
+  filename_suffixes_with_view_icon: [txt, gif, jpeg, jpg, png, html, htm, pdf]
+
+  # the maximum number of bytes to load in the log viewer
+  log_viewer_max_bytes: 1000000
diff --git a/apps/workbench/config/application.rb b/apps/workbench/config/application.rb
new file mode 100644 (file)
index 0000000..4ac6819
--- /dev/null
@@ -0,0 +1,53 @@
+require File.expand_path('../boot', __FILE__)
+
+require 'rails/all'
+
+Bundler.require(:default, Rails.env)
+
+module ArvadosWorkbench
+  class Application < Rails::Application
+    # Settings in config/environments/* take precedence over those specified here.
+    # Application configuration should go into files in config/initializers
+    # -- all .rb files in that directory are automatically loaded.
+
+    # Custom directories with classes and modules you want to be autoloadable.
+    # config.autoload_paths += %W(#{config.root}/extras)
+
+    # Only load the plugins named here, in the order given (default is alphabetical).
+    # :all can be used as a placeholder for all plugins not explicitly named.
+    # config.plugins = [ :exception_notification, :ssl_requirement, :all ]
+
+    # Activate observers that should always be running.
+    # config.active_record.observers = :cacher, :garbage_collector, :forum_observer
+
+    # Set Time.zone default to the specified zone and make Active Record auto-convert to this zone.
+    # Run "rake -D time" for a list of tasks for finding time zone names. Default is UTC.
+    # config.time_zone = 'Central Time (US & Canada)'
+
+    # The default locale is :en and all translations from config/locales/*.rb,yml are auto loaded.
+    # config.i18n.load_path += Dir[Rails.root.join('my', 'locales', '*.{rb,yml}').to_s]
+    # config.i18n.default_locale = :de
+
+    # Configure the default encoding used in templates for Ruby 1.9.
+    config.encoding = "utf-8"
+
+    # Configure sensitive parameters which will be filtered from the log file.
+    config.filter_parameters += [:password]
+
+    # Enable escaping HTML in JSON.
+    config.active_support.escape_html_entities_in_json = true
+
+    # Use SQL instead of Active Record's schema dumper when creating the database.
+    # This is necessary if your schema can't be completely dumped by the schema dumper,
+    # like if you have constraints or database-specific column types
+    # config.active_record.schema_format = :sql
+
+    # Enable the asset pipeline
+    config.assets.enabled = true
+
+    # Version of your assets, change this if you want to expire all your assets
+    config.assets.version = '1.0'
+  end
+end
+
+require File.expand_path('../load_config', __FILE__)
diff --git a/apps/workbench/config/application.yml.example b/apps/workbench/config/application.yml.example
new file mode 100644 (file)
index 0000000..bd19dd5
--- /dev/null
@@ -0,0 +1,29 @@
+# Copy this file to application.yml and edit to suit.
+#
+# Consult application.default.yml for the full list of configuration
+# settings.
+#
+# The order of precedence is:
+# 1. config/environments/{RAILS_ENV}.rb (deprecated)
+# 2. Section in application.yml corresponding to RAILS_ENV (e.g., development)
+# 3. Section in application.yml called "common"
+# 4. Section in application.default.yml corresponding to RAILS_ENV
+# 5. Section in application.default.yml called "common"
+
+development:
+  # At minimum, you need a nice long randomly generated secret_token here.
+  secret_token: ~
+
+  # You probably also want to point to your API server.
+  arvados_login_base: https://arvados.local:3030/login
+  arvados_v1_base: https://arvados.local:3030/arvados/v1
+  arvados_insecure_https: true
+
+production:
+  # At minimum, you need a nice long randomly generated secret_token here.
+  secret_token: ~
+
+  # You probably also want to point to your API server.
+  arvados_login_base: https://arvados.local:3030/login
+  arvados_v1_base: https://arvados.local:3030/arvados/v1
+  arvados_insecure_https: false
diff --git a/apps/workbench/config/boot.rb b/apps/workbench/config/boot.rb
new file mode 100644 (file)
index 0000000..13513bd
--- /dev/null
@@ -0,0 +1,16 @@
+require 'rubygems'
+
+# Set up gems listed in the Gemfile.
+ENV['BUNDLE_GEMFILE'] ||= File.expand_path('../../Gemfile', __FILE__)
+
+require 'bundler/setup' if File.exists?(ENV['BUNDLE_GEMFILE'])
+
+# Use ARVADOS_API_TOKEN environment variable (if set) in console
+require 'rails'
+module ArvadosApiClientConsoleMode
+  class Railtie < Rails::Railtie
+    console do
+      Thread.current[:arvados_api_token] ||= ENV['ARVADOS_API_TOKEN']
+    end
+  end
+end
diff --git a/apps/workbench/config/database.yml b/apps/workbench/config/database.yml
new file mode 100644 (file)
index 0000000..34a3224
--- /dev/null
@@ -0,0 +1,39 @@
+# SQLite version 3.x
+#   gem install sqlite3
+#
+#   Ensure the SQLite 3 gem is defined in your Gemfile
+#   gem 'sqlite3'
+development:
+  adapter: sqlite3
+  database: db/development.sqlite3
+  pool: 5
+  timeout: 5000
+
+# Warning: The database defined as "test" will be erased and
+# re-generated from your development database when you run "rake".
+# Do not set this db to the same as development or production.
+test:
+  adapter: sqlite3
+  database: db/test.sqlite3
+  pool: 5
+  timeout: 5000
+
+production:
+  adapter: sqlite3
+  database: db/production.sqlite3
+  pool: 5
+  timeout: 5000
+
+# Note: The "diagnostics" database configuration is not actually used.
+diagnostics:
+  adapter: sqlite3
+  database: db/diagnostics.sqlite3
+  pool: 5
+  timeout: 5000
+
+# Note: The "performance" database configuration is not actually used.
+performance:
+  adapter: sqlite3
+  database: db/diagnostics.sqlite3
+  pool: 5
+  timeout: 5000
diff --git a/apps/workbench/config/environment.rb b/apps/workbench/config/environment.rb
new file mode 100644 (file)
index 0000000..c6d8386
--- /dev/null
@@ -0,0 +1,5 @@
+# Load the rails application
+require File.expand_path('../application', __FILE__)
+
+# Initialize the rails application
+ArvadosWorkbench::Application.initialize!
diff --git a/apps/workbench/config/environments/development.rb.example b/apps/workbench/config/environments/development.rb.example
new file mode 100644 (file)
index 0000000..3ea9ec2
--- /dev/null
@@ -0,0 +1,28 @@
+ArvadosWorkbench::Application.configure do
+  # Settings specified here will take precedence over those in config/application.rb
+
+  # In the development environment your application's code is reloaded on
+  # every request. This slows down response time but is perfect for development
+  # since you don't have to restart the web server when you make code changes.
+  config.cache_classes = false
+
+  # Show full error reports and disable caching
+  config.consider_all_requests_local       = true
+  config.action_controller.perform_caching = false
+
+  # Don't care if the mailer can't send
+  config.action_mailer.raise_delivery_errors = false
+
+  # Print deprecation notices to the Rails logger
+  config.active_support.deprecation = :log
+
+  # Only use best-standards-support built into browsers
+  config.action_dispatch.best_standards_support = :builtin
+
+  # Do not compress assets
+  config.assets.js_compressor = false
+
+  # Expands the lines which load the assets
+  config.assets.debug = true
+
+end
diff --git a/apps/workbench/config/environments/production.rb.example b/apps/workbench/config/environments/production.rb.example
new file mode 100644 (file)
index 0000000..492b118
--- /dev/null
@@ -0,0 +1,67 @@
+ArvadosWorkbench::Application.configure do
+  # Settings specified here will take precedence over those in config/application.rb
+
+  # Code is not reloaded between requests
+  config.cache_classes = true
+
+  # Full error reports are disabled and caching is turned on
+  config.consider_all_requests_local       = false
+  config.action_controller.perform_caching = true
+
+  # Disable Rails's static asset server (Apache or nginx will already do this)
+  config.serve_static_assets = false
+
+  # Compress JavaScripts and CSS
+  config.assets.js_compressor = :uglifier
+
+  # Don't fallback to assets pipeline if a precompiled asset is missed
+  config.assets.compile = false
+
+  # Generate digests for assets URLs
+  config.assets.digest = true
+
+  # Defaults to nil and saved in location specified by config.assets.prefix
+  # config.assets.manifest = YOUR_PATH
+
+  # Specifies the header that your server uses for sending files
+  # config.action_dispatch.x_sendfile_header = "X-Sendfile" # for apache
+  # config.action_dispatch.x_sendfile_header = 'X-Accel-Redirect' # for nginx
+
+  # Force all access to the app over SSL, use Strict-Transport-Security, and use secure cookies.
+  # config.force_ssl = true
+
+  # See everything in the log (default is :info)
+  # config.log_level = :debug
+
+  # Prepend all log lines with the following tags
+  # config.log_tags = [ :subdomain, :uuid ]
+
+  # Use a different logger for distributed setups
+  # config.logger = ActiveSupport::TaggedLogging.new(SyslogLogger.new)
+
+  # Use a different cache store in production
+  # config.cache_store = :mem_cache_store
+
+  # Enable serving of images, stylesheets, and JavaScripts from an asset server
+  # config.action_controller.asset_host = "http://assets.example.com"
+
+  # Precompile additional assets (application.js, application.css, and all non-JS/CSS are already added)
+  # config.assets.precompile += %w( search.js )
+
+  # Disable delivery errors, bad email addresses will be ignored
+  # config.action_mailer.raise_delivery_errors = false
+
+  # Enable threaded mode
+  # config.threadsafe!
+
+  # Enable locale fallbacks for I18n (makes lookups for any locale fall back to
+  # the I18n.default_locale when a translation can not be found)
+  config.i18n.fallbacks = true
+
+  # Send deprecation notices to registered listeners
+  config.active_support.deprecation = :notify
+
+  # Log timing data for API transactions
+  config.profiling_enabled = false
+
+end
diff --git a/apps/workbench/config/environments/test.rb b/apps/workbench/config/environments/test.rb
new file mode 120000 (symlink)
index 0000000..f1e9dbf
--- /dev/null
@@ -0,0 +1 @@
+test.rb.example
\ No newline at end of file
diff --git a/apps/workbench/config/environments/test.rb.example b/apps/workbench/config/environments/test.rb.example
new file mode 100644 (file)
index 0000000..fd034d3
--- /dev/null
@@ -0,0 +1,35 @@
+ArvadosWorkbench::Application.configure do
+  # Settings specified here will take precedence over those in config/application.rb
+
+  # The test environment is used exclusively to run your application's
+  # test suite. You never need to work with it otherwise. Remember that
+  # your test database is "scratch space" for the test suite and is wiped
+  # and recreated between test runs. Don't rely on the data there!
+  config.cache_classes = true
+
+  # Configure static asset server for tests with Cache-Control for performance
+  config.serve_static_assets = true
+  config.static_cache_control = "public, max-age=3600"
+
+  # Show full error reports and disable caching
+  config.consider_all_requests_local       = true
+  config.action_controller.perform_caching = false
+
+  # Raise exceptions instead of rendering exception templates
+  config.action_dispatch.show_exceptions = false
+
+  # Disable request forgery protection in test environment
+  config.action_controller.allow_forgery_protection    = false
+
+  # Tell Action Mailer not to deliver emails to the real world.
+  # The :test delivery method accumulates sent emails in the
+  # ActionMailer::Base.deliveries array.
+  config.action_mailer.delivery_method = :test
+
+  # Print deprecation notices to the stderr
+  config.active_support.deprecation = :stderr
+
+  # Log timing data for API transactions
+  config.profiling_enabled = false
+
+end
diff --git a/apps/workbench/config/initializers/backtrace_silencers.rb b/apps/workbench/config/initializers/backtrace_silencers.rb
new file mode 100644 (file)
index 0000000..59385cd
--- /dev/null
@@ -0,0 +1,7 @@
+# Be sure to restart your server when you modify this file.
+
+# You can add backtrace silencers for libraries that you're using but don't wish to see in your backtraces.
+# Rails.backtrace_cleaner.add_silencer { |line| line =~ /my_noisy_library/ }
+
+# You can also remove all the silencers if you're trying to debug a problem that might stem from framework code.
+# Rails.backtrace_cleaner.remove_silencers!
diff --git a/apps/workbench/config/initializers/inflections.rb b/apps/workbench/config/initializers/inflections.rb
new file mode 100644 (file)
index 0000000..8f74496
--- /dev/null
@@ -0,0 +1,22 @@
+# Be sure to restart your server when you modify this file.
+
+# Add new inflection rules using the following format
+# (all these examples are active by default):
+# ActiveSupport::Inflector.inflections do |inflect|
+#   inflect.plural /^(ox)$/i, '\1en'
+#   inflect.singular /^(ox)en/i, '\1'
+#   inflect.irregular 'person', 'people'
+#   inflect.uncountable %w( fish sheep )
+# end
+#
+# These inflection rules are supported but not enabled by default:
+# ActiveSupport::Inflector.inflections do |inflect|
+#   inflect.acronym 'RESTful'
+# end
+
+ActiveSupport::Inflector.inflections do |inflect|
+  inflect.plural /^([Ss]pecimen)$/i, '\1s'
+  inflect.singular /^([Ss]pecimen)s?/i, '\1'
+  inflect.plural /^([Hh]uman)$/i, '\1s'
+  inflect.singular /^([Hh]uman)s?/i, '\1'
+end
diff --git a/apps/workbench/config/initializers/mime_types.rb b/apps/workbench/config/initializers/mime_types.rb
new file mode 100644 (file)
index 0000000..72aca7e
--- /dev/null
@@ -0,0 +1,5 @@
+# Be sure to restart your server when you modify this file.
+
+# Add new mime types for use in respond_to blocks:
+# Mime::Type.register "text/richtext", :rtf
+# Mime::Type.register_alias "text/html", :iphone
diff --git a/apps/workbench/config/initializers/redcloth.rb b/apps/workbench/config/initializers/redcloth.rb
new file mode 100644 (file)
index 0000000..8a02913
--- /dev/null
@@ -0,0 +1,23 @@
+module RedClothArvadosLinkExtension
+
+  class RedClothViewBase < ActionView::Base
+    include ApplicationHelper
+    include ActionView::Helpers::UrlHelper
+    include Rails.application.routes.url_helpers
+
+    def helper_link_to_if_arvados_object(link, opts)
+      link_to_if_arvados_object(link, opts)
+    end
+  end
+
+  def refs_arvados(text)
+    text.gsub!(/"(?!\s)([^"]*\S)":(\S+)/) do
+      text, link = $~[1..2]
+      arvados_link = RedClothViewBase.new.helper_link_to_if_arvados_object(link, { :link_text => text })
+      # if it's not an arvados_link the helper will return the link unprocessed and so we will reconstruct the textile link string so it can be processed normally
+      (arvados_link == link) ? "\"#{text}\":#{link}" : arvados_link
+    end
+  end
+end
+
+RedCloth.send(:include, RedClothArvadosLinkExtension)
diff --git a/apps/workbench/config/initializers/secret_token.rb.example b/apps/workbench/config/initializers/secret_token.rb.example
new file mode 100644 (file)
index 0000000..fb1c1c1
--- /dev/null
@@ -0,0 +1,7 @@
+# Be sure to restart your server when you modify this file.
+
+# Your secret key for verifying the integrity of signed cookies.
+# If you change this key, all old signed cookies will become invalid!
+# Make sure the secret is at least 30 characters and all random,
+# no regular words or you'll be exposed to dictionary attacks.
+ArvadosWorkbench::Application.config.secret_token ||= rand(2**256).to_s(36)
diff --git a/apps/workbench/config/initializers/session_store.rb b/apps/workbench/config/initializers/session_store.rb
new file mode 100644 (file)
index 0000000..2d49dca
--- /dev/null
@@ -0,0 +1,8 @@
+# Be sure to restart your server when you modify this file.
+
+ArvadosWorkbench::Application.config.session_store :cookie_store, key: '_arvados_workbench_session'
+
+# Use the database for sessions instead of the cookie-based default,
+# which shouldn't be used to store highly confidential information
+# (create the session table with "rails generate session_migration")
+# ArvadosWorkbench::Application.config.session_store :active_record_store
diff --git a/apps/workbench/config/initializers/wrap_parameters.rb b/apps/workbench/config/initializers/wrap_parameters.rb
new file mode 100644 (file)
index 0000000..999df20
--- /dev/null
@@ -0,0 +1,14 @@
+# Be sure to restart your server when you modify this file.
+#
+# This file contains settings for ActionController::ParamsWrapper which
+# is enabled by default.
+
+# Enable parameter wrapping for JSON. You can disable this by setting :format to an empty array.
+ActiveSupport.on_load(:action_controller) do
+  wrap_parameters format: [:json]
+end
+
+# Disable root element in JSON by default.
+ActiveSupport.on_load(:active_record) do
+  self.include_root_in_json = false
+end
diff --git a/apps/workbench/config/load_config.rb b/apps/workbench/config/load_config.rb
new file mode 100644 (file)
index 0000000..51fc81a
--- /dev/null
@@ -0,0 +1,49 @@
+# This file must be loaded _after_ secret_token.rb if secret_token is
+# defined there instead of in config/application.yml.
+
+$application_config = {}
+
+%w(application.default application).each do |cfgfile|
+  path = "#{::Rails.root.to_s}/config/#{cfgfile}.yml"
+  if File.exists? path
+    yaml = ERB.new(IO.read path).result(binding)
+    confs = YAML.load(yaml)
+    $application_config.merge!(confs['common'] || {})
+    $application_config.merge!(confs[::Rails.env.to_s] || {})
+  end
+end
+
+ArvadosWorkbench::Application.configure do
+  nils = []
+  $application_config.each do |k, v|
+    # "foo.bar: baz" --> { config.foo.bar = baz }
+    cfg = config
+    ks = k.split '.'
+    k = ks.pop
+    ks.each do |kk|
+      cfg = cfg.send(kk)
+    end
+    if cfg.respond_to?(k.to_sym) and !cfg.send(k).nil?
+      # Config must have been set already in environments/*.rb.
+      #
+      # After config files have been migrated, this mechanism should
+      # be deprecated, then removed.
+    elsif v.nil?
+      # Config variables are not allowed to be nil. Make a "naughty"
+      # list, and present it below.
+      nils << k
+    else
+      cfg.send "#{k}=", v
+    end
+  end
+  if !nils.empty?
+    raise <<EOS
+Refusing to start in #{::Rails.env.to_s} mode with missing configuration.
+
+The following configuration settings must be specified in
+config/application.yml:
+* #{nils.join "\n* "}
+
+EOS
+  end
+end
diff --git a/apps/workbench/config/locales/en.bootstrap.yml b/apps/workbench/config/locales/en.bootstrap.yml
new file mode 100644 (file)
index 0000000..664de2b
--- /dev/null
@@ -0,0 +1,18 @@
+# Sample localization file for English. Add more files in this directory for other locales.
+# See https://github.com/svenfuchs/rails-i18n/tree/master/rails%2Flocale for starting points.
+
+en:
+  helpers:
+    actions: "Actions"
+    links:
+      back: "Back"
+      cancel: "Cancel"
+      confirm: "Are you sure?"
+      destroy: "Delete"
+      new: "New"
+      edit: "Edit"
+    titles:
+      edit: "Edit"
+      save: "Save"
+      new: "New"
+      delete: "Delete"
diff --git a/apps/workbench/config/locales/en.yml b/apps/workbench/config/locales/en.yml
new file mode 100644 (file)
index 0000000..179c14c
--- /dev/null
@@ -0,0 +1,5 @@
+# Sample localization file for English. Add more files in this directory for other locales.
+# See https://github.com/svenfuchs/rails-i18n/tree/master/rails%2Flocale for starting points.
+
+en:
+  hello: "Hello world"
diff --git a/apps/workbench/config/piwik.yml.example b/apps/workbench/config/piwik.yml.example
new file mode 100644 (file)
index 0000000..f70e1f1
--- /dev/null
@@ -0,0 +1,33 @@
+# Configuration:
+# 
+# disabled
+#   false if tracking tag should be shown
+# use_async
+#   Set to true if you want to use asynchronous tracking
+# url
+#   The url of your piwik instance (e.g. localhost/piwik/
+# id_site
+#   The id of your website inside Piwik
+#
+production:
+  piwik:
+    id_site: 1
+    url: localhost
+    use_async: false
+    disabled: false
+
+development:
+  piwik:
+    id_site: 1
+    url: localhost
+    disabled: true
+    use_async: false
+    hostname: localhost
+
+test:
+  piwik:
+    id_site: 1
+    url: localhost
+    disabled: true
+    use_async: false
+    hostname: localhost
diff --git a/apps/workbench/config/routes.rb b/apps/workbench/config/routes.rb
new file mode 100644 (file)
index 0000000..86cdc38
--- /dev/null
@@ -0,0 +1,92 @@
+ArvadosWorkbench::Application.routes.draw do
+  themes_for_rails
+
+  resources :keep_disks
+  resources :keep_services
+  resources :user_agreements do
+    post 'sign', on: :collection
+    get 'signatures', on: :collection
+  end
+  get '/user_agreements/signatures' => 'user_agreements#signatures'
+  get "users/setup_popup" => 'users#setup_popup', :as => :setup_user_popup
+  get "users/setup" => 'users#setup', :as => :setup_user
+  get "report_issue_popup" => 'actions#report_issue_popup', :as => :report_issue_popup
+  post "report_issue" => 'actions#report_issue', :as => :report_issue
+  resources :nodes
+  resources :humans
+  resources :traits
+  resources :api_client_authorizations
+  resources :repositories
+  resources :virtual_machines
+  resources :authorized_keys
+  resources :job_tasks
+  resources :jobs do
+    post 'cancel', :on => :member
+    get 'logs', :on => :member
+  end
+  match '/logout' => 'sessions#destroy', via: [:get, :post]
+  get '/logged_out' => 'sessions#index'
+  resources :users do
+    get 'choose', :on => :collection
+    get 'home', :on => :member
+    get 'welcome', :on => :collection
+    get 'inactive', :on => :collection
+    get 'activity', :on => :collection
+    get 'storage', :on => :collection
+    post 'sudo', :on => :member
+    post 'unsetup', :on => :member
+    get 'setup_popup', :on => :member
+    get 'profile', :on => :member
+  end
+  get '/manage_account' => 'users#manage_account'
+  get "/add_ssh_key_popup" => 'users#add_ssh_key_popup', :as => :add_ssh_key_popup
+  get "/add_ssh_key" => 'users#add_ssh_key', :as => :add_ssh_key
+  resources :logs
+  resources :factory_jobs
+  resources :uploaded_datasets
+  resources :groups do
+    get 'choose', on: :collection
+  end
+  resources :specimens
+  resources :pipeline_templates do
+    get 'choose', on: :collection
+  end
+  resources :pipeline_instances do
+    get 'compare', on: :collection
+    post 'copy', on: :member
+  end
+  resources :links
+  get '/collections/graph' => 'collections#graph'
+  resources :collections do
+    post 'set_persistent', on: :member
+    get 'sharing_popup', :on => :member
+    post 'share', :on => :member
+    post 'unshare', :on => :member
+    get 'choose', on: :collection
+  end
+  get('/collections/download/:uuid/:reader_token/*file' => 'collections#show_file',
+      format: false)
+  get '/collections/download/:uuid/:reader_token' => 'collections#show_file_links'
+  get '/collections/:uuid/*file' => 'collections#show_file', :format => false
+  resources :projects do
+    match 'remove/:item_uuid', on: :member, via: :delete, action: :remove_item
+    match 'remove_items', on: :member, via: :delete, action: :remove_items
+    get 'choose', on: :collection
+    post 'share_with', on: :member
+    get 'tab_counts', on: :member
+  end
+  resources :search do
+    get 'choose', :on => :collection
+  end
+
+  post 'actions' => 'actions#post'
+  get 'actions' => 'actions#show'
+  get 'websockets' => 'websocket#index'
+  post "combine_selected" => 'actions#combine_selected_files_into_collection'
+
+  root :to => 'projects#index'
+
+  # Send unroutable requests to an arbitrary controller
+  # (ends up at ApplicationController#render_not_found)
+  match '*a', to: 'links#render_not_found', via: [:get, :post]
+end
diff --git a/apps/workbench/db/schema.rb b/apps/workbench/db/schema.rb
new file mode 100644 (file)
index 0000000..b5e6a79
--- /dev/null
@@ -0,0 +1,16 @@
+# encoding: UTF-8
+# This file is auto-generated from the current state of the database. Instead
+# of editing this file, please use the migrations feature of Active Record to
+# incrementally modify your database, and then regenerate this schema definition.
+#
+# Note that this schema.rb definition is the authoritative source for your
+# database schema. If you need to create the application database on another
+# system, you should be using db:schema:load, not running all the migrations
+# from scratch. The latter is a flawed and unsustainable approach (the more migrations
+# you'll amass, the slower it'll run and the greater likelihood for issues).
+#
+# It's strongly recommended to check this file into your version control system.
+
+ActiveRecord::Schema.define(:version => 0) do
+
+end
diff --git a/apps/workbench/db/seeds.rb b/apps/workbench/db/seeds.rb
new file mode 100644 (file)
index 0000000..4edb1e8
--- /dev/null
@@ -0,0 +1,7 @@
+# This file should contain all the record creation needed to seed the database with its default values.
+# The data can then be loaded with the rake db:seed (or created alongside the db with db:setup).
+#
+# Examples:
+#
+#   cities = City.create([{ name: 'Chicago' }, { name: 'Copenhagen' }])
+#   Mayor.create(name: 'Emanuel', city: cities.first)
diff --git a/apps/workbench/lib/assets/.gitkeep b/apps/workbench/lib/assets/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/workbench/lib/tasks/.gitkeep b/apps/workbench/lib/tasks/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/workbench/lib/tasks/config_check.rake b/apps/workbench/lib/tasks/config_check.rake
new file mode 100644 (file)
index 0000000..ec1ae7b
--- /dev/null
@@ -0,0 +1,19 @@
+namespace :config do
+  desc 'Ensure site configuration has all required settings'
+  task check: :environment do
+    $application_config.sort.each do |k, v|
+      if ENV.has_key?('QUIET') then
+        # Make sure we still check for the variable to exist
+        eval("Rails.configuration.#{k}")
+      else
+        if /(password|secret)/.match(k) then
+          # Make sure we still check for the variable to exist, but don't print the value
+          eval("Rails.configuration.#{k}")
+          $stderr.puts "%-32s %s" % [k, '*********']
+        else
+          $stderr.puts "%-32s %s" % [k, eval("Rails.configuration.#{k}")]
+        end
+      end
+    end
+  end
+end
diff --git a/apps/workbench/log/.gitkeep b/apps/workbench/log/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/workbench/public/404.html b/apps/workbench/public/404.html
new file mode 100644 (file)
index 0000000..9a48320
--- /dev/null
@@ -0,0 +1,26 @@
+<!DOCTYPE html>
+<html>
+<head>
+  <title>The page you were looking for doesn't exist (404)</title>
+  <style type="text/css">
+    body { background-color: #fff; color: #666; text-align: center; font-family: arial, sans-serif; }
+    div.dialog {
+      width: 25em;
+      padding: 0 4em;
+      margin: 4em auto 0 auto;
+      border: 1px solid #ccc;
+      border-right-color: #999;
+      border-bottom-color: #999;
+    }
+    h1 { font-size: 100%; color: #f00; line-height: 1.5em; }
+  </style>
+</head>
+
+<body>
+  <!-- This file lives in public/404.html -->
+  <div class="dialog">
+    <h1>The page you were looking for doesn't exist.</h1>
+    <p>You may have mistyped the address or the page may have moved.</p>
+  </div>
+</body>
+</html>
diff --git a/apps/workbench/public/422.html b/apps/workbench/public/422.html
new file mode 100644 (file)
index 0000000..83660ab
--- /dev/null
@@ -0,0 +1,26 @@
+<!DOCTYPE html>
+<html>
+<head>
+  <title>The change you wanted was rejected (422)</title>
+  <style type="text/css">
+    body { background-color: #fff; color: #666; text-align: center; font-family: arial, sans-serif; }
+    div.dialog {
+      width: 25em;
+      padding: 0 4em;
+      margin: 4em auto 0 auto;
+      border: 1px solid #ccc;
+      border-right-color: #999;
+      border-bottom-color: #999;
+    }
+    h1 { font-size: 100%; color: #f00; line-height: 1.5em; }
+  </style>
+</head>
+
+<body>
+  <!-- This file lives in public/422.html -->
+  <div class="dialog">
+    <h1>The change you wanted was rejected.</h1>
+    <p>Maybe you tried to change something you didn't have access to.</p>
+  </div>
+</body>
+</html>
diff --git a/apps/workbench/public/500.html b/apps/workbench/public/500.html
new file mode 100644 (file)
index 0000000..f3648a0
--- /dev/null
@@ -0,0 +1,25 @@
+<!DOCTYPE html>
+<html>
+<head>
+  <title>We're sorry, but something went wrong (500)</title>
+  <style type="text/css">
+    body { background-color: #fff; color: #666; text-align: center; font-family: arial, sans-serif; }
+    div.dialog {
+      width: 25em;
+      padding: 0 4em;
+      margin: 4em auto 0 auto;
+      border: 1px solid #ccc;
+      border-right-color: #999;
+      border-bottom-color: #999;
+    }
+    h1 { font-size: 100%; color: #f00; line-height: 1.5em; }
+  </style>
+</head>
+
+<body>
+  <!-- This file lives in public/500.html -->
+  <div class="dialog">
+    <h1>We're sorry, but something went wrong.</h1>
+  </div>
+</body>
+</html>
diff --git a/apps/workbench/public/d3.v3.min.js b/apps/workbench/public/d3.v3.min.js
new file mode 100644 (file)
index 0000000..cba27c9
--- /dev/null
@@ -0,0 +1,4 @@
+(function(){function t(t){return t.target}function n(t){return t.source}function e(t,n){try{for(var e in n)Object.defineProperty(t.prototype,e,{value:n[e],enumerable:!1})}catch(r){t.prototype=n}}function r(t){for(var n=-1,e=t.length,r=[];e>++n;)r.push(t[n]);return r}function i(t){return Array.prototype.slice.call(t)}function u(){}function a(t){return t}function o(){return!0}function c(t){return"function"==typeof t?t:function(){return t}}function l(t,n,e){return function(){var r=e.apply(n,arguments);return arguments.length?t:r}}function s(t){return null!=t&&!isNaN(t)}function f(t){return t.length}function h(t){return t.trim().replace(/\s+/g," ")}function d(t){for(var n=1;t*n%1;)n*=10;return n}function g(t){return 1===t.length?function(n,e){t(null==n?e:null)}:t}function p(t){return t.responseText}function m(t){return JSON.parse(t.responseText)}function v(t){var n=document.createRange();return n.selectNode(document.body),n.createContextualFragment(t.responseText)}function y(t){return t.responseXML}function M(){}function b(t){function n(){for(var n,r=e,i=-1,u=r.length;u>++i;)(n=r[i].on)&&n.apply(this,arguments);return t}var e=[],r=new u;return n.on=function(n,i){var u,a=r.get(n);return 2>arguments.length?a&&a.on:(a&&(a.on=null,e=e.slice(0,u=e.indexOf(a)).concat(e.slice(u+1)),r.remove(n)),i&&e.push(r.set(n,{on:i})),t)},n}function x(t,n){return n-(t?1+Math.floor(Math.log(t+Math.pow(10,1+Math.floor(Math.log(t)/Math.LN10)-n))/Math.LN10):1)}function _(t){return t+""}function w(t,n){var e=Math.pow(10,3*Math.abs(8-n));return{scale:n>8?function(t){return t/e}:function(t){return t*e},symbol:t}}function S(t){return function(n){return 0>=n?0:n>=1?1:t(n)}}function k(t){return function(n){return 1-t(1-n)}}function E(t){return function(n){return.5*(.5>n?t(2*n):2-t(2-2*n))}}function A(t){return t*t}function N(t){return t*t*t}function T(t){if(0>=t)return 0;if(t>=1)return 1;var n=t*t,e=n*t;return 4*(.5>t?e:3*(t-n)+e-.75)}function q(t){return function(n){return Math.pow(n,t)}}function 
C(t){return 1-Math.cos(t*Ru/2)}function z(t){return Math.pow(2,10*(t-1))}function D(t){return 1-Math.sqrt(1-t*t)}function L(t,n){var e;return 2>arguments.length&&(n=.45),arguments.length?e=n/(2*Ru)*Math.asin(1/t):(t=1,e=n/4),function(r){return 1+t*Math.pow(2,10*-r)*Math.sin(2*(r-e)*Ru/n)}}function F(t){return t||(t=1.70158),function(n){return n*n*((t+1)*n-t)}}function H(t){return 1/2.75>t?7.5625*t*t:2/2.75>t?7.5625*(t-=1.5/2.75)*t+.75:2.5/2.75>t?7.5625*(t-=2.25/2.75)*t+.9375:7.5625*(t-=2.625/2.75)*t+.984375}function R(){d3.event.stopPropagation(),d3.event.preventDefault()}function P(){for(var t,n=d3.event;t=n.sourceEvent;)n=t;return n}function j(t){for(var n=new M,e=0,r=arguments.length;r>++e;)n[arguments[e]]=b(n);return n.of=function(e,r){return function(i){try{var u=i.sourceEvent=d3.event;i.target=t,d3.event=i,n[i.type].apply(e,r)}finally{d3.event=u}}},n}function O(t){var n=[t.a,t.b],e=[t.c,t.d],r=U(n),i=Y(n,e),u=U(I(e,n,-i))||0;n[0]*e[1]<e[0]*n[1]&&(n[0]*=-1,n[1]*=-1,r*=-1,i*=-1),this.rotate=(r?Math.atan2(n[1],n[0]):Math.atan2(-e[0],e[1]))*Ou,this.translate=[t.e,t.f],this.scale=[r,u],this.skew=u?Math.atan2(i,u)*Ou:0}function Y(t,n){return t[0]*n[0]+t[1]*n[1]}function U(t){var n=Math.sqrt(Y(t,t));return n&&(t[0]/=n,t[1]/=n),n}function I(t,n,e){return t[0]+=e*n[0],t[1]+=e*n[1],t}function V(t){return"transform"==t?d3.interpolateTransform:d3.interpolate}function X(t,n){return n=n-(t=+t)?1/(n-t):0,function(e){return(e-t)*n}}function Z(t,n){return n=n-(t=+t)?1/(n-t):0,function(e){return Math.max(0,Math.min(1,(e-t)*n))}}function B(){}function $(t,n,e){return new J(t,n,e)}function J(t,n,e){this.r=t,this.g=n,this.b=e}function G(t){return 16>t?"0"+Math.max(0,t).toString(16):Math.min(255,t).toString(16)}function K(t,n,e){var r,i,u,a=0,o=0,c=0;if(r=/([a-z]+)\((.*)\)/i.exec(t))switch(i=r[2].split(","),r[1]){case"hsl":return e(parseFloat(i[0]),parseFloat(i[1])/100,parseFloat(i[2])/100);case"rgb":return 
n(nn(i[0]),nn(i[1]),nn(i[2]))}return(u=aa.get(t))?n(u.r,u.g,u.b):(null!=t&&"#"===t.charAt(0)&&(4===t.length?(a=t.charAt(1),a+=a,o=t.charAt(2),o+=o,c=t.charAt(3),c+=c):7===t.length&&(a=t.substring(1,3),o=t.substring(3,5),c=t.substring(5,7)),a=parseInt(a,16),o=parseInt(o,16),c=parseInt(c,16)),n(a,o,c))}function W(t,n,e){var r,i,u=Math.min(t/=255,n/=255,e/=255),a=Math.max(t,n,e),o=a-u,c=(a+u)/2;return o?(i=.5>c?o/(a+u):o/(2-a-u),r=t==a?(n-e)/o+(e>n?6:0):n==a?(e-t)/o+2:(t-n)/o+4,r*=60):i=r=0,en(r,i,c)}function Q(t,n,e){t=tn(t),n=tn(n),e=tn(e);var r=gn((.4124564*t+.3575761*n+.1804375*e)/sa),i=gn((.2126729*t+.7151522*n+.072175*e)/fa),u=gn((.0193339*t+.119192*n+.9503041*e)/ha);return ln(116*i-16,500*(r-i),200*(i-u))}function tn(t){return.04045>=(t/=255)?t/12.92:Math.pow((t+.055)/1.055,2.4)}function nn(t){var n=parseFloat(t);return"%"===t.charAt(t.length-1)?Math.round(2.55*n):n}function en(t,n,e){return new rn(t,n,e)}function rn(t,n,e){this.h=t,this.s=n,this.l=e}function un(t,n,e){function r(t){return t>360?t-=360:0>t&&(t+=360),60>t?u+(a-u)*t/60:180>t?a:240>t?u+(a-u)*(240-t)/60:u}function i(t){return Math.round(255*r(t))}var u,a;return t%=360,0>t&&(t+=360),n=0>n?0:n>1?1:n,e=0>e?0:e>1?1:e,a=.5>=e?e*(1+n):e+n-e*n,u=2*e-a,$(i(t+120),i(t),i(t-120))}function an(t,n,e){return new on(t,n,e)}function on(t,n,e){this.h=t,this.c=n,this.l=e}function cn(t,n,e){return ln(e,Math.cos(t*=ju)*n,Math.sin(t)*n)}function ln(t,n,e){return new sn(t,n,e)}function sn(t,n,e){this.l=t,this.a=n,this.b=e}function fn(t,n,e){var r=(t+16)/116,i=r+n/500,u=r-e/200;return i=dn(i)*sa,r=dn(r)*fa,u=dn(u)*ha,$(pn(3.2404542*i-1.5371385*r-.4985314*u),pn(-.969266*i+1.8760108*r+.041556*u),pn(.0556434*i-.2040259*r+1.0572252*u))}function hn(t,n,e){return an(180*(Math.atan2(e,n)/Ru),Math.sqrt(n*n+e*e),t)}function dn(t){return t>.206893034?t*t*t:(t-4/29)/7.787037}function gn(t){return t>.008856?Math.pow(t,1/3):7.787037*t+4/29}function pn(t){return 
Math.round(255*(.00304>=t?12.92*t:1.055*Math.pow(t,1/2.4)-.055))}function mn(t){return Iu(t,Ma),t}function vn(t){return function(){return ga(t,this)}}function yn(t){return function(){return pa(t,this)}}function Mn(t,n){function e(){this.removeAttribute(t)}function r(){this.removeAttributeNS(t.space,t.local)}function i(){this.setAttribute(t,n)}function u(){this.setAttributeNS(t.space,t.local,n)}function a(){var e=n.apply(this,arguments);null==e?this.removeAttribute(t):this.setAttribute(t,e)}function o(){var e=n.apply(this,arguments);null==e?this.removeAttributeNS(t.space,t.local):this.setAttributeNS(t.space,t.local,e)}return t=d3.ns.qualify(t),null==n?t.local?r:e:"function"==typeof n?t.local?o:a:t.local?u:i}function bn(t){return RegExp("(?:^|\\s+)"+d3.requote(t)+"(?:\\s+|$)","g")}function xn(t,n){function e(){for(var e=-1;i>++e;)t[e](this,n)}function r(){for(var e=-1,r=n.apply(this,arguments);i>++e;)t[e](this,r)}t=t.trim().split(/\s+/).map(_n);var i=t.length;return"function"==typeof n?r:e}function _n(t){var n=bn(t);return function(e,r){if(i=e.classList)return r?i.add(t):i.remove(t);var i=e.className,u=null!=i.baseVal,a=u?i.baseVal:i;r?(n.lastIndex=0,n.test(a)||(a=h(a+" "+t),u?i.baseVal=a:e.className=a)):a&&(a=h(a.replace(n," ")),u?i.baseVal=a:e.className=a)}}function wn(t,n,e){function r(){this.style.removeProperty(t)}function i(){this.style.setProperty(t,n,e)}function u(){var r=n.apply(this,arguments);null==r?this.style.removeProperty(t):this.style.setProperty(t,r,e)}return null==n?r:"function"==typeof n?u:i}function Sn(t,n){function e(){delete this[t]}function r(){this[t]=n}function i(){var e=n.apply(this,arguments);null==e?delete this[t]:this[t]=e}return null==n?e:"function"==typeof n?i:r}function kn(t){return{__data__:t}}function En(t){return function(){return ya(this,t)}}function An(t){return arguments.length||(t=d3.ascending),function(n,e){return t(n&&n.__data__,e&&e.__data__)}}function Nn(t,n,e){function r(){var 
n=this[u];n&&(this.removeEventListener(t,n,n.$),delete this[u])}function i(){function i(t){var e=d3.event;d3.event=t,o[0]=a.__data__;try{n.apply(a,o)}finally{d3.event=e}}var a=this,o=Yu(arguments);r.call(this),this.addEventListener(t,this[u]=i,i.$=e),i._=n}var u="__on"+t,a=t.indexOf(".");return a>0&&(t=t.substring(0,a)),n?i:r}function Tn(t,n){for(var e=0,r=t.length;r>e;e++)for(var i,u=t[e],a=0,o=u.length;o>a;a++)(i=u[a])&&n(i,a,e);return t}function qn(t){return Iu(t,xa),t}function Cn(t,n){return Iu(t,wa),t.id=n,t}function zn(t,n,e,r){var i=t.__transition__||(t.__transition__={active:0,count:0}),a=i[e];if(!a){var o=r.time;return a=i[e]={tween:new u,event:d3.dispatch("start","end"),time:o,ease:r.ease,delay:r.delay,duration:r.duration},++i.count,d3.timer(function(r){function u(r){return i.active>e?l():(i.active=e,h.start.call(t,s,n),a.tween.forEach(function(e,r){(r=r.call(t,s,n))&&p.push(r)}),c(r)||d3.timer(c,0,o),1)}function c(r){if(i.active!==e)return l();for(var u=(r-d)/g,a=f(u),o=p.length;o>0;)p[--o].call(t,a);return u>=1?(l(),h.end.call(t,s,n),1):void 0}function l(){return--i.count?delete i[e]:delete t.__transition__,1}var s=t.__data__,f=a.ease,h=a.event,d=a.delay,g=a.duration,p=[];return r>=d?u(r):d3.timer(u,d,o),1},0,o),a}}function Dn(t){return null==t&&(t=""),function(){this.textContent=t}}function Ln(t,n,e,r){var i=t.id;return Tn(t,"function"==typeof e?function(t,u,a){t.__transition__[i].tween.set(n,r(e.call(t,t.__data__,u,a)))}:(e=r(e),function(t){t.__transition__[i].tween.set(n,e)}))}function Fn(){for(var t,n=Date.now(),e=qa;e;)t=n-e.then,t>=e.delay&&(e.flush=e.callback(t)),e=e.next;var r=Hn()-n;r>24?(isFinite(r)&&(clearTimeout(Aa),Aa=setTimeout(Fn,r)),Ea=0):(Ea=1,Ca(Fn))}function Hn(){for(var t=null,n=qa,e=1/0;n;)n.flush?(delete Ta[n.callback.id],n=t?t.next=n.next:qa=n.next):(e=Math.min(e,n.then+n.delay),n=(t=n).next);return e}function Rn(t,n){var e=t.ownerSVGElement||t;if(e.createSVGPoint){var 
r=e.createSVGPoint();if(0>za&&(window.scrollX||window.scrollY)){e=d3.select(document.body).append("svg").style("position","absolute").style("top",0).style("left",0);var i=e[0][0].getScreenCTM();za=!(i.f||i.e),e.remove()}return za?(r.x=n.pageX,r.y=n.pageY):(r.x=n.clientX,r.y=n.clientY),r=r.matrixTransform(t.getScreenCTM().inverse()),[r.x,r.y]}var u=t.getBoundingClientRect();return[n.clientX-u.left-t.clientLeft,n.clientY-u.top-t.clientTop]}function Pn(){}function jn(t){var n=t[0],e=t[t.length-1];return e>n?[n,e]:[e,n]}function On(t){return t.rangeExtent?t.rangeExtent():jn(t.range())}function Yn(t,n){var e,r=0,i=t.length-1,u=t[r],a=t[i];return u>a&&(e=r,r=i,i=e,e=u,u=a,a=e),(n=n(a-u))&&(t[r]=n.floor(u),t[i]=n.ceil(a)),t}function Un(){return Math}function In(t,n,e,r){function i(){var i=Math.min(t.length,n.length)>2?Gn:Jn,c=r?Z:X;return a=i(t,n,c,e),o=i(n,t,c,d3.interpolate),u}function u(t){return a(t)}var a,o;return u.invert=function(t){return o(t)},u.domain=function(n){return arguments.length?(t=n.map(Number),i()):t},u.range=function(t){return arguments.length?(n=t,i()):n},u.rangeRound=function(t){return u.range(t).interpolate(d3.interpolateRound)},u.clamp=function(t){return arguments.length?(r=t,i()):r},u.interpolate=function(t){return arguments.length?(e=t,i()):e},u.ticks=function(n){return Bn(t,n)},u.tickFormat=function(n){return $n(t,n)},u.nice=function(){return Yn(t,Xn),i()},u.copy=function(){return In(t,n,e,r)},i()}function Vn(t,n){return d3.rebind(t,n,"range","rangeRound","interpolate","clamp")}function Xn(t){return t=Math.pow(10,Math.round(Math.log(t)/Math.LN10)-1),t&&{floor:function(n){return Math.floor(n/t)*t},ceil:function(n){return Math.ceil(n/t)*t}}}function Zn(t,n){var e=jn(t),r=e[1]-e[0],i=Math.pow(10,Math.floor(Math.log(r/n)/Math.LN10)),u=n/r*i;return.15>=u?i*=10:.35>=u?i*=5:.75>=u&&(i*=2),e[0]=Math.ceil(e[0]/i)*i,e[1]=Math.floor(e[1]/i)*i+.5*i,e[2]=i,e}function Bn(t,n){return d3.range.apply(d3,Zn(t,n))}function $n(t,n){return 
d3.format(",."+Math.max(0,-Math.floor(Math.log(Zn(t,n)[2])/Math.LN10+.01))+"f")}function Jn(t,n,e,r){var i=e(t[0],t[1]),u=r(n[0],n[1]);return function(t){return u(i(t))}}function Gn(t,n,e,r){var i=[],u=[],a=0,o=Math.min(t.length,n.length)-1;for(t[o]<t[0]&&(t=t.slice().reverse(),n=n.slice().reverse());o>=++a;)i.push(e(t[a-1],t[a])),u.push(r(n[a-1],n[a]));return function(n){var e=d3.bisect(t,n,1,o)-1;return u[e](i[e](n))}}function Kn(t,n){function e(e){return t(n(e))}var r=n.pow;return e.invert=function(n){return r(t.invert(n))},e.domain=function(i){return arguments.length?(n=0>i[0]?Qn:Wn,r=n.pow,t.domain(i.map(n)),e):t.domain().map(r)},e.nice=function(){return t.domain(Yn(t.domain(),Un)),e},e.ticks=function(){var e=jn(t.domain()),i=[];if(e.every(isFinite)){var u=Math.floor(e[0]),a=Math.ceil(e[1]),o=r(e[0]),c=r(e[1]);if(n===Qn)for(i.push(r(u));a>u++;)for(var l=9;l>0;l--)i.push(r(u)*l);else{for(;a>u;u++)for(var l=1;10>l;l++)i.push(r(u)*l);i.push(r(u))}for(u=0;o>i[u];u++);for(a=i.length;i[a-1]>c;a--);i=i.slice(u,a)}return i},e.tickFormat=function(t,i){if(2>arguments.length&&(i=Da),!arguments.length)return i;var u,a=Math.max(.1,t/e.ticks().length),o=n===Qn?(u=-1e-12,Math.floor):(u=1e-12,Math.ceil);return function(t){return a>=t/r(o(n(t)+u))?i(t):""}},e.copy=function(){return Kn(t.copy(),n)},Vn(e,t)}function Wn(t){return Math.log(0>t?0:t)/Math.LN10}function Qn(t){return-Math.log(t>0?0:-t)/Math.LN10}function te(t,n){function e(n){return t(r(n))}var r=ne(n),i=ne(1/n);return e.invert=function(n){return i(t.invert(n))},e.domain=function(n){return arguments.length?(t.domain(n.map(r)),e):t.domain().map(i)},e.ticks=function(t){return Bn(e.domain(),t)},e.tickFormat=function(t){return $n(e.domain(),t)},e.nice=function(){return e.domain(Yn(e.domain(),Xn))},e.exponent=function(t){if(!arguments.length)return n;var u=e.domain();return r=ne(n=t),i=ne(1/n),e.domain(u)},e.copy=function(){return te(t.copy(),n)},Vn(e,t)}function ne(t){return function(n){return 
0>n?-Math.pow(-n,t):Math.pow(n,t)}}function ee(t,n){function e(n){return a[((i.get(n)||i.set(n,t.push(n)))-1)%a.length]}function r(n,e){return d3.range(t.length).map(function(t){return n+e*t})}var i,a,o;return e.domain=function(r){if(!arguments.length)return t;t=[],i=new u;for(var a,o=-1,c=r.length;c>++o;)i.has(a=r[o])||i.set(a,t.push(a));return e[n.t].apply(e,n.a)},e.range=function(t){return arguments.length?(a=t,o=0,n={t:"range",a:arguments},e):a},e.rangePoints=function(i,u){2>arguments.length&&(u=0);var c=i[0],l=i[1],s=(l-c)/(Math.max(1,t.length-1)+u);return a=r(2>t.length?(c+l)/2:c+s*u/2,s),o=0,n={t:"rangePoints",a:arguments},e},e.rangeBands=function(i,u,c){2>arguments.length&&(u=0),3>arguments.length&&(c=u);var l=i[1]<i[0],s=i[l-0],f=i[1-l],h=(f-s)/(t.length-u+2*c);return a=r(s+h*c,h),l&&a.reverse(),o=h*(1-u),n={t:"rangeBands",a:arguments},e},e.rangeRoundBands=function(i,u,c){2>arguments.length&&(u=0),3>arguments.length&&(c=u);var l=i[1]<i[0],s=i[l-0],f=i[1-l],h=Math.floor((f-s)/(t.length-u+2*c)),d=f-s-(t.length-u)*h;return a=r(s+Math.round(d/2),h),l&&a.reverse(),o=Math.round(h*(1-u)),n={t:"rangeRoundBands",a:arguments},e},e.rangeBand=function(){return o},e.rangeExtent=function(){return jn(n.a[0])},e.copy=function(){return ee(t,n)},e.domain(t)}function re(t,n){function e(){var e=0,u=n.length;for(i=[];u>++e;)i[e-1]=d3.quantile(t,e/u);return r}function r(t){return isNaN(t=+t)?0/0:n[d3.bisect(i,t)]}var i;return r.domain=function(n){return arguments.length?(t=n.filter(function(t){return!isNaN(t)}).sort(d3.ascending),e()):t},r.range=function(t){return arguments.length?(n=t,e()):n},r.quantiles=function(){return i},r.copy=function(){return re(t,n)},e()}function ie(t,n,e){function r(n){return e[Math.max(0,Math.min(a,Math.floor(u*(n-t))))]}function i(){return u=e.length/(n-t),a=e.length-1,r}var u,a;return r.domain=function(e){return arguments.length?(t=+e[0],n=+e[e.length-1],i()):[t,n]},r.range=function(t){return arguments.length?(e=t,i()):e},r.copy=function(){return 
ie(t,n,e)},i()}function ue(t,n){function e(e){return n[d3.bisect(t,e)]}return e.domain=function(n){return arguments.length?(t=n,e):t},e.range=function(t){return arguments.length?(n=t,e):n},e.copy=function(){return ue(t,n)},e}function ae(t){function n(t){return+t}return n.invert=n,n.domain=n.range=function(e){return arguments.length?(t=e.map(n),n):t},n.ticks=function(n){return Bn(t,n)},n.tickFormat=function(n){return $n(t,n)},n.copy=function(){return ae(t)},n}function oe(t){return t.innerRadius}function ce(t){return t.outerRadius}function le(t){return t.startAngle}function se(t){return t.endAngle}function fe(t){function n(n){function a(){s.push("M",u(t(f),l))}for(var o,s=[],f=[],h=-1,d=n.length,g=c(e),p=c(r);d>++h;)i.call(this,o=n[h],h)?f.push([+g.call(this,o,h),+p.call(this,o,h)]):f.length&&(a(),f=[]);return f.length&&a(),s.length?s.join(""):null}var e=he,r=de,i=o,u=ge,a=u.key,l=.7;return n.x=function(t){return arguments.length?(e=t,n):e},n.y=function(t){return arguments.length?(r=t,n):r},n.defined=function(t){return arguments.length?(i=t,n):i},n.interpolate=function(t){return arguments.length?(a="function"==typeof t?u=t:(u=Oa.get(t)||ge).key,n):a},n.tension=function(t){return arguments.length?(l=t,n):l},n}function he(t){return t[0]}function de(t){return t[1]}function ge(t){return t.join("L")}function pe(t){return ge(t)+"Z"}function me(t){for(var n=0,e=t.length,r=t[0],i=[r[0],",",r[1]];e>++n;)i.push("V",(r=t[n])[1],"H",r[0]);return i.join("")}function ve(t){for(var n=0,e=t.length,r=t[0],i=[r[0],",",r[1]];e>++n;)i.push("H",(r=t[n])[0],"V",r[1]);return i.join("")}function ye(t,n){return 4>t.length?ge(t):t[1]+xe(t.slice(1,t.length-1),_e(t,n))}function Me(t,n){return 3>t.length?ge(t):t[0]+xe((t.push(t[0]),t),_e([t[t.length-2]].concat(t,[t[1]]),n))}function be(t,n){return 3>t.length?ge(t):t[0]+xe(t,_e(t,n))}function xe(t,n){if(1>n.length||t.length!=n.length&&t.length!=n.length+2)return ge(t);var 
e=t.length!=n.length,r="",i=t[0],u=t[1],a=n[0],o=a,c=1;if(e&&(r+="Q"+(u[0]-2*a[0]/3)+","+(u[1]-2*a[1]/3)+","+u[0]+","+u[1],i=t[1],c=2),n.length>1){o=n[1],u=t[c],c++,r+="C"+(i[0]+a[0])+","+(i[1]+a[1])+","+(u[0]-o[0])+","+(u[1]-o[1])+","+u[0]+","+u[1];for(var l=2;n.length>l;l++,c++)u=t[c],o=n[l],r+="S"+(u[0]-o[0])+","+(u[1]-o[1])+","+u[0]+","+u[1]}if(e){var s=t[c];r+="Q"+(u[0]+2*o[0]/3)+","+(u[1]+2*o[1]/3)+","+s[0]+","+s[1]}return r}function _e(t,n){for(var e,r=[],i=(1-n)/2,u=t[0],a=t[1],o=1,c=t.length;c>++o;)e=u,u=a,a=t[o],r.push([i*(a[0]-e[0]),i*(a[1]-e[1])]);return r}function we(t){if(3>t.length)return ge(t);var n=1,e=t.length,r=t[0],i=r[0],u=r[1],a=[i,i,i,(r=t[1])[0]],o=[u,u,u,r[1]],c=[i,",",u];for(Ne(c,a,o);e>++n;)r=t[n],a.shift(),a.push(r[0]),o.shift(),o.push(r[1]),Ne(c,a,o);for(n=-1;2>++n;)a.shift(),a.push(r[0]),o.shift(),o.push(r[1]),Ne(c,a,o);return c.join("")}function Se(t){if(4>t.length)return ge(t);for(var n,e=[],r=-1,i=t.length,u=[0],a=[0];3>++r;)n=t[r],u.push(n[0]),a.push(n[1]);for(e.push(Ae(Ia,u)+","+Ae(Ia,a)),--r;i>++r;)n=t[r],u.shift(),u.push(n[0]),a.shift(),a.push(n[1]),Ne(e,u,a);return e.join("")}function ke(t){for(var n,e,r=-1,i=t.length,u=i+4,a=[],o=[];4>++r;)e=t[r%i],a.push(e[0]),o.push(e[1]);for(n=[Ae(Ia,a),",",Ae(Ia,o)],--r;u>++r;)e=t[r%i],a.shift(),a.push(e[0]),o.shift(),o.push(e[1]),Ne(n,a,o);return n.join("")}function Ee(t,n){var e=t.length-1;if(e)for(var r,i,u=t[0][0],a=t[0][1],o=t[e][0]-u,c=t[e][1]-a,l=-1;e>=++l;)r=t[l],i=l/e,r[0]=n*r[0]+(1-n)*(u+i*o),r[1]=n*r[1]+(1-n)*(a+i*c);return we(t)}function Ae(t,n){return t[0]*n[0]+t[1]*n[1]+t[2]*n[2]+t[3]*n[3]}function Ne(t,n,e){t.push("C",Ae(Ya,n),",",Ae(Ya,e),",",Ae(Ua,n),",",Ae(Ua,e),",",Ae(Ia,n),",",Ae(Ia,e))}function Te(t,n){return(n[1]-t[1])/(n[0]-t[0])}function qe(t){for(var n=0,e=t.length-1,r=[],i=t[0],u=t[1],a=r[0]=Te(i,u);e>++n;)r[n]=(a+(a=Te(i=u,u=t[n+1])))/2;return r[n]=a,r}function Ce(t){for(var 
n,e,r,i,u=[],a=qe(t),o=-1,c=t.length-1;c>++o;)n=Te(t[o],t[o+1]),1e-6>Math.abs(n)?a[o]=a[o+1]=0:(e=a[o]/n,r=a[o+1]/n,i=e*e+r*r,i>9&&(i=3*n/Math.sqrt(i),a[o]=i*e,a[o+1]=i*r));for(o=-1;c>=++o;)i=(t[Math.min(c,o+1)][0]-t[Math.max(0,o-1)][0])/(6*(1+a[o]*a[o])),u.push([i||0,a[o]*i||0]);return u}function ze(t){return 3>t.length?ge(t):t[0]+xe(t,Ce(t))}function De(t){for(var n,e,r,i=-1,u=t.length;u>++i;)n=t[i],e=n[0],r=n[1]+Pa,n[0]=e*Math.cos(r),n[1]=e*Math.sin(r);return t}function Le(t){function n(n){function o(){m.push("M",l(t(y),d),h,f(t(v.reverse()),d),"Z")}for(var s,g,p,m=[],v=[],y=[],M=-1,b=n.length,x=c(e),_=c(i),w=e===r?function(){return g}:c(r),S=i===u?function(){return p}:c(u);b>++M;)a.call(this,s=n[M],M)?(v.push([g=+x.call(this,s,M),p=+_.call(this,s,M)]),y.push([+w.call(this,s,M),+S.call(this,s,M)])):v.length&&(o(),v=[],y=[]);return v.length&&o(),m.length?m.join(""):null}var e=he,r=he,i=0,u=de,a=o,l=ge,s=l.key,f=l,h="L",d=.7;return n.x=function(t){return arguments.length?(e=r=t,n):r},n.x0=function(t){return arguments.length?(e=t,n):e},n.x1=function(t){return arguments.length?(r=t,n):r},n.y=function(t){return arguments.length?(i=u=t,n):u},n.y0=function(t){return arguments.length?(i=t,n):i},n.y1=function(t){return arguments.length?(u=t,n):u},n.defined=function(t){return arguments.length?(a=t,n):a},n.interpolate=function(t){return arguments.length?(s="function"==typeof t?l=t:(l=Oa.get(t)||ge).key,f=l.reverse||l,h=l.closed?"M":"L",n):s},n.tension=function(t){return arguments.length?(d=t,n):d},n}function Fe(t){return t.radius}function He(t){return[t.x,t.y]}function Re(t){return function(){var n=t.apply(this,arguments),e=n[0],r=n[1]+Pa;return[e*Math.cos(r),e*Math.sin(r)]}}function Pe(){return 64}function je(){return"circle"}function Oe(t){var n=Math.sqrt(t/Ru);return"M0,"+n+"A"+n+","+n+" 0 1,1 0,"+-n+"A"+n+","+n+" 0 1,1 0,"+n+"Z"}function Ye(t,n){t.attr("transform",function(t){return"translate("+n(t)+",0)"})}function 
Ue(t,n){t.attr("transform",function(t){return"translate(0,"+n(t)+")"})}function Ie(t,n,e){if(r=[],e&&n.length>1){for(var r,i,u,a=jn(t.domain()),o=-1,c=n.length,l=(n[1]-n[0])/++e;c>++o;)for(i=e;--i>0;)(u=+n[o]-i*l)>=a[0]&&r.push(u);for(--o,i=0;e>++i&&(u=+n[o]+i*l)<a[1];)r.push(u)}return r}function Ve(){Ja||(Ja=d3.select("body").append("div").style("visibility","hidden").style("top",0).style("height",0).style("width",0).style("overflow-y","scroll").append("div").style("height","2000px").node().parentNode);var t,n=d3.event;try{Ja.scrollTop=1e3,Ja.dispatchEvent(n),t=1e3-Ja.scrollTop}catch(e){t=n.wheelDelta||5*-n.detail}return t}function Xe(t){for(var n=t.source,e=t.target,r=Be(n,e),i=[n];n!==r;)n=n.parent,i.push(n);for(var u=i.length;e!==r;)i.splice(u,0,e),e=e.parent;return i}function Ze(t){for(var n=[],e=t.parent;null!=e;)n.push(t),t=e,e=e.parent;return n.push(t),n}function Be(t,n){if(t===n)return t;for(var e=Ze(t),r=Ze(n),i=e.pop(),u=r.pop(),a=null;i===u;)a=i,i=e.pop(),u=r.pop();return a}function $e(t){t.fixed|=2}function Je(t){t.fixed&=1}function Ge(t){t.fixed|=4,t.px=t.x,t.py=t.y}function Ke(t){t.fixed&=3}function We(t,n,e){var r=0,i=0;if(t.charge=0,!t.leaf)for(var u,a=t.nodes,o=a.length,c=-1;o>++c;)u=a[c],null!=u&&(We(u,n,e),t.charge+=u.charge,r+=u.charge*u.cx,i+=u.charge*u.cy);if(t.point){t.leaf||(t.point.x+=Math.random()-.5,t.point.y+=Math.random()-.5);var l=n*e[t.point.index];t.charge+=t.pointCharge=l,r+=l*t.point.x,i+=l*t.point.y}t.cx=r/t.charge,t.cy=i/t.charge}function Qe(){return 20}function tr(){return 1}function nr(t){return t.x}function er(t){return t.y}function rr(t,n,e){t.y0=n,t.y=e}function ir(t){return d3.range(t.length)}function ur(t){for(var n=-1,e=t[0].length,r=[];e>++n;)r[n]=0;return r}function ar(t){for(var n,e=1,r=0,i=t[0][1],u=t.length;u>e;++e)(n=t[e][1])>i&&(r=e,i=n);return r}function or(t){return t.reduce(cr,0)}function cr(t,n){return t+n[1]}function lr(t,n){return sr(t,Math.ceil(Math.log(n.length)/Math.LN2+1))}function sr(t,n){for(var 
e=-1,r=+t[0],i=(t[1]-r)/n,u=[];n>=++e;)u[e]=i*e+r;return u}function fr(t){return[d3.min(t),d3.max(t)]}function hr(t,n){return d3.rebind(t,n,"sort","children","value"),t.nodes=t,t.links=mr,t}function dr(t){return t.children}function gr(t){return t.value}function pr(t,n){return n.value-t.value}function mr(t){return d3.merge(t.map(function(t){return(t.children||[]).map(function(n){return{source:t,target:n}})}))}function vr(t,n){return t.value-n.value}function yr(t,n){var e=t._pack_next;t._pack_next=n,n._pack_prev=t,n._pack_next=e,e._pack_prev=n}function Mr(t,n){t._pack_next=n,n._pack_prev=t}function br(t,n){var e=n.x-t.x,r=n.y-t.y,i=t.r+n.r;return i*i-e*e-r*r>.001}function xr(t){function n(t){s=Math.min(t.x-t.r,s),f=Math.max(t.x+t.r,f),h=Math.min(t.y-t.r,h),d=Math.max(t.y+t.r,d)}if((e=t.children)&&(l=e.length)){var e,r,i,u,a,o,c,l,s=1/0,f=-1/0,h=1/0,d=-1/0;if(e.forEach(_r),r=e[0],r.x=-r.r,r.y=0,n(r),l>1&&(i=e[1],i.x=i.r,i.y=0,n(i),l>2))for(u=e[2],kr(r,i,u),n(u),yr(r,u),r._pack_prev=u,yr(u,i),i=r._pack_next,a=3;l>a;a++){kr(r,i,u=e[a]);var g=0,p=1,m=1;for(o=i._pack_next;o!==i;o=o._pack_next,p++)if(br(o,u)){g=1;break}if(1==g)for(c=r._pack_prev;c!==o._pack_prev&&!br(c,u);c=c._pack_prev,m++);g?(m>p||p==m&&i.r<r.r?Mr(r,i=o):Mr(r=c,i),a--):(yr(r,u),i=u,n(u))}var v=(s+f)/2,y=(h+d)/2,M=0;for(a=0;l>a;a++)u=e[a],u.x-=v,u.y-=y,M=Math.max(M,u.r+Math.sqrt(u.x*u.x+u.y*u.y));t.r=M,e.forEach(wr)}}function _r(t){t._pack_next=t._pack_prev=t}function wr(t){delete t._pack_next,delete t._pack_prev}function Sr(t,n,e,r){var i=t.children;if(t.x=n+=r*t.x,t.y=e+=r*t.y,t.r*=r,i)for(var u=-1,a=i.length;a>++u;)Sr(i[u],n,e,r)}function kr(t,n,e){var r=t.r+e.r,i=n.x-t.x,u=n.y-t.y;if(r&&(i||u)){var a=n.r+e.r,o=i*i+u*u;a*=a,r*=r;var c=.5+(r-a)/(2*o),l=Math.sqrt(Math.max(0,2*a*(r+o)-(r-=o)*r-a*a))/(2*o);e.x=t.x+c*i+l*u,e.y=t.y+c*u-l*i}else e.x=t.x+r,e.y=t.y}function Er(t){return 1+d3.max(t,function(t){return t.y})}function Ar(t){return t.reduce(function(t,n){return t+n.x},0)/t.length}function Nr(t){var 
n=t.children;return n&&n.length?Nr(n[0]):t}function Tr(t){var n,e=t.children;return e&&(n=e.length)?Tr(e[n-1]):t}function qr(t,n){return t.parent==n.parent?1:2}function Cr(t){var n=t.children;return n&&n.length?n[0]:t._tree.thread}function zr(t){var n,e=t.children;return e&&(n=e.length)?e[n-1]:t._tree.thread}function Dr(t,n){var e=t.children;if(e&&(i=e.length))for(var r,i,u=-1;i>++u;)n(r=Dr(e[u],n),t)>0&&(t=r);return t}function Lr(t,n){return t.x-n.x}function Fr(t,n){return n.x-t.x}function Hr(t,n){return t.depth-n.depth}function Rr(t,n){function e(t,r){var i=t.children;if(i&&(a=i.length))for(var u,a,o=null,c=-1;a>++c;)u=i[c],e(u,o),o=u;n(t,r)}e(t,null)}function Pr(t){for(var n,e=0,r=0,i=t.children,u=i.length;--u>=0;)n=i[u]._tree,n.prelim+=e,n.mod+=e,e+=n.shift+(r+=n.change)}function jr(t,n,e){t=t._tree,n=n._tree;var r=e/(n.number-t.number);t.change+=r,n.change-=r,n.shift+=e,n.prelim+=e,n.mod+=e}function Or(t,n,e){return t._tree.ancestor.parent==n.parent?t._tree.ancestor:e}function Yr(t){return{x:t.x,y:t.y,dx:t.dx,dy:t.dy}}function Ur(t,n){var e=t.x+n[3],r=t.y+n[0],i=t.dx-n[1]-n[3],u=t.dy-n[0]-n[2];return 0>i&&(e+=i/2,i=0),0>u&&(r+=u/2,u=0),{x:e,y:r,dx:i,dy:u}}function Ir(t,n){function e(t,e){return d3.xhr(t,n,e).response(r)}function r(t){return e.parse(t.responseText)}function i(n){return n.map(u).join(t)}function u(t){return a.test(t)?'"'+t.replace(/\"/g,'""')+'"':t}var a=RegExp('["'+t+"\n]"),o=t.charCodeAt(0);return e.parse=function(t){var n;return e.parseRows(t,function(t){return n?n(t):(n=Function("d","return {"+t.map(function(t,n){return JSON.stringify(t)+": d["+n+"]"}).join(",")+"}"),void 0)})},e.parseRows=function(t,n){function e(){if(s>=l)return a;if(i)return i=!1,u;var n=s;if(34===t.charCodeAt(n)){for(var e=n;l>e++;)if(34===t.charCodeAt(e)){if(34!==t.charCodeAt(e+1))break;++e}s=e+2;var r=t.charCodeAt(e+1);return 13===r?(i=!0,10===t.charCodeAt(e+2)&&++s):10===r&&(i=!0),t.substring(n+1,e).replace(/""/g,'"')}for(;l>s;){var 
r=t.charCodeAt(s++),c=1;if(10===r)i=!0;else if(13===r)i=!0,10===t.charCodeAt(s)&&(++s,++c);else if(r!==o)continue;return t.substring(n,s-c)}return t.substring(n)}for(var r,i,u={},a={},c=[],l=t.length,s=0,f=0;(r=e())!==a;){for(var h=[];r!==u&&r!==a;)h.push(r),r=e();(!n||(h=n(h,f++)))&&c.push(h)}return c},e.format=function(t){return t.map(i).join("\n")},e}function Vr(t,n){no.hasOwnProperty(t.type)&&no[t.type](t,n)}function Xr(t,n,e){var r,i=-1,u=t.length-e;for(n.lineStart();u>++i;)r=t[i],n.point(r[0],r[1]);n.lineEnd()}function Zr(t,n){var e=-1,r=t.length;for(n.polygonStart();r>++e;)Xr(t[e],n,1);n.polygonEnd()}function Br(t){return[Math.atan2(t[1],t[0]),Math.asin(Math.max(-1,Math.min(1,t[2])))]}function $r(t,n){return Pu>Math.abs(t[0]-n[0])&&Pu>Math.abs(t[1]-n[1])}function Jr(t){var n=t[0],e=t[1],r=Math.cos(e);return[r*Math.cos(n),r*Math.sin(n),Math.sin(e)]}function Gr(t,n){return t[0]*n[0]+t[1]*n[1]+t[2]*n[2]}function Kr(t,n){return[t[1]*n[2]-t[2]*n[1],t[2]*n[0]-t[0]*n[2],t[0]*n[1]-t[1]*n[0]]}function Wr(t,n){t[0]+=n[0],t[1]+=n[1],t[2]+=n[2]}function Qr(t,n){return[t[0]*n,t[1]*n,t[2]*n]}function ti(t){var n=Math.sqrt(t[0]*t[0]+t[1]*t[1]+t[2]*t[2]);t[0]/=n,t[1]/=n,t[2]/=n}function ni(t){function n(n){function r(e,r){e=t(e,r),n.point(e[0],e[1])}function u(){s=0/0,p.point=a,n.lineStart()}function a(r,u){var a=Jr([r,u]),o=t(r,u);e(s,f,l,h,d,g,s=o[0],f=o[1],l=r,h=a[0],d=a[1],g=a[2],i,n),n.point(s,f)}function o(){p.point=r,n.lineEnd()}function c(){var t,r,c,m,v,y,M;u(),p.point=function(n,e){a(t=n,r=e),c=s,m=f,v=h,y=d,M=g,p.point=a},p.lineEnd=function(){e(s,f,l,h,d,g,c,m,t,v,y,M,i,n),p.lineEnd=o,o()}}var l,s,f,h,d,g,p={point:r,lineStart:u,lineEnd:o,polygonStart:function(){n.polygonStart(),p.lineStart=c},polygonEnd:function(){n.polygonEnd(),p.lineStart=u}};return p}function e(n,i,u,a,o,c,l,s,f,h,d,g,p,m){var v=l-n,y=s-i,M=v*v+y*y;if(M>4*r&&p--){var 
b=a+h,x=o+d,_=c+g,w=Math.sqrt(b*b+x*x+_*_),S=Math.asin(_/=w),k=Pu>Math.abs(Math.abs(_)-1)?(u+f)/2:Math.atan2(x,b),E=t(k,S),A=E[0],N=E[1],T=A-n,q=N-i,C=y*T-v*q;(C*C/M>r||Math.abs((v*T+y*q)/M-.5)>.3)&&(e(n,i,u,a,o,c,A,N,k,b/=w,x/=w,_,p,m),m.point(A,N),e(A,N,k,b,x,_,l,s,f,h,d,g,p,m))}}var r=.5,i=16;return n.precision=function(t){return arguments.length?(i=(r=t*t)>0&&16,n):Math.sqrt(r)},n}function ei(t,n){function e(t,n){var e=Math.sqrt(u-2*i*Math.sin(n))/i;return[e*Math.sin(t*=i),a-e*Math.cos(t)]}var r=Math.sin(t),i=(r+Math.sin(n))/2,u=1+r*(2*i-r),a=Math.sqrt(u)/i;return e.invert=function(t,n){var e=a-n;return[Math.atan2(t,e)/i,Math.asin((u-(t*t+e*e)*i*i)/(2*i))]},e}function ri(t){function n(t,n){r>t&&(r=t),t>u&&(u=t),i>n&&(i=n),n>a&&(a=n)}function e(){o.point=o.lineEnd=Pn}var r,i,u,a,o={point:n,lineStart:Pn,lineEnd:Pn,polygonStart:function(){o.lineEnd=e},polygonEnd:function(){o.point=n}};return function(n){return a=u=-(r=i=1/0),d3.geo.stream(n,t(o)),[[r,i],[u,a]]}}function ii(t,n){if(!io){++uo,t*=ju;var e=Math.cos(n*=ju);ao+=(e*Math.cos(t)-ao)/uo,oo+=(e*Math.sin(t)-oo)/uo,co+=(Math.sin(n)-co)/uo}}function ui(){var t,n;io=1,ai(),io=2;var e=lo.point;lo.point=function(r,i){e(t=r,n=i)},lo.lineEnd=function(){lo.point(t,n),oi(),lo.lineEnd=oi}}function ai(){function t(t,i){t*=ju;var u=Math.cos(i*=ju),a=u*Math.cos(t),o=u*Math.sin(t),c=Math.sin(i),l=Math.atan2(Math.sqrt((l=e*c-r*o)*l+(l=r*a-n*c)*l+(l=n*o-e*a)*l),n*a+e*o+r*c);uo+=l,ao+=l*(n+(n=a)),oo+=l*(e+(e=o)),co+=l*(r+(r=c))}var n,e,r;io>1||(1>io&&(io=1,uo=ao=oo=co=0),lo.point=function(i,u){i*=ju;var a=Math.cos(u*=ju);n=a*Math.cos(i),e=a*Math.sin(i),r=Math.sin(u),lo.point=t})}function oi(){lo.point=ii}function ci(t,n){var e=Math.cos(t),r=Math.sin(t);return function(i,u,a,o){null!=i?(i=li(e,i),u=li(e,u),(a>0?u>i:i>u)&&(i+=2*a*Ru)):(i=t+2*a*Ru,u=t);for(var c,l=a*n,s=i;a>0?s>u:u>s;s-=l)o.point((c=Br([e,-r*Math.cos(s),-r*Math.sin(s)]))[0],c[1])}}function li(t,n){var e=Jr(n);e[0]-=t,ti(e);var 
r=Math.acos(Math.max(-1,Math.min(1,-e[1])));return((0>-e[2]?-r:r)+2*Math.PI-Pu)%(2*Math.PI)}function si(t,n,e){return function(r){function i(n,e){t(n,e)&&r.point(n,e)}function u(t,n){m.point(t,n)}function a(){v.point=u,m.lineStart()}function o(){v.point=i,m.lineEnd()}function c(t,n){M.point(t,n),p.push([t,n])}function l(){M.lineStart(),p=[]}function s(){c(p[0][0],p[0][1]),M.lineEnd();var t,n=M.clean(),e=y.buffer(),i=e.length;if(!i)return g=!0,d+=mi(p,-1),p=null,void 0;if(p=null,1&n){t=e[0],h+=mi(t,1);var u,i=t.length-1,a=-1;for(r.lineStart();i>++a;)r.point((u=t[a])[0],u[1]);return r.lineEnd(),void 0}i>1&&2&n&&e.push(e.pop().concat(e.shift())),f.push(e.filter(gi))}var f,h,d,g,p,m=n(r),v={point:i,lineStart:a,lineEnd:o,polygonStart:function(){v.point=c,v.lineStart=l,v.lineEnd=s,g=!1,d=h=0,f=[],r.polygonStart()
+},polygonEnd:function(){v.point=i,v.lineStart=a,v.lineEnd=o,f=d3.merge(f),f.length?fi(f,e,r):(-Pu>h||g&&-Pu>d)&&(r.lineStart(),e(null,null,1,r),r.lineEnd()),r.polygonEnd(),f=null},sphere:function(){r.polygonStart(),r.lineStart(),e(null,null,1,r),r.lineEnd(),r.polygonEnd()}},y=pi(),M=n(y);return v}}function fi(t,n,e){var r=[],i=[];if(t.forEach(function(t){var n=t.length;if(!(1>=n)){var e=t[0],u=t[n-1],a={point:e,points:t,other:null,visited:!1,entry:!0,subject:!0},o={point:e,points:[e],other:a,visited:!1,entry:!1,subject:!1};a.other=o,r.push(a),i.push(o),a={point:u,points:[u],other:null,visited:!1,entry:!1,subject:!0},o={point:u,points:[u],other:a,visited:!1,entry:!0,subject:!1},a.other=o,r.push(a),i.push(o)}}),i.sort(di),hi(r),hi(i),r.length)for(var u,a,o,c=r[0];;){for(u=c;u.visited;)if((u=u.next)===c)return;a=u.points,e.lineStart();do{if(u.visited=u.other.visited=!0,u.entry){if(u.subject)for(var l=0;a.length>l;l++)e.point((o=a[l])[0],o[1]);else n(u.point,u.next.point,1,e);u=u.next}else{if(u.subject){a=u.prev.points;for(var l=a.length;--l>=0;)e.point((o=a[l])[0],o[1])}else n(u.point,u.prev.point,-1,e);u=u.prev}u=u.other,a=u.points}while(!u.visited);e.lineEnd()}}function hi(t){if(n=t.length){for(var n,e,r=0,i=t[0];n>++r;)i.next=e=t[r],e.prev=i,i=e;i.next=e=t[0],e.prev=i}}function di(t,n){return(0>(t=t.point)[0]?t[1]-Ru/2-Pu:Ru/2-t[1])-(0>(n=n.point)[0]?n[1]-Ru/2-Pu:Ru/2-n[1])}function gi(t){return t.length>1}function pi(){var t,n=[];return{lineStart:function(){n.push(t=[])},point:function(n,e){t.push([n,e])},lineEnd:Pn,buffer:function(){var e=n;return n=[],t=null,e}}}function mi(t,n){if(!(e=t.length))return 0;for(var 
e,r,i,u=0,a=0,o=t[0],c=o[0],l=o[1],s=Math.cos(l),f=Math.atan2(n*Math.sin(c)*s,Math.sin(l)),h=1-n*Math.cos(c)*s,d=f;e>++u;)o=t[u],s=Math.cos(l=o[1]),r=Math.atan2(n*Math.sin(c=o[0])*s,Math.sin(l)),i=1-n*Math.cos(c)*s,Pu>Math.abs(h-2)&&Pu>Math.abs(i-2)||(Pu>Math.abs(i)||Pu>Math.abs(h)||(Pu>Math.abs(Math.abs(r-f)-Ru)?i+h>2&&(a+=4*(r-f)):a+=Pu>Math.abs(h-2)?4*(r-d):((3*Ru+r-f)%(2*Ru)-Ru)*(h+i)),d=f,f=r,h=i);return a}function vi(t){var n,e=0/0,r=0/0,i=0/0;return{lineStart:function(){t.lineStart(),n=1},point:function(u,a){var o=u>0?Ru:-Ru,c=Math.abs(u-e);Pu>Math.abs(c-Ru)?(t.point(e,r=(r+a)/2>0?Ru/2:-Ru/2),t.point(i,r),t.lineEnd(),t.lineStart(),t.point(o,r),t.point(u,r),n=0):i!==o&&c>=Ru&&(Pu>Math.abs(e-i)&&(e-=i*Pu),Pu>Math.abs(u-o)&&(u-=o*Pu),r=yi(e,r,u,a),t.point(i,r),t.lineEnd(),t.lineStart(),t.point(o,r),n=0),t.point(e=u,r=a),i=o},lineEnd:function(){t.lineEnd(),e=r=0/0},clean:function(){return 2-n}}}function yi(t,n,e,r){var i,u,a=Math.sin(t-e);return Math.abs(a)>Pu?Math.atan((Math.sin(n)*(u=Math.cos(r))*Math.sin(e)-Math.sin(r)*(i=Math.cos(n))*Math.sin(t))/(i*u*a)):(n+r)/2}function Mi(t,n,e,r){var i;if(null==t)i=e*Ru/2,r.point(-Ru,i),r.point(0,i),r.point(Ru,i),r.point(Ru,0),r.point(Ru,-i),r.point(0,-i),r.point(-Ru,-i),r.point(-Ru,0),r.point(-Ru,i);else if(Math.abs(t[0]-n[0])>Pu){var u=(t[0]<n[0]?1:-1)*Ru;i=e*u/2,r.point(-u,i),r.point(0,i),r.point(u,i)}else r.point(n[0],n[1])}function bi(t){function n(t,n){return Math.cos(t)*Math.cos(n)>u}function e(t){var e,i,u,a;return{lineStart:function(){u=i=!1,a=1},point:function(o,c){var l,s=[o,c],f=n(o,c);!e&&(u=i=f)&&t.lineStart(),f!==i&&(l=r(e,s),($r(e,l)||$r(s,l))&&(s[0]+=Pu,s[1]+=Pu,f=n(s[0],s[1]))),f!==i&&(a=0,(i=f)?(t.lineStart(),l=r(s,e),t.point(l[0],l[1])):(l=r(e,s),t.point(l[0],l[1]),t.lineEnd()),e=l),!f||e&&$r(e,s)||t.point(s[0],s[1]),e=s},lineEnd:function(){i&&t.lineEnd(),e=null},clean:function(){return a|(u&&i)<<1}}}function r(t,n){var e=Jr(t,0),r=Jr(n,0),i=[1,0,0],a=Kr(e,r),o=Gr(a,a),c=a[0],l=o-c*c;if(!l)return 
t;var s=u*o/l,f=-u*c/l,h=Kr(i,a),d=Qr(i,s),g=Qr(a,f);Wr(d,g);var p=h,m=Gr(d,p),v=Gr(p,p),y=Math.sqrt(m*m-v*(Gr(d,d)-1)),M=Qr(p,(-m-y)/v);return Wr(M,d),Br(M)}var i=t*ju,u=Math.cos(i),a=ci(i,6*ju);return si(n,e,a)}function xi(t,n){function e(e,r){return e=t(e,r),n(e[0],e[1])}return t.invert&&n.invert&&(e.invert=function(e,r){return e=n.invert(e,r),e&&t.invert(e[0],e[1])}),e}function _i(t,n){return[t,n]}function wi(t,n,e){var r=d3.range(t,n-Pu,e).concat(n);return function(t){return r.map(function(n){return[t,n]})}}function Si(t,n,e){var r=d3.range(t,n-Pu,e).concat(n);return function(t){return r.map(function(n){return[n,t]})}}function ki(t,n,e,r){function i(t){var n=Math.sin(t*=d)*g,e=Math.sin(d-t)*g,r=e*l+n*f,i=e*s+n*h,u=e*a+n*c;return[Math.atan2(i,r)/ju,Math.atan2(u,Math.sqrt(r*r+i*i))/ju]}var u=Math.cos(n),a=Math.sin(n),o=Math.cos(r),c=Math.sin(r),l=u*Math.cos(t),s=u*Math.sin(t),f=o*Math.cos(e),h=o*Math.sin(e),d=Math.acos(Math.max(-1,Math.min(1,a*c+u*o*Math.cos(e-t)))),g=1/Math.sin(d);return i.distance=d,i}function Ei(t,n){return[t/(2*Ru),Math.max(-.5,Math.min(.5,Math.log(Math.tan(Ru/4+n/2))/(2*Ru)))]}function Ai(t){return"m0,"+t+"a"+t+","+t+" 0 1,1 0,"+-2*t+"a"+t+","+t+" 0 1,1 0,"+2*t+"z"}function Ni(t){var n=ni(function(n,e){return t([n*Ou,e*Ou])});return function(t){return t=n(t),{point:function(n,e){t.point(n*ju,e*ju)},sphere:function(){t.sphere()},lineStart:function(){t.lineStart()},lineEnd:function(){t.lineEnd()},polygonStart:function(){t.polygonStart()},polygonEnd:function(){t.polygonEnd()}}}}function Ti(){function t(t,n){a.push("M",t,",",n,u)}function n(t,n){a.push("M",t,",",n),o.point=e}function e(t,n){a.push("L",t,",",n)}function r(){o.point=t}function i(){a.push("Z")}var u=Ai(4.5),a=[],o={point:t,lineStart:function(){o.point=n},lineEnd:r,polygonStart:function(){o.lineEnd=i},polygonEnd:function(){o.lineEnd=r,o.point=t},pointRadius:function(t){return u=Ai(t),o},result:function(){if(a.length){var t=a.join("");return a=[],t}}};return o}function 
qi(t){function n(n,e){t.moveTo(n,e),t.arc(n,e,a,0,2*Ru)}function e(n,e){t.moveTo(n,e),o.point=r}function r(n,e){t.lineTo(n,e)}function i(){o.point=n}function u(){t.closePath()}var a=4.5,o={point:n,lineStart:function(){o.point=e},lineEnd:i,polygonStart:function(){o.lineEnd=u},polygonEnd:function(){o.lineEnd=i,o.point=n},pointRadius:function(t){return a=t,o},result:Pn};return o}function Ci(){function t(t,n){po+=i*t-r*n,r=t,i=n}var n,e,r,i;mo.point=function(u,a){mo.point=t,n=r=u,e=i=a},mo.lineEnd=function(){t(n,e)}}function zi(t,n){io||(ao+=t,oo+=n,++co)}function Di(){function t(t,r){var i=t-n,u=r-e,a=Math.sqrt(i*i+u*u);ao+=a*(n+t)/2,oo+=a*(e+r)/2,co+=a,n=t,e=r}var n,e;if(1!==io){if(!(1>io))return;io=1,ao=oo=co=0}vo.point=function(r,i){vo.point=t,n=r,e=i}}function Li(){vo.point=zi}function Fi(){function t(t,n){var e=i*t-r*n;ao+=e*(r+t),oo+=e*(i+n),co+=3*e,r=t,i=n}var n,e,r,i;2>io&&(io=2,ao=oo=co=0),vo.point=function(u,a){vo.point=t,n=r=u,e=i=a},vo.lineEnd=function(){t(n,e)}}function Hi(){function t(t,n){if(t*=ju,n*=ju,!(Pu>Math.abs(Math.abs(u)-Ru/2)&&Pu>Math.abs(Math.abs(n)-Ru/2))){var e=Math.cos(n),c=Math.sin(n);if(Pu>Math.abs(u-Ru/2))Mo+=2*(t-r);else{var l=t-i,s=Math.cos(l),f=Math.atan2(Math.sqrt((f=e*Math.sin(l))*f+(f=a*c-o*e*s)*f),o*c+a*e*s),h=(f+Ru+u+n)/4;Mo+=(0>l&&l>-Ru||l>Ru?-4:4)*Math.atan(Math.sqrt(Math.abs(Math.tan(h)*Math.tan(h-f/2)*Math.tan(h-Ru/4-u/2)*Math.tan(h-Ru/4-n/2))))}r=i,i=t,u=n,a=e,o=c}}var n,e,r,i,u,a,o;bo.point=function(c,l){bo.point=t,r=i=(n=c)*ju,u=(e=l)*ju,a=Math.cos(u),o=Math.sin(u)},bo.lineEnd=function(){t(n,e)}}function Ri(t){return Pi(function(){return t})()}function Pi(t){function n(t){return t=a(t[0]*ju,t[1]*ju),[t[0]*s+o,c-t[1]*s]}function e(t){return t=a.invert((t[0]-o)/s,(c-t[1])/s),t&&[t[0]*Ou,t[1]*Ou]}function r(){a=xi(u=Oi(p,m,v),i);var t=i(d,g);return o=f-t[0]*s,c=h+t[1]*s,n}var i,u,a,o,c,l=ni(function(t,n){return t=i(t,n),[t[0]*s+o,c-t[1]*s]}),s=150,f=480,h=250,d=0,g=0,p=0,m=0,v=0,y=so,M=null;return n.stream=function(t){return 
ji(u,y(l(t)))},n.clipAngle=function(t){return arguments.length?(y=null==t?(M=t,so):bi(M=+t),n):M},n.scale=function(t){return arguments.length?(s=+t,r()):s},n.translate=function(t){return arguments.length?(f=+t[0],h=+t[1],r()):[f,h]},n.center=function(t){return arguments.length?(d=t[0]%360*ju,g=t[1]%360*ju,r()):[d*Ou,g*Ou]},n.rotate=function(t){return arguments.length?(p=t[0]%360*ju,m=t[1]%360*ju,v=t.length>2?t[2]%360*ju:0,r()):[p*Ou,m*Ou,v*Ou]},d3.rebind(n,l,"precision"),function(){return i=t.apply(this,arguments),n.invert=i.invert&&e,r()}}function ji(t,n){return{point:function(e,r){r=t(e*ju,r*ju),e=r[0],n.point(e>Ru?e-2*Ru:-Ru>e?e+2*Ru:e,r[1])},sphere:function(){n.sphere()},lineStart:function(){n.lineStart()},lineEnd:function(){n.lineEnd()},polygonStart:function(){n.polygonStart()},polygonEnd:function(){n.polygonEnd()}}}function Oi(t,n,e){return t?n||e?xi(Ui(t),Ii(n,e)):Ui(t):n||e?Ii(n,e):_i}function Yi(t){return function(n,e){return n+=t,[n>Ru?n-2*Ru:-Ru>n?n+2*Ru:n,e]}}function Ui(t){var n=Yi(t);return n.invert=Yi(-t),n}function Ii(t,n){function e(t,n){var e=Math.cos(n),o=Math.cos(t)*e,c=Math.sin(t)*e,l=Math.sin(n),s=l*r+o*i;return[Math.atan2(c*u-s*a,o*r-l*i),Math.asin(Math.max(-1,Math.min(1,s*u+c*a)))]}var r=Math.cos(t),i=Math.sin(t),u=Math.cos(n),a=Math.sin(n);return e.invert=function(t,n){var e=Math.cos(n),o=Math.cos(t)*e,c=Math.sin(t)*e,l=Math.sin(n),s=l*u-c*a;return[Math.atan2(c*u+l*a,o*r+s*i),Math.asin(Math.max(-1,Math.min(1,s*r-o*i)))]},e}function Vi(t,n){function e(n,e){var r=Math.cos(n),i=Math.cos(e),u=t(r*i);return[u*i*Math.sin(n),u*Math.sin(e)]}return e.invert=function(t,e){var r=Math.sqrt(t*t+e*e),i=n(r),u=Math.sin(i),a=Math.cos(i);return[Math.atan2(t*u,r*a),Math.asin(r&&e*u/r)]},e}function Xi(t,n,e,r){var i,u,a,o,c,l,s;return i=r[t],u=i[0],a=i[1],i=r[n],o=i[0],c=i[1],i=r[e],l=i[0],s=i[1],(s-a)*(o-u)-(c-a)*(l-u)>0}function Zi(t,n,e){return(e[0]-n[0])*(t[1]-n[1])<(e[1]-n[1])*(t[0]-n[0])}function Bi(t,n,e,r){var 
i=t[0],u=e[0],a=n[0]-i,o=r[0]-u,c=t[1],l=e[1],s=n[1]-c,f=r[1]-l,h=(o*(c-l)-f*(i-u))/(f*a-o*s);return[i+h*a,c+h*s]}function $i(t,n){var e={list:t.map(function(t,n){return{index:n,x:t[0],y:t[1]}}).sort(function(t,n){return t.y<n.y?-1:t.y>n.y?1:t.x<n.x?-1:t.x>n.x?1:0}),bottomSite:null},r={list:[],leftEnd:null,rightEnd:null,init:function(){r.leftEnd=r.createHalfEdge(null,"l"),r.rightEnd=r.createHalfEdge(null,"l"),r.leftEnd.r=r.rightEnd,r.rightEnd.l=r.leftEnd,r.list.unshift(r.leftEnd,r.rightEnd)},createHalfEdge:function(t,n){return{edge:t,side:n,vertex:null,l:null,r:null}},insert:function(t,n){n.l=t,n.r=t.r,t.r.l=n,t.r=n},leftBound:function(t){var n=r.leftEnd;do n=n.r;while(n!=r.rightEnd&&i.rightOf(n,t));return n=n.l},del:function(t){t.l.r=t.r,t.r.l=t.l,t.edge=null},right:function(t){return t.r},left:function(t){return t.l},leftRegion:function(t){return null==t.edge?e.bottomSite:t.edge.region[t.side]},rightRegion:function(t){return null==t.edge?e.bottomSite:t.edge.region[_o[t.side]]}},i={bisect:function(t,n){var e={region:{l:t,r:n},ep:{l:null,r:null}},r=n.x-t.x,i=n.y-t.y,u=r>0?r:-r,a=i>0?i:-i;return e.c=t.x*r+t.y*i+.5*(r*r+i*i),u>a?(e.a=1,e.b=i/r,e.c/=r):(e.b=1,e.a=r/i,e.c/=i),e},intersect:function(t,n){var e=t.edge,r=n.edge;if(!e||!r||e.region.r==r.region.r)return null;var i=e.a*r.b-e.b*r.a;if(1e-10>Math.abs(i))return null;var u,a,o=(e.c*r.b-r.c*e.b)/i,c=(r.c*e.a-e.c*r.a)/i,l=e.region.r,s=r.region.r;l.y<s.y||l.y==s.y&&l.x<s.x?(u=t,a=e):(u=n,a=r);var f=o>=a.region.r.x;return f&&"l"===u.side||!f&&"r"===u.side?null:{x:o,y:c}},rightOf:function(t,n){var e=t.edge,r=e.region.r,i=n.x>r.x;if(i&&"l"===t.side)return 1;if(!i&&"r"===t.side)return 0;if(1===e.a){var u=n.y-r.y,a=n.x-r.x,o=0,c=0;if(!i&&0>e.b||i&&e.b>=0?c=o=u>=e.b*a:(c=n.x+n.y*e.b>e.c,0>e.b&&(c=!c),c||(o=1)),!o){var l=r.x-e.region.l.x;c=e.b*(a*a-u*u)<l*u*(1+2*a/l+e.b*e.b),0>e.b&&(c=!c)}}else{var 
s=e.c-e.a*n.x,f=n.y-s,h=n.x-r.x,d=s-r.y;c=f*f>h*h+d*d}return"l"===t.side?c:!c},endPoint:function(t,e,r){t.ep[e]=r,t.ep[_o[e]]&&n(t)},distance:function(t,n){var e=t.x-n.x,r=t.y-n.y;return Math.sqrt(e*e+r*r)}},u={list:[],insert:function(t,n,e){t.vertex=n,t.ystar=n.y+e;for(var r=0,i=u.list,a=i.length;a>r;r++){var o=i[r];if(!(t.ystar>o.ystar||t.ystar==o.ystar&&n.x>o.vertex.x))break}i.splice(r,0,t)},del:function(t){for(var n=0,e=u.list,r=e.length;r>n&&e[n]!=t;++n);e.splice(n,1)},empty:function(){return 0===u.list.length},nextEvent:function(t){for(var n=0,e=u.list,r=e.length;r>n;++n)if(e[n]==t)return e[n+1];return null},min:function(){var t=u.list[0];return{x:t.vertex.x,y:t.ystar}},extractMin:function(){return u.list.shift()}};r.init(),e.bottomSite=e.list.shift();for(var a,o,c,l,s,f,h,d,g,p,m,v,y,M=e.list.shift();;)if(u.empty()||(a=u.min()),M&&(u.empty()||M.y<a.y||M.y==a.y&&M.x<a.x))o=r.leftBound(M),c=r.right(o),h=r.rightRegion(o),v=i.bisect(h,M),f=r.createHalfEdge(v,"l"),r.insert(o,f),p=i.intersect(o,f),p&&(u.del(o),u.insert(o,p,i.distance(p,M))),o=f,f=r.createHalfEdge(v,"r"),r.insert(o,f),p=i.intersect(f,c),p&&u.insert(f,p,i.distance(p,M)),M=e.list.shift();else{if(u.empty())break;o=u.extractMin(),l=r.left(o),c=r.right(o),s=r.right(c),h=r.leftRegion(o),d=r.rightRegion(c),m=o.vertex,i.endPoint(o.edge,o.side,m),i.endPoint(c.edge,c.side,m),r.del(o),u.del(c),r.del(c),y="l",h.y>d.y&&(g=h,h=d,d=g,y="r"),v=i.bisect(h,d),f=r.createHalfEdge(v,y),r.insert(l,f),i.endPoint(v,_o[y],m),p=i.intersect(l,f),p&&(u.del(l),u.insert(l,p,i.distance(p,h))),p=i.intersect(f,s),p&&u.insert(f,p,i.distance(p,h))}for(o=r.right(r.leftEnd);o!=r.rightEnd;o=r.right(o))n(o.edge)}function Ji(){return{leaf:!0,nodes:[],point:null}}function Gi(t,n,e,r,i,u){if(!t(n,e,r,i,u)){var a=.5*(e+i),o=.5*(r+u),c=n.nodes;c[0]&&Gi(t,c[0],e,r,a,o),c[1]&&Gi(t,c[1],a,r,i,o),c[2]&&Gi(t,c[2],e,o,a,u),c[3]&&Gi(t,c[3],a,o,i,u)}}function Ki(){this._=new 
Date(arguments.length>1?Date.UTC.apply(this,arguments):arguments[0])}function Wi(t,n,e,r){for(var i,u,a=0,o=n.length,c=e.length;o>a;){if(r>=c)return-1;if(i=n.charCodeAt(a++),37===i){if(u=Yo[n.charAt(a++)],!u||0>(r=u(t,e,r)))return-1}else if(i!=e.charCodeAt(r++))return-1}return r}function Qi(t){return RegExp("^(?:"+t.map(d3.requote).join("|")+")","i")}function tu(t){for(var n=new u,e=-1,r=t.length;r>++e;)n.set(t[e].toLowerCase(),e);return n}function nu(t,n,e){t+="";var r=t.length;return e>r?Array(e-r+1).join(n)+t:t}function eu(t,n,e){Lo.lastIndex=0;var r=Lo.exec(n.substring(e));return r?e+=r[0].length:-1}function ru(t,n,e){Do.lastIndex=0;var r=Do.exec(n.substring(e));return r?e+=r[0].length:-1}function iu(t,n,e){Ro.lastIndex=0;var r=Ro.exec(n.substring(e));return r?(t.m=Po.get(r[0].toLowerCase()),e+=r[0].length):-1}function uu(t,n,e){Fo.lastIndex=0;var r=Fo.exec(n.substring(e));return r?(t.m=Ho.get(r[0].toLowerCase()),e+=r[0].length):-1}function au(t,n,e){return Wi(t,""+Oo.c,n,e)}function ou(t,n,e){return Wi(t,""+Oo.x,n,e)}function cu(t,n,e){return Wi(t,""+Oo.X,n,e)}function lu(t,n,e){Uo.lastIndex=0;var r=Uo.exec(n.substring(e,e+4));return r?(t.y=+r[0],e+=r[0].length):-1}function su(t,n,e){Uo.lastIndex=0;var r=Uo.exec(n.substring(e,e+2));return r?(t.y=fu(+r[0]),e+=r[0].length):-1}function fu(t){return t+(t>68?1900:2e3)}function hu(t,n,e){Uo.lastIndex=0;var r=Uo.exec(n.substring(e,e+2));return r?(t.m=r[0]-1,e+=r[0].length):-1}function du(t,n,e){Uo.lastIndex=0;var r=Uo.exec(n.substring(e,e+2));return r?(t.d=+r[0],e+=r[0].length):-1}function gu(t,n,e){Uo.lastIndex=0;var r=Uo.exec(n.substring(e,e+2));return r?(t.H=+r[0],e+=r[0].length):-1}function pu(t,n,e){Uo.lastIndex=0;var r=Uo.exec(n.substring(e,e+2));return r?(t.M=+r[0],e+=r[0].length):-1}function mu(t,n,e){Uo.lastIndex=0;var r=Uo.exec(n.substring(e,e+2));return r?(t.S=+r[0],e+=r[0].length):-1}function vu(t,n,e){Uo.lastIndex=0;var r=Uo.exec(n.substring(e,e+3));return r?(t.L=+r[0],e+=r[0].length):-1}function 
yu(t,n,e){var r=Io.get(n.substring(e,e+=2).toLowerCase());return null==r?-1:(t.p=r,e)}function Mu(t){var n=t.getTimezoneOffset(),e=n>0?"-":"+",r=~~(Math.abs(n)/60),i=Math.abs(n)%60;return e+nu(r,"0",2)+nu(i,"0",2)}function bu(t){return t.toISOString()}function xu(t,n,e){function r(n){var e=t(n),r=u(e,1);return r-n>n-e?e:r}function i(e){return n(e=t(new wo(e-1)),1),e}function u(t,e){return n(t=new wo(+t),e),t}function a(t,r,u){var a=i(t),o=[];if(u>1)for(;r>a;)e(a)%u||o.push(new Date(+a)),n(a,1);else for(;r>a;)o.push(new Date(+a)),n(a,1);return o}function o(t,n,e){try{wo=Ki;var r=new Ki;return r._=t,a(r,n,e)}finally{wo=Date}}t.floor=t,t.round=r,t.ceil=i,t.offset=u,t.range=a;var c=t.utc=_u(t);return c.floor=c,c.round=_u(r),c.ceil=_u(i),c.offset=_u(u),c.range=o,t}function _u(t){return function(n,e){try{wo=Ki;var r=new Ki;return r._=n,t(r,e)._}finally{wo=Date}}}function wu(t,n,e){function r(n){return t(n)}return r.invert=function(n){return ku(t.invert(n))},r.domain=function(n){return arguments.length?(t.domain(n),r):t.domain().map(ku)},r.nice=function(t){return r.domain(Yn(r.domain(),function(){return t}))},r.ticks=function(e,i){var u=Su(r.domain());if("function"!=typeof e){var a=u[1]-u[0],o=a/e,c=d3.bisect(Xo,o);if(c==Xo.length)return n.year(u,e);if(!c)return t.ticks(e).map(ku);Math.log(o/Xo[c-1])<Math.log(Xo[c]/o)&&--c,e=n[c],i=e[1],e=e[0].range}return e(u[0],new Date(+u[1]+1),i)},r.tickFormat=function(){return e},r.copy=function(){return wu(t.copy(),n,e)},d3.rebind(r,t,"range","rangeRound","interpolate","clamp")}function Su(t){var n=t[0],e=t[t.length-1];return e>n?[n,e]:[e,n]}function ku(t){return new Date(t)}function Eu(t){return function(n){for(var e=t.length-1,r=t[e];!r[1](n);)r=t[--e];return r[0](n)}}function Au(t){var n=new Date(t,0,1);return n.setFullYear(t),n}function Nu(t){var n=t.getFullYear(),e=Au(n),r=Au(n+1);return n+(t-e)/(r-e)}function Tu(t){var n=new Date(Date.UTC(t,0,1));return n.setUTCFullYear(t),n}function qu(t){var 
n=t.getUTCFullYear(),e=Tu(n),r=Tu(n+1);return n+(t-e)/(r-e)}var Cu=".",zu=",",Du=[3,3];Date.now||(Date.now=function(){return+new Date});try{document.createElement("div").style.setProperty("opacity",0,"")}catch(Lu){var Fu=CSSStyleDeclaration.prototype,Hu=Fu.setProperty;Fu.setProperty=function(t,n,e){Hu.call(this,t,n+"",e)}}d3={version:"3.0.4"};var Ru=Math.PI,Pu=1e-6,ju=Ru/180,Ou=180/Ru,Yu=i;try{Yu(document.documentElement.childNodes)[0].nodeType}catch(Uu){Yu=r}var Iu=[].__proto__?function(t,n){t.__proto__=n}:function(t,n){for(var e in n)t[e]=n[e]};d3.map=function(t){var n=new u;for(var e in t)n.set(e,t[e]);return n},e(u,{has:function(t){return Vu+t in this},get:function(t){return this[Vu+t]},set:function(t,n){return this[Vu+t]=n},remove:function(t){return t=Vu+t,t in this&&delete this[t]},keys:function(){var t=[];return this.forEach(function(n){t.push(n)}),t},values:function(){var t=[];return this.forEach(function(n,e){t.push(e)}),t},entries:function(){var t=[];return this.forEach(function(n,e){t.push({key:n,value:e})}),t},forEach:function(t){for(var n in this)n.charCodeAt(0)===Xu&&t.call(this,n.substring(1),this[n])}});var Vu="\0",Xu=Vu.charCodeAt(0);d3.functor=c,d3.rebind=function(t,n){for(var e,r=1,i=arguments.length;i>++r;)t[e=arguments[r]]=l(t,n,n[e]);return t},d3.ascending=function(t,n){return n>t?-1:t>n?1:t>=n?0:0/0},d3.descending=function(t,n){return t>n?-1:n>t?1:n>=t?0:0/0},d3.mean=function(t,n){var e,r=t.length,i=0,u=-1,a=0;if(1===arguments.length)for(;r>++u;)s(e=t[u])&&(i+=(e-i)/++a);else for(;r>++u;)s(e=n.call(t,t[u],u))&&(i+=(e-i)/++a);return a?i:void 0},d3.median=function(t,n){return arguments.length>1&&(t=t.map(n)),t=t.filter(s),t.length?d3.quantile(t.sort(d3.ascending),.5):void 0},d3.min=function(t,n){var e,r,i=-1,u=t.length;if(1===arguments.length){for(;u>++i&&(null==(e=t[i])||e!=e);)e=void 0;for(;u>++i;)null!=(r=t[i])&&e>r&&(e=r)}else{for(;u>++i&&(null==(e=n.call(t,t[i],i))||e!=e);)e=void 0;for(;u>++i;)null!=(r=n.call(t,t[i],i))&&e>r&&(e=r)}return 
e},d3.max=function(t,n){var e,r,i=-1,u=t.length;if(1===arguments.length){for(;u>++i&&(null==(e=t[i])||e!=e);)e=void 0;for(;u>++i;)null!=(r=t[i])&&r>e&&(e=r)}else{for(;u>++i&&(null==(e=n.call(t,t[i],i))||e!=e);)e=void 0;for(;u>++i;)null!=(r=n.call(t,t[i],i))&&r>e&&(e=r)}return e},d3.extent=function(t,n){var e,r,i,u=-1,a=t.length;if(1===arguments.length){for(;a>++u&&(null==(e=i=t[u])||e!=e);)e=i=void 0;for(;a>++u;)null!=(r=t[u])&&(e>r&&(e=r),r>i&&(i=r))}else{for(;a>++u&&(null==(e=i=n.call(t,t[u],u))||e!=e);)e=void 0;for(;a>++u;)null!=(r=n.call(t,t[u],u))&&(e>r&&(e=r),r>i&&(i=r))}return[e,i]},d3.random={normal:function(t,n){var e=arguments.length;return 2>e&&(n=1),1>e&&(t=0),function(){var e,r,i;do e=2*Math.random()-1,r=2*Math.random()-1,i=e*e+r*r;while(!i||i>1);return t+n*e*Math.sqrt(-2*Math.log(i)/i)}},logNormal:function(t,n){var e=arguments.length;2>e&&(n=1),1>e&&(t=0);var r=d3.random.normal();return function(){return Math.exp(t+n*r())}},irwinHall:function(t){return function(){for(var n=0,e=0;t>e;e++)n+=Math.random();return n/t}}},d3.sum=function(t,n){var e,r=0,i=t.length,u=-1;if(1===arguments.length)for(;i>++u;)isNaN(e=+t[u])||(r+=e);else for(;i>++u;)isNaN(e=+n.call(t,t[u],u))||(r+=e);return r},d3.quantile=function(t,n){var e=(t.length-1)*n+1,r=Math.floor(e),i=+t[r-1],u=e-r;return u?i+u*(t[r]-i):i},d3.shuffle=function(t){for(var n,e,r=t.length;r;)e=0|Math.random()*r--,n=t[r],t[r]=t[e],t[e]=n;return t},d3.transpose=function(t){return d3.zip.apply(d3,t)},d3.zip=function(){if(!(r=arguments.length))return[];for(var t=-1,n=d3.min(arguments,f),e=Array(n);n>++t;)for(var r,i=-1,u=e[t]=Array(r);r>++i;)u[i]=arguments[i][t];return e},d3.bisector=function(t){return{left:function(n,e,r,i){for(3>arguments.length&&(r=0),4>arguments.length&&(i=n.length);i>r;){var u=r+i>>>1;e>t.call(n,n[u],u)?r=u+1:i=u}return r},right:function(n,e,r,i){for(3>arguments.length&&(r=0),4>arguments.length&&(i=n.length);i>r;){var u=r+i>>>1;t.call(n,n[u],u)>e?i=u:r=u+1}return r}}};var 
Zu=d3.bisector(function(t){return t});d3.bisectLeft=Zu.left,d3.bisect=d3.bisectRight=Zu.right,d3.nest=function(){function t(n,o){if(o>=a.length)return r?r.call(i,n):e?n.sort(e):n;for(var c,l,s,f=-1,h=n.length,d=a[o++],g=new u,p={};h>++f;)(s=g.get(c=d(l=n[f])))?s.push(l):g.set(c,[l]);return g.forEach(function(n,e){p[n]=t(e,o)}),p}function n(t,e){if(e>=a.length)return t;var r,i=[],u=o[e++];for(r in t)i.push({key:r,values:n(t[r],e)});return u&&i.sort(function(t,n){return u(t.key,n.key)}),i}var e,r,i={},a=[],o=[];return i.map=function(n){return t(n,0)},i.entries=function(e){return n(t(e,0),0)},i.key=function(t){return a.push(t),i},i.sortKeys=function(t){return o[a.length-1]=t,i},i.sortValues=function(t){return e=t,i},i.rollup=function(t){return r=t,i},i},d3.keys=function(t){var n=[];for(var e in t)n.push(e);return n},d3.values=function(t){var n=[];for(var e in t)n.push(t[e]);return n},d3.entries=function(t){var n=[];for(var e in t)n.push({key:e,value:t[e]});return n},d3.permute=function(t,n){for(var e=[],r=-1,i=n.length;i>++r;)e[r]=t[n[r]];return e},d3.merge=function(t){return Array.prototype.concat.apply([],t)},d3.range=function(t,n,e){if(3>arguments.length&&(e=1,2>arguments.length&&(n=t,t=0)),1/0===(n-t)/e)throw Error("infinite range");var r,i=[],u=d(Math.abs(e)),a=-1;if(t*=u,n*=u,e*=u,0>e)for(;(r=t+e*++a)>n;)i.push(r/u);else for(;n>(r=t+e*++a);)i.push(r/u);return i},d3.requote=function(t){return t.replace(Bu,"\\$&")};var Bu=/[\\\^\$\*\+\?\|\[\]\(\)\.\{\}]/g;d3.round=function(t,n){return n?Math.round(t*(n=Math.pow(10,n)))/n:Math.round(t)},d3.xhr=function(t,n,e){function r(){var t=l.status;!t&&l.responseText||t>=200&&300>t||304===t?u.load.call(i,c.call(i,l)):u.error.call(i,l)}var i={},u=d3.dispatch("progress","load","error"),o={},c=a,l=new(window.XDomainRequest&&/^(http(s)?:)?\/\//.test(t)?XDomainRequest:XMLHttpRequest);return"onload"in l?l.onload=l.onerror=r:l.onreadystatechange=function(){l.readyState>3&&r()},l.onprogress=function(t){var 
n=d3.event;d3.event=t;try{u.progress.call(i,l)}finally{d3.event=n}},i.header=function(t,n){return t=(t+"").toLowerCase(),2>arguments.length?o[t]:(null==n?delete o[t]:o[t]=n+"",i)},i.mimeType=function(t){return arguments.length?(n=null==t?null:t+"",i):n},i.response=function(t){return c=t,i},["get","post"].forEach(function(t){i[t]=function(){return i.send.apply(i,[t].concat(Yu(arguments)))}}),i.send=function(e,r,u){if(2===arguments.length&&"function"==typeof r&&(u=r,r=null),l.open(e,t,!0),null==n||"accept"in o||(o.accept=n+",*/*"),l.setRequestHeader)for(var a in o)l.setRequestHeader(a,o[a]);return null!=n&&l.overrideMimeType&&l.overrideMimeType(n),null!=u&&i.on("error",u).on("load",function(t){u(null,t)}),l.send(null==r?null:r),i},i.abort=function(){return l.abort(),i},d3.rebind(i,u,"on"),2===arguments.length&&"function"==typeof n&&(e=n,n=null),null==e?i:i.get(g(e))},d3.text=function(){return d3.xhr.apply(d3,arguments).response(p)},d3.json=function(t,n){return d3.xhr(t,"application/json",n).response(m)},d3.html=function(t,n){return d3.xhr(t,"text/html",n).response(v)},d3.xml=function(){return d3.xhr.apply(d3,arguments).response(y)};var $u={svg:"http://www.w3.org/2000/svg",xhtml:"http://www.w3.org/1999/xhtml",xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"};d3.ns={prefix:$u,qualify:function(t){var n=t.indexOf(":"),e=t;return n>=0&&(e=t.substring(0,n),t=t.substring(n+1)),$u.hasOwnProperty(e)?{space:$u[e],local:t}:t}},d3.dispatch=function(){for(var t=new M,n=-1,e=arguments.length;e>++n;)t[arguments[n]]=b(t);return t},M.prototype.on=function(t,n){var e=t.indexOf("."),r="";return e>0&&(r=t.substring(e+1),t=t.substring(0,e)),2>arguments.length?this[t].on(r):this[t].on(r,n)},d3.format=function(t){var n=Ju.exec(t),e=n[1]||" 
",r=n[2]||">",i=n[3]||"",u=n[4]||"",a=n[5],o=+n[6],c=n[7],l=n[8],s=n[9],f=1,h="",d=!1;switch(l&&(l=+l.substring(1)),(a||"0"===e&&"="===r)&&(a=e="0",r="=",c&&(o-=Math.floor((o-1)/4))),s){case"n":c=!0,s="g";break;case"%":f=100,h="%",s="f";break;case"p":f=100,h="%",s="r";break;case"b":case"o":case"x":case"X":u&&(u="0"+s.toLowerCase());case"c":case"d":d=!0,l=0;break;case"s":f=-1,s="r"}"#"===u&&(u=""),"r"!=s||l||(s="g"),s=Gu.get(s)||_;var g=a&&c;return function(t){if(d&&t%1)return"";var n=0>t||0===t&&0>1/t?(t=-t,"-"):i;if(0>f){var p=d3.formatPrefix(t,l);t=p.scale(t),h=p.symbol}else t*=f;t=s(t,l),!a&&c&&(t=Ku(t));var m=u.length+t.length+(g?0:n.length),v=o>m?Array(m=o-m+1).join(e):"";return g&&(t=Ku(v+t)),Cu&&t.replace(".",Cu),n+=u,("<"===r?n+t+v:">"===r?v+n+t:"^"===r?v.substring(0,m>>=1)+n+t+v.substring(m):n+(g?t:v+t))+h}};var Ju=/(?:([^{])?([<>=^]))?([+\- ])?(#)?(0)?([0-9]+)?(,)?(\.[0-9]+)?([a-zA-Z%])?/,Gu=d3.map({b:function(t){return t.toString(2)},c:function(t){return String.fromCharCode(t)},o:function(t){return t.toString(8)},x:function(t){return t.toString(16)},X:function(t){return t.toString(16).toUpperCase()},g:function(t,n){return t.toPrecision(n)},e:function(t,n){return t.toExponential(n)},f:function(t,n){return t.toFixed(n)},r:function(t,n){return d3.round(t,n=x(t,n)).toFixed(Math.max(0,Math.min(20,n)))}}),Ku=a;if(Du){var Wu=Du.length;Ku=function(t){for(var n=t.lastIndexOf("."),e=n>=0?"."+t.substring(n+1):(n=t.length,""),r=[],i=0,u=Du[0];n>0&&u>0;)r.push(t.substring(n-=u,n+u)),u=Du[i=(i+1)%Wu];return r.reverse().join(zu||"")+e}}var Qu=["y","z","a","f","p","n","μ","m","","k","M","G","T","P","E","Z","Y"].map(w);d3.formatPrefix=function(t,n){var e=0;return t&&(0>t&&(t*=-1),n&&(t=d3.round(t,x(t,n))),e=1+Math.floor(1e-12+Math.log(t)/Math.LN10),e=Math.max(-24,Math.min(24,3*Math.floor((0>=e?e+1:e-1)/3)))),Qu[8+e/3]};var ta=function(){return a},na=d3.map({linear:ta,poly:q,quad:function(){return A},cubic:function(){return N},sin:function(){return 
C},exp:function(){return z},circle:function(){return D},elastic:L,back:F,bounce:function(){return H}}),ea=d3.map({"in":a,out:k,"in-out":E,"out-in":function(t){return E(k(t))}});d3.ease=function(t){var n=t.indexOf("-"),e=n>=0?t.substring(0,n):t,r=n>=0?t.substring(n+1):"in";return e=na.get(e)||ta,r=ea.get(r)||a,S(r(e.apply(null,Array.prototype.slice.call(arguments,1))))},d3.event=null,d3.transform=function(t){var n=document.createElementNS(d3.ns.prefix.svg,"g");return(d3.transform=function(t){n.setAttribute("transform",t);var e=n.transform.baseVal.consolidate();return new O(e?e.matrix:ra)})(t)},O.prototype.toString=function(){return"translate("+this.translate+")rotate("+this.rotate+")skewX("+this.skew+")scale("+this.scale+")"};var ra={a:1,b:0,c:0,d:1,e:0,f:0};d3.interpolate=function(t,n){for(var e,r=d3.interpolators.length;--r>=0&&!(e=d3.interpolators[r](t,n)););return e},d3.interpolateNumber=function(t,n){return n-=t,function(e){return t+n*e}},d3.interpolateRound=function(t,n){return n-=t,function(e){return Math.round(t+n*e)}},d3.interpolateString=function(t,n){var e,r,i,u,a,o=0,c=0,l=[],s=[];for(ia.lastIndex=0,r=0;e=ia.exec(n);++r)e.index&&l.push(n.substring(o,c=e.index)),s.push({i:l.length,x:e[0]}),l.push(null),o=ia.lastIndex;for(n.length>o&&l.push(n.substring(o)),r=0,u=s.length;(e=ia.exec(t))&&u>r;++r)if(a=s[r],a.x==e[0]){if(a.i)if(null==l[a.i+1])for(l[a.i-1]+=a.x,l.splice(a.i,1),i=r+1;u>i;++i)s[i].i--;else for(l[a.i-1]+=a.x+l[a.i+1],l.splice(a.i,2),i=r+1;u>i;++i)s[i].i-=2;else if(null==l[a.i+1])l[a.i]=a.x;else for(l[a.i]=a.x+l[a.i+1],l.splice(a.i+1,1),i=r+1;u>i;++i)s[i].i--;s.splice(r,1),u--,r--}else a.x=d3.interpolateNumber(parseFloat(e[0]),parseFloat(a.x));for(;u>r;)a=s.pop(),null==l[a.i+1]?l[a.i]=a.x:(l[a.i]=a.x+l[a.i+1],l.splice(a.i+1,1)),u--;return 1===l.length?null==l[0]?s[0].x:function(){return n}:function(t){for(r=0;u>r;++r)l[(a=s[r]).i]=a.x(t);return l.join("")}},d3.interpolateTransform=function(t,n){var 
e,r=[],i=[],u=d3.transform(t),a=d3.transform(n),o=u.translate,c=a.translate,l=u.rotate,s=a.rotate,f=u.skew,h=a.skew,d=u.scale,g=a.scale;return o[0]!=c[0]||o[1]!=c[1]?(r.push("translate(",null,",",null,")"),i.push({i:1,x:d3.interpolateNumber(o[0],c[0])},{i:3,x:d3.interpolateNumber(o[1],c[1])})):c[0]||c[1]?r.push("translate("+c+")"):r.push(""),l!=s?(l-s>180?s+=360:s-l>180&&(l+=360),i.push({i:r.push(r.pop()+"rotate(",null,")")-2,x:d3.interpolateNumber(l,s)})):s&&r.push(r.pop()+"rotate("+s+")"),f!=h?i.push({i:r.push(r.pop()+"skewX(",null,")")-2,x:d3.interpolateNumber(f,h)}):h&&r.push(r.pop()+"skewX("+h+")"),d[0]!=g[0]||d[1]!=g[1]?(e=r.push(r.pop()+"scale(",null,",",null,")"),i.push({i:e-4,x:d3.interpolateNumber(d[0],g[0])},{i:e-2,x:d3.interpolateNumber(d[1],g[1])})):(1!=g[0]||1!=g[1])&&r.push(r.pop()+"scale("+g+")"),e=i.length,function(t){for(var n,u=-1;e>++u;)r[(n=i[u]).i]=n.x(t);return r.join("")}},d3.interpolateRgb=function(t,n){t=d3.rgb(t),n=d3.rgb(n);var e=t.r,r=t.g,i=t.b,u=n.r-e,a=n.g-r,o=n.b-i;return function(t){return"#"+G(Math.round(e+u*t))+G(Math.round(r+a*t))+G(Math.round(i+o*t))}},d3.interpolateHsl=function(t,n){t=d3.hsl(t),n=d3.hsl(n);var e=t.h,r=t.s,i=t.l,u=n.h-e,a=n.s-r,o=n.l-i;return u>180?u-=360:-180>u&&(u+=360),function(t){return un(e+u*t,r+a*t,i+o*t)+""}},d3.interpolateLab=function(t,n){t=d3.lab(t),n=d3.lab(n);var e=t.l,r=t.a,i=t.b,u=n.l-e,a=n.a-r,o=n.b-i;return function(t){return fn(e+u*t,r+a*t,i+o*t)+""}},d3.interpolateHcl=function(t,n){t=d3.hcl(t),n=d3.hcl(n);var e=t.h,r=t.c,i=t.l,u=n.h-e,a=n.c-r,o=n.l-i;return u>180?u-=360:-180>u&&(u+=360),function(t){return cn(e+u*t,r+a*t,i+o*t)+""}},d3.interpolateArray=function(t,n){var e,r=[],i=[],u=t.length,a=n.length,o=Math.min(t.length,n.length);for(e=0;o>e;++e)r.push(d3.interpolate(t[e],n[e]));for(;u>e;++e)i[e]=t[e];for(;a>e;++e)i[e]=n[e];return function(t){for(e=0;o>e;++e)i[e]=r[e](t);return i}},d3.interpolateObject=function(t,n){var e,r={},i={};for(e in t)e in n?r[e]=V(e)(t[e],n[e]):i[e]=t[e];for(e in 
n)e in t||(i[e]=n[e]);return function(t){for(e in r)i[e]=r[e](t);return i}};var ia=/[-+]?(?:\d+\.?\d*|\.?\d+)(?:[eE][-+]?\d+)?/g;d3.interpolators=[d3.interpolateObject,function(t,n){return n instanceof Array&&d3.interpolateArray(t,n)},function(t,n){return("string"==typeof t||"string"==typeof n)&&d3.interpolateString(t+"",n+"")},function(t,n){return("string"==typeof n?aa.has(n)||/^(#|rgb\(|hsl\()/.test(n):n instanceof B)&&d3.interpolateRgb(t,n)},function(t,n){return!isNaN(t=+t)&&!isNaN(n=+n)&&d3.interpolateNumber(t,n)}],B.prototype.toString=function(){return this.rgb()+""},d3.rgb=function(t,n,e){return 1===arguments.length?t instanceof J?$(t.r,t.g,t.b):K(""+t,$,un):$(~~t,~~n,~~e)};var ua=J.prototype=new B;ua.brighter=function(t){t=Math.pow(.7,arguments.length?t:1);var n=this.r,e=this.g,r=this.b,i=30;return n||e||r?(n&&i>n&&(n=i),e&&i>e&&(e=i),r&&i>r&&(r=i),$(Math.min(255,Math.floor(n/t)),Math.min(255,Math.floor(e/t)),Math.min(255,Math.floor(r/t)))):$(i,i,i)},ua.darker=function(t){return t=Math.pow(.7,arguments.length?t:1),$(Math.floor(t*this.r),Math.floor(t*this.g),Math.floor(t*this.b))
+},ua.hsl=function(){return W(this.r,this.g,this.b)},ua.toString=function(){return"#"+G(this.r)+G(this.g)+G(this.b)};var aa=d3.map({aliceblue:"#f0f8ff",antiquewhite:"#faebd7",aqua:"#00ffff",aquamarine:"#7fffd4",azure:"#f0ffff",beige:"#f5f5dc",bisque:"#ffe4c4",black:"#000000",blanchedalmond:"#ffebcd",blue:"#0000ff",blueviolet:"#8a2be2",brown:"#a52a2a",burlywood:"#deb887",cadetblue:"#5f9ea0",chartreuse:"#7fff00",chocolate:"#d2691e",coral:"#ff7f50",cornflowerblue:"#6495ed",cornsilk:"#fff8dc",crimson:"#dc143c",cyan:"#00ffff",darkblue:"#00008b",darkcyan:"#008b8b",darkgoldenrod:"#b8860b",darkgray:"#a9a9a9",darkgreen:"#006400",darkgrey:"#a9a9a9",darkkhaki:"#bdb76b",darkmagenta:"#8b008b",darkolivegreen:"#556b2f",darkorange:"#ff8c00",darkorchid:"#9932cc",darkred:"#8b0000",darksalmon:"#e9967a",darkseagreen:"#8fbc8f",darkslateblue:"#483d8b",darkslategray:"#2f4f4f",darkslategrey:"#2f4f4f",darkturquoise:"#00ced1",darkviolet:"#9400d3",deeppink:"#ff1493",deepskyblue:"#00bfff",dimgray:"#696969",dimgrey:"#696969",dodgerblue:"#1e90ff",firebrick:"#b22222",floralwhite:"#fffaf0",forestgreen:"#228b22",fuchsia:"#ff00ff",gainsboro:"#dcdcdc",ghostwhite:"#f8f8ff",gold:"#ffd700",goldenrod:"#daa520",gray:"#808080",green:"#008000",greenyellow:"#adff2f",grey:"#808080",honeydew:"#f0fff0",hotpink:"#ff69b4",indianred:"#cd5c5c",indigo:"#4b0082",ivory:"#fffff0",khaki:"#f0e68c",lavender:"#e6e6fa",lavenderblush:"#fff0f5",lawngreen:"#7cfc00",lemonchiffon:"#fffacd",lightblue:"#add8e6",lightcoral:"#f08080",lightcyan:"#e0ffff",lightgoldenrodyellow:"#fafad2",lightgray:"#d3d3d3",lightgreen:"#90ee90",lightgrey:"#d3d3d3",lightpink:"#ffb6c1",lightsalmon:"#ffa07a",lightseagreen:"#20b2aa",lightskyblue:"#87cefa",lightslategray:"#778899",lightslategrey:"#778899",lightsteelblue:"#b0c4de",lightyellow:"#ffffe0",lime:"#00ff00",limegreen:"#32cd32",linen:"#faf0e6",magenta:"#ff00ff",maroon:"#800000",mediumaquamarine:"#66cdaa",mediumblue:"#0000cd",mediumorchid:"#ba55d3",mediumpurple:"#9370db",mediumseagreen:"#3cb371",mediu
mslateblue:"#7b68ee",mediumspringgreen:"#00fa9a",mediumturquoise:"#48d1cc",mediumvioletred:"#c71585",midnightblue:"#191970",mintcream:"#f5fffa",mistyrose:"#ffe4e1",moccasin:"#ffe4b5",navajowhite:"#ffdead",navy:"#000080",oldlace:"#fdf5e6",olive:"#808000",olivedrab:"#6b8e23",orange:"#ffa500",orangered:"#ff4500",orchid:"#da70d6",palegoldenrod:"#eee8aa",palegreen:"#98fb98",paleturquoise:"#afeeee",palevioletred:"#db7093",papayawhip:"#ffefd5",peachpuff:"#ffdab9",peru:"#cd853f",pink:"#ffc0cb",plum:"#dda0dd",powderblue:"#b0e0e6",purple:"#800080",red:"#ff0000",rosybrown:"#bc8f8f",royalblue:"#4169e1",saddlebrown:"#8b4513",salmon:"#fa8072",sandybrown:"#f4a460",seagreen:"#2e8b57",seashell:"#fff5ee",sienna:"#a0522d",silver:"#c0c0c0",skyblue:"#87ceeb",slateblue:"#6a5acd",slategray:"#708090",slategrey:"#708090",snow:"#fffafa",springgreen:"#00ff7f",steelblue:"#4682b4",tan:"#d2b48c",teal:"#008080",thistle:"#d8bfd8",tomato:"#ff6347",turquoise:"#40e0d0",violet:"#ee82ee",wheat:"#f5deb3",white:"#ffffff",whitesmoke:"#f5f5f5",yellow:"#ffff00",yellowgreen:"#9acd32"});aa.forEach(function(t,n){aa.set(t,K(n,$,un))}),d3.hsl=function(t,n,e){return 1===arguments.length?t instanceof rn?en(t.h,t.s,t.l):K(""+t,W,en):en(+t,+n,+e)};var oa=rn.prototype=new B;oa.brighter=function(t){return t=Math.pow(.7,arguments.length?t:1),en(this.h,this.s,this.l/t)},oa.darker=function(t){return t=Math.pow(.7,arguments.length?t:1),en(this.h,this.s,t*this.l)},oa.rgb=function(){return un(this.h,this.s,this.l)},d3.hcl=function(t,n,e){return 1===arguments.length?t instanceof on?an(t.h,t.c,t.l):t instanceof sn?hn(t.l,t.a,t.b):hn((t=Q((t=d3.rgb(t)).r,t.g,t.b)).l,t.a,t.b):an(+t,+n,+e)};var ca=on.prototype=new B;ca.brighter=function(t){return an(this.h,this.c,Math.min(100,this.l+la*(arguments.length?t:1)))},ca.darker=function(t){return an(this.h,this.c,Math.max(0,this.l-la*(arguments.length?t:1)))},ca.rgb=function(){return cn(this.h,this.c,this.l).rgb()},d3.lab=function(t,n,e){return 1===arguments.length?t instanceof 
sn?ln(t.l,t.a,t.b):t instanceof on?cn(t.l,t.c,t.h):Q((t=d3.rgb(t)).r,t.g,t.b):ln(+t,+n,+e)};var la=18,sa=.95047,fa=1,ha=1.08883,da=sn.prototype=new B;da.brighter=function(t){return ln(Math.min(100,this.l+la*(arguments.length?t:1)),this.a,this.b)},da.darker=function(t){return ln(Math.max(0,this.l-la*(arguments.length?t:1)),this.a,this.b)},da.rgb=function(){return fn(this.l,this.a,this.b)};var ga=function(t,n){return n.querySelector(t)},pa=function(t,n){return n.querySelectorAll(t)},ma=document.documentElement,va=ma.matchesSelector||ma.webkitMatchesSelector||ma.mozMatchesSelector||ma.msMatchesSelector||ma.oMatchesSelector,ya=function(t,n){return va.call(t,n)};"function"==typeof Sizzle&&(ga=function(t,n){return Sizzle(t,n)[0]||null},pa=function(t,n){return Sizzle.uniqueSort(Sizzle(t,n))},ya=Sizzle.matchesSelector);var Ma=[];d3.selection=function(){return ba},d3.selection.prototype=Ma,Ma.select=function(t){var n,e,r,i,u=[];"function"!=typeof t&&(t=vn(t));for(var a=-1,o=this.length;o>++a;){u.push(n=[]),n.parentNode=(r=this[a]).parentNode;for(var c=-1,l=r.length;l>++c;)(i=r[c])?(n.push(e=t.call(i,i.__data__,c)),e&&"__data__"in i&&(e.__data__=i.__data__)):n.push(null)}return mn(u)},Ma.selectAll=function(t){var n,e,r=[];"function"!=typeof t&&(t=yn(t));for(var i=-1,u=this.length;u>++i;)for(var a=this[i],o=-1,c=a.length;c>++o;)(e=a[o])&&(r.push(n=Yu(t.call(e,e.__data__,o))),n.parentNode=e);return mn(r)},Ma.attr=function(t,n){if(2>arguments.length){if("string"==typeof t){var e=this.node();return t=d3.ns.qualify(t),t.local?e.getAttributeNS(t.space,t.local):e.getAttribute(t)}for(n in t)this.each(Mn(n,t[n]));return this}return this.each(Mn(t,n))},Ma.classed=function(t,n){if(2>arguments.length){if("string"==typeof t){var e=this.node(),r=(t=t.trim().split(/^|\s+/g)).length,i=-1;if(n=e.classList){for(;r>++i;)if(!n.contains(t[i]))return!1}else for(n=e.className,null!=n.baseVal&&(n=n.baseVal);r>++i;)if(!bn(t[i]).test(n))return!1;return!0}for(n in t)this.each(xn(n,t[n]));return 
this}return this.each(xn(t,n))},Ma.style=function(t,n,e){var r=arguments.length;if(3>r){if("string"!=typeof t){2>r&&(n="");for(e in t)this.each(wn(e,t[e],n));return this}if(2>r)return getComputedStyle(this.node(),null).getPropertyValue(t);e=""}return this.each(wn(t,n,e))},Ma.property=function(t,n){if(2>arguments.length){if("string"==typeof t)return this.node()[t];for(n in t)this.each(Sn(n,t[n]));return this}return this.each(Sn(t,n))},Ma.text=function(t){return arguments.length?this.each("function"==typeof t?function(){var n=t.apply(this,arguments);this.textContent=null==n?"":n}:null==t?function(){this.textContent=""}:function(){this.textContent=t}):this.node().textContent},Ma.html=function(t){return arguments.length?this.each("function"==typeof t?function(){var n=t.apply(this,arguments);this.innerHTML=null==n?"":n}:null==t?function(){this.innerHTML=""}:function(){this.innerHTML=t}):this.node().innerHTML},Ma.append=function(t){function n(){return this.appendChild(document.createElementNS(this.namespaceURI,t))}function e(){return this.appendChild(document.createElementNS(t.space,t.local))}return t=d3.ns.qualify(t),this.select(t.local?e:n)},Ma.insert=function(t,n){function e(){return this.insertBefore(document.createElementNS(this.namespaceURI,t),ga(n,this))}function r(){return this.insertBefore(document.createElementNS(t.space,t.local),ga(n,this))}return t=d3.ns.qualify(t),this.select(t.local?r:e)},Ma.remove=function(){return this.each(function(){var t=this.parentNode;t&&t.removeChild(this)})},Ma.data=function(t,n){function e(t,e){var r,i,a,o=t.length,f=e.length,h=Math.min(o,f),d=Array(f),g=Array(f),p=Array(o);if(n){var m,v=new u,y=new 
u,M=[];for(r=-1;o>++r;)m=n.call(i=t[r],i.__data__,r),v.has(m)?p[r]=i:v.set(m,i),M.push(m);for(r=-1;f>++r;)m=n.call(e,a=e[r],r),(i=v.get(m))?(d[r]=i,i.__data__=a):y.has(m)||(g[r]=kn(a)),y.set(m,a),v.remove(m);for(r=-1;o>++r;)v.has(M[r])&&(p[r]=t[r])}else{for(r=-1;h>++r;)i=t[r],a=e[r],i?(i.__data__=a,d[r]=i):g[r]=kn(a);for(;f>r;++r)g[r]=kn(e[r]);for(;o>r;++r)p[r]=t[r]}g.update=d,g.parentNode=d.parentNode=p.parentNode=t.parentNode,c.push(g),l.push(d),s.push(p)}var r,i,a=-1,o=this.length;if(!arguments.length){for(t=Array(o=(r=this[0]).length);o>++a;)(i=r[a])&&(t[a]=i.__data__);return t}var c=qn([]),l=mn([]),s=mn([]);if("function"==typeof t)for(;o>++a;)e(r=this[a],t.call(r,r.parentNode.__data__,a));else for(;o>++a;)e(r=this[a],t);return l.enter=function(){return c},l.exit=function(){return s},l},Ma.datum=function(t){return arguments.length?this.property("__data__",t):this.property("__data__")},Ma.filter=function(t){var n,e,r,i=[];"function"!=typeof t&&(t=En(t));for(var u=0,a=this.length;a>u;u++){i.push(n=[]),n.parentNode=(e=this[u]).parentNode;for(var o=0,c=e.length;c>o;o++)(r=e[o])&&t.call(r,r.__data__,o)&&n.push(r)}return mn(i)},Ma.order=function(){for(var t=-1,n=this.length;n>++t;)for(var e,r=this[t],i=r.length-1,u=r[i];--i>=0;)(e=r[i])&&(u&&u!==e.nextSibling&&u.parentNode.insertBefore(e,u),u=e);return this},Ma.sort=function(t){t=An.apply(this,arguments);for(var n=-1,e=this.length;e>++n;)this[n].sort(t);return this.order()},Ma.on=function(t,n,e){var r=arguments.length;if(3>r){if("string"!=typeof t){2>r&&(n=!1);for(e in t)this.each(Nn(e,t[e],n));return this}if(2>r)return(r=this.node()["__on"+t])&&r._;e=!1}return this.each(Nn(t,n,e))},Ma.each=function(t){return Tn(this,function(n,e,r){t.call(n,n.__data__,e,r)})},Ma.call=function(t){var n=Yu(arguments);return t.apply(n[0]=this,n),this},Ma.empty=function(){return!this.node()},Ma.node=function(){for(var t=0,n=this.length;n>t;t++)for(var e=this[t],r=0,i=e.length;i>r;r++){var u=e[r];if(u)return u}return 
null},Ma.transition=function(){var t,n,e=_a||++Sa,r=[],i=Object.create(ka);i.time=Date.now();for(var u=-1,a=this.length;a>++u;){r.push(t=[]);for(var o=this[u],c=-1,l=o.length;l>++c;)(n=o[c])&&zn(n,c,e,i),t.push(n)}return Cn(r,e)};var ba=mn([[document]]);ba[0].parentNode=ma,d3.select=function(t){return"string"==typeof t?ba.select(t):mn([[t]])},d3.selectAll=function(t){return"string"==typeof t?ba.selectAll(t):mn([Yu(t)])};var xa=[];d3.selection.enter=qn,d3.selection.enter.prototype=xa,xa.append=Ma.append,xa.insert=Ma.insert,xa.empty=Ma.empty,xa.node=Ma.node,xa.select=function(t){for(var n,e,r,i,u,a=[],o=-1,c=this.length;c>++o;){r=(i=this[o]).update,a.push(n=[]),n.parentNode=i.parentNode;for(var l=-1,s=i.length;s>++l;)(u=i[l])?(n.push(r[l]=e=t.call(i.parentNode,u.__data__,l)),e.__data__=u.__data__):n.push(null)}return mn(a)};var _a,wa=[],Sa=0,ka={ease:T,delay:0,duration:250};wa.call=Ma.call,wa.empty=Ma.empty,wa.node=Ma.node,d3.transition=function(t){return arguments.length?_a?t.transition():t:ba.transition()},d3.transition.prototype=wa,wa.select=function(t){var n,e,r,i=this.id,u=[];"function"!=typeof t&&(t=vn(t));for(var a=-1,o=this.length;o>++a;){u.push(n=[]);for(var c=this[a],l=-1,s=c.length;s>++l;)(r=c[l])&&(e=t.call(r,r.__data__,l))?("__data__"in r&&(e.__data__=r.__data__),zn(e,l,i,r.__transition__[i]),n.push(e)):n.push(null)}return Cn(u,i)},wa.selectAll=function(t){var n,e,r,i,u,a=this.id,o=[];"function"!=typeof t&&(t=yn(t));for(var c=-1,l=this.length;l>++c;)for(var s=this[c],f=-1,h=s.length;h>++f;)if(r=s[f]){u=r.__transition__[a],e=t.call(r,r.__data__,f),o.push(n=[]);for(var d=-1,g=e.length;g>++d;)zn(i=e[d],d,a,u),n.push(i)}return Cn(o,a)},wa.filter=function(t){var n,e,r,i=[];"function"!=typeof t&&(t=En(t));for(var u=0,a=this.length;a>u;u++){i.push(n=[]);for(var e=this[u],o=0,c=e.length;c>o;o++)(r=e[o])&&t.call(r,r.__data__,o)&&n.push(r)}return Cn(i,this.id,this.time).ease(this.ease())},wa.attr=function(t,n){function e(){this.removeAttribute(u)}function 
r(){this.removeAttributeNS(u.space,u.local)}if(2>arguments.length){for(n in t)this.attr(n,t[n]);return this}var i=V(t),u=d3.ns.qualify(t);return Ln(this,"attr."+t,n,function(t){function n(){var n,e=this.getAttribute(u);return e!==t&&(n=i(e,t),function(t){this.setAttribute(u,n(t))})}function a(){var n,e=this.getAttributeNS(u.space,u.local);return e!==t&&(n=i(e,t),function(t){this.setAttributeNS(u.space,u.local,n(t))})}return null==t?u.local?r:e:(t+="",u.local?a:n)})},wa.attrTween=function(t,n){function e(t,e){var r=n.call(this,t,e,this.getAttribute(i));return r&&function(t){this.setAttribute(i,r(t))}}function r(t,e){var r=n.call(this,t,e,this.getAttributeNS(i.space,i.local));return r&&function(t){this.setAttributeNS(i.space,i.local,r(t))}}var i=d3.ns.qualify(t);return this.tween("attr."+t,i.local?r:e)},wa.style=function(t,n,e){function r(){this.style.removeProperty(t)}var i=arguments.length;if(3>i){if("string"!=typeof t){2>i&&(n="");for(e in t)this.style(e,t[e],n);return this}e=""}var u=V(t);return Ln(this,"style."+t,n,function(n){function i(){var r,i=getComputedStyle(this,null).getPropertyValue(t);return i!==n&&(r=u(i,n),function(n){this.style.setProperty(t,r(n),e)})}return null==n?r:(n+="",i)})},wa.styleTween=function(t,n,e){return 3>arguments.length&&(e=""),this.tween("style."+t,function(r,i){var u=n.call(this,r,i,getComputedStyle(this,null).getPropertyValue(t));return u&&function(n){this.style.setProperty(t,u(n),e)}})},wa.text=function(t){return Ln(this,"text",t,Dn)},wa.remove=function(){return this.each("end.transition",function(){var t;!this.__transition__&&(t=this.parentNode)&&t.removeChild(this)})},wa.ease=function(t){var n=this.id;return 1>arguments.length?this.node().__transition__[n].ease:("function"!=typeof t&&(t=d3.ease.apply(d3,arguments)),Tn(this,function(e){e.__transition__[n].ease=t}))},wa.delay=function(t){var n=this.id;return Tn(this,"function"==typeof 
t?function(e,r,i){e.__transition__[n].delay=0|t.call(e,e.__data__,r,i)}:(t|=0,function(e){e.__transition__[n].delay=t}))},wa.duration=function(t){var n=this.id;return Tn(this,"function"==typeof t?function(e,r,i){e.__transition__[n].duration=Math.max(1,0|t.call(e,e.__data__,r,i))}:(t=Math.max(1,0|t),function(e){e.__transition__[n].duration=t}))},wa.each=function(t,n){var e=this.id;if(2>arguments.length){var r=ka,i=_a;_a=e,Tn(this,function(n,r,i){ka=n.__transition__[e],t.call(n,n.__data__,r,i)}),ka=r,_a=i}else Tn(this,function(r){r.__transition__[e].event.on(t,n)});return this},wa.transition=function(){for(var t,n,e,r,i=this.id,u=++Sa,a=[],o=0,c=this.length;c>o;o++){a.push(t=[]);for(var n=this[o],l=0,s=n.length;s>l;l++)(e=n[l])&&(r=Object.create(e.__transition__[i]),r.delay+=r.duration,zn(e,l,u,r)),t.push(e)}return Cn(a,u)},wa.tween=function(t,n){var e=this.id;return 2>arguments.length?this.node().__transition__[e].tween.get(t):Tn(this,null==n?function(n){n.__transition__[e].tween.remove(t)}:function(r){r.__transition__[e].tween.set(t,n)})};var Ea,Aa,Na=0,Ta={},qa=null;d3.timer=function(t,n,e){if(3>arguments.length){if(2>arguments.length)n=0;else if(!isFinite(n))return;e=Date.now()}var r=Ta[t.id];r&&r.callback===t?(r.then=e,r.delay=n):Ta[t.id=++Na]=qa={callback:t,then:e,delay:n,next:qa},Ea||(Aa=clearTimeout(Aa),Ea=1,Ca(Fn))},d3.timer.flush=function(){for(var t,n=Date.now(),e=qa;e;)t=n-e.then,e.delay||(e.flush=e.callback(t)),e=e.next;Hn()};var Ca=window.requestAnimationFrame||window.webkitRequestAnimationFrame||window.mozRequestAnimationFrame||window.oRequestAnimationFrame||window.msRequestAnimationFrame||function(t){setTimeout(t,17)};d3.mouse=function(t){return Rn(t,P())};var za=/WebKit/.test(navigator.userAgent)?-1:0;d3.touches=function(t,n){return 2>arguments.length&&(n=P().touches),n?Yu(n).map(function(n){var e=Rn(t,n);return e.identifier=n.identifier,e}):[]},d3.scale={},d3.scale.linear=function(){return 
In([0,1],[0,1],d3.interpolate,!1)},d3.scale.log=function(){return Kn(d3.scale.linear(),Wn)};var Da=d3.format(".0e");Wn.pow=function(t){return Math.pow(10,t)},Qn.pow=function(t){return-Math.pow(10,-t)},d3.scale.pow=function(){return te(d3.scale.linear(),1)},d3.scale.sqrt=function(){return d3.scale.pow().exponent(.5)},d3.scale.ordinal=function(){return ee([],{t:"range",a:[[]]})},d3.scale.category10=function(){return d3.scale.ordinal().range(La)},d3.scale.category20=function(){return d3.scale.ordinal().range(Fa)},d3.scale.category20b=function(){return d3.scale.ordinal().range(Ha)},d3.scale.category20c=function(){return d3.scale.ordinal().range(Ra)};var La=["#1f77b4","#ff7f0e","#2ca02c","#d62728","#9467bd","#8c564b","#e377c2","#7f7f7f","#bcbd22","#17becf"],Fa=["#1f77b4","#aec7e8","#ff7f0e","#ffbb78","#2ca02c","#98df8a","#d62728","#ff9896","#9467bd","#c5b0d5","#8c564b","#c49c94","#e377c2","#f7b6d2","#7f7f7f","#c7c7c7","#bcbd22","#dbdb8d","#17becf","#9edae5"],Ha=["#393b79","#5254a3","#6b6ecf","#9c9ede","#637939","#8ca252","#b5cf6b","#cedb9c","#8c6d31","#bd9e39","#e7ba52","#e7cb94","#843c39","#ad494a","#d6616b","#e7969c","#7b4173","#a55194","#ce6dbd","#de9ed6"],Ra=["#3182bd","#6baed6","#9ecae1","#c6dbef","#e6550d","#fd8d3c","#fdae6b","#fdd0a2","#31a354","#74c476","#a1d99b","#c7e9c0","#756bb1","#9e9ac8","#bcbddc","#dadaeb","#636363","#969696","#bdbdbd","#d9d9d9"];d3.scale.quantile=function(){return re([],[])},d3.scale.quantize=function(){return ie(0,1,[0,1])},d3.scale.threshold=function(){return ue([.5],[0,1])},d3.scale.identity=function(){return ae([0,1])},d3.svg={},d3.svg.arc=function(){function t(){var t=n.apply(this,arguments),u=e.apply(this,arguments),a=r.apply(this,arguments)+Pa,o=i.apply(this,arguments)+Pa,c=(a>o&&(c=a,a=o,o=c),o-a),l=Ru>c?"0":"1",s=Math.cos(a),f=Math.sin(a),h=Math.cos(o),d=Math.sin(o);return c>=ja?t?"M0,"+u+"A"+u+","+u+" 0 1,1 0,"+-u+"A"+u+","+u+" 0 1,1 0,"+u+"M0,"+t+"A"+t+","+t+" 0 1,0 0,"+-t+"A"+t+","+t+" 0 1,0 0,"+t+"Z":"M0,"+u+"A"+u+","+u+" 0 
1,1 0,"+-u+"A"+u+","+u+" 0 1,1 0,"+u+"Z":t?"M"+u*s+","+u*f+"A"+u+","+u+" 0 "+l+",1 "+u*h+","+u*d+"L"+t*h+","+t*d+"A"+t+","+t+" 0 "+l+",0 "+t*s+","+t*f+"Z":"M"+u*s+","+u*f+"A"+u+","+u+" 0 "+l+",1 "+u*h+","+u*d+"L0,0"+"Z"}var n=oe,e=ce,r=le,i=se;return t.innerRadius=function(e){return arguments.length?(n=c(e),t):n},t.outerRadius=function(n){return arguments.length?(e=c(n),t):e},t.startAngle=function(n){return arguments.length?(r=c(n),t):r},t.endAngle=function(n){return arguments.length?(i=c(n),t):i},t.centroid=function(){var t=(n.apply(this,arguments)+e.apply(this,arguments))/2,u=(r.apply(this,arguments)+i.apply(this,arguments))/2+Pa;return[Math.cos(u)*t,Math.sin(u)*t]},t};var Pa=-Ru/2,ja=2*Ru-1e-6;d3.svg.line=function(){return fe(a)};var Oa=d3.map({linear:ge,"linear-closed":pe,"step-before":me,"step-after":ve,basis:we,"basis-open":Se,"basis-closed":ke,bundle:Ee,cardinal:be,"cardinal-open":ye,"cardinal-closed":Me,monotone:ze});Oa.forEach(function(t,n){n.key=t,n.closed=/-closed$/.test(t)});var Ya=[0,2/3,1/3,0],Ua=[0,1/3,2/3,0],Ia=[0,1/6,2/3,1/6];d3.svg.line.radial=function(){var t=fe(De);return t.radius=t.x,delete t.x,t.angle=t.y,delete t.y,t},me.reverse=ve,ve.reverse=me,d3.svg.area=function(){return Le(a)},d3.svg.area.radial=function(){var t=Le(De);return t.radius=t.x,delete t.x,t.innerRadius=t.x0,delete t.x0,t.outerRadius=t.x1,delete t.x1,t.angle=t.y,delete t.y,t.startAngle=t.y0,delete t.y0,t.endAngle=t.y1,delete t.y1,t},d3.svg.chord=function(){function e(t,n){var e=r(this,o,t,n),c=r(this,l,t,n);return"M"+e.p0+u(e.r,e.p1,e.a1-e.a0)+(i(e,c)?a(e.r,e.p1,e.r,e.p0):a(e.r,e.p1,c.r,c.p0)+u(c.r,c.p1,c.a1-c.a0)+a(c.r,c.p1,e.r,e.p0))+"Z"}function r(t,n,e,r){var i=n.call(t,e,r),u=s.call(t,i,r),a=f.call(t,i,r)+Pa,o=h.call(t,i,r)+Pa;return{r:u,a0:a,a1:o,p0:[u*Math.cos(a),u*Math.sin(a)],p1:[u*Math.cos(o),u*Math.sin(o)]}}function i(t,n){return t.a0==n.a0&&t.a1==n.a1}function u(t,n,e){return"A"+t+","+t+" 0 "+ +(e>Ru)+",1 "+n}function a(t,n,e,r){return"Q 0,0 "+r}var 
o=n,l=t,s=Fe,f=le,h=se;return e.radius=function(t){return arguments.length?(s=c(t),e):s},e.source=function(t){return arguments.length?(o=c(t),e):o},e.target=function(t){return arguments.length?(l=c(t),e):l},e.startAngle=function(t){return arguments.length?(f=c(t),e):f},e.endAngle=function(t){return arguments.length?(h=c(t),e):h},e},d3.svg.diagonal=function(){function e(t,n){var e=r.call(this,t,n),a=i.call(this,t,n),o=(e.y+a.y)/2,c=[e,{x:e.x,y:o},{x:a.x,y:o},a];return c=c.map(u),"M"+c[0]+"C"+c[1]+" "+c[2]+" "+c[3]}var r=n,i=t,u=He;return e.source=function(t){return arguments.length?(r=c(t),e):r},e.target=function(t){return arguments.length?(i=c(t),e):i},e.projection=function(t){return arguments.length?(u=t,e):u},e},d3.svg.diagonal.radial=function(){var t=d3.svg.diagonal(),n=He,e=t.projection;return t.projection=function(t){return arguments.length?e(Re(n=t)):n},t},d3.svg.symbol=function(){function t(t,r){return(Va.get(n.call(this,t,r))||Oe)(e.call(this,t,r))}var n=je,e=Pe;return t.type=function(e){return arguments.length?(n=c(e),t):n},t.size=function(n){return arguments.length?(e=c(n),t):e},t};var Va=d3.map({circle:Oe,cross:function(t){var n=Math.sqrt(t/5)/2;return"M"+-3*n+","+-n+"H"+-n+"V"+-3*n+"H"+n+"V"+-n+"H"+3*n+"V"+n+"H"+n+"V"+3*n+"H"+-n+"V"+n+"H"+-3*n+"Z"},diamond:function(t){var n=Math.sqrt(t/(2*Za)),e=n*Za;return"M0,"+-n+"L"+e+",0"+" 0,"+n+" "+-e+",0"+"Z"},square:function(t){var n=Math.sqrt(t)/2;return"M"+-n+","+-n+"L"+n+","+-n+" "+n+","+n+" "+-n+","+n+"Z"},"triangle-down":function(t){var n=Math.sqrt(t/Xa),e=n*Xa/2;return"M0,"+e+"L"+n+","+-e+" "+-n+","+-e+"Z"},"triangle-up":function(t){var n=Math.sqrt(t/Xa),e=n*Xa/2;return"M0,"+-e+"L"+n+","+e+" "+-n+","+e+"Z"}});d3.svg.symbolTypes=Va.keys();var Xa=Math.sqrt(3),Za=Math.tan(30*ju);d3.svg.axis=function(){function t(t){t.each(function(){var 
t,f=d3.select(this),h=null==l?e.ticks?e.ticks.apply(e,c):e.domain():l,d=null==n?e.tickFormat?e.tickFormat.apply(e,c):String:n,g=Ie(e,h,s),p=f.selectAll(".minor").data(g,String),m=p.enter().insert("line","g").attr("class","tick minor").style("opacity",1e-6),v=d3.transition(p.exit()).style("opacity",1e-6).remove(),y=d3.transition(p).style("opacity",1),M=f.selectAll("g").data(h,String),b=M.enter().insert("g","path").style("opacity",1e-6),x=d3.transition(M.exit()).style("opacity",1e-6).remove(),_=d3.transition(M).style("opacity",1),w=On(e),S=f.selectAll(".domain").data([0]),k=d3.transition(S),E=e.copy(),A=this.__chart__||E;this.__chart__=E,S.enter().append("path").attr("class","domain"),b.append("line").attr("class","tick"),b.append("text");var N=b.select("line"),T=_.select("line"),q=M.select("text").text(d),C=b.select("text"),z=_.select("text");switch(r){case"bottom":t=Ye,m.attr("y2",u),y.attr("x2",0).attr("y2",u),N.attr("y2",i),C.attr("y",Math.max(i,0)+o),T.attr("x2",0).attr("y2",i),z.attr("x",0).attr("y",Math.max(i,0)+o),q.attr("dy",".71em").style("text-anchor","middle"),k.attr("d","M"+w[0]+","+a+"V0H"+w[1]+"V"+a);break;case"top":t=Ye,m.attr("y2",-u),y.attr("x2",0).attr("y2",-u),N.attr("y2",-i),C.attr("y",-(Math.max(i,0)+o)),T.attr("x2",0).attr("y2",-i),z.attr("x",0).attr("y",-(Math.max(i,0)+o)),q.attr("dy","0em").style("text-anchor","middle"),k.attr("d","M"+w[0]+","+-a+"V0H"+w[1]+"V"+-a);break;case"left":t=Ue,m.attr("x2",-u),y.attr("x2",-u).attr("y2",0),N.attr("x2",-i),C.attr("x",-(Math.max(i,0)+o)),T.attr("x2",-i).attr("y2",0),z.attr("x",-(Math.max(i,0)+o)).attr("y",0),q.attr("dy",".32em").style("text-anchor","end"),k.attr("d","M"+-a+","+w[0]+"H0V"+w[1]+"H"+-a);break;case"right":t=Ue,m.attr("x2",u),y.attr("x2",u).attr("y2",0),N.attr("x2",i),C.attr("x",Math.max(i,0)+o),T.attr("x2",i).attr("y2",0),z.attr("x",Math.max(i,0)+o).attr("y",0),q.attr("dy",".32em").style("text-anchor","start"),k.attr("d","M"+a+","+w[0]+"H0V"+w[1]+"H"+a)}if(e.ticks)b.call(t,A),_.call(t,E),x.c
all(t,E),m.call(t,A),y.call(t,E),v.call(t,E);else{var D=E.rangeBand()/2,L=function(t){return E(t)+D};b.call(t,L),_.call(t,L)}})}var n,e=d3.scale.linear(),r="bottom",i=6,u=6,a=6,o=3,c=[10],l=null,s=0;return t.scale=function(n){return arguments.length?(e=n,t):e},t.orient=function(n){return arguments.length?(r=n,t):r},t.ticks=function(){return arguments.length?(c=arguments,t):c},t.tickValues=function(n){return arguments.length?(l=n,t):l},t.tickFormat=function(e){return arguments.length?(n=e,t):n},t.tickSize=function(n,e){if(!arguments.length)return i;var r=arguments.length-1;return i=+n,u=r>1?+e:i,a=r>0?+arguments[r]:i,t},t.tickPadding=function(n){return arguments.length?(o=+n,t):o},t.tickSubdivide=function(n){return arguments.length?(s=+n,t):s},t},d3.svg.brush=function(){function t(u){u.each(function(){var u,a=d3.select(this),s=a.selectAll(".background").data([0]),f=a.selectAll(".extent").data([0]),h=a.selectAll(".resize").data(l,String);a.style("pointer-events","all").on("mousedown.brush",i).on("touchstart.brush",i),s.enter().append("rect").attr("class","background").style("visibility","hidden").style("cursor","crosshair"),f.enter().append("rect").attr("class","extent").style("cursor","move"),h.enter().append("g").attr("class",function(t){return"resize "+t}).style("cursor",function(t){return Ba[t]}).append("rect").attr("x",function(t){return/[ew]$/.test(t)?-3:null}).attr("y",function(t){return/^[ns]/.test(t)?-3:null}).attr("width",6).attr("height",6).style("visibility","hidden"),h.style("display",t.empty()?"none":null),h.exit().remove(),o&&(u=On(o),s.attr("x",u[0]).attr("width",u[1]-u[0]),e(a)),c&&(u=On(c),s.attr("y",u[0]).attr("height",u[1]-u[0]),r(a)),n(a)})}function n(t){t.selectAll(".resize").attr("transform",function(t){return"translate("+s[+/e$/.test(t)][0]+","+s[+/^s/.test(t)][1]+")"})}function e(t){t.select(".extent").attr("x",s[0][0]),t.selectAll(".extent,.n>rect,.s>rect").attr("width",s[1][0]-s[0][0])}function 
r(t){t.select(".extent").attr("y",s[0][1]),t.selectAll(".extent,.e>rect,.w>rect").attr("height",s[1][1]-s[0][1])}function i(){function i(){var t=d3.event.changedTouches;return t?d3.touches(v,t)[0]:d3.mouse(v)}function l(){32==d3.event.keyCode&&(S||(p=null,k[0]-=s[1][0],k[1]-=s[1][1],S=2),R())}function f(){32==d3.event.keyCode&&2==S&&(k[0]+=s[1][0],k[1]+=s[1][1],S=0,R())}function h(){var t=i(),u=!1;m&&(t[0]+=m[0],t[1]+=m[1]),S||(d3.event.altKey?(p||(p=[(s[0][0]+s[1][0])/2,(s[0][1]+s[1][1])/2]),k[0]=s[+(t[0]<p[0])][0],k[1]=s[+(t[1]<p[1])][1]):p=null),_&&d(t,o,0)&&(e(b),u=!0),w&&d(t,c,1)&&(r(b),u=!0),u&&(n(b),M({type:"brush",mode:S?"move":"resize"}))}function d(t,n,e){var r,i,a=On(n),o=a[0],c=a[1],l=k[e],f=s[1][e]-s[0][e];return S&&(o-=l,c-=f+l),r=Math.max(o,Math.min(c,t[e])),S?i=(r+=l)+f:(p&&(l=Math.max(o,Math.min(c,2*p[e]-r))),r>l?(i=r,r=l):i=l),s[0][e]!==r||s[1][e]!==i?(u=null,s[0][e]=r,s[1][e]=i,!0):void 0}function g(){h(),b.style("pointer-events","all").selectAll(".resize").style("display",t.empty()?"none":null),d3.select("body").style("cursor",null),E.on("mousemove.brush",null).on("mouseup.brush",null).on("touchmove.brush",null).on("touchend.brush",null).on("keydown.brush",null).on("keyup.brush",null),M({type:"brushend"}),R()}var p,m,v=this,y=d3.select(d3.event.target),M=a.of(v,arguments),b=d3.select(v),x=y.datum(),_=!/^(n|s)$/.test(x)&&o,w=!/^(e|w)$/.test(x)&&c,S=y.classed("extent"),k=i(),E=d3.select(window).on("mousemove.brush",h).on("mouseup.brush",g).on("touchmove.brush",h).on("touchend.brush",g).on("keydown.brush",l).on("keyup.brush",f);if(S)k[0]=s[0][0]-k[0],k[1]=s[0][1]-k[1];else if(x){var A=+/w$/.test(x),N=+/^n/.test(x);m=[s[1-A][0]-k[0],s[1-N][1]-k[1]],k[0]=s[A][0],k[1]=s[N][1]}else d3.event.altKey&&(p=k.slice());b.style("pointer-events","none").selectAll(".resize").style("display",null),d3.select("body").style("cursor",y.style("cursor")),M({type:"brushstart"}),h(),R()}var 
u,a=j(t,"brushstart","brush","brushend"),o=null,c=null,l=$a[0],s=[[0,0],[0,0]];return t.x=function(n){return arguments.length?(o=n,l=$a[!o<<1|!c],t):o},t.y=function(n){return arguments.length?(c=n,l=$a[!o<<1|!c],t):c},t.extent=function(n){var e,r,i,a,l;return arguments.length?(u=[[0,0],[0,0]],o&&(e=n[0],r=n[1],c&&(e=e[0],r=r[0]),u[0][0]=e,u[1][0]=r,o.invert&&(e=o(e),r=o(r)),e>r&&(l=e,e=r,r=l),s[0][0]=0|e,s[1][0]=0|r),c&&(i=n[0],a=n[1],o&&(i=i[1],a=a[1]),u[0][1]=i,u[1][1]=a,c.invert&&(i=c(i),a=c(a)),i>a&&(l=i,i=a,a=l),s[0][1]=0|i,s[1][1]=0|a),t):(n=u||s,o&&(e=n[0][0],r=n[1][0],u||(e=s[0][0],r=s[1][0],o.invert&&(e=o.invert(e),r=o.invert(r)),e>r&&(l=e,e=r,r=l))),c&&(i=n[0][1],a=n[1][1],u||(i=s[0][1],a=s[1][1],c.invert&&(i=c.invert(i),a=c.invert(a)),i>a&&(l=i,i=a,a=l))),o&&c?[[e,i],[r,a]]:o?[e,r]:c&&[i,a])},t.clear=function(){return u=null,s[0][0]=s[0][1]=s[1][0]=s[1][1]=0,t},t.empty=function(){return o&&s[0][0]===s[1][0]||c&&s[0][1]===s[1][1]},d3.rebind(t,a,"on")};var Ba={n:"ns-resize",e:"ew-resize",s:"ns-resize",w:"ew-resize",nw:"nwse-resize",ne:"nesw-resize",se:"nwse-resize",sw:"nesw-resize"},$a=[["n","e","s","w","nw","ne","se","sw"],["e","w"],["n","s"],[]];d3.behavior={},d3.behavior.drag=function(){function t(){this.on("mousedown.drag",n).on("touchstart.drag",n)}function n(){function t(){var t=o.parentNode;return null!=s?d3.touches(t).filter(function(t){return t.identifier===s})[0]:d3.mouse(t)}function n(){if(!o.parentNode)return i();var n=t(),e=n[0]-f[0],r=n[1]-f[1];h|=e|r,f=n,R(),c({type:"drag",x:n[0]+a[0],y:n[1]+a[1],dx:e,dy:r})}function i(){c({type:"dragend"}),h&&(R(),d3.event.target===l&&d.on("click.drag",u,!0)),d.on(null!=s?"touchmove.drag-"+s:"mousemove.drag",null).on(null!=s?"touchend.drag-"+s:"mouseup.drag",null)}function u(){R(),d.on("click.drag",null)}var 
a,o=this,c=e.of(o,arguments),l=d3.event.target,s=d3.event.touches?d3.event.changedTouches[0].identifier:null,f=t(),h=0,d=d3.select(window).on(null!=s?"touchmove.drag-"+s:"mousemove.drag",n).on(null!=s?"touchend.drag-"+s:"mouseup.drag",i,!0);r?(a=r.apply(o,arguments),a=[a.x-f[0],a.y-f[1]]):a=[0,0],null==s&&R(),c({type:"dragstart"})}var e=j(t,"drag","dragstart","dragend"),r=null;return t.origin=function(n){return arguments.length?(r=n,t):r},d3.rebind(t,e,"on")},d3.behavior.zoom=function(){function t(){this.on("mousedown.zoom",o).on("mousewheel.zoom",c).on("mousemove.zoom",l).on("DOMMouseScroll.zoom",c).on("dblclick.zoom",s).on("touchstart.zoom",f).on("touchmove.zoom",h).on("touchend.zoom",f)}function n(t){return[(t[0]-b[0])/x,(t[1]-b[1])/x]}function e(t){return[t[0]*x+b[0],t[1]*x+b[1]]}function r(t){x=Math.max(_[0],Math.min(_[1],t))}function i(t,n){n=e(n),b[0]+=t[0]-n[0],b[1]+=t[1]-n[1]}function u(){m&&m.domain(p.range().map(function(t){return(t-b[0])/x}).map(p.invert)),y&&y.domain(v.range().map(function(t){return(t-b[1])/x}).map(v.invert))}function a(t){u(),d3.event.preventDefault(),t({type:"zoom",scale:x,translate:b})}function o(){function t(){l=1,i(d3.mouse(u),f),a(o)}function e(){l&&R(),s.on("mousemove.zoom",null).on("mouseup.zoom",null),l&&d3.event.target===c&&s.on("click.zoom",r,!0)}function r(){R(),s.on("click.zoom",null)}var u=this,o=w.of(u,arguments),c=d3.event.target,l=0,s=d3.select(window).on("mousemove.zoom",t).on("mouseup.zoom",e),f=n(d3.mouse(u));window.focus(),R()}function c(){d||(d=n(d3.mouse(this))),r(Math.pow(2,.002*Ve())*x),i(d3.mouse(this),d),a(w.of(this,arguments))}function l(){d=null}function s(){var t=d3.mouse(this),e=n(t),u=Math.log(x)/Math.LN2;r(Math.pow(2,d3.event.shiftKey?Math.ceil(u)-1:Math.floor(u)+1)),i(t,e),a(w.of(this,arguments))}function f(){var t=d3.touches(this),e=Date.now();if(g=x,d={},t.forEach(function(t){d[t.identifier]=n(t)}),R(),1===t.length){if(500>e-M){var u=t[0],o=n(t[0]);r(2*x),i(u,o),a(w.of(this,arguments))}M=e}}function 
h(){var t=d3.touches(this),n=t[0],e=d[n.identifier];if(u=t[1]){var u,o=d[u.identifier];n=[(n[0]+u[0])/2,(n[1]+u[1])/2],e=[(e[0]+o[0])/2,(e[1]+o[1])/2],r(d3.event.scale*g)}i(n,e),M=null,a(w.of(this,arguments))}var d,g,p,m,v,y,M,b=[0,0],x=1,_=Ga,w=j(t,"zoom");return t.translate=function(n){return arguments.length?(b=n.map(Number),u(),t):b},t.scale=function(n){return arguments.length?(x=+n,u(),t):x},t.scaleExtent=function(n){return arguments.length?(_=null==n?Ga:n.map(Number),t):_},t.x=function(n){return arguments.length?(m=n,p=n.copy(),b=[0,0],x=1,t):m},t.y=function(n){return arguments.length?(y=n,v=n.copy(),b=[0,0],x=1,t):y},d3.rebind(t,w,"on")};var Ja,Ga=[0,1/0];d3.layout={},d3.layout.bundle=function(){return function(t){for(var n=[],e=-1,r=t.length;r>++e;)n.push(Xe(t[e]));return n}},d3.layout.chord=function(){function t(){var t,l,f,h,d,g={},p=[],m=d3.range(u),v=[];for(e=[],r=[],t=0,h=-1;u>++h;){for(l=0,d=-1;u>++d;)l+=i[h][d];p.push(l),v.push(d3.range(u)),t+=l}for(a&&m.sort(function(t,n){return a(p[t],p[n])}),o&&v.forEach(function(t,n){t.sort(function(t,e){return o(i[n][t],i[n][e])
+})}),t=(2*Ru-s*u)/t,l=0,h=-1;u>++h;){for(f=l,d=-1;u>++d;){var y=m[h],M=v[y][d],b=i[y][M],x=l,_=l+=b*t;g[y+"-"+M]={index:y,subindex:M,startAngle:x,endAngle:_,value:b}}r[y]={index:y,startAngle:f,endAngle:l,value:(l-f)/t},l+=s}for(h=-1;u>++h;)for(d=h-1;u>++d;){var w=g[h+"-"+d],S=g[d+"-"+h];(w.value||S.value)&&e.push(w.value<S.value?{source:S,target:w}:{source:w,target:S})}c&&n()}function n(){e.sort(function(t,n){return c((t.source.value+t.target.value)/2,(n.source.value+n.target.value)/2)})}var e,r,i,u,a,o,c,l={},s=0;return l.matrix=function(t){return arguments.length?(u=(i=t)&&i.length,e=r=null,l):i},l.padding=function(t){return arguments.length?(s=t,e=r=null,l):s},l.sortGroups=function(t){return arguments.length?(a=t,e=r=null,l):a},l.sortSubgroups=function(t){return arguments.length?(o=t,e=null,l):o},l.sortChords=function(t){return arguments.length?(c=t,e&&n(),l):c},l.chords=function(){return e||t(),e},l.groups=function(){return r||t(),r},l},d3.layout.force=function(){function t(t){return function(n,e,r,i){if(n.point!==t){var u=n.cx-t.x,a=n.cy-t.y,o=1/Math.sqrt(u*u+a*a);if(v>(i-e)*o){var c=n.charge*o*o;return t.px-=u*c,t.py-=a*c,!0}if(n.point&&isFinite(o)){var c=n.pointCharge*o*o;t.px-=u*c,t.py-=a*c}}return!n.charge}}function n(t){t.px=d3.event.x,t.py=d3.event.y,l.resume()}var e,r,i,u,o,l={},s=d3.dispatch("start","tick","end"),f=[1,1],h=.9,d=Qe,g=tr,p=-30,m=.1,v=.8,y=[],M=[];return l.tick=function(){if(.005>(r*=.99))return s.end({type:"end",alpha:r=0}),!0;var 
n,e,a,c,l,d,g,v,b,x=y.length,_=M.length;for(e=0;_>e;++e)a=M[e],c=a.source,l=a.target,v=l.x-c.x,b=l.y-c.y,(d=v*v+b*b)&&(d=r*u[e]*((d=Math.sqrt(d))-i[e])/d,v*=d,b*=d,l.x-=v*(g=c.weight/(l.weight+c.weight)),l.y-=b*g,c.x+=v*(g=1-g),c.y+=b*g);if((g=r*m)&&(v=f[0]/2,b=f[1]/2,e=-1,g))for(;x>++e;)a=y[e],a.x+=(v-a.x)*g,a.y+=(b-a.y)*g;if(p)for(We(n=d3.geom.quadtree(y),r,o),e=-1;x>++e;)(a=y[e]).fixed||n.visit(t(a));for(e=-1;x>++e;)a=y[e],a.fixed?(a.x=a.px,a.y=a.py):(a.x-=(a.px-(a.px=a.x))*h,a.y-=(a.py-(a.py=a.y))*h);s.tick({type:"tick",alpha:r})},l.nodes=function(t){return arguments.length?(y=t,l):y},l.links=function(t){return arguments.length?(M=t,l):M},l.size=function(t){return arguments.length?(f=t,l):f},l.linkDistance=function(t){return arguments.length?(d=c(t),l):d},l.distance=l.linkDistance,l.linkStrength=function(t){return arguments.length?(g=c(t),l):g},l.friction=function(t){return arguments.length?(h=t,l):h},l.charge=function(t){return arguments.length?(p="function"==typeof t?t:+t,l):p},l.gravity=function(t){return arguments.length?(m=t,l):m},l.theta=function(t){return arguments.length?(v=t,l):v},l.alpha=function(t){return arguments.length?(r?r=t>0?t:0:t>0&&(s.start({type:"start",alpha:r=t}),d3.timer(l.tick)),l):r},l.start=function(){function t(t,r){for(var i,u=n(e),a=-1,o=u.length;o>++a;)if(!isNaN(i=u[a][t]))return i;return Math.random()*r}function n(){if(!a){for(a=[],r=0;s>r;++r)a[r]=[];for(r=0;h>r;++r){var t=M[r];a[t.source.index].push(t.target),a[t.target.index].push(t.source)}}return a[e]}var e,r,a,c,s=y.length,h=M.length,m=f[0],v=f[1];for(e=0;s>e;++e)(c=y[e]).index=e,c.weight=0;for(i=[],u=[],e=0;h>e;++e)c=M[e],"number"==typeof c.source&&(c.source=y[c.source]),"number"==typeof c.target&&(c.target=y[c.target]),i[e]=d.call(this,c,e),u[e]=g.call(this,c,e),++c.source.weight,++c.target.weight;for(e=0;s>e;++e)c=y[e],isNaN(c.x)&&(c.x=t("x",m)),isNaN(c.y)&&(c.y=t("y",v)),isNaN(c.px)&&(c.px=c.x),isNaN(c.py)&&(c.py=c.y);if(o=[],"function"==typeof 
p)for(e=0;s>e;++e)o[e]=+p.call(this,y[e],e);else for(e=0;s>e;++e)o[e]=p;return l.resume()},l.resume=function(){return l.alpha(.1)},l.stop=function(){return l.alpha(0)},l.drag=function(){e||(e=d3.behavior.drag().origin(a).on("dragstart",$e).on("drag",n).on("dragend",Je)),this.on("mouseover.force",Ge).on("mouseout.force",Ke).call(e)},d3.rebind(l,s,"on")},d3.layout.partition=function(){function t(n,e,r,i){var u=n.children;if(n.x=e,n.y=n.depth*i,n.dx=r,n.dy=i,u&&(a=u.length)){var a,o,c,l=-1;for(r=n.value?r/n.value:0;a>++l;)t(o=u[l],e,c=o.value*r,i),e+=c}}function n(t){var e=t.children,r=0;if(e&&(i=e.length))for(var i,u=-1;i>++u;)r=Math.max(r,n(e[u]));return 1+r}function e(e,u){var a=r.call(this,e,u);return t(a[0],0,i[0],i[1]/n(a[0])),a}var r=d3.layout.hierarchy(),i=[1,1];return e.size=function(t){return arguments.length?(i=t,e):i},hr(e,r)},d3.layout.pie=function(){function t(u){var a=u.map(function(e,r){return+n.call(t,e,r)}),o=+("function"==typeof r?r.apply(this,arguments):r),c=(("function"==typeof i?i.apply(this,arguments):i)-r)/d3.sum(a),l=d3.range(u.length);null!=e&&l.sort(e===Ka?function(t,n){return a[n]-a[t]}:function(t,n){return e(u[t],u[n])});var s=[];return l.forEach(function(t){var n;s[t]={data:u[t],value:n=a[t],startAngle:o,endAngle:o+=n*c}}),s}var n=Number,e=Ka,r=0,i=2*Ru;return t.value=function(e){return arguments.length?(n=e,t):n},t.sort=function(n){return arguments.length?(e=n,t):e},t.startAngle=function(n){return arguments.length?(r=n,t):r},t.endAngle=function(n){return arguments.length?(i=n,t):i},t};var Ka={};d3.layout.stack=function(){function t(a,c){var l=a.map(function(e,r){return n.call(t,e,r)}),s=l.map(function(n){return n.map(function(n,e){return[u.call(t,n,e),o.call(t,n,e)]})}),f=e.call(t,s,c);l=d3.permute(l,f),s=d3.permute(s,f);var h,d,g,p=r.call(t,s,c),m=l.length,v=l[0].length;for(d=0;v>d;++d)for(i.call(t,l[0][d],g=p[d],s[0][d][1]),h=1;m>h;++h)i.call(t,l[h][d],g+=s[h-1][d][1],s[h][d][1]);return a}var n=a,e=ir,r=ur,i=rr,u=nr,o=er;return 
t.values=function(e){return arguments.length?(n=e,t):n},t.order=function(n){return arguments.length?(e="function"==typeof n?n:Wa.get(n)||ir,t):e},t.offset=function(n){return arguments.length?(r="function"==typeof n?n:Qa.get(n)||ur,t):r},t.x=function(n){return arguments.length?(u=n,t):u},t.y=function(n){return arguments.length?(o=n,t):o},t.out=function(n){return arguments.length?(i=n,t):i},t};var Wa=d3.map({"inside-out":function(t){var n,e,r=t.length,i=t.map(ar),u=t.map(or),a=d3.range(r).sort(function(t,n){return i[t]-i[n]}),o=0,c=0,l=[],s=[];for(n=0;r>n;++n)e=a[n],c>o?(o+=u[e],l.push(e)):(c+=u[e],s.push(e));return s.reverse().concat(l)},reverse:function(t){return d3.range(t.length).reverse()},"default":ir}),Qa=d3.map({silhouette:function(t){var n,e,r,i=t.length,u=t[0].length,a=[],o=0,c=[];for(e=0;u>e;++e){for(n=0,r=0;i>n;n++)r+=t[n][e][1];r>o&&(o=r),a.push(r)}for(e=0;u>e;++e)c[e]=(o-a[e])/2;return c},wiggle:function(t){var n,e,r,i,u,a,o,c,l,s=t.length,f=t[0],h=f.length,d=[];for(d[0]=c=l=0,e=1;h>e;++e){for(n=0,i=0;s>n;++n)i+=t[n][e][1];for(n=0,u=0,o=f[e][0]-f[e-1][0];s>n;++n){for(r=0,a=(t[n][e][1]-t[n][e-1][1])/(2*o);n>r;++r)a+=(t[r][e][1]-t[r][e-1][1])/o;u+=a*t[n][e][1]}d[e]=c-=i?u/i*o:0,l>c&&(l=c)}for(e=0;h>e;++e)d[e]-=l;return d},expand:function(t){var n,e,r,i=t.length,u=t[0].length,a=1/i,o=[];for(e=0;u>e;++e){for(n=0,r=0;i>n;n++)r+=t[n][e][1];if(r)for(n=0;i>n;n++)t[n][e][1]/=r;else for(n=0;i>n;n++)t[n][e][1]=a}for(e=0;u>e;++e)o[e]=0;return o},zero:ur});d3.layout.histogram=function(){function t(t,u){for(var a,o,c=[],l=t.map(e,this),s=r.call(this,l,u),f=i.call(this,s,l,u),u=-1,h=l.length,d=f.length-1,g=n?1:1/h;d>++u;)a=c[u]=[],a.dx=f[u+1]-(a.x=f[u]),a.y=0;if(d>0)for(u=-1;h>++u;)o=l[u],o>=s[0]&&s[1]>=o&&(a=c[d3.bisect(f,o,1,d)-1],a.y+=g,a.push(t[u]));return c}var n=!0,e=Number,r=fr,i=lr;return t.value=function(n){return arguments.length?(e=n,t):e},t.range=function(n){return arguments.length?(r=c(n),t):r},t.bins=function(n){return 
arguments.length?(i="number"==typeof n?function(t){return sr(t,n)}:c(n),t):i},t.frequency=function(e){return arguments.length?(n=!!e,t):n},t},d3.layout.hierarchy=function(){function t(n,a,o){var c=i.call(e,n,a);if(n.depth=a,o.push(n),c&&(l=c.length)){for(var l,s,f=-1,h=n.children=[],d=0,g=a+1;l>++f;)s=t(c[f],g,o),s.parent=n,h.push(s),d+=s.value;r&&h.sort(r),u&&(n.value=d)}else u&&(n.value=+u.call(e,n,a)||0);return n}function n(t,r){var i=t.children,a=0;if(i&&(o=i.length))for(var o,c=-1,l=r+1;o>++c;)a+=n(i[c],l);else u&&(a=+u.call(e,t,r)||0);return u&&(t.value=a),a}function e(n){var e=[];return t(n,0,e),e}var r=pr,i=dr,u=gr;return e.sort=function(t){return arguments.length?(r=t,e):r},e.children=function(t){return arguments.length?(i=t,e):i},e.value=function(t){return arguments.length?(u=t,e):u},e.revalue=function(t){return n(t,0),t},e},d3.layout.pack=function(){function t(t,i){var u=n.call(this,t,i),a=u[0];a.x=0,a.y=0,Rr(a,function(t){t.r=Math.sqrt(t.value)}),Rr(a,xr);var o=r[0],c=r[1],l=Math.max(2*a.r/o,2*a.r/c);if(e>0){var s=e*l/2;Rr(a,function(t){t.r+=s}),Rr(a,xr),Rr(a,function(t){t.r-=s}),l=Math.max(2*a.r/o,2*a.r/c)}return Sr(a,o/2,c/2,1/l),u}var n=d3.layout.hierarchy().sort(vr),e=0,r=[1,1];return t.size=function(n){return arguments.length?(r=n,t):r},t.padding=function(n){return arguments.length?(e=+n,t):e},hr(t,n)},d3.layout.cluster=function(){function t(t,i){var u,a=n.call(this,t,i),o=a[0],c=0;Rr(o,function(t){var n=t.children;n&&n.length?(t.x=Ar(n),t.y=Er(n)):(t.x=u?c+=e(t,u):0,t.y=0,u=t)});var l=Nr(o),s=Tr(o),f=l.x-e(l,s)/2,h=s.x+e(s,l)/2;return Rr(o,function(t){t.x=(t.x-f)/(h-f)*r[0],t.y=(1-(o.y?t.y/o.y:1))*r[1]}),a}var n=d3.layout.hierarchy().sort(null).value(null),e=qr,r=[1,1];return t.separation=function(n){return arguments.length?(e=n,t):e},t.size=function(n){return arguments.length?(r=n,t):r},hr(t,n)},d3.layout.tree=function(){function t(t,i){function u(t,n){var r=t.children,i=t._tree;if(r&&(a=r.length)){for(var 
a,c,l,s=r[0],f=s,h=-1;a>++h;)l=r[h],u(l,c),f=o(l,c,f),c=l;Pr(t);var d=.5*(s._tree.prelim+l._tree.prelim);n?(i.prelim=n._tree.prelim+e(t,n),i.mod=i.prelim-d):i.prelim=d}else n&&(i.prelim=n._tree.prelim+e(t,n))}function a(t,n){t.x=t._tree.prelim+n;var e=t.children;if(e&&(r=e.length)){var r,i=-1;for(n+=t._tree.mod;r>++i;)a(e[i],n)}}function o(t,n,r){if(n){for(var i,u=t,a=t,o=n,c=t.parent.children[0],l=u._tree.mod,s=a._tree.mod,f=o._tree.mod,h=c._tree.mod;o=zr(o),u=Cr(u),o&&u;)c=Cr(c),a=zr(a),a._tree.ancestor=t,i=o._tree.prelim+f-u._tree.prelim-l+e(o,u),i>0&&(jr(Or(o,t,r),t,i),l+=i,s+=i),f+=o._tree.mod,l+=u._tree.mod,h+=c._tree.mod,s+=a._tree.mod;o&&!zr(a)&&(a._tree.thread=o,a._tree.mod+=f-s),u&&!Cr(c)&&(c._tree.thread=u,c._tree.mod+=l-h,r=t)}return r}var c=n.call(this,t,i),l=c[0];Rr(l,function(t,n){t._tree={ancestor:t,prelim:0,mod:0,change:0,shift:0,number:n?n._tree.number+1:0}}),u(l),a(l,-l._tree.prelim);var s=Dr(l,Fr),f=Dr(l,Lr),h=Dr(l,Hr),d=s.x-e(s,f)/2,g=f.x+e(f,s)/2,p=h.depth||1;return Rr(l,function(t){t.x=(t.x-d)/(g-d)*r[0],t.y=t.depth/p*r[1],delete t._tree}),c}var n=d3.layout.hierarchy().sort(null).value(null),e=qr,r=[1,1];return t.separation=function(n){return arguments.length?(e=n,t):e},t.size=function(n){return arguments.length?(r=n,t):r},hr(t,n)},d3.layout.treemap=function(){function t(t,n){for(var e,r,i=-1,u=t.length;u>++i;)r=(e=t[i]).value*(0>n?0:n),e.area=isNaN(r)||0>=r?0:r}function n(e){var u=e.children;if(u&&u.length){var a,o,c,l=f(e),s=[],h=u.slice(),g=1/0,p="slice"===d?l.dx:"dice"===d?l.dy:"slice-dice"===d?1&e.depth?l.dy:l.dx:Math.min(l.dx,l.dy);for(t(h,l.dx*l.dy/e.value),s.area=0;(c=h.length)>0;)s.push(a=h[c-1]),s.area+=a.area,"squarify"!==d||g>=(o=r(s,p))?(h.pop(),g=o):(s.area-=s.pop().area,i(s,p,l,!1),p=Math.min(l.dx,l.dy),s.length=s.area=0,g=1/0);s.length&&(i(s,p,l,!0),s.length=s.area=0),u.forEach(n)}}function e(n){var r=n.children;if(r&&r.length){var 
u,a=f(n),o=r.slice(),c=[];for(t(o,a.dx*a.dy/n.value),c.area=0;u=o.pop();)c.push(u),c.area+=u.area,null!=u.z&&(i(c,u.z?a.dx:a.dy,a,!o.length),c.length=c.area=0);r.forEach(e)}}function r(t,n){for(var e,r=t.area,i=0,u=1/0,a=-1,o=t.length;o>++a;)(e=t[a].area)&&(u>e&&(u=e),e>i&&(i=e));return r*=r,n*=n,r?Math.max(n*i*g/r,r/(n*u*g)):1/0}function i(t,n,e,r){var i,u=-1,a=t.length,o=e.x,l=e.y,s=n?c(t.area/n):0;if(n==e.dx){for((r||s>e.dy)&&(s=e.dy);a>++u;)i=t[u],i.x=o,i.y=l,i.dy=s,o+=i.dx=Math.min(e.x+e.dx-o,s?c(i.area/s):0);i.z=!0,i.dx+=e.x+e.dx-o,e.y+=s,e.dy-=s}else{for((r||s>e.dx)&&(s=e.dx);a>++u;)i=t[u],i.x=o,i.y=l,i.dx=s,l+=i.dy=Math.min(e.y+e.dy-l,s?c(i.area/s):0);i.z=!1,i.dy+=e.y+e.dy-l,e.x+=s,e.dx-=s}}function u(r){var i=a||o(r),u=i[0];return u.x=0,u.y=0,u.dx=l[0],u.dy=l[1],a&&o.revalue(u),t([u],u.dx*u.dy/u.value),(a?e:n)(u),h&&(a=i),i}var a,o=d3.layout.hierarchy(),c=Math.round,l=[1,1],s=null,f=Yr,h=!1,d="squarify",g=.5*(1+Math.sqrt(5));return u.size=function(t){return arguments.length?(l=t,u):l},u.padding=function(t){function n(n){var e=t.call(u,n,n.depth);return null==e?Yr(n):Ur(n,"number"==typeof e?[e,e,e,e]:e)}function e(n){return Ur(n,t)}if(!arguments.length)return s;var r;return f=null==(s=t)?Yr:"function"==(r=typeof t)?n:"number"===r?(t=[t,t,t,t],e):e,u},u.round=function(t){return arguments.length?(c=t?Math.round:Number,u):c!=Number},u.sticky=function(t){return arguments.length?(h=t,a=null,u):h},u.ratio=function(t){return arguments.length?(g=t,u):g},u.mode=function(t){return arguments.length?(d=t+"",u):d},hr(u,o)},d3.csv=Ir(",","text/csv"),d3.tsv=Ir("        ","text/tab-separated-values"),d3.geo={},d3.geo.stream=function(t,n){to.hasOwnProperty(t.type)?to[t.type](t,n):Vr(t,n)};var to={Feature:function(t,n){Vr(t.geometry,n)},FeatureCollection:function(t,n){for(var e=t.features,r=-1,i=e.length;i>++r;)Vr(e[r].geometry,n)}},no={Sphere:function(t,n){n.sphere()},Point:function(t,n){var e=t.coordinates;n.point(e[0],e[1])},MultiPoint:function(t,n){for(var 
e,r=t.coordinates,i=-1,u=r.length;u>++i;)e=r[i],n.point(e[0],e[1])},LineString:function(t,n){Xr(t.coordinates,n,0)},MultiLineString:function(t,n){for(var e=t.coordinates,r=-1,i=e.length;i>++r;)Xr(e[r],n,0)},Polygon:function(t,n){Zr(t.coordinates,n)},MultiPolygon:function(t,n){for(var e=t.coordinates,r=-1,i=e.length;i>++r;)Zr(e[r],n)},GeometryCollection:function(t,n){for(var e=t.geometries,r=-1,i=e.length;i>++r;)Vr(e[r],n)}};d3.geo.albersUsa=function(){function t(t){return n(t)(t)}function n(t){var n=t[0],a=t[1];return a>50?r:-140>n?i:21>a?u:e}var e=d3.geo.albers(),r=d3.geo.albers().rotate([160,0]).center([0,60]).parallels([55,65]),i=d3.geo.albers().rotate([160,0]).center([0,20]).parallels([8,18]),u=d3.geo.albers().rotate([60,0]).center([0,10]).parallels([8,18]);return t.scale=function(n){return arguments.length?(e.scale(n),r.scale(.6*n),i.scale(n),u.scale(1.5*n),t.translate(e.translate())):e.scale()},t.translate=function(n){if(!arguments.length)return e.translate();var a=e.scale(),o=n[0],c=n[1];return e.translate(n),r.translate([o-.4*a,c+.17*a]),i.translate([o-.19*a,c+.2*a]),u.translate([o+.58*a,c+.43*a]),t},t.scale(e.scale())},(d3.geo.albers=function(){var t=29.5*ju,n=45.5*ju,e=Pi(ei),r=e(t,n);return r.parallels=function(r){return arguments.length?e(t=r[0]*ju,n=r[1]*ju):[t*Ou,n*Ou]},r.rotate([98,0]).center([0,38]).scale(1e3)}).raw=ei;var eo=Vi(function(t){return Math.sqrt(2/(1+t))},function(t){return 2*Math.asin(t/2)});(d3.geo.azimuthalEqualArea=function(){return Ri(eo)}).raw=eo;var ro=Vi(function(t){var n=Math.acos(t);return n&&n/Math.sin(n)},a);(d3.geo.azimuthalEquidistant=function(){return Ri(ro)}).raw=ro,d3.geo.bounds=ri(a),d3.geo.centroid=function(t){io=uo=ao=oo=co=0,d3.geo.stream(t,lo);var n;return uo&&Math.abs(n=Math.sqrt(ao*ao+oo*oo+co*co))>Pu?[Math.atan2(oo,ao)*Ou,Math.asin(Math.max(-1,Math.min(1,co/n)))*Ou]:void 0};var 
io,uo,ao,oo,co,lo={sphere:function(){2>io&&(io=2,uo=ao=oo=co=0)},point:ii,lineStart:ai,lineEnd:oi,polygonStart:function(){2>io&&(io=2,uo=ao=oo=co=0),lo.lineStart=ui},polygonEnd:function(){lo.lineStart=ai}};d3.geo.circle=function(){function t(){var t="function"==typeof r?r.apply(this,arguments):r,n=Oi(-t[0]*ju,-t[1]*ju,0).invert,i=[];return e(null,null,1,{point:function(t,e){i.push(t=n(t,e)),t[0]*=Ou,t[1]*=Ou}}),{type:"Polygon",coordinates:[i]}}var n,e,r=[0,0],i=6;return t.origin=function(n){return arguments.length?(r=n,t):r},t.angle=function(r){return arguments.length?(e=ci((n=+r)*ju,i*ju),t):n},t.precision=function(r){return arguments.length?(e=ci(n*ju,(i=+r)*ju),t):i},t.angle(90)};var so=si(o,vi,Mi);(d3.geo.equirectangular=function(){return Ri(_i).scale(250/Ru)}).raw=_i.invert=_i;var fo=Vi(function(t){return 1/t},Math.atan);(d3.geo.gnomonic=function(){return Ri(fo)}).raw=fo,d3.geo.graticule=function(){function t(){return{type:"MultiLineString",coordinates:n()}}function n(){return d3.range(Math.ceil(r/c)*c,e,c).map(a).concat(d3.range(Math.ceil(u/l)*l,i,l).map(o))}var e,r,i,u,a,o,c=22.5,l=c,s=2.5;return t.lines=function(){return n().map(function(t){return{type:"LineString",coordinates:t}})},t.outline=function(){return{type:"Polygon",coordinates:[a(r).concat(o(i).slice(1),a(e).reverse().slice(1),o(u).reverse().slice(1))]}},t.extent=function(n){return arguments.length?(r=+n[0][0],e=+n[1][0],u=+n[0][1],i=+n[1][1],r>e&&(n=r,r=e,e=n),u>i&&(n=u,u=i,i=n),t.precision(s)):[[r,u],[e,i]]},t.step=function(n){return arguments.length?(c=+n[0],l=+n[1],t):[c,l]},t.precision=function(n){return arguments.length?(s=+n,a=wi(u,i,s),o=Si(r,e,s),t):s},t.extent([[-180+Pu,-90+Pu],[180-Pu,90-Pu]])},d3.geo.interpolate=function(t,n){return ki(t[0]*ju,t[1]*ju,n[0]*ju,n[1]*ju)},d3.geo.greatArc=function(){function e(){for(var t=r||a.apply(this,arguments),n=i||o.apply(this,arguments),e=u||d3.geo.interpolate(t,n),l=0,s=c/e.distance,f=[t];1>(l+=s);)f.push(e(l));return 
f.push(n),{type:"LineString",coordinates:f}}var r,i,u,a=n,o=t,c=6*ju;return e.distance=function(){return(u||d3.geo.interpolate(r||a.apply(this,arguments),i||o.apply(this,arguments))).distance},e.source=function(t){return arguments.length?(a=t,r="function"==typeof t?null:t,u=r&&i?d3.geo.interpolate(r,i):null,e):a},e.target=function(t){return arguments.length?(o=t,i="function"==typeof t?null:t,u=r&&i?d3.geo.interpolate(r,i):null,e):o},e.precision=function(t){return arguments.length?(c=t*ju,e):c/ju},e},Ei.invert=function(t,n){return[2*Ru*t,2*Math.atan(Math.exp(2*Ru*n))-Ru/2]},(d3.geo.mercator=function(){return Ri(Ei).scale(500)}).raw=Ei;var ho=Vi(function(){return 1},Math.asin);(d3.geo.orthographic=function(){return Ri(ho)}).raw=ho,d3.geo.path=function(){function t(t){return t&&d3.geo.stream(t,r(i.pointRadius("function"==typeof u?+u.apply(this,arguments):u))),i.result()}var n,e,r,i,u=4.5;return t.area=function(t){return go=0,d3.geo.stream(t,r(mo)),go},t.centroid=function(t){return io=ao=oo=co=0,d3.geo.stream(t,r(vo)),co?[ao/co,oo/co]:void 0},t.bounds=function(t){return ri(r)(t)},t.projection=function(e){return arguments.length?(r=(n=e)?e.stream||Ni(e):a,t):n},t.context=function(n){return arguments.length?(i=null==(e=n)?new Ti:new qi(n),t):e},t.pointRadius=function(n){return arguments.length?(u="function"==typeof n?n:+n,t):u},t.projection(d3.geo.albersUsa()).context(null)};var go,po,mo={point:Pn,lineStart:Pn,lineEnd:Pn,polygonStart:function(){po=0,mo.lineStart=Ci},polygonEnd:function(){mo.lineStart=mo.lineEnd=mo.point=Pn,go+=Math.abs(po/2)}},vo={point:zi,lineStart:Di,lineEnd:Li,polygonStart:function(){vo.lineStart=Fi},polygonEnd:function(){vo.point=zi,vo.lineStart=Di,vo.lineEnd=Li}};d3.geo.area=function(t){return yo=0,d3.geo.stream(t,bo),yo};var 
yo,Mo,bo={sphere:function(){yo+=4*Ru},point:Pn,lineStart:Pn,lineEnd:Pn,polygonStart:function(){Mo=0,bo.lineStart=Hi},polygonEnd:function(){yo+=0>Mo?4*Ru+Mo:Mo,bo.lineStart=bo.lineEnd=bo.point=Pn}};d3.geo.projection=Ri,d3.geo.projectionMutator=Pi;var xo=Vi(function(t){return 1/(1+t)},function(t){return 2*Math.atan(t)});(d3.geo.stereographic=function(){return Ri(xo)}).raw=xo,d3.geom={},d3.geom.hull=function(t){if(3>t.length)return[];var n,e,r,i,u,a,o,c,l,s,f=t.length,h=f-1,d=[],g=[],p=0;for(n=1;f>n;++n)t[n][1]<t[p][1]?p=n:t[n][1]==t[p][1]&&(p=t[n][0]<t[p][0]?n:p);for(n=0;f>n;++n)n!==p&&(i=t[n][1]-t[p][1],r=t[n][0]-t[p][0],d.push({angle:Math.atan2(i,r),index:n}));for(d.sort(function(t,n){return t.angle-n.angle}),l=d[0].angle,c=d[0].index,o=0,n=1;h>n;++n)e=d[n].index,l==d[n].angle?(r=t[c][0]-t[p][0],i=t[c][1]-t[p][1],u=t[e][0]-t[p][0],a=t[e][1]-t[p][1],r*r+i*i>=u*u+a*a?d[n].index=-1:(d[o].index=-1,l=d[n].angle,o=n,c=e)):(l=d[n].angle,o=n,c=e);for(g.push(p),n=0,e=0;2>n;++e)-1!==d[e].index&&(g.push(d[e].index),n++);for(s=g.length;h>e;++e)if(-1!==d[e].index){for(;!Xi(g[s-2],g[s-1],d[e].index,t);)--s;g[s++]=d[e].index}var m=[];for(n=0;s>n;++n)m.push(t[g[n]]);return m},d3.geom.polygon=function(t){return t.area=function(){for(var n=0,e=t.length,r=t[e-1][1]*t[0][0]-t[e-1][0]*t[0][1];e>++n;)r+=t[n-1][1]*t[n][0]-t[n-1][0]*t[n][1];return.5*r},t.centroid=function(n){var e,r,i=-1,u=t.length,a=0,o=0,c=t[u-1];for(arguments.length||(n=-1/(6*t.area()));u>++i;)e=c,c=t[i],r=e[0]*c[1]-c[0]*e[1],a+=(e[0]+c[0])*r,o+=(e[1]+c[1])*r;return[a*n,o*n]},t.clip=function(n){for(var e,r,i,u,a,o,c=-1,l=t.length,s=t[l-1];l>++c;){for(e=n.slice(),n.length=0,u=t[c],a=e[(i=e.length)-1],r=-1;i>++r;)o=e[r],Zi(o,s,u)?(Zi(a,s,u)||n.push(Bi(a,o,s,u)),n.push(o)):Zi(a,s,u)&&n.push(Bi(a,o,s,u)),a=o;s=u}return n},t},d3.geom.voronoi=function(t){var n=t.map(function(){return[]}),e=1e6;return $i(t,function(t){var 
r,i,u,a,o,c;1===t.a&&t.b>=0?(r=t.ep.r,i=t.ep.l):(r=t.ep.l,i=t.ep.r),1===t.a?(o=r?r.y:-e,u=t.c-t.b*o,c=i?i.y:e,a=t.c-t.b*c):(u=r?r.x:-e,o=t.c-t.a*u,a=i?i.x:e,c=t.c-t.a*a);var l=[u,o],s=[a,c];n[t.region.l.index].push(l,s),n[t.region.r.index].push(l,s)}),n=n.map(function(n,e){var r=t[e][0],i=t[e][1],u=n.map(function(t){return Math.atan2(t[0]-r,t[1]-i)});return d3.range(n.length).sort(function(t,n){return u[t]-u[n]}).filter(function(t,n,e){return!n||u[t]-u[e[n-1]]>Pu}).map(function(t){return n[t]})}),n.forEach(function(n,r){var i=n.length;if(!i)return n.push([-e,-e],[-e,e],[e,e],[e,-e]);if(!(i>2)){var u=t[r],a=n[0],o=n[1],c=u[0],l=u[1],s=a[0],f=a[1],h=o[0],d=o[1],g=Math.abs(h-s),p=d-f;if(Pu>Math.abs(p)){var m=f>l?-e:e;n.push([-e,m],[e,m])}else if(Pu>g){var v=s>c?-e:e;n.push([v,-e],[v,e])}else{var m=(s-c)*(d-f)>(h-s)*(f-l)?e:-e,y=Math.abs(p)-g;Pu>Math.abs(y)?n.push([0>p?m:-m,m]):(y>0&&(m*=-1),n.push([-e,m],[e,m]))}}}),n};var _o={l:"r",r:"l"};d3.geom.delaunay=function(t){var n=t.map(function(){return[]}),e=[];return $i(t,function(e){n[e.region.l.index].push(t[e.region.r.index])}),n.forEach(function(n,r){var i=t[r],u=i[0],a=i[1];n.forEach(function(t){t.angle=Math.atan2(t[0]-u,t[1]-a)}),n.sort(function(t,n){return t.angle-n.angle});for(var o=0,c=n.length-1;c>o;o++)e.push([i,n[o],n[o+1]])}),e},d3.geom.quadtree=function(t,n,e,r,i){function u(t,n,e,r,i,u){if(!isNaN(n.x)&&!isNaN(n.y))if(t.leaf){var o=t.point;o?.01>Math.abs(o.x-n.x)+Math.abs(o.y-n.y)?a(t,n,e,r,i,u):(t.point=null,a(t,o,e,r,i,u),a(t,n,e,r,i,u)):t.point=n}else a(t,n,e,r,i,u)}function a(t,n,e,r,i,a){var o=.5*(e+i),c=.5*(r+a),l=n.x>=o,s=n.y>=c,f=(s<<1)+l;t.leaf=!1,t=t.nodes[f]||(t.nodes[f]=Ji()),l?e=o:i=o,s?r=c:a=c,u(t,n,e,r,i,a)}var o,c=-1,l=t.length;if(5>arguments.length)if(3===arguments.length)i=e,r=n,e=n=0;else for(n=e=1/0,r=i=-1/0;l>++c;)o=t[c],n>o.x&&(n=o.x),e>o.y&&(e=o.y),o.x>r&&(r=o.x),o.y>i&&(i=o.y);var s=r-n,f=i-e;s>f?i=e+s:r=n+f;var h=Ji();return 
h.add=function(t){u(h,t,n,e,r,i)},h.visit=function(t){Gi(t,h,n,e,r,i)},t.forEach(h.add),h},d3.time={};var wo=Date,So=["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"];Ki.prototype={getDate:function(){return this._.getUTCDate()},getDay:function(){return this._.getUTCDay()},getFullYear:function(){return this._.getUTCFullYear()},getHours:function(){return this._.getUTCHours()},getMilliseconds:function(){return this._.getUTCMilliseconds()},getMinutes:function(){return this._.getUTCMinutes()},getMonth:function(){return this._.getUTCMonth()},getSeconds:function(){return this._.getUTCSeconds()},getTime:function(){return this._.getTime()},getTimezoneOffset:function(){return 0},valueOf:function(){return this._.valueOf()},setDate:function(){ko.setUTCDate.apply(this._,arguments)},setDay:function(){ko.setUTCDay.apply(this._,arguments)},setFullYear:function(){ko.setUTCFullYear.apply(this._,arguments)},setHours:function(){ko.setUTCHours.apply(this._,arguments)},setMilliseconds:function(){ko.setUTCMilliseconds.apply(this._,arguments)},setMinutes:function(){ko.setUTCMinutes.apply(this._,arguments)},setMonth:function(){ko.setUTCMonth.apply(this._,arguments)},setSeconds:function(){ko.setUTCSeconds.apply(this._,arguments)},setTime:function(){ko.setTime.apply(this._,arguments)}};var ko=Date.prototype,Eo="%a %b %e %X %Y",Ao="%m/%d/%Y",No="%H:%M:%S",To=["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],qo=["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],Co=["January","February","March","April","May","June","July","August","September","October","November","December"],zo=["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"];d3.time.format=function(t){function n(n){for(var r,i,u,a=[],o=-1,c=0;e>++o;)37===t.charCodeAt(o)&&(a.push(t.substring(c,o)),null!=(i=jo[r=t.charAt(++o)])&&(r=t.charAt(++o)),(u=Oo[r])&&(r=u(n,null==i?"e"===r?" 
":"0":i)),a.push(r),c=o+1);return a.push(t.substring(c,o)),a.join("")}var e=t.length;return n.parse=function(n){var e={y:1900,m:0,d:1,H:0,M:0,S:0,L:0},r=Wi(e,t,n,0);if(r!=n.length)return null;"p"in e&&(e.H=e.H%12+12*e.p);var i=new wo;return i.setFullYear(e.y,e.m,e.d),i.setHours(e.H,e.M,e.S,e.L),i},n.toString=function(){return t},n};var Do=Qi(To),Lo=Qi(qo),Fo=Qi(Co),Ho=tu(Co),Ro=Qi(zo),Po=tu(zo),jo={"-":"",_:" ",0:"0"},Oo={a:function(t){return qo[t.getDay()]},A:function(t){return To[t.getDay()]},b:function(t){return zo[t.getMonth()]},B:function(t){return Co[t.getMonth()]},c:d3.time.format(Eo),d:function(t,n){return nu(t.getDate(),n,2)},e:function(t,n){return nu(t.getDate(),n,2)},H:function(t,n){return nu(t.getHours(),n,2)},I:function(t,n){return nu(t.getHours()%12||12,n,2)},j:function(t,n){return nu(1+d3.time.dayOfYear(t),n,3)},L:function(t,n){return nu(t.getMilliseconds(),n,3)},m:function(t,n){return nu(t.getMonth()+1,n,2)},M:function(t,n){return nu(t.getMinutes(),n,2)},p:function(t){return t.getHours()>=12?"PM":"AM"},S:function(t,n){return nu(t.getSeconds(),n,2)},U:function(t,n){return nu(d3.time.sundayOfYear(t),n,2)},w:function(t){return t.getDay()},W:function(t,n){return nu(d3.time.mondayOfYear(t),n,2)},x:d3.time.format(Ao),X:d3.time.format(No),y:function(t,n){return nu(t.getFullYear()%100,n,2)},Y:function(t,n){return nu(t.getFullYear()%1e4,n,4)},Z:Mu,"%":function(){return"%"}},Yo={a:eu,A:ru,b:iu,B:uu,c:au,d:du,e:du,H:gu,I:gu,L:vu,m:hu,M:pu,p:yu,S:mu,x:ou,X:cu,y:su,Y:lu},Uo=/^\s*\d+/,Io=d3.map({am:0,pm:1});d3.time.format.utc=function(t){function n(t){try{wo=Ki;var n=new wo;return n._=t,e(n)}finally{wo=Date}}var e=d3.time.format(t);return n.parse=function(t){try{wo=Ki;var n=e.parse(t);return n&&n._}finally{wo=Date}},n.toString=e.toString,n};var Vo=d3.time.format.utc("%Y-%m-%dT%H:%M:%S.%LZ");d3.time.format.iso=Date.prototype.toISOString?bu:Vo,bu.parse=function(t){var n=new Date(t);return isNaN(n)?null:n},bu.toString=Vo.toString,d3.time.second=xu(function(t){return 
new wo(1e3*Math.floor(t/1e3))},function(t,n){t.setTime(t.getTime()+1e3*Math.floor(n))},function(t){return t.getSeconds()}),d3.time.seconds=d3.time.second.range,d3.time.seconds.utc=d3.time.second.utc.range,d3.time.minute=xu(function(t){return new wo(6e4*Math.floor(t/6e4))},function(t,n){t.setTime(t.getTime()+6e4*Math.floor(n))},function(t){return t.getMinutes()}),d3.time.minutes=d3.time.minute.range,d3.time.minutes.utc=d3.time.minute.utc.range,d3.time.hour=xu(function(t){var n=t.getTimezoneOffset()/60;return new wo(36e5*(Math.floor(t/36e5-n)+n))},function(t,n){t.setTime(t.getTime()+36e5*Math.floor(n))},function(t){return t.getHours()}),d3.time.hours=d3.time.hour.range,d3.time.hours.utc=d3.time.hour.utc.range,d3.time.day=xu(function(t){var n=new wo(1970,0);return n.setFullYear(t.getFullYear(),t.getMonth(),t.getDate()),n},function(t,n){t.setDate(t.getDate()+n)},function(t){return t.getDate()-1}),d3.time.days=d3.time.day.range,d3.time.days.utc=d3.time.day.utc.range,d3.time.dayOfYear=function(t){var n=d3.time.year(t);return Math.floor((t-n-6e4*(t.getTimezoneOffset()-n.getTimezoneOffset()))/864e5)},So.forEach(function(t,n){t=t.toLowerCase(),n=7-n;var e=d3.time[t]=xu(function(t){return(t=d3.time.day(t)).setDate(t.getDate()-(t.getDay()+n)%7),t},function(t,n){t.setDate(t.getDate()+7*Math.floor(n))},function(t){var e=d3.time.year(t).getDay();return Math.floor((d3.time.dayOfYear(t)+(e+n)%7)/7)-(e!==n)});d3.time[t+"s"]=e.range,d3.time[t+"s"].utc=e.utc.range,d3.time[t+"OfYear"]=function(t){var e=d3.time.year(t).getDay();return Math.floor((d3.time.dayOfYear(t)+(e+n)%7)/7)}}),d3.time.week=d3.time.sunday,d3.time.weeks=d3.time.sunday.range,d3.time.weeks.utc=d3.time.sunday.utc.range,d3.time.weekOfYear=d3.time.sundayOfYear,d3.time.month=xu(function(t){return t=d3.time.day(t),t.setDate(1),t},function(t,n){t.setMonth(t.getMonth()+n)},function(t){return t.getMonth()}),d3.time.months=d3.time.month.range,d3.time.months.utc=d3.time.month.utc.range,d3.time.year=xu(function(t){return 
t=d3.time.day(t),t.setMonth(0,1),t},function(t,n){t.setFullYear(t.getFullYear()+n)},function(t){return t.getFullYear()}),d3.time.years=d3.time.year.range,d3.time.years.utc=d3.time.year.utc.range;var Xo=[1e3,5e3,15e3,3e4,6e4,3e5,9e5,18e5,36e5,108e5,216e5,432e5,864e5,1728e5,6048e5,2592e6,7776e6,31536e6],Zo=[[d3.time.second,1],[d3.time.second,5],[d3.time.second,15],[d3.time.second,30],[d3.time.minute,1],[d3.time.minute,5],[d3.time.minute,15],[d3.time.minute,30],[d3.time.hour,1],[d3.time.hour,3],[d3.time.hour,6],[d3.time.hour,12],[d3.time.day,1],[d3.time.day,2],[d3.time.week,1],[d3.time.month,1],[d3.time.month,3],[d3.time.year,1]],Bo=[[d3.time.format("%Y"),o],[d3.time.format("%B"),function(t){return t.getMonth()}],[d3.time.format("%b %d"),function(t){return 1!=t.getDate()}],[d3.time.format("%a %d"),function(t){return t.getDay()&&1!=t.getDate()}],[d3.time.format("%I %p"),function(t){return t.getHours()}],[d3.time.format("%I:%M"),function(t){return t.getMinutes()}],[d3.time.format(":%S"),function(t){return t.getSeconds()}],[d3.time.format(".%L"),function(t){return t.getMilliseconds()}]],$o=d3.scale.linear(),Jo=Eu(Bo);Zo.year=function(t,n){return $o.domain(t.map(Nu)).ticks(n).map(Au)},d3.time.scale=function(){return wu(d3.scale.linear(),Zo,Jo)};var Go=Zo.map(function(t){return[t[0].utc,t[1]]}),Ko=[[d3.time.format.utc("%Y"),o],[d3.time.format.utc("%B"),function(t){return t.getUTCMonth()}],[d3.time.format.utc("%b %d"),function(t){return 1!=t.getUTCDate()}],[d3.time.format.utc("%a %d"),function(t){return t.getUTCDay()&&1!=t.getUTCDate()}],[d3.time.format.utc("%I %p"),function(t){return t.getUTCHours()}],[d3.time.format.utc("%I:%M"),function(t){return t.getUTCMinutes()}],[d3.time.format.utc(":%S"),function(t){return t.getUTCSeconds()}],[d3.time.format.utc(".%L"),function(t){return t.getUTCMilliseconds()}]],Wo=Eu(Ko);Go.year=function(t,n){return $o.domain(t.map(qu)).ticks(n).map(Tu)},d3.time.scale.utc=function(){return wu(d3.scale.linear(),Go,Wo)}})();
\ No newline at end of file
diff --git a/apps/workbench/public/favicon.ico b/apps/workbench/public/favicon.ico
new file mode 100644 (file)
index 0000000..4c763b6
Binary files /dev/null and b/apps/workbench/public/favicon.ico differ
diff --git a/apps/workbench/public/graph-example.html b/apps/workbench/public/graph-example.html
new file mode 100644 (file)
index 0000000..05a957a
--- /dev/null
@@ -0,0 +1,181 @@
+<!DOCTYPE html>
+<!-- from http://bl.ocks.org/1153292 -->
+<html>
+  <head>
+    <meta http-equiv="Content-type" content="text/html; charset=utf-8">
+    <title>Object graph example</title>
+    <script src="d3.v3.min.js"></script>
+    <style type="text/css">
+
+path.link {
+  fill: none;
+  stroke: #666;
+  stroke-width: 1.5px;
+}
+
+marker#can_read {
+  fill: green;
+}
+
+path.link.can_read {
+  stroke: green;
+  stroke-dasharray: 0,4 1;
+}
+
+path.link.can_write {
+  stroke: green;
+}
+
+path.link.member_of {
+  stroke: blue;
+  stroke-dasharray: 0,4 1;
+}
+
+path.link.created {
+  stroke: red;
+}
+
+circle {
+  fill: #ccc;
+  stroke: #333;
+  stroke-width: 1.5px;
+}
+
+edgetext {
+  font: 12px sans-serif;
+  pointer-events: none;
+    text-align: center;
+}
+
+text {
+  font: 12px sans-serif;
+  pointer-events: none;
+}
+
+text.shadow {
+  stroke: #fff;
+  stroke-width: 3px;
+  stroke-opacity: .8;
+}
+
+    </style>
+  </head>
+  <body>
+    <script type="text/javascript">
+
+var links = [
+  {source: "user: customer", target: "project: customer_project", type: "can_read"},
+  {source: "user: import robot", target: "project: customer_project", type: "can_read"},
+  {source: "user: pipeline robot", target: "project: customer_project", type: "can_read"},
+  {source: "user: uploader", target: "collection: w3anr2hk2wgfpuo", type: "created"},
+  {source: "user: uploader", target: "project: customer_project", type: "created"},
+  {source: "collection: w3anr2hk2wgfpuo", target: "project: customer_project", type: "member_of"}
+];
+
+var nodes = {};
+
+// Compute the distinct nodes from the links.
+links.forEach(function(link) {
+  link.source = nodes[link.source] || (nodes[link.source] = {name: link.source});
+  link.target = nodes[link.target] || (nodes[link.target] = {name: link.target});
+});
+
+var w = 960,
+    h = 500;
+
+var force = d3.layout.force()
+    .nodes(d3.values(nodes))
+    .links(links)
+    .size([w, h])
+    .linkDistance(250)
+    .charge(-300)
+    .on("tick", tick)
+    .start();
+
+var svg = d3.select("body").append("svg:svg")
+    .attr("width", w)
+    .attr("height", h);
+
+// Per-type markers, as they don't inherit styles.
+svg.append("svg:defs").selectAll("marker")
+    .data(["created", "member_of", "can_read", "can_write"])
+  .enter().append("svg:marker")
+    .attr("id", String)
+    .attr("viewBox", "0 -5 10 10")
+    .attr("refX", 15)
+    .attr("refY", -1.5)
+    .attr("markerWidth", 6)
+    .attr("markerHeight", 6)
+    .attr("orient", "auto")
+  .append("svg:path")
+    .attr("d", "M0,-5L10,0L0,5");
+
+var path = svg.append("svg:g").selectAll("path")
+    .data(force.links())
+  .enter().append("svg:path")
+    .attr("class", function(d) { return "link " + d.type; })
+    .attr("marker-end", function(d) { return "url(#" + d.type + ")"; });
+
+var circle = svg.append("svg:g").selectAll("circle")
+    .data(force.nodes())
+  .enter().append("svg:circle")
+    .attr("r", 6)
+    .call(force.drag);
+
+var text = svg.append("svg:g").selectAll("g")
+    .data(force.nodes())
+  .enter().append("svg:g");
+
+// A copy of the text with a thick white stroke for legibility.
+text.append("svg:text")
+    .attr("x", 8)
+    .attr("y", ".31em")
+    .attr("class", "shadow")
+    .text(function(d) { return d.name; });
+
+text.append("svg:text")
+    .attr("x", 8)
+    .attr("y", ".31em")
+    .text(function(d) { return d.name; });
+
+var edgetext = svg.append("svg:g").selectAll("g")
+    .data(force.links())
+    .enter().append("svg:g");
+
+edgetext
+    .append("svg:text")
+    .attr("x",0)
+    .attr("y","-0.2em")
+    .text(function(d) { return d.type; });
+
+// Use elliptical arc path segments to doubly-encode directionality.
+function tick() {
+  path.attr("d", function(d) {
+    var dx = d.target.x - d.source.x,
+        dy = d.target.y - d.source.y,
+        // dr = Math.sqrt(dx * dx + dy * dy);
+        dr = 0;
+    return "M" + d.source.x + "," + d.source.y + "A" + dr + "," + dr + " 0 0,1 " + d.target.x + "," + d.target.y;
+  });
+
+  circle.attr("transform", function(d) {
+    return "translate(" + d.x + "," + d.y + ")";
+  });
+
+  text.attr("transform", function(d) {
+    return "translate(" + d.x + "," + d.y + ")";
+  });
+
+  edgetext.attr("transform", function(d) {
+      return "translate(" +
+         (d.source.x + d.target.x)/2 + "," +
+         (d.source.y + d.target.y)/2 +
+         ")rotate(" +
+         (Math.atan2(d.target.y - d.source.y, d.target.x - d.source.x) * 180 / Math.PI) +
+         ")";
+  });
+}
+
+    </script>
+  </body>
+</html>
diff --git a/apps/workbench/public/robots.txt b/apps/workbench/public/robots.txt
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/workbench/script/rails b/apps/workbench/script/rails
new file mode 100755 (executable)
index 0000000..f8da2cf
--- /dev/null
@@ -0,0 +1,6 @@
+#!/usr/bin/env ruby
+# This command will automatically be run when you run "rails" with Rails 3 gems installed from the root of your application.
+
+APP_PATH = File.expand_path('../../config/application',  __FILE__)
+require File.expand_path('../../config/boot',  __FILE__)
+require 'rails/commands'
diff --git a/apps/workbench/test/controllers/actions_controller_test.rb b/apps/workbench/test/controllers/actions_controller_test.rb
new file mode 100644 (file)
index 0000000..0e28b06
--- /dev/null
@@ -0,0 +1,45 @@
+require 'test_helper'
+
+class ActionsControllerTest < ActionController::TestCase
+
+  test "send report" do
+    post :report_issue, {format: 'js'}, session_for(:admin)
+    assert_response :success
+
+    found_email = false
+    ActionMailer::Base.deliveries.andand.each do |email|
+      if email.subject.include? "Issue reported by admin"
+        found_email = true
+        break
+      end
+    end
+    assert_equal true, found_email, 'Expected email after issue reported'
+  end
+
+  test "combine files into new collection" do
+    post(:combine_selected_files_into_collection, {
+           selection: ['zzzzz-4zz18-znfnqtbbv4spc3w/foo',
+                       'zzzzz-4zz18-ehbhgtheo8909or/bar',
+                       'zzzzz-4zz18-y9vne9npefyxh8g/baz',
+                       '1fd08fc162a5c6413070a8bd0bffc818+150'],
+           format: "json"},
+         session_for(:active))
+
+    assert_response 302   # collection created and redirected to new collection page
+
+    assert response.headers['Location'].include? '/collections/'
+    new_collection_uuid = response.headers['Location'].split('/')[-1]
+
+    use_token :active
+    collection = Collection.select([:uuid, :manifest_text]).where(uuid: new_collection_uuid).first
+    manifest_text = collection['manifest_text']
+    assert manifest_text.include?('foo'), 'Not found foo in new collection manifest text'
+    assert manifest_text.include?('bar'), 'Not found bar in new collection manifest text'
+    assert manifest_text.include?('baz'), 'Not found baz in new collection manifest text'
+    assert manifest_text.include?('0:0:file1 0:0:file2 0:0:file3'),
+                'Not found 0:0:file1 0:0:file2 0:0:file3 in new collection manifest text'
+    assert manifest_text.include?('dir1/subdir'), 'Not found dir1/subdir in new collection manifest text'
+    assert manifest_text.include?('dir2'), 'Not found dir2 in new collection manifest text'
+  end
+
+end
diff --git a/apps/workbench/test/controllers/api_client_authorizations_controller_test.rb b/apps/workbench/test/controllers/api_client_authorizations_controller_test.rb
new file mode 100644 (file)
index 0000000..0a99a6a
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class ApiClientAuthorizationsControllerTest < ActionController::TestCase
+end
diff --git a/apps/workbench/test/controllers/application_controller_test.rb b/apps/workbench/test/controllers/application_controller_test.rb
new file mode 100644 (file)
index 0000000..d0d9c5d
--- /dev/null
@@ -0,0 +1,328 @@
+require 'test_helper'
+
+class ApplicationControllerTest < ActionController::TestCase
+  # These tests don't do state-changing API calls. Save some time by
+  # skipping the database reset.
+  reset_api_fixtures :after_each_test, false
+  reset_api_fixtures :after_suite, true
+
+  setup do
+    @user_dataclass = ArvadosBase.resource_class_for_uuid(api_fixture('users')['active']['uuid'])
+  end
+
+  test "links for object" do
+    use_token :active
+
+    ac = ApplicationController.new
+
+    link_head_uuid = api_fixture('links')['foo_file_readable_by_active']['head_uuid']
+
+    links = ac.send :links_for_object, link_head_uuid
+
+    assert links, 'Expected links'
+    assert links.is_a?(Array), 'Expected an array'
+    assert links.size > 0, 'Expected at least one link'
+    assert links[0][:uuid], 'Expected uuid for the head_link'
+  end
+
+  test "preload links for objects and uuids" do
+    use_token :active
+
+    ac = ApplicationController.new
+
+    link1_head_uuid = api_fixture('links')['foo_file_readable_by_active']['head_uuid']
+    link2_uuid = api_fixture('links')['bar_file_readable_by_active']['uuid']
+    link3_head_uuid = api_fixture('links')['bar_file_readable_by_active']['head_uuid']
+
+    link2_object = User.find(api_fixture('users')['active']['uuid'])
+    link2_object_uuid = link2_object['uuid']
+
+    uuids = [link1_head_uuid, link2_object, link3_head_uuid]
+    links = ac.send :preload_links_for_objects, uuids
+
+    assert links, 'Expected links'
+    assert links.is_a?(Hash), 'Expected a hash'
+    assert links.size == 3, 'Expected three objects in the preloaded links hash'
+    assert links[link1_head_uuid], 'Expected links for the passed in link head_uuid'
+    assert links[link2_object_uuid], 'Expected links for the passed in object uuid'
+    assert links[link3_head_uuid], 'Expected links for the passed in link head_uuid'
+
+    # invoke again for this same input. this time, the preloaded data will be returned
+    links = ac.send :preload_links_for_objects, uuids
+    assert links, 'Expected links'
+    assert links.is_a?(Hash), 'Expected a hash'
+    assert links.size == 3, 'Expected three objects in the preloaded links hash'
+    assert links[link1_head_uuid], 'Expected links for the passed in link head_uuid'
+  end
+
+  [ [:preload_links_for_objects, [] ],
+    [:preload_collections_for_objects, [] ],
+    [:preload_log_collections_for_objects, [] ],
+    [:preload_objects_for_dataclass, [] ],
+  ].each do |input|
+    test "preload data for empty array input #{input}" do
+      use_token :active
+
+      ac = ApplicationController.new
+
+      if input[0] == :preload_objects_for_dataclass
+        objects = ac.send input[0], @user_dataclass, input[1]
+      else
+        objects = ac.send input[0], input[1]
+      end
+
+      assert objects, 'Expected objects'
+      assert objects.is_a?(Hash), 'Expected a hash'
+      assert objects.size == 0, 'Expected no objects in the preloaded hash'
+    end
+  end
+
+  [ [:preload_links_for_objects, 'input not an array'],
+    [:preload_links_for_objects, nil],
+    [:links_for_object, nil],
+    [:preload_collections_for_objects, 'input not an array'],
+    [:preload_collections_for_objects, nil],
+    [:collections_for_object, nil],
+    [:preload_log_collections_for_objects, 'input not an array'],
+    [:preload_log_collections_for_objects, nil],
+    [:log_collections_for_object, nil],
+    [:preload_objects_for_dataclass, 'input not an array'],
+    [:preload_objects_for_dataclass, nil],
+    [:object_for_dataclass, 'some_dataclass', nil],
+    [:object_for_dataclass, nil, 'some_uuid'],
+  ].each do |input|
+    test "preload data for wrong type input #{input}" do
+      use_token :active
+
+      ac = ApplicationController.new
+
+      if input[0] == :object_for_dataclass
+        assert_raise ArgumentError do
+          ac.send input[0], input[1], input[2]
+        end
+      else
+        assert_raise ArgumentError do
+          ac.send input[0], input[1]
+        end
+      end
+    end
+  end
+
+  [ [:links_for_object, 'no-such-uuid' ],
+    [:collections_for_object, 'no-such-uuid' ],
+    [:log_collections_for_object, 'no-such-uuid' ],
+    [:object_for_dataclass, 'no-such-uuid' ],
+  ].each do |input|
+    test "get data for no such uuid #{input}" do
+      use_token :active
+
+      ac = ApplicationController.new
+
+      if input[0] == :object_for_dataclass
+        object = ac.send input[0], @user_dataclass, input[1]
+        assert_not object, 'Expected no object'
+      else
+        objects = ac.send input[0], input[1]
+        assert objects, 'Expected objects'
+        assert objects.is_a?(Array), 'Expected an array'
+      end
+    end
+  end
+
+  test "get 10 objects of data class user" do
+    use_token :active
+
+    ac = ApplicationController.new
+
+    objects = ac.send :get_n_objects_of_class, @user_dataclass, 10
+
+    assert objects, 'Expected objects'
+    assert objects.is_a?(ArvadosResourceList), 'Expected an ArvadosResourceList'
+
+    first_object = objects.first
+    assert first_object, 'Expected at least one object'
+    assert_equal 'User', first_object.class.name, 'Expected user object'
+
+    # invoke it again. this time, the preloaded info will be returned
+    objects = ac.send :get_n_objects_of_class, @user_dataclass, 10
+    assert objects, 'Expected objects'
+    assert_equal 'User', objects.first.class.name, 'Expected user object'
+  end
+
+  [ ['User', 10],
+    [nil, 10],
+    [@user_dataclass, 0],
+    [@user_dataclass, -1],
+    [@user_dataclass, nil] ].each do |input|
+    test "get_n_objects for incorrect input #{input}" do
+      use_token :active
+
+      ac = ApplicationController.new
+
+      assert_raise ArgumentError do
+        ac.send :get_n_objects_of_class, input[0], input[1]
+      end
+    end
+  end
+
+  test "collections for object" do
+    use_token :active
+
+    ac = ApplicationController.new
+
+    uuid = api_fixture('collections')['foo_file']['uuid']
+
+    collections = ac.send :collections_for_object, uuid
+
+    assert collections, 'Expected collections'
+    assert collections.is_a?(Array), 'Expected an array'
+    assert collections.size == 1, 'Expected one collection object'
+    assert_equal collections[0][:uuid], uuid, 'Expected uuid not found in collections'
+  end
+
+  test "preload collections for given uuids" do
+    use_token :active
+
+    ac = ApplicationController.new
+
+    uuid1 = api_fixture('collections')['foo_file']['uuid']
+    uuid2 = api_fixture('collections')['bar_file']['uuid']
+
+    uuids = [uuid1, uuid2]
+    collections = ac.send :preload_collections_for_objects, uuids
+
+    assert collections, 'Expected collection'
+    assert collections.is_a?(Hash), 'Expected a hash'
+    assert collections.size == 2, 'Expected two objects in the preloaded collection hash'
+    assert collections[uuid1], 'Expected collections for the passed in uuid'
+    assert_equal collections[uuid1].size, 1, 'Expected one collection for the passed in uuid'
+    assert collections[uuid2], 'Expected collections for the passed in uuid'
+    assert_equal collections[uuid2].size, 1, 'Expected one collection for the passed in uuid'
+
+    # invoke again for this same input. this time, the preloaded data will be returned
+    collections = ac.send :preload_collections_for_objects, uuids
+    assert collections, 'Expected collection'
+    assert collections.is_a?(Hash), 'Expected a hash'
+    assert collections.size == 2, 'Expected two objects in the preloaded collection hash'
+    assert collections[uuid1], 'Expected collections for the passed in uuid'
+  end
+
+  test "log collections for object" do
+    use_token :active
+
+    ac = ApplicationController.new
+
+    uuid = api_fixture('logs')['log4']['object_uuid']
+
+    collections = ac.send :log_collections_for_object, uuid
+
+    assert collections, 'Expected collections'
+    assert collections.is_a?(Array), 'Expected an array'
+    assert collections.size == 1, 'Expected one collection object'
+    assert_equal collections[0][:uuid], uuid, 'Expected uuid not found in collections'
+  end
+
+  test "preload log collections for given uuids" do
+    use_token :active
+
+    ac = ApplicationController.new
+
+    uuid1 = api_fixture('logs')['log4']['object_uuid']
+    uuid2 = api_fixture('collections')['bar_file']['uuid']
+
+    uuids = [uuid1, uuid2]
+    collections = ac.send :preload_log_collections_for_objects, uuids
+
+    assert collections, 'Expected collection'
+    assert collections.is_a?(Hash), 'Expected a hash'
+    assert collections.size == 2, 'Expected two objects in the preloaded collection hash'
+    assert collections[uuid1], 'Expected collections for the passed in uuid'
+    assert_equal collections[uuid1].size, 1, 'Expected one collection for the passed in uuid'
+    assert collections[uuid2], 'Expected collections for the passed in uuid'
+    assert_equal collections[uuid2].size, 1, 'Expected one collection for the passed in uuid'
+
+    # invoke again for this same input. this time, the preloaded data will be returned
+    collections = ac.send :preload_log_collections_for_objects, uuids
+    assert collections, 'Expected collection'
+    assert collections.is_a?(Hash), 'Expected a hash'
+    assert collections.size == 2, 'Expected two objects in the preloaded collection hash'
+    assert collections[uuid1], 'Expected collections for the passed in uuid'
+  end
+
+  test "object for dataclass" do
+    use_token :active
+
+    ac = ApplicationController.new
+
+    dataclass = ArvadosBase.resource_class_for_uuid(api_fixture('jobs')['running']['uuid'])
+    uuid = api_fixture('jobs')['running']['uuid']
+
+    obj = ac.send :object_for_dataclass, dataclass, uuid
+
+    assert obj, 'Expected object'
+    assert 'Job', obj.class
+    assert_equal uuid, obj['uuid'], 'Expected uuid not found'
+    assert_equal api_fixture('jobs')['running']['script_version'], obj['script_version'],
+      'Expected script_version not found'
+  end
+
+  test "preload objects for dataclass" do
+    use_token :active
+
+    ac = ApplicationController.new
+
+    dataclass = ArvadosBase.resource_class_for_uuid(api_fixture('jobs')['running']['uuid'])
+
+    uuid1 = api_fixture('jobs')['running']['uuid']
+    uuid2 = api_fixture('jobs')['running_cancelled']['uuid']
+
+    uuids = [uuid1, uuid2]
+    users = ac.send :preload_objects_for_dataclass, dataclass, uuids
+
+    assert users, 'Expected objects'
+    assert users.is_a?(Hash), 'Expected a hash'
+
+    assert users.size == 2, 'Expected two objects in the preloaded hash'
+    assert users[uuid1], 'Expected user object for the passed in uuid'
+    assert users[uuid2], 'Expected user object for the passed in uuid'
+
+    # invoke again for this same input. this time, the preloaded data will be returned
+    users = ac.send :preload_objects_for_dataclass, dataclass, uuids
+    assert users, 'Expected objects'
+    assert users.is_a?(Hash), 'Expected a hash'
+    assert users.size == 2, 'Expected two objects in the preloaded hash'
+
+    # invoke again for this with one more uuid
+    uuids << api_fixture('jobs')['foobar']['uuid']
+    users = ac.send :preload_objects_for_dataclass, dataclass, uuids
+    assert users, 'Expected objects'
+    assert users.is_a?(Hash), 'Expected a hash'
+    assert users.size == 3, 'Expected three objects in the preloaded hash'
+  end
+
+  test "requesting a nonexistent object returns 404" do
+    # We're really testing ApplicationController's find_object_by_uuid.
+    # It's easiest to do that by instantiating a concrete controller.
+    @controller = NodesController.new
+    get(:show, {id: "zzzzz-zzzzz-zzzzzzzzzzzzzzz"}, session_for(:admin))
+    assert_response 404
+  end
+
+  test "Workbench returns 4xx when API server is unreachable" do
+    # We're really testing ApplicationController's render_exception.
+    # Our primary concern is that it doesn't raise an error and
+    # return 500.
+    orig_api_server = Rails.configuration.arvados_v1_base
+    begin
+      # The URL should look valid in all respects, and avoid talking over a
+      # network.  100::/64 is the IPv6 discard prefix, so it's perfect.
+      Rails.configuration.arvados_v1_base = "https://[100::f]:1/"
+      @controller = NodesController.new
+      get(:index, {}, session_for(:active))
+      assert_includes(405..422, @response.code.to_i,
+                      "bad response code when API server is unreachable")
+    ensure
+      Rails.configuration.arvados_v1_base = orig_api_server
+    end
+  end
+end
diff --git a/apps/workbench/test/controllers/authorized_keys_controller_test.rb b/apps/workbench/test/controllers/authorized_keys_controller_test.rb
new file mode 100644 (file)
index 0000000..6654101
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class AuthorizedKeysControllerTest < ActionController::TestCase
+end
diff --git a/apps/workbench/test/controllers/collections_controller_test.rb b/apps/workbench/test/controllers/collections_controller_test.rb
new file mode 100644 (file)
index 0000000..14db674
--- /dev/null
@@ -0,0 +1,316 @@
+require 'test_helper'
+
+class CollectionsControllerTest < ActionController::TestCase
+  # These tests don't do state-changing API calls. Save some time by
+  # skipping the database reset.
+  reset_api_fixtures :after_each_test, false
+  reset_api_fixtures :after_suite, true
+
+  include PipelineInstancesHelper
+
+  NONEXISTENT_COLLECTION = "ffffffffffffffffffffffffffffffff+0"
+
+  def stub_file_content
+    # For the duration of the current test case, stub file download
+    # content with a randomized (but recognizable) string. Return the
+    # string, the test case can use it in assertions.
+    txt = 'the quick brown fox ' + rand(2**32).to_s
+    @controller.stubs(:file_enumerator).returns([txt])
+    txt
+  end
+
+  def collection_params(collection_name, file_name=nil)
+    uuid = api_fixture('collections')[collection_name.to_s]['uuid']
+    params = {uuid: uuid, id: uuid}
+    params[:file] = file_name if file_name
+    params
+  end
+
+  def assert_hash_includes(actual_hash, expected_hash, msg=nil)
+    expected_hash.each do |key, value|
+      assert_equal(value, actual_hash[key], msg)
+    end
+  end
+
+  def assert_no_session
+    assert_hash_includes(session, {arvados_api_token: nil},
+                         "session includes unexpected API token")
+  end
+
+  def assert_session_for_auth(client_auth)
+    api_token =
+      api_fixture('api_client_authorizations')[client_auth.to_s]['api_token']
+    assert_hash_includes(session, {arvados_api_token: api_token},
+                         "session token does not belong to #{client_auth}")
+  end
+
+  def show_collection(params, session={}, response=:success)
+    params = collection_params(params) if not params.is_a? Hash
+    session = session_for(session) if not session.is_a? Hash
+    get(:show, params, session)
+    assert_response response
+  end
+
+  test "viewing a collection" do
+    show_collection(:foo_file, :active)
+    assert_equal([['.', 'foo', 3]], assigns(:object).files)
+  end
+
+  test "viewing a collection fetches related projects" do
+    show_collection({id: api_fixture('collections')["foo_file"]['portable_data_hash']}, :active)
+    assert_includes(assigns(:same_pdh).map(&:owner_uuid),
+                    api_fixture('groups')['aproject']['uuid'],
+                    "controller did not find linked project")
+  end
+
+  test "viewing a collection fetches related permissions" do
+    show_collection(:bar_file, :active)
+    assert_includes(assigns(:permissions).map(&:uuid),
+                    api_fixture('links')['bar_file_readable_by_active']['uuid'],
+                    "controller did not find permission link")
+  end
+
+  test "viewing a collection fetches jobs that output it" do
+    show_collection(:bar_file, :active)
+    assert_includes(assigns(:output_of).map(&:uuid),
+                    api_fixture('jobs')['foobar']['uuid'],
+                    "controller did not find output job")
+  end
+
+  test "viewing a collection fetches jobs that logged it" do
+    show_collection(:baz_file, :active)
+    assert_includes(assigns(:log_of).map(&:uuid),
+                    api_fixture('jobs')['foobar']['uuid'],
+                    "controller did not find logger job")
+  end
+
+  test "viewing a collection fetches logs about it" do
+    show_collection(:foo_file, :active)
+    assert_includes(assigns(:logs).map(&:uuid),
+                    api_fixture('logs')['log4']['uuid'],
+                    "controller did not find related log")
+  end
+
+  test "sharing auths available to admin" do
+    show_collection("collection_owned_by_active", "admin_trustedclient")
+    assert_not_nil assigns(:search_sharing)
+  end
+
+  test "sharing auths available to owner" do
+    show_collection("collection_owned_by_active", "active_trustedclient")
+    assert_not_nil assigns(:search_sharing)
+  end
+
+  test "sharing auths available to reader" do
+    show_collection("foo_collection_in_aproject",
+                    "project_viewer_trustedclient")
+    assert_not_nil assigns(:search_sharing)
+  end
+
+  test "viewing collection files with a reader token" do
+    params = collection_params(:foo_file)
+    params[:reader_token] = api_fixture("api_client_authorizations",
+                                        "active_all_collections", "api_token")
+    get(:show_file_links, params)
+    assert_response :success
+    assert_equal([['.', 'foo', 3]], assigns(:object).files)
+    assert_no_session
+  end
+
+  test "fetching collection file with reader token" do
+    expected = stub_file_content
+    params = collection_params(:foo_file, "foo")
+    params[:reader_token] = api_fixture("api_client_authorizations",
+                                        "active_all_collections", "api_token")
+    get(:show_file, params)
+    assert_response :success
+    assert_equal(expected, @response.body,
+                 "failed to fetch a Collection file with a reader token")
+    assert_no_session
+  end
+
+  test "reader token Collection links end with trailing slash" do
+    # Testing the fix for #2937.
+    session = session_for(:active_trustedclient)
+    post(:share, collection_params(:foo_file), session)
+    assert(@controller.download_link.ends_with? '/',
+           "Collection share link does not end with slash for wget")
+  end
+
+  test "getting a file from Keep" do
+    params = collection_params(:foo_file, 'foo')
+    sess = session_for(:active)
+    expect_content = stub_file_content
+    get(:show_file, params, sess)
+    assert_response :success
+    assert_equal(expect_content, @response.body,
+                 "failed to get a correct file from Keep")
+  end
+
+  test "can't get a file from Keep without permission" do
+    params = collection_params(:foo_file, 'foo')
+    sess = session_for(:spectator)
+    get(:show_file, params, sess)
+    assert_response 404
+  end
+
+  test "trying to get a nonexistent file from Keep returns a 404" do
+    params = collection_params(:foo_file, 'gone')
+    sess = session_for(:admin)
+    get(:show_file, params, sess)
+    assert_response 404
+  end
+
+  test "getting a file from Keep with a good reader token" do
+    params = collection_params(:foo_file, 'foo')
+    read_token = api_fixture('api_client_authorizations')['active']['api_token']
+    params[:reader_token] = read_token
+    expect_content = stub_file_content
+    get(:show_file, params)
+    assert_response :success
+    assert_equal(expect_content, @response.body,
+                 "failed to get a correct file from Keep using a reader token")
+    assert_not_equal(read_token, session[:arvados_api_token],
+                     "using a reader token set the session's API token")
+  end
+
+  test "trying to get from Keep with an unscoped reader token prompts login" do
+    params = collection_params(:foo_file, 'foo')
+    params[:reader_token] =
+      api_fixture('api_client_authorizations')['active_noscope']['api_token']
+    get(:show_file, params)
+    assert_response :redirect
+  end
+
+  test "can get a file with an unpermissioned auth but in-scope reader token" do
+    params = collection_params(:foo_file, 'foo')
+    sess = session_for(:expired)
+    read_token = api_fixture('api_client_authorizations')['active']['api_token']
+    params[:reader_token] = read_token
+    expect_content = stub_file_content
+    get(:show_file, params, sess)
+    assert_response :success
+    assert_equal(expect_content, @response.body,
+                 "failed to get a correct file from Keep using a reader token")
+    assert_not_equal(read_token, session[:arvados_api_token],
+                     "using a reader token set the session's API token")
+  end
+
+  test "inactive user can retrieve user agreement" do
+    ua_collection = api_fixture('collections')['user_agreement']
+    # Here we don't test whether the agreement can be retrieved from
+    # Keep. We only test that show_file decides to send file content,
+    # so we use the file content stub.
+    stub_file_content
+    get :show_file, {
+      uuid: ua_collection['uuid'],
+      file: ua_collection['manifest_text'].match(/ \d+:\d+:(\S+)/)[1]
+    }, session_for(:inactive)
+    assert_nil(assigns(:unsigned_user_agreements),
+               "Did not skip check_user_agreements filter " +
+               "when showing the user agreement.")
+    assert_response :success
+  end
+
+  test "requesting nonexistent Collection returns 404" do
+    show_collection({uuid: NONEXISTENT_COLLECTION, id: NONEXISTENT_COLLECTION},
+                    :active, 404)
+  end
+
+  test "use a reasonable read buffer even if client requests a huge range" do
+    fakefiledata = mock
+    IO.expects(:popen).returns(fakefiledata)
+    fakefiledata.expects(:read).twice.with() do |length|
+      # Fail the test if read() is called with length>1MiB:
+      length < 2**20
+      ## Force the ActionController::Live thread to lose the race to
+      ## verify that @response.body.length actually waits for the
+      ## response (see below):
+      # sleep 3
+    end.returns("foo\n", nil)
+    fakefiledata.expects(:close)
+    foo_file = api_fixture('collections')['foo_file']
+    @request.headers['Range'] = 'bytes=0-4294967296/*'
+    get :show_file, {
+      uuid: foo_file['uuid'],
+      file: foo_file['manifest_text'].match(/ \d+:\d+:(\S+)/)[1]
+    }, session_for(:active)
+    # Wait for the whole response to arrive before deciding whether
+    # mocks' expectations were met. Otherwise, Mocha will fail the
+    # test depending on how slowly the ActionController::Live thread
+    # runs.
+    @response.body.length
+  end
+
+  test "show file in a subdirectory of a collection" do
+    params = collection_params(:collection_with_files_in_subdir, 'subdir2/subdir3/subdir4/file1_in_subdir4.txt')
+    expect_content = stub_file_content
+    get(:show_file, params, session_for(:user1_with_load))
+    assert_response :success
+    assert_equal(expect_content, @response.body, "failed to get a correct file from Keep")
+  end
+
+  test 'provenance graph' do
+    use_token 'admin'
+
+    obj = find_fixture Collection, "graph_test_collection3"
+
+    provenance = obj.provenance.stringify_keys
+
+    [obj[:portable_data_hash]].each do |k|
+      assert_not_nil provenance[k], "Expected key #{k} in provenance set"
+    end
+
+    prov_svg = ProvenanceHelper::create_provenance_graph(provenance, "provenance_svg",
+                                                         {:request => RequestDuck,
+                                                           :direction => :bottom_up,
+                                                           :combine_jobs => :script_only})
+
+    stage1 = find_fixture Job, "graph_stage1"
+    stage3 = find_fixture Job, "graph_stage3"
+    previous_job_run = find_fixture Job, "previous_job_run"
+
+    obj_id = obj.portable_data_hash.gsub('+', '\\\+')
+    stage1_out = stage1.output.gsub('+', '\\\+')
+    stage1_id = "#{stage1.script}_#{Digest::MD5.hexdigest(stage1[:script_parameters].to_json)}"
+    stage3_id = "#{stage3.script}_#{Digest::MD5.hexdigest(stage3[:script_parameters].to_json)}"
+
+    assert /#{obj_id}&#45;&gt;#{stage3_id}/.match(prov_svg)
+
+    assert /#{stage3_id}&#45;&gt;#{stage1_out}/.match(prov_svg)
+
+    assert /#{stage1_out}&#45;&gt;#{stage1_id}/.match(prov_svg)
+
+  end
+
+  test 'used_by graph' do
+    use_token 'admin'
+    obj = find_fixture Collection, "graph_test_collection1"
+
+    used_by = obj.used_by.stringify_keys
+
+    used_by_svg = ProvenanceHelper::create_provenance_graph(used_by, "used_by_svg",
+                                                            {:request => RequestDuck,
+                                                              :direction => :top_down,
+                                                              :combine_jobs => :script_only,
+                                                              :pdata_only => true})
+
+    stage2 = find_fixture Job, "graph_stage2"
+    stage3 = find_fixture Job, "graph_stage3"
+
+    stage2_id = "#{stage2.script}_#{Digest::MD5.hexdigest(stage2[:script_parameters].to_json)}"
+    stage3_id = "#{stage3.script}_#{Digest::MD5.hexdigest(stage3[:script_parameters].to_json)}"
+
+    obj_id = obj.portable_data_hash.gsub('+', '\\\+')
+    stage3_out = stage3.output.gsub('+', '\\\+')
+
+    assert /#{obj_id}&#45;&gt;#{stage2_id}/.match(used_by_svg)
+
+    assert /#{obj_id}&#45;&gt;#{stage3_id}/.match(used_by_svg)
+
+    assert /#{stage3_id}&#45;&gt;#{stage3_out}/.match(used_by_svg)
+
+    assert /#{stage3_id}&#45;&gt;#{stage3_out}/.match(used_by_svg)
+
+  end
+end
diff --git a/apps/workbench/test/controllers/groups_controller_test.rb b/apps/workbench/test/controllers/groups_controller_test.rb
new file mode 100644 (file)
index 0000000..cdbb5d5
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class ProjectsControllerTest < ActionController::TestCase
+end
diff --git a/apps/workbench/test/controllers/humans_controller_test.rb b/apps/workbench/test/controllers/humans_controller_test.rb
new file mode 100644 (file)
index 0000000..2a8b7b8
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class HumansControllerTest < ActionController::TestCase
+end
diff --git a/apps/workbench/test/controllers/job_tasks_controller_test.rb b/apps/workbench/test/controllers/job_tasks_controller_test.rb
new file mode 100644 (file)
index 0000000..c8089b0
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class JobTasksControllerTest < ActionController::TestCase
+end
diff --git a/apps/workbench/test/controllers/jobs_controller_test.rb b/apps/workbench/test/controllers/jobs_controller_test.rb
new file mode 100644 (file)
index 0000000..609e58c
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class JobsControllerTest < ActionController::TestCase
+end
diff --git a/apps/workbench/test/controllers/keep_disks_controller_test.rb b/apps/workbench/test/controllers/keep_disks_controller_test.rb
new file mode 100644 (file)
index 0000000..d6f2954
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class KeepDisksControllerTest < ActionController::TestCase
+end
diff --git a/apps/workbench/test/controllers/links_controller_test.rb b/apps/workbench/test/controllers/links_controller_test.rb
new file mode 100644 (file)
index 0000000..b3c7444
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class MetadataControllerTest < ActionController::TestCase
+end
diff --git a/apps/workbench/test/controllers/logs_controller_test.rb b/apps/workbench/test/controllers/logs_controller_test.rb
new file mode 100644 (file)
index 0000000..0b8a978
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class LogsControllerTest < ActionController::TestCase
+end
diff --git a/apps/workbench/test/controllers/nodes_controller_test.rb b/apps/workbench/test/controllers/nodes_controller_test.rb
new file mode 100644 (file)
index 0000000..57e35c4
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class NodesControllerTest < ActionController::TestCase
+end
diff --git a/apps/workbench/test/controllers/pipeline_instances_controller_test.rb b/apps/workbench/test/controllers/pipeline_instances_controller_test.rb
new file mode 100644 (file)
index 0000000..ac36f19
--- /dev/null
@@ -0,0 +1,328 @@
+require 'test_helper'
+
+class PipelineInstancesControllerTest < ActionController::TestCase
+  include PipelineInstancesHelper
+
+  def create_instance_long_enough_to(instance_attrs={})
+    # create 'two_part' pipeline with the given instance attributes
+    pt_fixture = api_fixture('pipeline_templates')['two_part']
+    post :create, {
+      pipeline_instance: instance_attrs.merge({
+        pipeline_template_uuid: pt_fixture['uuid']
+      }),
+      format: :json
+    }, session_for(:active)
+    assert_response :success
+    pi_uuid = assigns(:object).uuid
+    assert_not_nil assigns(:object)
+
+    # yield
+    yield pi_uuid, pt_fixture
+
+    # delete the pipeline instance
+    use_token :active
+    PipelineInstance.where(uuid: pi_uuid).first.destroy
+  end
+
+  test "pipeline instance components populated after create" do
+    create_instance_long_enough_to do |new_instance_uuid, template_fixture|
+      assert_equal(template_fixture['components'].to_json,
+                   assigns(:object).components.to_json)
+    end
+  end
+
+  test "can render pipeline instance with tagged collections" do
+    # Make sure to pass in a tagged collection to test that part of the rendering behavior.
+    get(:show,
+        {id: api_fixture("pipeline_instances")["pipeline_with_tagged_collection_input"]["uuid"]},
+        session_for(:active))
+    assert_response :success
+  end
+
+  test "update script_parameters one at a time using merge param" do
+      template_fixture = api_fixture('pipeline_templates')['two_part']
+      post :update, {
+        id: api_fixture("pipeline_instances")["pipeline_to_merge_params"]["uuid"],
+        pipeline_instance: {
+          components: {
+            "part-two" => {
+              script_parameters: {
+                integer_with_value: {
+                  value: 9
+                },
+                plain_string: {
+                  value: 'quux'
+                },
+              }
+            }
+          }
+        },
+        merge: true,
+        format: :json
+      }, session_for(:active)
+      assert_response :success
+      assert_not_nil assigns(:object)
+      orig_params = template_fixture['components']['part-two']['script_parameters']
+      new_params = assigns(:object).components[:'part-two'][:script_parameters]
+      orig_params.keys.each do |k|
+        unless %w(integer_with_value plain_string).index(k)
+          assert_equal orig_params[k].to_json, new_params[k.to_sym].to_json
+        end
+      end
+  end
+
+  test "component rendering copes with unexpected components format" do
+    get(:show,
+        {id: api_fixture("pipeline_instances")["components_is_jobspec"]["uuid"]},
+        session_for(:active))
+    assert_response :success
+  end
+
+  test "dates in JSON components are parsed" do
+    get(:show,
+        {id: api_fixture('pipeline_instances')['has_component_with_completed_jobs']['uuid']},
+        session_for(:active))
+    assert_response :success
+    assert_not_nil assigns(:object)
+    assert_not_nil assigns(:object).components[:foo][:job]
+    assert assigns(:object).components[:foo][:job][:started_at].is_a? Time
+    assert assigns(:object).components[:foo][:job][:finished_at].is_a? Time
+  end
+
+  # The next two tests ensure that a pipeline instance can be copied
+  # when the template has components that do not exist in the
+  # instance (ticket #4000).
+
+  test "copy pipeline instance with components=use_latest" do
+    post(:copy,
+         {
+           id: api_fixture('pipeline_instances')['pipeline_with_newer_template']['uuid'],
+           components: 'use_latest',
+           script: 'use_latest',
+           pipeline_instance: {
+             state: 'RunningOnServer'
+           }
+         },
+         session_for(:active))
+    assert_response 302
+    assert_not_nil assigns(:object)
+
+    # Component 'foo' has script parameters only in the pipeline instance.
+    # Component 'bar' is present only in the pipeline_template.
+    # Test that the copied pipeline instance includes parameters for
+    # component 'foo' from the source instance, and parameters for
+    # component 'bar' from the source template.
+    #
+    assert_not_nil assigns(:object).components[:foo]
+    foo = assigns(:object).components[:foo]
+    assert_not_nil foo[:script_parameters]
+    assert_not_nil foo[:script_parameters][:input]
+    assert_equal 'foo instance input', foo[:script_parameters][:input][:title]
+
+    assert_not_nil assigns(:object).components[:bar]
+    bar = assigns(:object).components[:bar]
+    assert_not_nil bar[:script_parameters]
+    assert_not_nil bar[:script_parameters][:input]
+    assert_equal 'bar template input', bar[:script_parameters][:input][:title]
+  end
+
+  test "copy pipeline instance on newer template works with script=use_same" do
+    post(:copy,
+         {
+           id: api_fixture('pipeline_instances')['pipeline_with_newer_template']['uuid'],
+           components: 'use_latest',
+           script: 'use_same',
+           pipeline_instance: {
+             state: 'RunningOnServer'
+           }
+         },
+         session_for(:active))
+    assert_response 302
+    assert_not_nil assigns(:object)
+
+    # Test that relevant component parameters were copied from both
+    # the source instance and source template, respectively (see
+    # previous test)
+    #
+    assert_not_nil assigns(:object).components[:foo]
+    foo = assigns(:object).components[:foo]
+    assert_not_nil foo[:script_parameters]
+    assert_not_nil foo[:script_parameters][:input]
+    assert_equal 'foo instance input', foo[:script_parameters][:input][:title]
+
+    assert_not_nil assigns(:object).components[:bar]
+    bar = assigns(:object).components[:bar]
+    assert_not_nil bar[:script_parameters]
+    assert_not_nil bar[:script_parameters][:input]
+    assert_equal 'bar template input', bar[:script_parameters][:input][:title]
+  end
+
+  test "generate graph" do
+
+    use_token 'admin'
+
+    pipeline_for_graph = {
+      state: 'Complete',
+      uuid: 'zzzzz-d1hrv-9fm8l10i9z2kqc9',
+      components: {
+        stage1: {
+          repository: 'foo',
+          script: 'hash',
+          script_version: 'master',
+          job: {uuid: 'zzzzz-8i9sb-graphstage10000'},
+          output_uuid: 'zzzzz-4zz18-bv31uwvy3neko22'
+        },
+        stage2: {
+          repository: 'foo',
+          script: 'hash2',
+          script_version: 'master',
+          script_parameters: {
+            input: 'fa7aeb5140e2848d39b416daeef4ffc5+45'
+          },
+          job: {uuid: 'zzzzz-8i9sb-graphstage20000'},
+          output_uuid: 'zzzzz-4zz18-uukreo9rbgwsujx'
+        }
+      }
+    }
+
+    @controller.params['tab_pane'] = "Graph"
+    provenance, pips = @controller.graph([pipeline_for_graph])
+
+    graph_test_collection1 = find_fixture Collection, "graph_test_collection1"
+    stage1 = find_fixture Job, "graph_stage1"
+    stage2 = find_fixture Job, "graph_stage2"
+
+    ['component_zzzzz-d1hrv-9fm8l10i9z2kqc9_stage1',
+     'component_zzzzz-d1hrv-9fm8l10i9z2kqc9_stage2',
+     stage1.uuid,
+     stage2.uuid,
+     stage1.output,
+     stage2.output,
+     pipeline_for_graph[:components][:stage1][:output_uuid],
+     pipeline_for_graph[:components][:stage2][:output_uuid]
+    ].each do |k|
+
+      assert_not_nil provenance[k], "Expected key #{k} in provenance set"
+      assert_equal 1, pips[k], "Expected key #{k} in pips set" if !k.start_with? "component_"
+    end
+
+    prov_svg = ProvenanceHelper::create_provenance_graph provenance, "provenance_svg", {
+        :request => RequestDuck,
+        :all_script_parameters => true,
+        :combine_jobs => :script_and_version,
+        :pips => pips,
+        :only_components => true }
+
+    stage1_id = "#{stage1[:script]}_#{stage1[:script_version]}_#{Digest::MD5.hexdigest(stage1[:script_parameters].to_json)}"
+    stage2_id = "#{stage2[:script]}_#{stage2[:script_version]}_#{Digest::MD5.hexdigest(stage2[:script_parameters].to_json)}"
+
+    stage1_out = stage1[:output].gsub('+','\\\+')
+
+    assert_match /#{stage1_id}&#45;&gt;#{stage1_out}/, prov_svg
+
+    assert_match /#{stage1_out}&#45;&gt;#{stage2_id}/, prov_svg
+
+  end
+
+  test "generate graph compare" do
+
+    use_token 'admin'
+
+    pipeline_for_graph1 = {
+      state: 'Complete',
+      uuid: 'zzzzz-d1hrv-9fm8l10i9z2kqc9',
+      components: {
+        stage1: {
+          repository: 'foo',
+          script: 'hash',
+          script_version: 'master',
+          job: {uuid: 'zzzzz-8i9sb-graphstage10000'},
+          output_uuid: 'zzzzz-4zz18-bv31uwvy3neko22'
+        },
+        stage2: {
+          repository: 'foo',
+          script: 'hash2',
+          script_version: 'master',
+          script_parameters: {
+            input: 'fa7aeb5140e2848d39b416daeef4ffc5+45'
+          },
+          job: {uuid: 'zzzzz-8i9sb-graphstage20000'},
+          output_uuid: 'zzzzz-4zz18-uukreo9rbgwsujx'
+        }
+      }
+    }
+
+    pipeline_for_graph2 = {
+      state: 'Complete',
+      uuid: 'zzzzz-d1hrv-9fm8l10i9z2kqc0',
+      components: {
+        stage1: {
+          repository: 'foo',
+          script: 'hash',
+          script_version: 'master',
+          job: {uuid: 'zzzzz-8i9sb-graphstage10000'},
+          output_uuid: 'zzzzz-4zz18-bv31uwvy3neko22'
+        },
+        stage2: {
+          repository: 'foo',
+          script: 'hash2',
+          script_version: 'master',
+          script_parameters: {
+          },
+          job: {uuid: 'zzzzz-8i9sb-graphstage30000'},
+          output_uuid: 'zzzzz-4zz18-uukreo9rbgwsujj'
+        }
+      }
+    }
+
+    @controller.params['tab_pane'] = "Graph"
+    provenance, pips = @controller.graph([pipeline_for_graph1, pipeline_for_graph2])
+
+    collection1 = find_fixture Collection, "graph_test_collection1"
+
+    stage1 = find_fixture Job, "graph_stage1"
+    stage2 = find_fixture Job, "graph_stage2"
+    stage3 = find_fixture Job, "graph_stage3"
+
+    [['component_zzzzz-d1hrv-9fm8l10i9z2kqc9_stage1', nil],
+     ['component_zzzzz-d1hrv-9fm8l10i9z2kqc9_stage2', nil],
+     ['component_zzzzz-d1hrv-9fm8l10i9z2kqc0_stage1', nil],
+     ['component_zzzzz-d1hrv-9fm8l10i9z2kqc0_stage2', nil],
+     [stage1.uuid, 3],
+     [stage2.uuid, 1],
+     [stage3.uuid, 2],
+     [stage1.output, 3],
+     [stage2.output, 1],
+     [stage3.output, 2],
+     [pipeline_for_graph1[:components][:stage1][:output_uuid], 3],
+     [pipeline_for_graph1[:components][:stage2][:output_uuid], 1],
+     [pipeline_for_graph2[:components][:stage2][:output_uuid], 2]
+    ].each do |k|
+      assert_not_nil provenance[k[0]], "Expected key #{k[0]} in provenance set"
+      assert_equal k[1], pips[k[0]], "Expected key #{k} in pips" if !k[0].start_with? "component_"
+    end
+
+    prov_svg = ProvenanceHelper::create_provenance_graph provenance, "provenance_svg", {
+        :request => RequestDuck,
+        :all_script_parameters => true,
+        :combine_jobs => :script_and_version,
+        :pips => pips,
+        :only_components => true }
+
+    collection1_id = collection1.portable_data_hash.gsub('+','\\\+')
+
+    stage2_id = "#{stage2[:script]}_#{stage2[:script_version]}_#{Digest::MD5.hexdigest(stage2[:script_parameters].to_json)}"
+    stage3_id = "#{stage3[:script]}_#{stage3[:script_version]}_#{Digest::MD5.hexdigest(stage3[:script_parameters].to_json)}"
+
+    stage2_out = stage2[:output].gsub('+','\\\+')
+    stage3_out = stage3[:output].gsub('+','\\\+')
+
+    assert_match /#{collection1_id}&#45;&gt;#{stage2_id}/, prov_svg
+    assert_match /#{collection1_id}&#45;&gt;#{stage3_id}/, prov_svg
+
+    assert_match /#{stage2_id}&#45;&gt;#{stage2_out}/, prov_svg
+    assert_match /#{stage3_id}&#45;&gt;#{stage3_out}/, prov_svg
+
+  end
+
+end
diff --git a/apps/workbench/test/controllers/pipeline_templates_controller_test.rb b/apps/workbench/test/controllers/pipeline_templates_controller_test.rb
new file mode 100644 (file)
index 0000000..82c4fae
--- /dev/null
@@ -0,0 +1,10 @@
+require 'test_helper'
+
+class PipelineTemplatesControllerTest < ActionController::TestCase
+  test "component rendering copes with unexpeceted components format" do
+    get(:show,
+        {id: api_fixture("pipeline_templates")["components_is_jobspec"]["uuid"]},
+        session_for(:active))
+    assert_response :success
+  end
+end
diff --git a/apps/workbench/test/controllers/projects_controller_test.rb b/apps/workbench/test/controllers/projects_controller_test.rb
new file mode 100644 (file)
index 0000000..93f794d
--- /dev/null
@@ -0,0 +1,194 @@
+require 'test_helper'
+
+class ProjectsControllerTest < ActionController::TestCase
+  test "invited user is asked to sign user agreements on front page" do
+    get :index, {}, session_for(:inactive)
+    assert_response :redirect
+    assert_match(/^#{Regexp.escape(user_agreements_url)}\b/,
+                 @response.redirect_url,
+                 "Inactive user was not redirected to user_agreements page")
+  end
+
+  test "uninvited user is asked to wait for activation" do
+    get :index, {}, session_for(:inactive_uninvited)
+    assert_response :redirect
+    assert_match(/^#{Regexp.escape(inactive_users_url)}\b/,
+                 @response.redirect_url,
+                 "Uninvited user was not redirected to inactive user page")
+  end
+
+  [[:active, true],
+   [:project_viewer, false]].each do |which_user, should_show|
+    test "create subproject button #{'not ' unless should_show} shown to #{which_user}" do
+      readonly_project_uuid = api_fixture('groups')['aproject']['uuid']
+      get :show, {
+        id: readonly_project_uuid
+      }, session_for(which_user)
+      buttons = css_select('[data-method=post]').select do |el|
+        el.attributes['href'].match /project.*owner_uuid.*#{readonly_project_uuid}/
+      end
+      if should_show
+        assert_not_empty(buttons, "did not offer to create a subproject")
+      else
+        assert_empty(buttons.collect(&:to_s),
+                     "offered to create a subproject in a non-writable project")
+      end
+    end
+  end
+
+  test "sharing a project with a user and group" do
+    uuid_list = [api_fixture("groups")["future_project_viewing_group"]["uuid"],
+                 api_fixture("users")["future_project_user"]["uuid"]]
+    post(:share_with, {
+           id: api_fixture("groups")["asubproject"]["uuid"],
+           uuids: uuid_list,
+           format: "json"},
+         session_for(:active))
+    assert_response :success
+    assert_equal(uuid_list, json_response["success"])
+  end
+
+  test "user with project read permission can't add permissions" do
+    share_uuid = api_fixture("users")["spectator"]["uuid"]
+    post(:share_with, {
+           id: api_fixture("groups")["aproject"]["uuid"],
+           uuids: [share_uuid],
+           format: "json"},
+         session_for(:project_viewer))
+    assert_response 422
+    assert(json_response["errors"].andand.
+             any? { |msg| msg.start_with?("#{share_uuid}: ") },
+           "JSON response missing properly formatted sharing error")
+  end
+
+  def user_can_manage(user_sym, group_key)
+    get(:show, {id: api_fixture("groups")[group_key]["uuid"]},
+        session_for(user_sym))
+    is_manager = assigns(:user_is_manager)
+    assert_not_nil(is_manager, "user_is_manager flag not set")
+    if not is_manager
+      assert_empty(assigns(:share_links),
+                   "non-manager has share links set")
+    end
+    is_manager
+  end
+
+  test "admin can_manage aproject" do
+    assert user_can_manage(:admin, "aproject")
+  end
+
+  test "owner can_manage aproject" do
+    assert user_can_manage(:active, "aproject")
+  end
+
+  test "owner can_manage asubproject" do
+    assert user_can_manage(:active, "asubproject")
+  end
+
+  test "viewer can't manage aproject" do
+    refute user_can_manage(:project_viewer, "aproject")
+  end
+
+  test "viewer can't manage asubproject" do
+    refute user_can_manage(:project_viewer, "asubproject")
+  end
+
+  test "subproject_admin can_manage asubproject" do
+    assert user_can_manage(:subproject_admin, "asubproject")
+  end
+
+  test "detect ownership loop in project breadcrumbs" do
+    # This test has an arbitrary time limit -- otherwise we'd just sit
+    # here forever instead of reporting that the loop was not
+    # detected. The test passes quickly, but fails slowly.
+    Timeout::timeout 10 do
+      get(:show,
+          { id: api_fixture("groups")["project_owns_itself"]["uuid"] },
+          session_for(:admin))
+    end
+    assert_response :success
+  end
+
+  test "project admin can remove items from the project" do
+    coll_key = "collection_to_remove_from_subproject"
+    coll_uuid = api_fixture("collections")[coll_key]["uuid"]
+    delete(:remove_item,
+           { id: api_fixture("groups")["asubproject"]["uuid"],
+             item_uuid: coll_uuid,
+             format: "js" },
+           session_for(:subproject_admin))
+    assert_response :success
+    assert_match(/\b#{coll_uuid}\b/, @response.body,
+                 "removed object not named in response")
+  end
+
+  test 'projects#show tab infinite scroll partial obeys limit' do
+    get_contents_rows(limit: 1, filters: [['uuid','is_a',['arvados#job']]])
+    assert_response :success
+    assert_equal(1, json_response['content'].scan('<tr').count,
+                 "Did not get exactly one row")
+  end
+
+  ['', ' asc', ' desc'].each do |direction|
+    test "projects#show tab partial orders correctly by #{direction}" do
+      _test_tab_content_order direction
+    end
+  end
+
+  def _test_tab_content_order direction
+    get_contents_rows(limit: 100,
+                      order: "created_at#{direction}",
+                      filters: [['uuid','is_a',['arvados#job',
+                                                'arvados#pipelineInstance']]])
+    assert_response :success
+    not_grouped_by_kind = nil
+    last_timestamp = nil
+    last_kind = nil
+    found_kind = {}
+    json_response['content'].scan /<tr[^>]+>/ do |tr_tag|
+      found_timestamps = 0
+      tr_tag.scan(/\ data-object-created-at=\"(.*?)\"/).each do |t,|
+        if last_timestamp
+          correct_operator = / desc$/ =~ direction ? :>= : :<=
+          assert_operator(last_timestamp, correct_operator, t,
+                          "Rows are not sorted by created_at#{direction}")
+        end
+        last_timestamp = t
+        found_timestamps += 1
+      end
+      assert_equal(1, found_timestamps,
+                   "Content row did not have exactly one timestamp")
+
+      # Confirm that the test for timestamp ordering couldn't have
+      # passed merely because the test fixtures have convenient
+      # timestamps (e.g., there is only one pipeline and one job in
+      # the project being tested, or there are no pipelines at all in
+      # the project being tested):
+      tr_tag.scan /\ data-kind=\"(.*?)\"/ do |kind|
+        if last_kind and last_kind != kind and found_kind[kind]
+          # We saw this kind before, then a different kind, then
+          # this kind again. That means objects are not grouped by
+          # kind.
+          not_grouped_by_kind = true
+        end
+        found_kind[kind] ||= 0
+        found_kind[kind] += 1
+        last_kind = kind
+      end
+    end
+    assert_equal(true, not_grouped_by_kind,
+                 "Could not confirm that results are not grouped by kind")
+  end
+
+  def get_contents_rows params
+    params = {
+      id: api_fixture('users')['active']['uuid'],
+      partial: :contents_rows,
+      format: :json,
+    }.merge(params)
+    encoded_params = Hash[params.map { |k,v|
+                            [k, (v.is_a?(Array) || v.is_a?(Hash)) ? v.to_json : v]
+                          }]
+    get :show, encoded_params, session_for(:active)
+  end
+end
diff --git a/apps/workbench/test/controllers/repositories_controller_test.rb b/apps/workbench/test/controllers/repositories_controller_test.rb
new file mode 100644 (file)
index 0000000..15d28bf
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class RepositoriesControllerTest < ActionController::TestCase
+end
diff --git a/apps/workbench/test/controllers/search_controller_test.rb b/apps/workbench/test/controllers/search_controller_test.rb
new file mode 100644 (file)
index 0000000..a09d966
--- /dev/null
@@ -0,0 +1,42 @@
+require 'test_helper'
+
+class SearchControllerTest < ActionController::TestCase
+  # These tests don't do state-changing API calls. Save some time by
+  # skipping the database reset.
+  reset_api_fixtures :after_each_test, false
+  reset_api_fixtures :after_suite, true
+
+  include Rails.application.routes.url_helpers
+
+  test 'Get search dialog' do
+    xhr :get, :choose, {
+      format: :js,
+      title: 'Search',
+      action_name: 'Show',
+      action_href: url_for(host: 'localhost', controller: :actions, action: :show),
+      action_data: {}.to_json,
+    }, session_for(:active)
+    assert_response :success
+  end
+
+  test 'Get search results for all projects' do
+    xhr :get, :choose, {
+      format: :json,
+      partial: true,
+    }, session_for(:active)
+    assert_response :success
+    assert_not_empty(json_response['content'],
+                     'search results for all projects should not be empty')
+  end
+
+  test 'Get search results for empty project' do
+    xhr :get, :choose, {
+      format: :json,
+      partial: true,
+      project_uuid: api_fixture('groups')['empty_project']['uuid'],
+    }, session_for(:active)
+    assert_response :success
+    assert_empty(json_response['content'],
+                 'search results for empty project should be empty')
+  end
+end
diff --git a/apps/workbench/test/controllers/sessions_controller_test.rb b/apps/workbench/test/controllers/sessions_controller_test.rb
new file mode 100644 (file)
index 0000000..a6113da
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class SessionsControllerTest < ActionController::TestCase
+end
diff --git a/apps/workbench/test/controllers/specimens_controller_test.rb b/apps/workbench/test/controllers/specimens_controller_test.rb
new file mode 100644 (file)
index 0000000..c677fa7
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class SpecimensControllerTest < ActionController::TestCase
+end
diff --git a/apps/workbench/test/controllers/traits_controller_test.rb b/apps/workbench/test/controllers/traits_controller_test.rb
new file mode 100644 (file)
index 0000000..97c6642
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class TraitsControllerTest < ActionController::TestCase
+end
diff --git a/apps/workbench/test/controllers/user_agreements_controller_test.rb b/apps/workbench/test/controllers/user_agreements_controller_test.rb
new file mode 100644 (file)
index 0000000..5c75ac8
--- /dev/null
@@ -0,0 +1,16 @@
+require 'test_helper'
+
+class UserAgreementsControllerTest < ActionController::TestCase
+  test 'User agreements page shows form if some user agreements are not signed' do
+    get :index, {}, session_for(:inactive)
+    assert_response 200
+  end
+
+  test 'User agreements page redirects if all user agreements signed' do
+    get :index, {return_to: root_path}, session_for(:active)
+    assert_response :redirect
+    assert_equal(root_url,
+                 @response.redirect_url,
+                 "Active user was not redirected to :return_to param")
+  end
+end
diff --git a/apps/workbench/test/controllers/users_controller_test.rb b/apps/workbench/test/controllers/users_controller_test.rb
new file mode 100644 (file)
index 0000000..213a2a5
--- /dev/null
@@ -0,0 +1,43 @@
+require 'test_helper'
+
+class UsersControllerTest < ActionController::TestCase
+  test "valid token works in controller test" do
+    get :index, {}, session_for(:active)
+    assert_response :success
+  end
+
+  test "ignore previously valid token (for deleted user), don't crash" do
+    get :activity, {}, session_for(:valid_token_deleted_user)
+    assert_response :redirect
+    assert_match /^#{Rails.configuration.arvados_login_base}/, @response.redirect_url
+    assert_nil assigns(:my_jobs)
+    assert_nil assigns(:my_ssh_keys)
+  end
+
+  test "expired token redirects to api server login" do
+    get :show, {
+      id: api_fixture('users')['active']['uuid']
+    }, session_for(:expired_trustedclient)
+    assert_response :redirect
+    assert_match /^#{Rails.configuration.arvados_login_base}/, @response.redirect_url
+    assert_nil assigns(:my_jobs)
+    assert_nil assigns(:my_ssh_keys)
+  end
+
+  test "show welcome page if no token provided" do
+    get :index, {}
+    assert_response :redirect
+    assert_match /\/users\/welcome/, @response.redirect_url
+  end
+
+  test "show repositories with read, write, or manage permission" do
+    get :manage_account, {}, session_for(:active)
+    assert_response :success
+    repos = assigns(:my_repositories)
+    assert repos
+    assert_not_empty repos, "my_repositories should not be empty"
+    editables = repos.collect { |r| !!assigns(:repo_writable)[r.uuid] }
+    assert_includes editables, true, "should have a writable repository"
+    assert_includes editables, false, "should have a readonly repository"
+  end
+end
diff --git a/apps/workbench/test/controllers/virtual_machines_controller_test.rb b/apps/workbench/test/controllers/virtual_machines_controller_test.rb
new file mode 100644 (file)
index 0000000..54fe206
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class VirtualMachinesControllerTest < ActionController::TestCase
+end
diff --git a/apps/workbench/test/diagnostics/pipeline_test.rb b/apps/workbench/test/diagnostics/pipeline_test.rb
new file mode 100644 (file)
index 0000000..eb9cfe5
--- /dev/null
@@ -0,0 +1,94 @@
+require 'diagnostics_test_helper'
+
+class PipelineTest < DiagnosticsTest
+  pipelines_to_test = Rails.configuration.pipelines_to_test.andand.keys
+
+  setup do
+    need_selenium 'to make websockets work'
+  end
+
+  pipelines_to_test.andand.each do |pipeline_to_test|
+    test "run pipeline: #{pipeline_to_test}" do
+      visit_page_with_token 'active'
+      pipeline_config = Rails.configuration.pipelines_to_test[pipeline_to_test]
+
+      # Search for tutorial template
+      find '.navbar-fixed-top'
+      within('.navbar-fixed-top') do
+        page.find_field('search').set pipeline_config['template_uuid']
+        page.find('.glyphicon-search').click
+      end
+
+      # Run the pipeline
+      assert_triggers_dom_event 'shown.bs.modal' do
+        find('a,button', text: 'Run').click
+      end
+
+      # Choose project
+      within('.modal-dialog') do
+        find('.selectable', text: 'Home').click
+        find('button', text: 'Choose').click
+      end
+
+      page.assert_selector('a.disabled,button.disabled', text: 'Run') if pipeline_config['input_paths'].any?
+
+      # Choose input for the pipeline
+      pipeline_config['input_paths'].each do |look_for|
+        select_input look_for
+      end
+      wait_for_ajax
+
+      # All needed input are filled in. Run this pipeline now
+      find('a,button', text: 'Run').click
+
+      # Pipeline is running. We have a "Stop" button instead now.
+      page.assert_selector 'a,button', text: 'Pause'
+
+      # Wait for pipeline run to complete
+      wait_until_page_has 'Complete', pipeline_config['max_wait_seconds']
+    end
+  end
+
+  def select_input look_for
+    inputs_needed = page.all('.btn', text: 'Choose')
+    return if (!inputs_needed || !inputs_needed.any?)
+
+    look_for_uuid = nil
+    look_for_file = nil
+    if look_for.andand.index('/').andand.>0
+      partitions = look_for.partition('/')
+      look_for_uuid = partitions[0]
+      look_for_file = partitions[2]
+    else
+      look_for_uuid = look_for
+      look_for_file = nil
+    end
+
+    assert_triggers_dom_event 'shown.bs.modal' do
+      inputs_needed[0].click
+    end
+
+    within('.modal-dialog') do
+      if look_for_uuid
+        fill_in('Search', with: look_for_uuid, exact: true)
+        wait_for_ajax
+      end
+             
+      page.all('.selectable').first.click
+      wait_for_ajax
+      # ajax reload is wiping out input selection after search results; so, select again.
+      page.all('.selectable').first.click
+      wait_for_ajax
+
+      if look_for_file
+        wait_for_ajax
+        within('.collection_files_name', text: look_for_file) do
+          find('.fa-file').click
+        end
+      end
+      
+      find('button', text: 'OK').click
+      wait_for_ajax
+    end
+  end
+end
diff --git a/apps/workbench/test/diagnostics_test_helper.rb b/apps/workbench/test/diagnostics_test_helper.rb
new file mode 100644 (file)
index 0000000..c7433bb
--- /dev/null
@@ -0,0 +1,32 @@
+require 'integration_helper'
+require 'yaml'
+
+# Diagnostics tests are executed when "RAILS_ENV=diagnostics" is used.
+# When "RAILS_ENV=test" is used, tests in the "diagnostics" directory
+# will not be executed.
+
+# Command to run diagnostics tests:
+#   RAILS_ENV=diagnostics bundle exec rake TEST=test/diagnostics/**/*.rb
+
+class DiagnosticsTest < ActionDispatch::IntegrationTest
+
+  # Prepends workbench URL to the path provided and visits that page
+  # Expects path parameters such as "/collections/<uuid>"
+  def visit_page_with_token token_name, path='/'
+    workbench_url = Rails.configuration.arvados_workbench_url
+    if workbench_url.end_with? '/'
+      workbench_url = workbench_url[0, workbench_url.size-1]
+    end
+    tokens = Rails.configuration.user_tokens
+    visit page_with_token(tokens[token_name], (workbench_url + path))
+  end
+
+  # Looks for the text_to_look_for for up to the max_time provided
+  def wait_until_page_has text_to_look_for, max_time=30
+    max_time = 30 if (!max_time || (max_time.to_s != max_time.to_i.to_s))
+    Timeout.timeout(max_time) do
+      loop until page.has_text?(text_to_look_for)
+    end
+  end
+
+end
diff --git a/apps/workbench/test/fixtures/.gitkeep b/apps/workbench/test/fixtures/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/workbench/test/helpers/pipeline_instances_helper_test.rb b/apps/workbench/test/helpers/pipeline_instances_helper_test.rb
new file mode 100644 (file)
index 0000000..a785683
--- /dev/null
@@ -0,0 +1,38 @@
+require 'test_helper'
+
+class PipelineInstancesHelperTest < ActionView::TestCase
+  test "one" do
+    r = [{started_at: 1, finished_at: 3}]
+    assert_equal 2, determine_wallclock_runtime(r)
+
+    r = [{started_at: 1, finished_at: 5}]
+    assert_equal 4, determine_wallclock_runtime(r)
+
+    r = [{started_at: 1, finished_at: 2}, {started_at: 3, finished_at: 5}]
+    assert_equal 3, determine_wallclock_runtime(r)
+
+    r = [{started_at: 3, finished_at: 5}, {started_at: 1, finished_at: 2}]
+    assert_equal 3, determine_wallclock_runtime(r)
+
+    r = [{started_at: 3, finished_at: 5}, {started_at: 1, finished_at: 2},
+         {started_at: 2, finished_at: 4}]
+    assert_equal 4, determine_wallclock_runtime(r)
+
+    r = [{started_at: 1, finished_at: 5}, {started_at: 2, finished_at: 3}]
+    assert_equal 4, determine_wallclock_runtime(r)
+
+    r = [{started_at: 3, finished_at: 5}, {started_at: 1, finished_at: 4}]
+    assert_equal 4, determine_wallclock_runtime(r)
+
+    r = [{started_at: 1, finished_at: 4}, {started_at: 3, finished_at: 5}]
+    assert_equal 4, determine_wallclock_runtime(r)
+
+    r = [{started_at: 1, finished_at: 4}, {started_at: 3, finished_at: 5},
+         {started_at: 5, finished_at: 8}]
+    assert_equal 7, determine_wallclock_runtime(r)
+
+    r = [{started_at: 1, finished_at: 4}, {started_at: 3, finished_at: 5},
+         {started_at: 6, finished_at: 8}]
+    assert_equal 6, determine_wallclock_runtime(r)
+  end
+end
diff --git a/apps/workbench/test/helpers/search_helper_test.rb b/apps/workbench/test/helpers/search_helper_test.rb
new file mode 100644 (file)
index 0000000..3034163
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class SearchHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/integration/.gitkeep b/apps/workbench/test/integration/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/workbench/test/integration/application_layout_test.rb b/apps/workbench/test/integration/application_layout_test.rb
new file mode 100644 (file)
index 0000000..8a2906a
--- /dev/null
@@ -0,0 +1,139 @@
+require 'integration_helper'
+
+class ApplicationLayoutTest < ActionDispatch::IntegrationTest
+  # These tests don't do state-changing API calls. Save some time by
+  # skipping the database reset.
+  reset_api_fixtures :after_each_test, false
+  reset_api_fixtures :after_suite, true
+
+  setup do
+    need_javascript
+  end
+
+  def verify_homepage user, invited, has_profile
+    profile_config = Rails.configuration.user_profile_form_fields
+
+    if !user
+      assert page.has_text?('Please log in'), 'Not found text - Please log in'
+      assert page.has_text?('The "Log in" button below will show you a Google sign-in page'), 'Not found text - google sign in page'
+      assert page.has_no_text?('My projects'), 'Found text - My projects'
+      assert page.has_link?("Log in to #{Rails.configuration.site_name}"), 'Not found text - log in to'
+    elsif user['is_active']
+      if profile_config && !has_profile
+        assert page.has_text?('Save profile'), 'No text - Save profile'
+      else
+        assert page.has_link?("Projects"), 'Not found link - Projects'
+        page.find("#projects-menu").click
+        assert page.has_text?('Projects shared with me'), 'Not found text - Project shared with me'
+      end
+    elsif invited
+      assert page.has_text?('Please check the box below to indicate that you have read and accepted the user agreement'), 'Not found text - Please check the box below . . .'
+    else
+      assert page.has_text?('Your account is inactive'), 'Not found text - Your account is inactive'
+    end
+
+    within('.navbar-fixed-top') do
+      if !user
+        assert page.has_link?('Log in'), 'Not found link - Log in'
+      else
+        # my account menu
+        assert page.has_link?("#{user['email']}"), 'Not found link - email'
+        find('a', text: "#{user['email']}").click
+        within('.dropdown-menu') do
+          if user['is_active']
+            assert page.has_no_link?('Not active'), 'Found link - Not active'
+            assert page.has_no_link?('Sign agreements'), 'Found link - Sign agreements'
+
+            assert page.has_link?('Manage account'), 'No link - Manage account'
+
+            if profile_config
+              assert page.has_link?('Manage profile'), 'No link - Manage profile'
+            else
+              assert page.has_no_link?('Manage profile'), 'Found link - Manage profile'
+            end
+          else
+            assert page.has_no_link?('Manage account'), 'Found link - Manage account'
+            assert page.has_no_link?('Manage profile'), 'Found link - Manage profile'
+          end
+          assert page.has_link?('Log out'), 'No link - Log out'
+        end
+      end
+    end
+  end
+
+  # test the help menu
+  def check_help_menu
+    within('.navbar-fixed-top') do
+      page.find("#arv-help").click
+      within('.dropdown-menu') do
+        assert page.has_link?('Tutorials and User guide'), 'No link - Tutorials and User guide'
+        assert page.has_link?('API Reference'), 'No link - API Reference'
+        assert page.has_link?('SDK Reference'), 'No link - SDK Reference'
+        assert page.has_link?('Show version / debugging info ...'), 'No link - Show version / debugging info'
+        assert page.has_link?('Report a problem ...'), 'No link - Report a problem'
+        # Version info and Report a problem are tested in "report_issue_test.rb"
+      end
+    end
+  end
+
+  def verify_system_menu user
+    if user && user['is_admin']
+      assert page.has_link?('system-menu'), 'No link - system menu'
+      within('.navbar-fixed-top') do
+        page.find("#system-menu").click
+        within('.dropdown-menu') do
+          assert page.has_text?('Groups'), 'No text - Groups'
+          assert page.has_link?('Repositories'), 'No link - Repositories'
+          assert page.has_link?('Virtual machines'), 'No link - Virtual machines'
+          assert page.has_link?('SSH keys'), 'No link - SSH keys'
+          assert page.has_link?('API tokens'), 'No link - API tokens'
+          find('a', text: 'Users').click
+        end
+      end
+      assert page.has_text? 'Add a new user'
+    else
+      assert page.has_no_link?('system-menu'), 'Found link - system menu'
+    end
+  end
+
+  [
+    [nil, nil, false, false],
+    ['inactive', api_fixture('users')['inactive'], true, false],
+    ['inactive_uninvited', api_fixture('users')['inactive_uninvited'], false, false],
+    ['active', api_fixture('users')['active'], true, true],
+    ['admin', api_fixture('users')['admin'], true, true],
+    ['active_no_prefs', api_fixture('users')['active_no_prefs'], true, false],
+    ['active_no_prefs_profile', api_fixture('users')['active_no_prefs_profile'], true, false],
+  ].each do |token, user, invited, has_profile|
+
+    test "visit home page for user #{token}" do
+      if !token
+        visit ('/')
+      else
+        visit page_with_token(token)
+      end
+
+      verify_homepage user, invited, has_profile
+    end
+
+    test "check help for user #{token}" do
+      if !token
+        visit ('/')
+      else
+        visit page_with_token(token)
+      end
+
+      check_help_menu
+    end
+
+    test "test system menu for user #{token}" do
+      if !token
+        visit ('/')
+      else
+        visit page_with_token(token)
+      end
+
+      verify_system_menu user
+    end
+  end
+end
diff --git a/apps/workbench/test/integration/collection_upload_test.rb b/apps/workbench/test/integration/collection_upload_test.rb
new file mode 100644 (file)
index 0000000..a240576
--- /dev/null
@@ -0,0 +1,96 @@
+require 'integration_helper'
+
+class CollectionUploadTest < ActionDispatch::IntegrationTest
+  setup do
+    testfiles.each do |filename, content|
+      open(testfile_path(filename), 'w') do |io|
+        io.write content
+      end
+    end
+  end
+
+  teardown do
+    testfiles.each do |filename, _|
+      File.unlink(testfile_path filename)
+    end
+  end
+
+  test "Create new collection using upload button" do
+    need_javascript
+    visit page_with_token 'active', aproject_path
+    find('.btn', text: 'Add data').click
+    click_link 'Upload files from my computer'
+    # Should be looking at a new empty collection.
+    assert_text 'New collection'
+    assert_text 'd41d8cd98f00b204e9800998ecf8427e+0'
+    # The "Upload" tab should be active and loaded.
+    assert_selector 'div#Upload.active div.panel'
+  end
+
+  test "No Upload tab on non-writable collection" do
+    need_javascript
+    visit(page_with_token 'active',
+          '/collections/'+api_fixture('collections')['user_agreement']['uuid'])
+    assert_no_selector '.nav-tabs Upload'
+  end
+
+  test "Upload two empty files with the same name" do
+    need_selenium "to make file uploads work"
+    visit page_with_token 'active', sandbox_path
+    find('.nav-tabs a', text: 'Upload').click
+    attach_file 'file_selector', testfile_path('empty.txt')
+    assert_selector 'div', text: 'empty.txt'
+    attach_file 'file_selector', testfile_path('empty.txt')
+    assert_selector 'div.row div span[title]', text: 'empty.txt', count: 2
+    click_button 'Start'
+    assert_text :visible, 'Done!'
+    visit sandbox_path+'.json'
+    assert_match /_text":"\. d41d8\S+ 0:0:empty.txt\\n\. d41d8\S+ 0:0:empty\\\\040\(1\).txt\\n"/, body
+  end
+
+  test "Upload non-empty files, report errors" do
+    need_selenium "to make file uploads work"
+    visit page_with_token 'active', sandbox_path
+    find('.nav-tabs a', text: 'Upload').click
+    attach_file 'file_selector', testfile_path('a')
+    attach_file 'file_selector', testfile_path('foo.txt')
+    assert_selector 'button:not([disabled])', text: 'Start'
+    click_button 'Start'
+    if "test environment does not have a keepproxy yet, see #4534" != "fixed"
+      using_wait_time 20 do
+        assert_text :visible, 'error'
+      end
+    else
+      assert_text :visible, 'Done!'
+      visit sandbox_path+'.json'
+      assert_match /_text":"\. 0cc1\S+ 0:1:a\\n\. acbd\S+ 0:3:foo.txt\\n"/, body
+    end
+  end
+
+  protected
+
+  def aproject_path
+    '/projects/' + api_fixture('groups')['aproject']['uuid']
+  end
+
+  def sandbox_uuid
+    api_fixture('collections')['upload_sandbox']['uuid']
+  end
+
+  def sandbox_path
+    '/collections/' + sandbox_uuid
+  end
+
+  def testfiles
+    {
+      'empty.txt' => '',
+      'a' => 'a',
+      'foo.txt' => 'foo'
+    }
+  end
+
+  def testfile_path filename
+    # Must be an absolute path. https://github.com/jnicklas/capybara/issues/621
+    File.join Dir.getwd, 'tmp', filename
+  end
+end
diff --git a/apps/workbench/test/integration/collections_test.rb b/apps/workbench/test/integration/collections_test.rb
new file mode 100644 (file)
index 0000000..4338d19
--- /dev/null
@@ -0,0 +1,343 @@
+require 'integration_helper'
+
+class CollectionsTest < ActionDispatch::IntegrationTest
+  setup do
+    need_javascript
+  end
+
+  # check_checkboxes_state asserts that the page holds at least one
+  # checkbox matching 'selector', and that all matching checkboxes
+  # are in state 'checkbox_status' (i.e. checked if true, unchecked otherwise)
+  def assert_checkboxes_state(selector, checkbox_status, msg=nil)
+    assert page.has_selector?(selector)
+    page.all(selector).each do |checkbox|
+      assert(checkbox.checked? == checkbox_status, msg)
+    end
+  end
+
+  test "Can copy a collection to a project" do
+    collection_uuid = api_fixture('collections')['foo_file']['uuid']
+    collection_name = api_fixture('collections')['foo_file']['name']
+    project_uuid = api_fixture('groups')['aproject']['uuid']
+    project_name = api_fixture('groups')['aproject']['name']
+    visit page_with_token('active', "/collections/#{collection_uuid}")
+    click_link 'Copy to project...'
+    find('.selectable', text: project_name).click
+    find('.modal-footer a,button', text: 'Copy').click
+    wait_for_ajax
+    # It should navigate to the project after copying...
+    assert(page.has_text?(project_name))
+    assert(page.has_text?("Copy of #{collection_name}"))
+  end
+
+  test "Collection page renders name" do
+    Capybara.current_driver = :rack_test
+    uuid = api_fixture('collections')['foo_file']['uuid']
+    coll_name = api_fixture('collections')['foo_file']['name']
+    visit page_with_token('active', "/collections/#{uuid}")
+    assert(page.has_text?(coll_name), "Collection page did not include name")
+    # Now check that the page is otherwise normal, and the collection name
+    # isn't only showing up in an error message.
+    assert(page.has_link?('foo'), "Collection page did not include file link")
+  end
+
+  def check_sharing(want_state, link_regexp)
+    # We specifically want to click buttons.  See #4291.
+    if want_state == :off
+      click_button "Unshare"
+      text_assertion = :assert_no_text
+      link_assertion = :assert_empty
+    else
+      click_button "Create sharing link"
+      text_assertion = :assert_text
+      link_assertion = :refute_empty
+    end
+    using_wait_time(Capybara.default_wait_time * 3) do
+      send(text_assertion, "Shared at:")
+    end
+    send(link_assertion, all("a").select { |a| a[:href] =~ link_regexp })
+  end
+
+  test "creating and uncreating a sharing link" do
+    coll_uuid = api_fixture("collections", "collection_owned_by_active", "uuid")
+    download_link_re =
+      Regexp.new(Regexp.escape("/collections/download/#{coll_uuid}/"))
+    visit page_with_token("active_trustedclient", "/collections/#{coll_uuid}")
+    within "#sharing-button" do
+      check_sharing(:on, download_link_re)
+      check_sharing(:off, download_link_re)
+    end
+  end
+
+  test "can download an entire collection with a reader token" do
+    Capybara.current_driver = :rack_test
+    CollectionsController.any_instance.
+      stubs(:file_enumerator).returns(["foo\n", "file\n"])
+    uuid = api_fixture('collections')['foo_file']['uuid']
+    token = api_fixture('api_client_authorizations')['active_all_collections']['api_token']
+    url_head = "/collections/download/#{uuid}/#{token}/"
+    visit url_head
+    # It seems that Capybara can't inspect tags outside the body, so this is
+    # a very blunt approach.
+    assert_no_match(/<\s*meta[^>]+\bnofollow\b/i, page.html,
+                    "wget prohibited from recursing the collection page")
+    # Look at all the links that wget would recurse through using our
+    # recommended options, and check that it's exactly the file list.
+    hrefs = page.all('a').map do |anchor|
+      link = anchor[:href] || ''
+      if link.start_with? url_head
+        link[url_head.size .. -1]
+      elsif link.start_with? '/'
+        nil
+      else
+        link
+      end
+    end
+    assert_equal(['foo'], hrefs.compact.sort,
+                 "download page did provide strictly file links")
+    within "#collection_files" do
+      click_link "foo"
+      assert_equal("foo\nfile\n", page.html)
+    end
+  end
+
+  test "can view empty collection" do
+    Capybara.current_driver = :rack_test
+    uuid = 'd41d8cd98f00b204e9800998ecf8427e+0'
+    visit page_with_token('active', "/collections/#{uuid}")
+    assert page.has_text?(/This collection is empty|The following collections have this content/)
+  end
+
+  test "combine selected collections into new collection" do
+    foo_collection = api_fixture('collections')['foo_file']
+    bar_collection = api_fixture('collections')['bar_file']
+
+    visit page_with_token('active', "/collections")
+
+    assert(page.has_text?(foo_collection['uuid']), "Collection page did not include foo file")
+    assert(page.has_text?(bar_collection['uuid']), "Collection page did not include bar file")
+
+    within('tr', text: foo_collection['uuid']) do
+      find('input[type=checkbox]').click
+    end
+
+    within('tr', text: bar_collection['uuid']) do
+      find('input[type=checkbox]').click
+    end
+
+    click_button 'Selection...'
+    within('.selection-action-container') do
+      click_link 'Create new collection with selected collections'
+    end
+
+    # now in the newly created collection page
+    assert(page.has_text?('Copy to project'), "Copy to project text not found in new collection page")
+    assert(page.has_no_text?(foo_collection['name']), "Collection page did not include foo file")
+    assert(page.has_text?('foo'), "Collection page did not include foo file")
+    assert(page.has_no_text?(bar_collection['name']), "Collection page did not include foo file")
+    assert(page.has_text?('bar'), "Collection page did not include bar file")
+    assert(page.has_text?('Created new collection in your Home project'),
+                          'Not found flash message that new collection is created in Home project')
+  end
+
+  [
+    ['active', 'foo_file', false],
+    ['active', 'foo_collection_in_aproject', true],
+    ['project_viewer', 'foo_file', false],
+    ['project_viewer', 'foo_collection_in_aproject', false], #aproject not writable
+  ].each do |user, collection, expect_collection_in_aproject|
+    test "combine selected collection files into new collection #{user} #{collection} #{expect_collection_in_aproject}" do
+      my_collection = api_fixture('collections')[collection]
+
+      visit page_with_token(user, "/collections")
+
+      # choose file from foo collection
+      within('tr', text: my_collection['uuid']) do
+        click_link 'Show'
+      end
+
+      # now in collection page
+      find('input[type=checkbox]').click
+
+      click_button 'Selection...'
+      within('.selection-action-container') do
+        click_link 'Create new collection with selected files'
+      end
+
+      # now in the newly created collection page
+      assert(page.has_text?('Copy to project'), "Copy to project text not found in new collection page")
+      assert(page.has_no_text?(my_collection['name']), "Collection page did not include foo file")
+      assert(page.has_text?('foo'), "Collection page did not include foo file")
+      if expect_collection_in_aproject
+        aproject = api_fixture('groups')['aproject']
+        assert page.has_text?("Created new collection in the project #{aproject['name']}"),
+                              'Not found flash message that new collection is created in aproject'
+      else
+        assert page.has_text?("Created new collection in your Home project"),
+                              'Not found flash message that new collection is created in Home project'
+      end
+    end
+  end
+
+  test "combine selected collection files from collection subdirectory" do
+    visit page_with_token('user1_with_load', "/collections/zzzzz-4zz18-filesinsubdir00")
+
+    # now in collection page
+    input_files = page.all('input[type=checkbox]')
+    (0..input_files.count-1).each do |i|
+      input_files[i].click
+    end
+
+    click_button 'Selection...'
+    within('.selection-action-container') do
+      click_link 'Create new collection with selected files'
+    end
+
+    # now in the newly created collection page
+    assert(page.has_text?('file_in_subdir1'), 'file not found - file_in_subdir1')
+    assert(page.has_text?('file1_in_subdir3.txt'), 'file not found - file1_in_subdir3.txt')
+    assert(page.has_text?('file2_in_subdir3.txt'), 'file not found - file2_in_subdir3.txt')
+    assert(page.has_text?('file1_in_subdir4.txt'), 'file not found - file1_in_subdir4.txt')
+    assert(page.has_text?('file2_in_subdir4.txt'), 'file not found - file1_in_subdir4.txt')
+  end
+
+  test "Collection portable data hash redirect" do
+    di = api_fixture('collections')['docker_image']
+    visit page_with_token('active', "/collections/#{di['portable_data_hash']}")
+
+    # check redirection
+    assert current_path.end_with?("/collections/#{di['uuid']}")
+    assert page.has_text?("docker_image")
+    assert page.has_text?("Activity")
+    assert page.has_text?("Sharing and permissions")
+  end
+
+  test "Collection portable data hash with multiple matches" do
+    pdh = api_fixture('collections')['baz_file']['portable_data_hash']
+    visit page_with_token('admin', "/collections/#{pdh}")
+
+    matches = api_fixture('collections').select {|k,v| v["portable_data_hash"] == pdh}
+    assert matches.size > 1
+
+    matches.each do |k,v|
+      assert page.has_link?(v["name"]), "Page /collections/#{pdh} should contain link '#{v['name']}'"
+    end
+    assert page.has_no_text?("Activity")
+    assert page.has_no_text?("Sharing and permissions")
+  end
+
+  test "Filtering collection files by regexp" do
+    col = api_fixture('collections', 'multilevel_collection_1')
+    visit page_with_token('active', "/collections/#{col['uuid']}")
+
+    # Filter file list to some but not all files in the collection
+    page.find_field('file_regex').set('file[12]')
+    assert page.has_text?("file1")
+    assert page.has_text?("file2")
+    assert page.has_no_text?("file3")
+
+    # Filter file list with a regex matching all files
+    page.find_field('file_regex').set('.*')
+    assert page.has_text?("file1")
+    assert page.has_text?("file2")
+    assert page.has_text?("file3")
+
+    # Filter file list to a regex matching no files
+    page.find_field('file_regex').set('file9')
+    assert page.has_no_text?("file1")
+    assert page.has_no_text?("file2")
+    assert page.has_no_text?("file3")
+    # make sure that we actually are looking at the collections
+    # page and not e.g. a fiddlesticks
+    assert page.has_text?("multilevel_collection_1")
+    assert page.has_text?(col['portable_data_hash'])
+
+    # Set filename filter to a syntactically invalid regex
+    # Page loads, but stops filtering after the last valid regex parse
+    page.find_field('file_regex').set('file[2')
+    assert page.has_text?("multilevel_collection_1")
+    assert page.has_text?(col['portable_data_hash'])
+    assert page.has_text?("file1")
+    assert page.has_text?("file2")
+    assert page.has_text?("file3")
+
+    # Test the "Select all" button
+
+    # Note: calling .set('') on a Selenium element is not sufficient
+    # to reset the field for this test, as it does not send any key
+    # events to the browser. To clear the field, we must instead send
+    # a backspace character.
+    # See https://selenium.googlecode.com/svn/trunk/docs/api/rb/Selenium/WebDriver/Element.html#clear-instance_method
+    page.find_field('file_regex').set("\b") # backspace
+    find('button#select-all').click
+    assert_checkboxes_state('input[type=checkbox]', true, '"select all" should check all checkboxes')
+
+    # Test the "Unselect all" button
+    page.find_field('file_regex').set("\b") # backspace
+    find('button#unselect-all').click
+    assert_checkboxes_state('input[type=checkbox]', false, '"unselect all" should clear all checkboxes')
+
+    # Filter files, then "select all", then unfilter
+    page.find_field('file_regex').set("\b") # backspace
+    find('button#unselect-all').click
+    page.find_field('file_regex').set('file[12]')
+    find('button#select-all').click
+    page.find_field('file_regex').set("\b") # backspace
+
+    # all "file1" and "file2" checkboxes must be selected
+    # all "file3" checkboxes must be clear
+    assert_checkboxes_state('[value*="file1"]', true, 'checkboxes for file1 should be selected after filtering')
+    assert_checkboxes_state('[value*="file2"]', true, 'checkboxes for file2 should be selected after filtering')
+    assert_checkboxes_state('[value*="file3"]', false, 'checkboxes for file3 should be clear after filtering')
+
+    # Select all files, then filter, then "unselect all", then unfilter
+    page.find_field('file_regex').set("\b") # backspace
+    find('button#select-all').click
+    page.find_field('file_regex').set('file[12]')
+    find('button#unselect-all').click
+    page.find_field('file_regex').set("\b") # backspace
+
+    # all "file1" and "file2" checkboxes must be clear
+    # all "file3" checkboxes must be selected
+    assert_checkboxes_state('[value*="file1"]', false, 'checkboxes for file1 should be clear after filtering')
+    assert_checkboxes_state('[value*="file2"]', false, 'checkboxes for file2 should be clear after filtering')
+    assert_checkboxes_state('[value*="file3"]', true, 'checkboxes for file3 should be selected after filtering')
+  end
+
+  test "Creating collection from list of filtered files" do
+    col = api_fixture('collections', 'collection_with_files_in_subdir')
+    visit page_with_token('user1_with_load', "/collections/#{col['uuid']}")
+    assert page.has_text?('file_in_subdir1'), 'expected file_in_subdir1 not found'
+    assert page.has_text?('file1_in_subdir3'), 'expected file1_in_subdir3 not found'
+    assert page.has_text?('file2_in_subdir3'), 'expected file2_in_subdir3 not found'
+    assert page.has_text?('file1_in_subdir4'), 'expected file1_in_subdir4 not found'
+    assert page.has_text?('file2_in_subdir4'), 'expected file2_in_subdir4 not found'
+
+    # Select all files but then filter them to files in subdir1, subdir2 or subdir3
+    find('button#select-all').click
+    page.find_field('file_regex').set('_in_subdir[123]')
+    assert page.has_text?('file_in_subdir1'), 'expected file_in_subdir1 not in filtered files'
+    assert page.has_text?('file1_in_subdir3'), 'expected file1_in_subdir3 not in filtered files'
+    assert page.has_text?('file2_in_subdir3'), 'expected file2_in_subdir3 not in filtered files'
+    assert page.has_no_text?('file1_in_subdir4'), 'file1_in_subdir4 found in filtered files'
+    assert page.has_no_text?('file2_in_subdir4'), 'file2_in_subdir4 found in filtered files'
+
+    # Create a new collection
+    click_button 'Selection...'
+    within('.selection-action-container') do
+      click_link 'Create new collection with selected files'
+    end
+
+    # now in the newly created collection page
+    assert page.has_text?('Content hash:'), 'not on new collection page'
+    assert page.has_no_text?(col['uuid']), 'new collection page has old collection uuid'
+    assert page.has_no_text?(col['portable_data_hash']), 'new collection page has old portable_data_hash'
+
+    # must have files in subdir1 and subdir3 but not subdir4
+    assert page.has_text?('file_in_subdir1'), 'file_in_subdir1 missing from new collection'
+    assert page.has_text?('file1_in_subdir3'), 'file1_in_subdir3 missing from new collection'
+    assert page.has_text?('file2_in_subdir3'), 'file2_in_subdir3 missing from new collection'
+    assert page.has_no_text?('file1_in_subdir4'), 'file1_in_subdir4 found in new collection'
+    assert page.has_no_text?('file2_in_subdir4'), 'file2_in_subdir4 found in new collection'
+  end
+end
diff --git a/apps/workbench/test/integration/errors_test.rb b/apps/workbench/test/integration/errors_test.rb
new file mode 100644 (file)
index 0000000..03c359e
--- /dev/null
@@ -0,0 +1,129 @@
+require 'integration_helper'
+
+class ErrorsTest < ActionDispatch::IntegrationTest
+  setup do
+    need_javascript
+  end
+
+  BAD_UUID = "ffffffffffffffffffffffffffffffff+0"
+
+  test "error page renders user navigation" do
+    visit(page_with_token("active", "/collections/#{BAD_UUID}"))
+    assert(page.has_text?(api_fixture("users")["active"]["email"]),
+           "User information missing from error page")
+    assert(page.has_no_text?(/log ?in/i),
+           "Logged in user prompted to log in on error page")
+  end
+
+  test "no user navigation with expired token" do
+    visit(page_with_token("expired", "/collections/#{BAD_UUID}"))
+    assert(page.has_no_text?(api_fixture("users")["active"]["email"]),
+           "Page visited with expired token included user information")
+    assert(page.has_selector?("a", text: /log ?in/i),
+           "Login prompt missing on expired token error page")
+  end
+
+  test "error page renders without login" do
+    visit "/collections/download/#{BAD_UUID}/#{@@API_AUTHS['active']['api_token']}"
+    assert(page.has_no_text?(/\b500\b/),
+           "Error page without login returned 500")
+  end
+
+  test "'object not found' page includes search link" do
+    visit(page_with_token("active", "/collections/#{BAD_UUID}"))
+    assert(all("a").any? { |a| a[:href] =~ %r{/collections/?(\?|$)} },
+           "no search link found on 404 page")
+  end
+
+  def now_timestamp
+    Time.now.utc.to_i
+  end
+
+  def page_has_error_token?(start_stamp)
+    matching_stamps = (start_stamp .. now_timestamp).to_a.join("|")
+    # Check the page HTML because we really don't care how it's presented.
+    # I think it would even be reasonable to put it in a comment.
+    page.html =~ /\b(#{matching_stamps})\+[0-9A-Fa-f]{8}\b/
+  end
+
+  # We use API tokens with limited scopes as the quickest way to get the API
+  # server to return an error.  If Workbench gets smarter about coping when
+  # it has a too-limited token, these tests will need to be adjusted.
+  test "API error page includes error token" do
+    start_stamp = now_timestamp
+    visit(page_with_token("active_readonly", "/authorized_keys"))
+    click_on "Add a new authorized key"
+    assert(page.has_text?(/fiddlesticks/i),
+           "Not on an error page after making an SSH key out of scope")
+    assert(page_has_error_token?(start_stamp), "no error token on 404 page")
+  end
+
+  test "showing a bad UUID returns 404" do
+    visit(page_with_token("active", "/pipeline_templates/zzz"))
+    assert(page.has_no_text?(/fiddlesticks/i),
+           "trying to show a bad UUID rendered a fiddlesticks page, not 404")
+  end
+
+  test "404 page includes information about missing object" do
+    visit(page_with_token("active", "/groups/zazazaz"))
+    assert(page.has_text?(/group with UUID zazazaz/i),
+           "name of searched group missing from 404 page")
+  end
+
+  test "unrouted 404 page works" do
+    visit(page_with_token("active", "/__asdf/ghjk/zxcv"))
+    assert(page.has_text?(/not found/i),
+           "unrouted page missing 404 text")
+    assert(page.has_no_text?(/fiddlesticks/i),
+           "unrouted request returned a generic error page, not 404")
+  end
+
+  test "API error page has Report problem button" do
+    original_arvados_v1_base = Rails.configuration.arvados_v1_base
+
+    begin
+      # point to a bad api server url to generate fiddlesticks error
+      Rails.configuration.arvados_v1_base = "https://[100::f]:1/"
+
+      visit page_with_token("active")
+
+      assert(page.has_text?(/fiddlesticks/i), 'Expected to be in error page')
+
+      # reset api server base config to let the popup rendering to work
+      Rails.configuration.arvados_v1_base = original_arvados_v1_base
+
+      # check the "Report problem" button
+      assert page.has_link? 'Report problem', 'Report problem link not found'
+
+      click_link 'Report problem'
+      within '.modal-content' do
+        assert page.has_text?('Report a problem'), 'Report a problem text not found'
+        assert page.has_no_text?('Version / debugging info'), 'Version / debugging info is not expected'
+        assert page.has_text?('Describe the problem'), 'Describe the problem text not found'
+        assert page.has_text?('Send problem report'), 'Send problem report button text is not found'
+        assert page.has_no_button?('Send problem report'), 'Send problem report button is not disabled before entering problem description'
+        assert page.has_button?('Cancel'), 'Cancel button not found'
+
+        # enter a report text and click on report
+        page.find_field('report_issue_text').set 'my test report text'
+        assert page.has_button?('Send problem report'), 'Send problem report button not enabled after entering text'
+        click_button 'Send problem report'
+
+        # ajax success updated button texts and added footer message
+        assert page.has_no_text?('Send problem report'), 'Found button - Send problem report'
+        assert page.has_no_button?('Cancel'), 'Found button - Cancel'
+        assert page.has_text?('Report sent'), 'No text - Report sent'
+        assert page.has_button?('Close'), 'No button - Close'
+        assert page.has_text?('Thanks for reporting this issue'), 'No text - Thanks for reporting this issue'
+
+        click_button 'Close'
+      end
+
+      # out of the popup now and should be back in the error page
+      assert(page.has_text?(/fiddlesticks/i), 'Expected to be in error page after closing report issue popup')
+    ensure
+      Rails.configuration.arvados_v1_base = original_arvados_v1_base
+    end
+  end
+
+end
diff --git a/apps/workbench/test/integration/filterable_infinite_scroll_test.rb b/apps/workbench/test/integration/filterable_infinite_scroll_test.rb
new file mode 100644 (file)
index 0000000..b4dadcd
--- /dev/null
@@ -0,0 +1,27 @@
+require 'integration_helper'
+
+class FilterableInfiniteScrollTest < ActionDispatch::IntegrationTest
+  setup do
+    need_javascript
+  end
+
+  # Chrome remembers what you had in the text field when you hit
+  # "back". Here, we simulate the same effect by sending an otherwise
+  # unused ?search=foo param to pre-populate the search field.
+  test 'no double-load if text input has a value at page load time' do
+    visit page_with_token('admin', '/pipeline_instances')
+    assert_text 'pipeline_2'
+    visit page_with_token('admin', '/pipeline_instances?search=pipeline_1')
+    # Horrible hack to ensure the search results can't load correctly
+    # on the second attempt.
+    assert_selector '#recent-pipeline-instances'
+    assert page.evaluate_script('$("#recent-pipeline-instances[data-infinite-content-href0]").attr("data-infinite-content-href0","/give-me-an-error").length == 1')
+    # Wait for the first page of results to appear.
+    assert_text 'pipeline_1'
+    # Make sure the results are filtered.
+    assert_no_text 'pipeline_2'
+    # Make sure pipeline_2 didn't disappear merely because the results
+    # were replaced with an error message.
+    assert_text 'pipeline_1'
+  end
+end
diff --git a/apps/workbench/test/integration/jobs_test.rb b/apps/workbench/test/integration/jobs_test.rb
new file mode 100644 (file)
index 0000000..716e731
--- /dev/null
@@ -0,0 +1,75 @@
+require 'fileutils'
+require 'tmpdir'
+
+require 'integration_helper'
+
+class JobsTest < ActionDispatch::IntegrationTest
+
+  def fakepipe_with_log_data
+    content =
+      "2014-01-01_12:00:01 zzzzz-8i9sb-0vsrcqi7whchuil 0  log message 1\n" +
+      "2014-01-01_12:00:02 zzzzz-8i9sb-0vsrcqi7whchuil 0  log message 2\n" +
+      "2014-01-01_12:00:03 zzzzz-8i9sb-0vsrcqi7whchuil 0  log message 3\n"
+    StringIO.new content, 'r'
+  end
+
+  test "add job description" do
+    need_javascript
+    visit page_with_token("active", "/jobs")
+
+    # go to job running the script "doesnotexist"
+    within first('tr', text: 'doesnotexist') do
+      find("a").click
+    end
+
+    # edit job description
+    within('.arv-description-as-subtitle') do
+      find('.fa-pencil').click
+      find('.editable-input textarea').set('*Textile description for job* - "Go to dashboard":/')
+      find('.editable-submit').click
+    end
+    wait_for_ajax
+
+    # Verify edited description
+    assert page.has_no_text? '*Textile description for job*'
+    assert page.has_text? 'Textile description for job'
+    assert page.has_link? 'Go to dashboard'
+    click_link 'Go to dashboard'
+    assert page.has_text? 'Active pipelines'
+  end
+
+  test "view job log" do
+    need_javascript
+    job = api_fixture('jobs')['job_with_real_log']
+
+    IO.expects(:popen).returns(fakepipe_with_log_data)
+
+    visit page_with_token("active", "/jobs/#{job['uuid']}")
+    assert page.has_text? job['script_version']
+
+    click_link 'Log'
+    wait_for_ajax
+    assert page.has_text? 'Started at'
+    assert page.has_text? 'Finished at'
+    assert page.has_text? 'log message 1'
+    assert page.has_text? 'log message 2'
+    assert page.has_text? 'log message 3'
+    assert page.has_no_text? 'Showing only 100 bytes of this log'
+  end
+
+  test 'view partial job log' do
+    need_javascript
+    # This config will be restored during teardown by ../test_helper.rb:
+    Rails.configuration.log_viewer_max_bytes = 100
+
+    IO.expects(:popen).returns(fakepipe_with_log_data)
+    job = api_fixture('jobs')['job_with_real_log']
+
+    visit page_with_token("active", "/jobs/#{job['uuid']}")
+    assert page.has_text? job['script_version']
+
+    click_link 'Log'
+    wait_for_ajax
+    assert page.has_text? 'Showing only 100 bytes of this log'
+  end
+end
diff --git a/apps/workbench/test/integration/logins_test.rb b/apps/workbench/test/integration/logins_test.rb
new file mode 100644 (file)
index 0000000..2e2de70
--- /dev/null
@@ -0,0 +1,22 @@
+require 'integration_helper'
+
+class LoginsTest < ActionDispatch::IntegrationTest
+  setup do
+    need_javascript
+  end
+
+  test "login with api_token works after redirect" do
+    visit page_with_token('active_trustedclient')
+    assert page.has_text?('Active pipelines'), "Missing 'Active pipelines' from page"
+    assert_no_match(/\bapi_token=/, current_path)
+  end
+
+  test "trying to use expired token redirects to login page" do
+    visit page_with_token('expired_trustedclient')
+    buttons = all("a.btn", text: /Log in/)
+    assert_equal(1, buttons.size, "Failed to find one login button")
+    login_link = buttons.first[:href]
+    assert_match(%r{//[^/]+/login}, login_link)
+    assert_no_match(/\bapi_token=/, login_link)
+  end
+end
diff --git a/apps/workbench/test/integration/pipeline_instances_test.rb b/apps/workbench/test/integration/pipeline_instances_test.rb
new file mode 100644 (file)
index 0000000..bab40cc
--- /dev/null
@@ -0,0 +1,489 @@
+require 'integration_helper'
+
+class PipelineInstancesTest < ActionDispatch::IntegrationTest
+  setup do
+    need_javascript
+  end
+
+  test 'Create and run a pipeline' do
+    visit page_with_token('active_trustedclient')
+
+    visit '/pipeline_templates'
+    within('tr', text: 'Two Part Pipeline Template') do
+      find('a,button', text: 'Run').click
+    end
+
+    # project chooser
+    within('.modal-dialog') do
+      find('.selectable', text: 'A Project').click
+      find('button', text: 'Choose').click
+    end
+
+    # This pipeline needs input. So, Run should be disabled
+    page.assert_selector 'a.disabled,button.disabled', text: 'Run'
+
+    instance_page = current_path
+
+    # Add this collection to the project
+    visit '/projects'
+    find("#projects-menu").click
+    find('.dropdown-menu a,button', text: 'A Project').click
+    find('.btn', text: 'Add data').click
+    find('.dropdown-menu a,button', text: 'Copy data from another project').click
+    within('.modal-dialog') do
+      wait_for_ajax
+      first('span', text: 'foo_tag').click
+      find('.btn', text: 'Copy').click
+    end
+    using_wait_time(Capybara.default_wait_time * 3) do
+      wait_for_ajax
+    end
+
+    click_link 'Jobs and pipelines'
+    find('tr[data-kind="arvados#pipelineInstance"]', text: '(none)').
+      find('a', text: 'Show').
+      click
+
+    assert find('p', text: 'Provide a value')
+
+    find('div.form-group', text: 'Foo/bar pair').
+      find('.btn', text: 'Choose').
+      click
+
+    within('.modal-dialog') do
+      assert(has_text?("Foo/bar pair"),
+             "pipeline input picker missing name of input")
+      wait_for_ajax
+      first('span', text: 'foo_tag').click
+      find('button', text: 'OK').click
+    end
+    wait_for_ajax
+
+    # The input, after being specified, should still be displayed (#3382)
+    assert find('div.form-group', text: 'Foo/bar pair')
+
+    # The input, after being specified, should still be editable (#3382)
+    find('div.form-group', text: 'Foo/bar pair').
+      find('.btn', text: 'Choose').click
+
+    within('.modal-dialog') do
+      assert(has_text?("Foo/bar pair"),
+             "pipeline input picker missing name of input")
+      wait_for_ajax
+      first('span', text: 'foo_tag').click
+      find('button', text: 'OK').click
+    end
+
+    # For good measure, check one last time that the input, after being specified twice, is still be displayed (#3382)
+    assert find('div.form-group', text: 'Foo/bar pair')
+
+    # Ensure that the collection's portable_data_hash, uuid and name
+    # are saved in the desired places. (#4015)
+
+    # foo_collection_in_aproject is the collection tagged with foo_tag.
+    collection = api_fixture('collections', 'foo_collection_in_aproject')
+    click_link 'Advanced'
+    click_link 'API response'
+    api_response = JSON.parse(find('div#advanced_api_response pre').text)
+    input_params = api_response['components']['part-one']['script_parameters']['input']
+    assert_equal input_params['value'], collection['portable_data_hash']
+    assert_equal input_params['selection_name'], collection['name']
+    assert_equal input_params['selection_uuid'], collection['uuid']
+
+    # "Run" button is now enabled
+    page.assert_no_selector 'a.disabled,button.disabled', text: 'Run'
+
+    first('a,button', text: 'Run').click
+
+    # Pipeline is running. We have a "Pause" button instead now.
+    page.assert_selector 'a,button', text: 'Pause'
+    find('a,button', text: 'Pause').click
+
+    # Pipeline is stopped. It should now be in paused state and Runnable again.
+    assert page.has_text? 'Paused'
+    page.assert_no_selector 'a.disabled,button.disabled', text: 'Resume'
+    page.assert_selector 'a,button', text: 'Re-run with latest'
+    page.assert_selector 'a,button', text: 'Re-run options'
+
+    # Since it is test env, no jobs are created to run. So, graph not visible
+    assert_not page.has_text? 'Graph'
+  end
+
+  # Create a pipeline instance from within a project and run
+  test 'Create pipeline inside a project and run' do
+    visit page_with_token('active_trustedclient')
+
+    # Add this collection to the project using collections menu from top nav
+    visit '/projects'
+    find("#projects-menu").click
+    find('.dropdown-menu a,button', text: 'A Project').click
+    find('.btn', text: 'Add data').click
+    find('.dropdown-menu a,button', text: 'Copy data from another project').click
+    within('.modal-dialog') do
+      wait_for_ajax
+      first('span', text: 'foo_tag').click
+      find('.btn', text: 'Copy').click
+    end
+    using_wait_time(Capybara.default_wait_time * 3) do
+      wait_for_ajax
+    end
+
+    create_and_run_pipeline_in_aproject true, 'Two Part Pipeline Template', 'foo_collection_in_aproject', false
+  end
+
+  # Create a pipeline instance from outside of a project
+  test 'Run a pipeline from dashboard' do
+    visit page_with_token('active_trustedclient')
+    create_and_run_pipeline_in_aproject false, 'Two Part Pipeline Template', 'foo_collection_in_aproject', false
+  end
+
+  test 'view pipeline with job and see graph' do
+    visit page_with_token('active_trustedclient')
+
+    visit '/pipeline_instances'
+    assert page.has_text? 'pipeline_with_job'
+
+    find('a', text: 'pipeline_with_job').click
+
+    # since the pipeline component has a job, expect to see the graph
+    assert page.has_text? 'Graph'
+    click_link 'Graph'
+    page.assert_selector "#provenance_graph"
+  end
+
+  test 'pipeline description' do
+    visit page_with_token('active_trustedclient')
+
+    visit '/pipeline_instances'
+    assert page.has_text? 'pipeline_with_job'
+
+    find('a', text: 'pipeline_with_job').click
+
+    within('.arv-description-as-subtitle') do
+      find('.fa-pencil').click
+      find('.editable-input textarea').set('*Textile description for pipeline instance*')
+      find('.editable-submit').click
+    end
+    wait_for_ajax
+
+    # verify description
+    assert page.has_no_text? '*Textile description for pipeline instance*'
+    assert page.has_text? 'Textile description for pipeline instance'
+  end
+
+  test "JSON popup available for strange components" do
+    uuid = api_fixture("pipeline_instances")["components_is_jobspec"]["uuid"]
+    visit page_with_token("active", "/pipeline_instances/#{uuid}")
+    click_on "Components"
+    assert(page.has_no_text?("script_parameters"),
+           "components JSON visible without popup")
+    click_on "Show components JSON"
+    assert(page.has_text?("script_parameters"),
+           "components JSON not found")
+  end
+
+  PROJECT_WITH_SEARCH_COLLECTION = "A Subproject"
+  def check_parameter_search(proj_name)
+    template = api_fixture("pipeline_templates")["parameter_with_search"]
+    search_text = template["components"]["with-search"]["script_parameters"]["input"]["search_for"]
+    visit page_with_token("active", "/pipeline_templates/#{template['uuid']}")
+    click_on "Run this pipeline"
+    within(".modal-dialog") do  # Set project for the new pipeline instance
+      find(".selectable", text: proj_name).click
+      click_on "Choose"
+    end
+    assert(has_text?("This pipeline was created from the template"), "did not land on pipeline instance page")
+    first("a.btn,button", text: "Choose").click
+    within(".modal-body") do
+      if (proj_name != PROJECT_WITH_SEARCH_COLLECTION)
+        # Switch finder modal to Subproject to find the Collection.
+        click_on proj_name
+        click_on PROJECT_WITH_SEARCH_COLLECTION
+      end
+      assert_equal(search_text, first("input").value,
+                   "parameter search not preseeded")
+      assert(has_text?(api_fixture("collections")["baz_collection_name_in_asubproject"]["name"]),
+             "baz Collection not in preseeded search results")
+    end
+  end
+
+  test "Workbench respects search_for parameter in templates" do
+    check_parameter_search(PROJECT_WITH_SEARCH_COLLECTION)
+  end
+
+  test "Workbench preserves search_for parameter after project switch" do
+    check_parameter_search("A Project")
+  end
+
+  [
+    [true, 'Two Part Pipeline Template', 'foo_collection_in_aproject', false],
+    [false, 'Two Part Pipeline Template', 'foo_collection_in_aproject', false],
+    [true, 'Two Part Template with dataclass File', 'foo_collection_in_aproject', true],
+    [false, 'Two Part Template with dataclass File', 'foo_collection_in_aproject', true],
+    [true, 'Two Part Pipeline Template', 'collection_with_no_name_in_aproject', false],
+  ].each do |in_aproject, template_name, collection, choose_file|
+    test "Run pipeline instance in #{in_aproject} with #{template_name} with #{collection} file #{choose_file}" do
+      if in_aproject
+        visit page_with_token 'active', \
+        '/projects/'+api_fixture('groups')['aproject']['uuid']
+      else
+        visit page_with_token 'active', '/'
+      end
+
+      # need bigger modal size when choosing a file from collection
+      if Capybara.current_driver == :selenium
+        Capybara.current_session.driver.browser.manage.window.resize_to(1200, 800)
+      end
+
+      create_and_run_pipeline_in_aproject in_aproject, template_name, collection, choose_file
+      instance_path = current_path
+
+      # Pause the pipeline
+      find('a,button', text: 'Pause').click
+      assert page.has_text? 'Paused'
+      page.assert_no_selector 'a.disabled,button.disabled', text: 'Resume'
+      page.assert_selector 'a,button', text: 'Re-run with latest'
+      page.assert_selector 'a,button', text: 'Re-run options'
+
+      # Verify that the newly created instance is created in the right project.
+      assert page.has_text? 'Home'
+      if in_aproject
+        assert page.has_text? 'A Project'
+      else
+        assert page.has_no_text? 'A Project'
+      end
+    end
+  end
+
+  [
+    ['active', false, false, false],
+    ['active', false, false, true],
+    ['active', true, false, false],
+    ['active', true, true, false],
+    ['active', true, false, true],
+    ['active', true, true, true],
+    ['project_viewer', false, false, true],
+    ['project_viewer', true, true, true],
+  ].each do |user, with_options, choose_options, in_aproject|
+    test "Rerun pipeline instance as #{user} using options #{with_options} #{choose_options} in #{in_aproject}" do
+      if in_aproject
+        visit page_with_token 'active', \
+        '/projects/'+api_fixture('groups')['aproject']['uuid']
+      else
+        visit page_with_token 'active', '/'
+      end
+
+      # need bigger modal size when choosing a file from collection
+      if Capybara.current_driver == :selenium
+        Capybara.current_session.driver.browser.manage.window.resize_to(1200, 800)
+      end
+
+      create_and_run_pipeline_in_aproject in_aproject, 'Two Part Pipeline Template', 'foo_collection_in_aproject'
+      instance_path = current_path
+
+      # Pause the pipeline
+      find('a,button', text: 'Pause').click
+      assert page.has_text? 'Paused'
+      page.assert_no_selector 'a.disabled,button.disabled', text: 'Resume'
+      page.assert_selector 'a,button', text: 'Re-run with latest'
+      page.assert_selector 'a,button', text: 'Re-run options'
+
+      # Pipeline can be re-run now. Access it as the specified user, and re-run
+      if user == 'project_viewer'
+        visit page_with_token(user, instance_path)
+        assert page.has_text? 'A Project'
+        page.assert_no_selector 'a.disabled,button.disabled', text: 'Resume'
+        page.assert_selector 'a,button', text: 'Re-run with latest'
+        page.assert_selector 'a,button', text: 'Re-run options'
+      end
+
+      # Now re-run the pipeline
+      if with_options
+        assert_triggers_dom_event 'shown.bs.modal' do
+          find('a,button', text: 'Re-run options').click
+        end
+        within('.modal-dialog') do
+          page.assert_selector 'a,button', text: 'Copy and edit inputs'
+          page.assert_selector 'a,button', text: 'Run now'
+          if choose_options
+            find('button', text: 'Copy and edit inputs').click
+          else
+            find('button', text: 'Run now').click
+          end
+        end
+      else
+        find('a,button', text: 'Re-run with latest').click
+      end
+
+      # Verify that the newly created instance is created in the right
+      # project. In case of project_viewer user, since the user cannot
+      # write to the project, the pipeline should have been created in
+      # the user's Home project.
+      assert_not_equal instance_path, current_path, 'Rerun instance path expected to be different'
+      assert_text 'Home'
+      if in_aproject && (user != 'project_viewer')
+        assert_text 'A Project'
+      else
+        assert_no_text 'A Project'
+      end
+    end
+  end
+
+  # Create and run a pipeline for 'Two Part Pipeline Template' in 'A Project'
+  def create_and_run_pipeline_in_aproject in_aproject, template_name, collection_fixture, choose_file=false
+    # collection in aproject to be used as input
+    collection = api_fixture('collections', collection_fixture)
+
+    # create a pipeline instance
+    find('.btn', text: 'Run a pipeline').click
+    within('.modal-dialog') do
+      find('.selectable', text: template_name).click
+      find('.btn', text: 'Next: choose inputs').click
+    end
+
+    assert find('p', text: 'Provide a value')
+
+    find('div.form-group', text: 'Foo/bar pair').
+      find('.btn', text: 'Choose').
+      click
+
+    within('.modal-dialog') do
+      if in_aproject
+        assert_selector 'button.dropdown-toggle', text: 'A Project'
+        wait_for_ajax
+      else
+        assert_selector 'button.dropdown-toggle', text: 'Home'
+        wait_for_ajax
+        click_button "Home"
+        click_link "A Project"
+        wait_for_ajax
+      end
+
+      if collection_fixture == 'foo_collection_in_aproject'
+        first('span', text: 'foo_tag').click
+      elsif collection['name']
+        first('span', text: "#{collection['name']}").click
+      else
+        collection_uuid = collection['uuid']
+        find("div[data-object-uuid=#{collection_uuid}]").click
+      end
+
+      if choose_file
+        wait_for_ajax
+        find('.preview-selectable', text: 'foo').click
+      end
+      find('button', text: 'OK').click
+    end
+
+    # The input, after being specified, should still be displayed (#3382)
+    assert find('div.form-group', text: 'Foo/bar pair')
+
+    # Ensure that the collection's portable_data_hash, uuid and name
+    # are saved in the desired places. (#4015)
+    click_link 'Advanced'
+    click_link 'API response'
+
+    api_response = JSON.parse(find('div#advanced_api_response pre').text)
+    input_params = api_response['components']['part-one']['script_parameters']['input']
+    assert_equal(input_params['selection_uuid'], collection['uuid'], "Not found expected input param uuid")
+    if choose_file
+      assert_equal(input_params['value'], collection['portable_data_hash']+'/foo', "Not found expected input file param value")
+      assert_equal(input_params['selection_name'], collection['name']+'/foo', "Not found expected input file param name")
+    else
+      assert_equal(input_params['value'], collection['portable_data_hash'], "Not found expected input param value")
+      assert_equal(input_params['selection_name'], collection['name'], "Not found expected input selection name")
+    end
+
+    # "Run" button present and enabled
+    page.assert_no_selector 'a.disabled,button.disabled', text: 'Run'
+    first('a,button', text: 'Run').click
+
+    # Pipeline is running. We have a "Pause" button instead now.
+    page.assert_no_selector 'a,button', text: 'Run'
+    page.assert_no_selector 'a.disabled,button.disabled', text: 'Resume'
+    page.assert_selector 'a,button', text: 'Pause'
+
+    # Since it is test env, no jobs are created to run. So, graph not visible
+    assert_not page.has_text? 'Graph'
+  end
+
+  [
+    [1, 0], # run time 0 minutes
+    [10, 17*60*60 + 51*60], # run time 17 hours and 51 minutes
+  ].each do |index, run_time|
+    test "pipeline start and finish time display #{index}" do
+      visit page_with_token("user1_with_load", "/pipeline_instances/zzzzz-d1hrv-10pipelines0#{index.to_s.rjust(3, '0')}")
+
+      assert page.has_text? 'This pipeline started at'
+      page_text = page.text
+
+      match = /This pipeline started at (.*)\. It failed after (.*) seconds at (.*)\. Check the Log/.match page_text
+      assert_not_nil(match, 'Did not find text - This pipeline started at . . . ')
+
+      start_at = match[1]
+      finished_at = match[3]
+      assert_not_nil(start_at, 'Did not find start_at time')
+      assert_not_nil(finished_at, 'Did not find finished_at time')
+
+      # start and finished time display is of the format '2:20 PM 10/20/2014'
+      start_time = DateTime.strptime(start_at, '%H:%M %p %m/%d/%Y').to_time
+      finished_time = DateTime.strptime(finished_at, '%H:%M %p %m/%d/%Y').to_time
+      assert_equal(run_time, finished_time-start_time,
+        "Time difference did not match for start_at #{start_at}, finished_at #{finished_at}, ran_for #{match[2]}")
+    end
+  end
+
+  [
+    ['fuse', nil, 2, 20],                           # has 2 as of 11-07-2014
+    ['fuse', 'FUSE project', 1, 1],                 # 1 with this name
+    ['user1_with_load', nil, 30, 100],              # has 37 as of 11-07-2014
+    ['user1_with_load', 'pipeline_10', 2, 2],       # 2 with this name
+    ['user1_with_load', '000010pipelines', 10, 10], # owned_by the project zzzzz-j7d0g-000010pipelines
+    ['user1_with_load', '000025pipelines', 25, 25], # owned_by the project zzzzz-j7d0g-000025pipelines, two pages
+    ['admin', nil, 40, 200],
+    ['admin', 'FUSE project', 1, 1],
+    ['admin', 'pipeline_10', 2, 2],
+    ['active', 'containing at least two', 2, 100],  # component description
+    ['admin', 'containing at least two', 2, 100],
+    ['active', nil, 10, 100],
+    ['active', 'no such match', 0, 0],
+  ].each do |user, search_filter, expected_min, expected_max|
+    test "scroll pipeline instances page for #{user} with search filter #{search_filter}
+          and expect #{expected_min} <= found_items <= #{expected_max}" do
+      visit page_with_token(user, "/pipeline_instances")
+
+      if search_filter
+        find('.recent-pipeline-instances-filterable-control').set(search_filter)
+        # Wait for 250ms debounce timer (see filterable.js)
+        sleep 0.350
+        wait_for_ajax
+      end
+
+      page_scrolls = expected_max/20 + 2    # scroll num_pages+2 times to test scrolling is disabled when it should be
+      within('.arv-recent-pipeline-instances') do
+        (0..page_scrolls).each do |i|
+          page.driver.scroll_to 0, 999000
+          begin
+            wait_for_ajax
+          rescue
+          end
+        end
+      end
+
+      # Verify that expected number of pipeline instances are found
+      found_items = page.all('tr[data-kind="arvados#pipelineInstance"]')
+      found_count = found_items.count
+      if expected_min == expected_max
+        assert_equal(true, found_count == expected_min,
+          "Not found expected number of items. Expected #{expected_min} and found #{found_count}")
+        assert page.has_no_text? 'request failed'
+      else
+        assert_equal(true, found_count>=expected_min,
+          "Found too few items. Expected at least #{expected_min} and found #{found_count}")
+        assert_equal(true, found_count<=expected_max,
+          "Found too many items. Expected at most #{expected_max} and found #{found_count}")
+      end
+    end
+  end
+
+end
diff --git a/apps/workbench/test/integration/pipeline_templates_test.rb b/apps/workbench/test/integration/pipeline_templates_test.rb
new file mode 100644 (file)
index 0000000..19a5109
--- /dev/null
@@ -0,0 +1,45 @@
+require 'integration_helper'
+
+class PipelineTemplatesTest < ActionDispatch::IntegrationTest
+  test "JSON popup available for strange components" do
+    need_javascript
+    uuid = api_fixture("pipeline_templates")["components_is_jobspec"]["uuid"]
+    visit page_with_token("active", "/pipeline_templates/#{uuid}")
+    click_on "Components"
+    assert(page.has_no_text?("script_parameters"),
+           "components JSON visible without popup")
+    click_on "Show components JSON"
+    assert(page.has_text?("script_parameters"),
+           "components JSON not found")
+  end
+
+  test "pipeline template description" do
+    need_javascript
+    visit page_with_token("active", "/pipeline_templates")
+
+    # go to Two Part pipeline template
+    within first('tr', text: 'Two Part Pipeline Template') do
+      find(".fa-gears").click
+    end
+
+    # edit template description
+    within('.arv-description-as-subtitle') do
+      find('.fa-pencil').click
+      find('.editable-input textarea').set('*Textile description for pipeline template* - "Go to dashboard":/')
+      find('.editable-submit').click
+    end
+    wait_for_ajax
+
+    # Verfiy edited description
+    assert page.has_no_text? '*Textile description for pipeline template*'
+    assert page.has_text? 'Textile description for pipeline template'
+    assert page.has_link? 'Go to dashboard'
+    click_link 'Go to dashboard'
+    assert page.has_text? 'Active pipelines'
+
+    # again visit recent templates page and verify edited description
+    visit page_with_token("active", "/pipeline_templates")
+    assert page.has_no_text? '*Textile description for pipeline template*'
+    assert page.has_text? 'Textile description for pipeline template'
+  end
+end
diff --git a/apps/workbench/test/integration/projects_test.rb b/apps/workbench/test/integration/projects_test.rb
new file mode 100644 (file)
index 0000000..ce5b47e
--- /dev/null
@@ -0,0 +1,759 @@
+require 'integration_helper'
+
+class ProjectsTest < ActionDispatch::IntegrationTest
+  setup do
+    need_javascript
+  end
+
+  test 'Check collection count for A Project in the tab pane titles' do
+    project_uuid = api_fixture('groups')['aproject']['uuid']
+    visit page_with_token 'active', '/projects/' + project_uuid
+    wait_for_ajax
+    collection_count = page.all("[data-pk*='collection']").count
+    assert_selector '#Data_collections-tab span', text: "(#{collection_count})"
+  end
+
+  test 'Find a project and edit its description' do
+    visit page_with_token 'active', '/'
+    find("#projects-menu").click
+    find(".dropdown-menu a", text: "A Project").click
+    within('.container-fluid', text: api_fixture('groups')['aproject']['name']) do
+      find('span', text: api_fixture('groups')['aproject']['name']).click
+      within('.arv-description-as-subtitle') do
+        find('.fa-pencil').click
+        find('.editable-input textarea').set('I just edited this.')
+        find('.editable-submit').click
+      end
+      wait_for_ajax
+    end
+    visit current_path
+    assert(find?('.container-fluid', text: 'I just edited this.'),
+           "Description update did not survive page refresh")
+  end
+
+  test 'Find a project and edit description to textile description' do
+    visit page_with_token 'active', '/'
+    find("#projects-menu").click
+    find(".dropdown-menu a", text: "A Project").click
+    within('.container-fluid', text: api_fixture('groups')['aproject']['name']) do
+      find('span', text: api_fixture('groups')['aproject']['name']).click
+      within('.arv-description-as-subtitle') do
+        find('.fa-pencil').click
+        find('.editable-input textarea').set('<p>*Textile description for A project* - "take me home":/ </p><p>And a new paragraph in description.</p>')
+        find('.editable-submit').click
+      end
+      wait_for_ajax
+    end
+
+    # visit project page
+    visit current_path
+    assert_no_text '*Textile description for A project*'
+    assert(find?('.container-fluid', text: 'Textile description for A project'),
+           "Description update did not survive page refresh")
+    assert(find?('.container-fluid', text: 'And a new paragraph in description'),
+           "Description did not contain the expected new paragraph")
+    assert(page.has_link?("take me home"), "link not found in description")
+
+    click_link 'take me home'
+
+    # now in dashboard
+    assert(page.has_text?('Active pipelines'), 'Active pipelines - not found on dashboard')
+  end
+
+  test 'Find a project and edit description to html description' do
+    visit page_with_token 'active', '/'
+    find("#projects-menu").click
+    find(".dropdown-menu a", text: "A Project").click
+    within('.container-fluid', text: api_fixture('groups')['aproject']['name']) do
+      find('span', text: api_fixture('groups')['aproject']['name']).click
+      within('.arv-description-as-subtitle') do
+        find('.fa-pencil').click
+        find('.editable-input textarea').set('<br>Textile description for A project</br> - <a href="/">take me home</a>')
+        find('.editable-submit').click
+      end
+      wait_for_ajax
+    end
+    visit current_path
+    assert(find?('.container-fluid', text: 'Textile description for A project'),
+           "Description update did not survive page refresh")
+    assert(!find?('.container-fluid', text: '<br>Textile description for A project</br>'),
+           "Textile description is displayed with uninterpreted formatting characters")
+    assert(page.has_link?("take me home"),"link not found in description")
+    click_link 'take me home'
+    assert page.has_text?('Active pipelines')
+  end
+
+  test 'Find a project and edit description to textile description with link to object' do
+    visit page_with_token 'active', '/'
+    find("#projects-menu").click
+    find(".dropdown-menu a", text: "A Project").click
+    within('.container-fluid', text: api_fixture('groups')['aproject']['name']) do
+      find('span', text: api_fixture('groups')['aproject']['name']).click
+      within('.arv-description-as-subtitle') do
+        find('.fa-pencil').click
+        find('.editable-input textarea').set('*Textile description for A project* - "go to sub-project":' + api_fixture('groups')['asubproject']['uuid'] + "'")
+        find('.editable-submit').click
+      end
+      wait_for_ajax
+    end
+    visit current_path
+    assert(find?('.container-fluid', text: 'Textile description for A project'),
+           "Description update did not survive page refresh")
+    assert(!find?('.container-fluid', text: '*Textile description for A project*'),
+           "Textile description is displayed with uninterpreted formatting characters")
+    assert(page.has_link?("go to sub-project"), "link not found in description")
+    click_link 'go to sub-project'
+    assert(page.has_text?(api_fixture('groups')['asubproject']['name']), 'sub-project name not found after clicking link')
+  end
+
+  test 'Add a new name, then edit it, without creating a duplicate' do
+    project_uuid = api_fixture('groups')['aproject']['uuid']
+    specimen_uuid = api_fixture('traits')['owned_by_aproject_with_no_name']['uuid']
+    visit page_with_token 'active', '/projects/' + project_uuid
+    click_link 'Other objects'
+    within '.selection-action-container' do
+      # Wait for the tab to load:
+      assert_selector 'tr[data-kind="arvados#trait"]'
+      within first('tr', text: 'Trait') do
+        find(".fa-pencil").click
+        find('.editable-input input').set('Now I have a name.')
+        find('.glyphicon-ok').click
+        assert_selector '.editable', text: 'Now I have a name.'
+        find(".fa-pencil").click
+        find('.editable-input input').set('Now I have a new name.')
+        find('.glyphicon-ok').click
+      end
+      wait_for_ajax
+      assert_selector '.editable', text: 'Now I have a new name.'
+    end
+    visit current_path
+    click_link 'Other objects'
+    within '.selection-action-container' do
+      find '.editable', text: 'Now I have a new name.'
+      assert_no_selector '.editable', text: 'Now I have a name.'
+    end
+  end
+
+  test 'Create a project and move it into a different project' do
+    visit page_with_token 'active', '/projects'
+    find("#projects-menu").click
+    find(".dropdown-menu a", text: "Home").click
+    find('.btn', text: "Add a subproject").click
+
+    within('h2') do
+      find('.fa-pencil').click
+      find('.editable-input input').set('Project 1234')
+      find('.glyphicon-ok').click
+    end
+    wait_for_ajax
+
+    visit '/projects'
+    find("#projects-menu").click
+    find(".dropdown-menu a", text: "Home").click
+    find('.btn', text: "Add a subproject").click
+    within('h2') do
+      find('.fa-pencil').click
+      find('.editable-input input').set('Project 5678')
+      find('.glyphicon-ok').click
+    end
+    wait_for_ajax
+
+    click_link 'Move project...'
+    find('.selectable', text: 'Project 1234').click
+    find('.modal-footer a,button', text: 'Move').click
+    wait_for_ajax
+
+    # Wait for the page to refresh and show the new parent in Sharing panel
+    click_link 'Sharing'
+    assert(page.has_link?("Project 1234"),
+           "Project 5678 should now be inside project 1234")
+  end
+
+  def show_project_using(auth_key, proj_key='aproject')
+    project_uuid = api_fixture('groups')[proj_key]['uuid']
+    visit(page_with_token(auth_key, "/projects/#{project_uuid}"))
+    assert(page.has_text?("A Project"), "not on expected project page")
+  end
+
+  def share_rows
+    find('#project_sharing').all('tr')
+  end
+
+  def add_share_and_check(share_type, name, obj=nil)
+    assert(page.has_no_text?(name), "project is already shared with #{name}")
+    start_share_count = share_rows.size
+    click_on("Share with #{share_type}")
+    within(".modal-container") do
+      # Order is important here: we should find something that appears in the
+      # modal before we make any assertions about what's not in the modal.
+      # Otherwise, the not-included assertions might falsely pass because
+      # the modal hasn't loaded yet.
+      find(".selectable", text: name).click
+      assert(has_no_selector?(".modal-dialog-preview-pane"),
+             "preview pane available in sharing dialog")
+      if share_type == 'users' and obj and obj['email']
+        assert(page.has_text?(obj['email']), "Did not find user's email")
+      end
+      assert_raises(Capybara::ElementNotFound,
+                    "Projects pulldown available from sharing dialog") do
+        click_on "All projects"
+      end
+      click_on "Add"
+    end
+    using_wait_time(Capybara.default_wait_time * 3) do
+      assert(page.has_link?(name),
+             "new share was not added to sharing table")
+      assert_equal(start_share_count + 1, share_rows.size,
+                   "new share did not add row to sharing table")
+    end
+  end
+
+  def modify_share_and_check(name)
+    start_rows = share_rows
+    link_row = start_rows.select { |row| row.has_text?(name) }
+    assert_equal(1, link_row.size, "row with new permission not found")
+    within(link_row.first) do
+      click_on("Read")
+      select("Write", from: "share_change_level")
+      click_on("editable-submit")
+      assert(has_link?("Write"),
+             "failed to change access level on new share")
+      click_on "Revoke"
+      if Capybara.current_driver == :selenium
+        page.driver.browser.switch_to.alert.accept
+      else
+        # poltergeist returns true for confirm(), so we don't need to accept.
+      end
+    end
+    wait_for_ajax
+    using_wait_time(Capybara.default_wait_time * 3) do
+      assert(page.has_no_text?(name),
+             "new share row still exists after being revoked")
+      assert_equal(start_rows.size - 1, share_rows.size,
+                   "revoking share did not remove row from sharing table")
+    end
+  end
+
+  test "project viewer can't see project sharing tab" do
+    show_project_using("project_viewer")
+    assert(page.has_no_link?("Sharing"),
+           "read-only project user sees sharing tab")
+  end
+
+  test "project owner can manage sharing for another user" do
+    add_user = api_fixture('users')['future_project_user']
+    new_name = ["first_name", "last_name"].map { |k| add_user[k] }.join(" ")
+
+    show_project_using("active")
+    click_on "Sharing"
+    add_share_and_check("users", new_name, add_user)
+    modify_share_and_check(new_name)
+  end
+
+  test "project owner can manage sharing for another group" do
+    new_name = api_fixture('groups')['future_project_viewing_group']['name']
+
+    show_project_using("active")
+    click_on "Sharing"
+    add_share_and_check("groups", new_name)
+    modify_share_and_check(new_name)
+  end
+
+  test "'share with group' listing does not offer projects" do
+    show_project_using("active")
+    click_on "Sharing"
+    click_on "Share with groups"
+    good_uuid = api_fixture("groups")["private"]["uuid"]
+    assert(page.has_selector?(".selectable[data-object-uuid=\"#{good_uuid}\"]"),
+           "'share with groups' listing missing owned user group")
+    bad_uuid = api_fixture("groups")["asubproject"]["uuid"]
+    assert(page.has_no_selector?(".selectable[data-object-uuid=\"#{bad_uuid}\"]"),
+           "'share with groups' listing includes project")
+  end
+
+  [
+    ['Move',api_fixture('collections')['collection_to_move_around_in_aproject'],
+      api_fixture('groups')['aproject'],api_fixture('groups')['asubproject']],
+    ['Remove',api_fixture('collections')['collection_to_move_around_in_aproject'],
+      api_fixture('groups')['aproject']],
+    ['Copy',api_fixture('collections')['collection_to_move_around_in_aproject'],
+      api_fixture('groups')['aproject'],api_fixture('groups')['asubproject']],
+    ['Remove',api_fixture('collections')['collection_in_aproject_with_same_name_as_in_home_project'],
+      api_fixture('groups')['aproject'],nil,true],
+  ].each do |action, my_collection, src, dest=nil, expect_name_change=nil|
+    test "selection #{action} -> #{expect_name_change.inspect} for project" do
+      perform_selection_action src, dest, my_collection, action
+
+      case action
+      when 'Copy'
+        assert page.has_text?(my_collection['name']), 'Collection not found in src project after copy'
+        visit page_with_token 'active', '/'
+        find("#projects-menu").click
+        find(".dropdown-menu a", text: dest['name']).click
+        assert page.has_text?(my_collection['name']), 'Collection not found in dest project after copy'
+
+      when 'Move'
+        assert page.has_no_text?(my_collection['name']), 'Collection still found in src project after move'
+        visit page_with_token 'active', '/'
+        find("#projects-menu").click
+        find(".dropdown-menu a", text: dest['name']).click
+        assert page.has_text?(my_collection['name']), 'Collection not found in dest project after move'
+
+      when 'Remove'
+        assert page.has_no_text?(my_collection['name']), 'Collection still found in src project after remove'
+        visit page_with_token 'active', '/'
+        find("#projects-menu").click
+        find(".dropdown-menu a", text: "Home").click
+        assert page.has_text?(my_collection['name']), 'Collection not found in home project after remove'
+        if expect_name_change
+          assert page.has_text?(my_collection['name']+' removed from ' + src['name']),
+            'Collection with update name is not found in home project after remove'
+        end
+      end
+    end
+  end
+
+  def perform_selection_action src, dest, item, action
+    visit page_with_token 'active', '/'
+    find("#projects-menu").click
+    find(".dropdown-menu a", text: src['name']).click
+    assert page.has_text?(item['name']), 'Collection not found in src project'
+
+    within('tr', text: item['name']) do
+      find('input[type=checkbox]').click
+    end
+
+    click_button 'Selection'
+
+    within('.selection-action-container') do
+      assert page.has_text?("Compare selected"), "Compare selected link text not found"
+      assert page.has_link?("Copy selected"), "Copy selected link not found"
+      assert page.has_link?("Move selected"), "Move selected link not found"
+      assert page.has_link?("Remove selected"), "Remove selected link not found"
+
+      click_link "#{action} selected"
+    end
+
+    # select the destination project if a Copy or Move action is being performed
+    if action == 'Copy' || action == 'Move'
+      within(".modal-container") do
+        find('.selectable', text: dest['name']).click
+        find('.modal-footer a,button', text: action).click
+        wait_for_ajax
+      end
+    end
+  end
+
+  # Test copy action state. It should not be available when a subproject is selected.
+  test "copy action is disabled when a subproject is selected" do
+    my_project = api_fixture('groups')['aproject']
+    my_collection = api_fixture('collections')['collection_to_move_around_in_aproject']
+    my_subproject = api_fixture('groups')['asubproject']
+
+    # verify that selection options are disabled on the project until an item is selected
+    visit page_with_token 'active', '/'
+    find("#projects-menu").click
+    find(".dropdown-menu a", text: my_project['name']).click
+
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_selector 'li.disabled', text: 'Create new collection with selected collections'
+      assert_selector 'li.disabled', text: 'Compare selected'
+      assert_selector 'li.disabled', text: 'Copy selected'
+      assert_selector 'li.disabled', text: 'Move selected'
+      assert_selector 'li.disabled', text: 'Remove selected'
+    end
+
+    # select collection and verify links are enabled
+    visit page_with_token 'active', '/'
+    find("#projects-menu").click
+    find(".dropdown-menu a", text: my_project['name']).click
+    assert page.has_text?(my_collection['name']), 'Collection not found in project'
+
+    within('tr', text: my_collection['name']) do
+      find('input[type=checkbox]').click
+    end
+
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_no_selector 'li.disabled', text: 'Create new collection with selected collections'
+      assert_selector 'li', text: 'Create new collection with selected collections'
+      assert_selector 'li.disabled', text: 'Compare selected'
+      assert_no_selector 'li.disabled', text: 'Copy selected'
+      assert_selector 'li', text: 'Copy selected'
+      assert_no_selector 'li.disabled', text: 'Move selected'
+      assert_selector 'li', text: 'Move selected'
+      assert_no_selector 'li.disabled', text: 'Remove selected'
+      assert_selector 'li', text: 'Remove selected'
+    end
+
+    # select subproject and verify that copy action is disabled
+    visit page_with_token 'active', '/'
+    find("#projects-menu").click
+    find(".dropdown-menu a", text: my_project['name']).click
+
+    click_link 'Subprojects'
+    assert page.has_text?(my_subproject['name']), 'Subproject not found in project'
+
+    within('tr', text: my_subproject['name']) do
+      find('input[type=checkbox]').click
+    end
+
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_selector 'li.disabled', text: 'Create new collection with selected collections'
+      assert_selector 'li.disabled', text: 'Compare selected'
+      assert_selector 'li.disabled', text: 'Copy selected'
+      assert_no_selector 'li.disabled', text: 'Move selected'
+      assert_selector 'li', text: 'Move selected'
+      assert_no_selector 'li.disabled', text: 'Remove selected'
+      assert_selector 'li', text: 'Remove selected'
+    end
+
+    # select subproject and a collection and verify that copy action is still disabled
+    visit page_with_token 'active', '/'
+    find("#projects-menu").click
+    find(".dropdown-menu a", text: my_project['name']).click
+
+    click_link 'Subprojects'
+    assert page.has_text?(my_subproject['name']), 'Subproject not found in project'
+
+    within('tr', text: my_subproject['name']) do
+      find('input[type=checkbox]').click
+    end
+
+    click_link 'Data collections'
+    assert page.has_text?(my_collection['name']), 'Collection not found in project'
+
+    within('tr', text: my_collection['name']) do
+      find('input[type=checkbox]').click
+    end
+
+    click_link 'Subprojects'
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_selector 'li.disabled', text: 'Create new collection with selected collections'
+      assert_selector 'li.disabled', text: 'Compare selected'
+      assert_selector 'li.disabled', text: 'Copy selected'
+      assert_no_selector 'li.disabled', text: 'Move selected'
+      assert_selector 'li', text: 'Move selected'
+      assert_no_selector 'li.disabled', text: 'Remove selected'
+      assert_selector 'li', text: 'Remove selected'
+    end
+  end
+
+  # When project tabs are switched, only options applicable to the current tab's selections are enabled.
+  test "verify selection options when tabs are switched" do
+    my_project = api_fixture('groups')['aproject']
+    my_collection = api_fixture('collections')['collection_to_move_around_in_aproject']
+    my_subproject = api_fixture('groups')['asubproject']
+
+    # select subproject and a collection and verify that copy action is still disabled
+    visit page_with_token 'active', '/'
+    find("#projects-menu").click
+    find(".dropdown-menu a", text: my_project['name']).click
+
+    # Select a sub-project
+    click_link 'Subprojects'
+    assert page.has_text?(my_subproject['name']), 'Subproject not found in project'
+
+    within('tr', text: my_subproject['name']) do
+      find('input[type=checkbox]').click
+    end
+
+    # Select a collection
+    click_link 'Data collections'
+    assert page.has_text?(my_collection['name']), 'Collection not found in project'
+
+    within('tr', text: my_collection['name']) do
+      find('input[type=checkbox]').click
+    end
+
+    # Go back to Subprojects tab
+    click_link 'Subprojects'
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_selector 'li.disabled', text: 'Create new collection with selected collections'
+      assert_selector 'li.disabled', text: 'Compare selected'
+      assert_selector 'li.disabled', text: 'Copy selected'
+      assert_no_selector 'li.disabled', text: 'Move selected'
+      assert_selector 'li', text: 'Move selected'
+      assert_no_selector 'li.disabled', text: 'Remove selected'
+      assert_selector 'li', text: 'Remove selected'
+    end
+
+    # Close the dropdown by clicking outside it.
+    find('.dropdown-toggle', text: 'Selection').find(:xpath, '..').click
+
+    # Go back to Data collections tab
+    find('.nav-tabs a', text: 'Data collections').click
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_no_selector 'li.disabled', text: 'Create new collection with selected collections'
+      assert_selector 'li', text: 'Create new collection with selected collections'
+      assert_selector 'li.disabled', text: 'Compare selected'
+      assert_no_selector 'li.disabled', text: 'Copy selected'
+      assert_selector 'li', text: 'Copy selected'
+      assert_no_selector 'li.disabled', text: 'Move selected'
+      assert_selector 'li', text: 'Move selected'
+      assert_no_selector 'li.disabled', text: 'Remove selected'
+      assert_selector 'li', text: 'Remove selected'
+    end
+  end
+
+  # "Move selected" and "Remove selected" options should not be available when current user cannot write to the project
+  test "move selected and remove selected actions not available when current user cannot write to project" do
+    my_project = api_fixture('groups')['anonymously_accessible_project']
+    visit page_with_token 'active', "/projects/#{my_project['uuid']}"
+
+    click_button 'Selection'
+    within('.selection-action-container') do
+      assert_selector 'li', text: 'Create new collection with selected collections'
+      assert_selector 'li', text: 'Compare selected'
+      assert_selector 'li', text: 'Copy selected'
+      assert_no_selector 'li', text: 'Move selected'
+      assert_no_selector 'li', text: 'Remove selected'
+    end
+  end
+
+  [
+    ['active', true],
+    ['project_viewer', false],
+  ].each do |user, expect_collection_in_aproject|
+    test "combine selected collections into new collection #{user} #{expect_collection_in_aproject}" do
+      my_project = api_fixture('groups')['aproject']
+      my_collection = api_fixture('collections')['collection_to_move_around_in_aproject']
+
+      visit page_with_token user, '/'
+      find("#projects-menu").click
+      find(".dropdown-menu a", text: my_project['name']).click
+      assert page.has_text?(my_collection['name']), 'Collection not found in project'
+
+      within('tr', text: my_collection['name']) do
+        find('input[type=checkbox]').click
+      end
+
+      click_button 'Selection'
+      within('.selection-action-container') do
+        click_link 'Create new collection with selected collections'
+      end
+
+      # now in the new collection page
+      if expect_collection_in_aproject
+        assert page.has_text?("Created new collection in the project #{my_project['name']}"),
+                              'Not found flash message that new collection is created in aproject'
+      else
+        assert page.has_text?("Created new collection in your Home project"),
+                              'Not found flash message that new collection is created in Home project'
+      end
+      assert page.has_text?('Content hash'), 'Not found content hash in collection page'
+    end
+  end
+
+  [
+    ["jobs", "/jobs"],
+    ["pipelines", "/pipeline_instances"],
+    ["collections", "/collections"]
+  ].each do |target,path|
+    test "Test dashboard button all #{target}" do
+      visit page_with_token 'active', '/'
+      click_link "All #{target}"
+      assert_equal path, current_path
+    end
+  end
+
+  def scroll_setup(project_name,
+                   total_nbr_items,
+                   item_list_parameter,
+                   sorted = false,
+                   sort_parameters = nil)
+    project_uuid = api_fixture('groups')[project_name]['uuid']
+    visit page_with_token 'user1_with_load', '/projects/' + project_uuid
+
+    assert(page.has_text?("#{item_list_parameter.humanize} (#{total_nbr_items})"), "Number of #{item_list_parameter.humanize} did not match the input amount")
+
+    click_link item_list_parameter.humanize
+    wait_for_ajax
+
+    if sorted
+      find("th[data-sort-order='#{sort_parameters.gsub(/\s/,'')}']").click
+      wait_for_ajax
+    end
+  end
+
+  def scroll_items_check(nbr_items,
+                         fixture_prefix,
+                         item_list_parameter,
+                         item_selector,
+                         sorted = false)
+    items = []
+    for i in 1..nbr_items
+      items << "#{fixture_prefix}#{i}"
+    end
+
+    verify_items = items.dup
+    unexpected_items = []
+    item_count = 0
+    within(".arv-project-#{item_list_parameter}") do
+      page.execute_script "window.scrollBy(0,999000)"
+      begin
+        wait_for_ajax
+      rescue
+      end
+
+      # Visit all rows. If not all expected items are found, retry
+      found_items = page.all(item_selector)
+      item_count = found_items.count
+
+      previous = nil
+      (0..item_count-1).each do |i|
+        # Found row text using the fixture string e.g. "Show Collection_#{n} "
+        item_name = found_items[i].text.split[1]
+        if !items.include? item_name
+          unexpected_items << item_name
+        else
+          verify_items.delete item_name
+        end
+        if sorted
+          # check sort order
+          assert_operator( previous.downcase, :<=, item_name.downcase) if previous
+          previous = item_name
+        end
+      end
+
+      assert_equal true, unexpected_items.empty?, "Found unexpected #{item_list_parameter.humanize} #{unexpected_items.inspect}"
+      assert_equal nbr_items, item_count, "Found different number of #{item_list_parameter.humanize}"
+      assert_equal true, verify_items.empty?, "Did not find all the #{item_list_parameter.humanize}"
+    end
+  end
+
+  [
+    ['project_with_10_collections', 10],
+    ['project_with_201_collections', 201], # two pages of data
+  ].each do |project_name, nbr_items|
+    test "scroll collections tab for #{project_name} with #{nbr_items} objects" do
+      item_list_parameter = "Data_collections"
+      scroll_setup project_name,
+                   nbr_items,
+                   item_list_parameter
+      scroll_items_check nbr_items,
+                         "Collection_",
+                         item_list_parameter,
+                         'tr[data-kind="arvados#collection"]'
+    end
+  end
+
+  [
+    ['project_with_10_collections', 10],
+    ['project_with_201_collections', 201], # two pages of data
+  ].each do |project_name, nbr_items|
+    test "scroll collections tab for #{project_name} with #{nbr_items} objects with ascending sort (case insensitive)" do
+      item_list_parameter = "Data_collections"
+      scroll_setup project_name,
+                   nbr_items,
+                   item_list_parameter,
+                   true,
+                   "collections.name"
+      scroll_items_check nbr_items,
+                         "Collection_",
+                         item_list_parameter,
+                         'tr[data-kind="arvados#collection"]',
+                         true
+    end
+  end
+
+  [
+    ['project_with_10_pipelines', 10, 0],
+    ['project_with_2_pipelines_and_60_jobs', 2, 60],
+    ['project_with_25_pipelines', 25, 0],
+  ].each do |project_name, num_pipelines, num_jobs|
+    test "scroll pipeline instances tab for #{project_name} with #{num_pipelines} pipelines and #{num_jobs} jobs" do
+      item_list_parameter = "Jobs_and_pipelines"
+      scroll_setup project_name,
+                   num_pipelines + num_jobs,
+                   item_list_parameter
+      # check the general scrolling and the pipelines
+      scroll_items_check num_pipelines,
+                         "pipeline_",
+                         item_list_parameter,
+                         'tr[data-kind="arvados#pipelineInstance"]'
+      # Check job count separately
+      jobs_found = page.all('tr[data-kind="arvados#job"]')
+      found_job_count = jobs_found.count
+      assert_equal num_jobs, found_job_count, 'Did not find expected number of jobs'
+    end
+  end
+
+  # Move button accessibility
+  [
+    ['admin', true],
+    ['active', true],  # project owner
+    ['project_viewer', false],
+    ].each do |user, can_move|
+    test "#{user} can move subproject under another user's Home #{can_move}" do
+      project = api_fixture('groups')['aproject']
+      collection = api_fixture('collections')['collection_to_move_around_in_aproject']
+
+      # verify the project move button
+      visit page_with_token user, "/projects/#{project['uuid']}"
+      if can_move
+        assert page.has_link? 'Move project...'
+      else
+        assert page.has_no_link? 'Move project...'
+      end
+    end
+  end
+
+  test "error while loading tab" do
+    original_arvados_v1_base = Rails.configuration.arvados_v1_base
+
+    visit page_with_token 'active', '/projects/' + api_fixture('groups')['aproject']['uuid']
+
+    # Point to a bad api server url to generate error
+    Rails.configuration.arvados_v1_base = "https://[100::f]:1/"
+    click_link 'Other objects'
+    within '#Other_objects' do
+      # Error
+      assert_selector('a', text: 'Reload tab')
+
+      # Now point back to the orig api server and reload tab
+      Rails.configuration.arvados_v1_base = original_arvados_v1_base
+      click_link 'Reload tab'
+      assert_no_selector('a', text: 'Reload tab')
+      assert_selector('button', text: 'Selection')
+      within '.selection-action-container' do
+        assert_selector 'tr[data-kind="arvados#trait"]'
+      end
+    end
+  end
+
+  test "add new project using projects dropdown" do
+    # verify that selection options are disabled on the project until an item is selected
+    visit page_with_token 'active', '/'
+
+    # Add a new project
+    find("#projects-menu").click
+    click_link 'Add a new project'
+    assert_text 'New project'
+    assert_text 'No description provided'
+
+    # Add one more new project
+    find("#projects-menu").click
+    click_link 'Add a new project'
+    match = /New project \(\d\)/.match page.text
+    assert match, 'Expected project name not found'
+    assert_text 'No description provided'
+  end
+
+  test "first tab loads data when visiting other tab directly" do
+    # As of 2014-12-19, the first tab of project#show uses infinite scrolling.
+    # Make sure that it loads data even if we visit another tab directly.
+    need_selenium 'to land on specified tab using {url}#Advanced'
+    project = api_fixture("groups", "aproject")
+    visit(page_with_token("active_trustedclient",
+                          "/projects/#{project['uuid']}#Advanced"))
+    assert_text("API response")
+    find("#page-wrapper .nav-tabs :first-child a").click
+    assert_text("bytes Collection")
+  end
+end
diff --git a/apps/workbench/test/integration/report_issue_test.rb b/apps/workbench/test/integration/report_issue_test.rb
new file mode 100644 (file)
index 0000000..7d4058d
--- /dev/null
@@ -0,0 +1,97 @@
+require 'integration_helper'
+
+class ReportIssueTest < ActionDispatch::IntegrationTest
+  setup do
+    need_javascript
+    @user_profile_form_fields = Rails.configuration.user_profile_form_fields
+  end
+
+  teardown do
+    Rails.configuration.user_profile_form_fields = @user_profile_form_fields
+  end
+
+  # test version info and report issue from help menu
+  def check_version_info_and_report_issue_from_help_menu
+    within '.navbar-fixed-top' do
+      find('.help-menu > a').click
+      within '.help-menu .dropdown-menu' do
+        assert page.has_link?('Tutorials and User guide'), 'No link - Tutorials and User guide'
+        assert page.has_link?('API Reference'), 'No link - API Reference'
+        assert page.has_link?('SDK Reference'), 'No link - SDK Reference'
+        assert page.has_link?('Show version / debugging info ...'), 'No link - Show version / debugging info'
+        assert page.has_link?('Report a problem ...'), 'No link - Report a problem'
+
+        # check show version info link
+        click_link 'Show version / debugging info ...'
+      end
+    end
+
+    within '.modal-content' do
+      assert page.has_text?('Version / debugging info'), 'No text - Version / debugging info'
+      assert page.has_no_text?('Report a problem'), 'Found text - Report a problem'
+      assert page.has_no_text?('Describe the problem?'), 'Found text - Describe the problem'
+      assert page.has_button?('Close'), 'No button - Close'
+      assert page.has_no_button?('Send problem report'), 'Found button - Send problem report'
+      history_links = all('a').select do |a|
+        a[:href] =~ %r!^https://arvados.org/projects/arvados/repository/changes\?rev=[0-9a-f]+$!
+      end
+      assert_operator(2, :<=, history_links.count,
+                      "Should have found two links to revision history " +
+                      "in #{history_links.inspect}")
+      click_button 'Close'
+    end
+
+    # check report issue link
+    within '.navbar-fixed-top' do
+      find('.help-menu > a').click
+      find('.help-menu .dropdown-menu a', text: 'Report a problem ...').click
+    end
+
+    within '.modal-content' do
+      assert page.has_text?('Report a problem'), 'No text - Report a problem'
+      assert page.has_no_text?('Version / debugging info'), 'Found text - Version / debugging info'
+      assert page.has_text?('Describe the problem'), 'No text - Describe the problem'
+      assert page.has_no_button?('Close'), 'Found button - Close'
+      assert page.has_text?('Send problem report'), 'Send problem report button text is not found'
+      assert page.has_no_button?('Send problem report'), 'Send problem report button is not disabled before entering problem description'
+      assert page.has_button?('Cancel'), 'No button - Cancel'
+
+      # enter a report text and click on report
+      page.find_field('report_issue_text').set 'my test report text'
+      assert page.has_button?('Send problem report'), 'Send problem report button not enabled after entering text'
+      click_button 'Send problem report'
+
+      # ajax success updated button texts and added footer message
+      assert page.has_no_text?('Send problem report'), 'Found button - Send problem report'
+      assert page.has_no_button?('Cancel'), 'Found button - Cancel'
+      assert page.has_text?('Report sent'), 'No text - Report sent'
+      assert page.has_button?('Close'), 'No button - Close'
+      assert page.has_text?('Thanks for reporting this issue'), 'No text - Thanks for reporting this issue'
+
+      click_button 'Close'
+    end
+  end
+
+  [
+    [nil, nil],
+    ['inactive', api_fixture('users')['inactive']],
+    ['inactive_uninvited', api_fixture('users')['inactive_uninvited']],
+    ['active', api_fixture('users')['active']],
+    ['admin', api_fixture('users')['admin']],
+    ['active_no_prefs', api_fixture('users')['active_no_prefs']],
+    ['active_no_prefs_profile', api_fixture('users')['active_no_prefs_profile']],
+  ].each do |token, user|
+
+    test "check version info and report issue for user #{token}" do
+      if !token
+        visit ('/')
+      else
+        visit page_with_token(token)
+      end
+
+      check_version_info_and_report_issue_from_help_menu
+    end
+
+  end
+
+end
diff --git a/apps/workbench/test/integration/search_box_test.rb b/apps/workbench/test/integration/search_box_test.rb
new file mode 100644 (file)
index 0000000..05c7f25
--- /dev/null
@@ -0,0 +1,101 @@
+require 'integration_helper'
+
+class SearchBoxTest < ActionDispatch::IntegrationTest
+  setup do
+    need_javascript
+  end
+
+  # test the search box
+  def verify_search_box user
+    if user && user['is_active']
+      # let's search for a valid uuid
+      within('.navbar-fixed-top') do
+        page.has_field?('search')
+        page.find_field('search').set user['uuid']
+        page.find('.glyphicon-search').click
+      end
+
+      # we should now be in the user's home project as a result of search
+      assert_selector "#Data_collections[data-object-uuid='#{user['uuid']}']", "Expected to be in user page after search click"
+
+      # let's search again for an invalid uuid
+      within('.navbar-fixed-top') do
+        search_for = String.new user['uuid']
+        search_for[0]='1'
+        page.find_field('search').set search_for
+        page.find('.glyphicon-search').click
+      end
+
+      # we should see 'not found' error page
+      assert page.has_text?('Not Found'), 'No text - Not Found'
+      assert page.has_link?('Report problem'), 'No text - Report problem'
+      click_link 'Report problem'
+      within '.modal-content' do
+        assert page.has_text?('Report a problem'), 'No text - Report a problem'
+        assert page.has_no_text?('Version / debugging info'), 'Found text - Version / debugging info'
+        assert page.has_text?('Describe the problem'), 'No text - Describe the problem'
+        assert page.has_text?('Send problem report'), 'Send problem report button text is not found'
+        assert page.has_no_button?('Send problem report'), 'Send problem report button is not disabled before entering problem description'
+        assert page.has_button?('Cancel'), 'No button - Cancel'
+
+        # enter a report text and click on report
+        page.find_field('report_issue_text').set 'my test report text'
+        assert page.has_button?('Send problem report'), 'Send problem report button not enabled after entering text'
+        click_button 'Send problem report'
+
+        # ajax success updated button texts and added footer message
+        assert page.has_no_text?('Send problem report'), 'Found button - Send problem report'
+        assert page.has_no_button?('Cancel'), 'Found button - Cancel'
+        assert page.has_text?('Report sent'), 'No text - Report sent'
+        assert page.has_button?('Close'), 'No button - Close'
+        assert page.has_text?('Thanks for reporting this issue'), 'No text - Thanks for reporting this issue'
+
+        click_button 'Close'
+      end
+
+      # let's search for the anonymously accessible project
+      publicly_accessible_project = api_fixture('groups')['anonymously_accessible_project']
+
+      within('.navbar-fixed-top') do
+        # search again for the anonymously accessible project
+        page.find_field('search').set publicly_accessible_project['name'][0,10]
+        page.find('.glyphicon-search').click
+      end
+
+      within '.modal-content' do
+        assert page.has_text?('All projects'), 'No text - All projects'
+        assert page.has_text?('Search'), 'No text - Search'
+        assert page.has_text?('Cancel'), 'No text - Cancel'
+        assert_selector('div', text: publicly_accessible_project['name'])
+        find(:xpath, '//div[./span[contains(.,publicly_accessible_project["uuid"])]]').click
+
+        click_button 'Show'
+      end
+
+      # seeing "Unrestricted public data" now
+      assert page.has_text?(publicly_accessible_project['name']), 'No text - publicly accessible project name'
+      assert page.has_text?(publicly_accessible_project['description']), 'No text - publicly accessible project description'
+    else
+      within('.navbar-fixed-top') do
+        page.has_no_field?('search')
+      end
+    end
+  end
+
+  [
+    [nil, nil],
+    ['inactive', api_fixture('users')['inactive']],
+    ['inactive_uninvited', api_fixture('users')['inactive_uninvited']],
+    ['active', api_fixture('users')['active']],
+    ['admin', api_fixture('users')['admin']],
+  ].each do |token, user|
+
+    test "test search box for user #{token}" do
+      visit page_with_token(token)
+
+      verify_search_box user
+    end
+
+  end
+
+end
diff --git a/apps/workbench/test/integration/smoke_test.rb b/apps/workbench/test/integration/smoke_test.rb
new file mode 100644 (file)
index 0000000..a626e24
--- /dev/null
@@ -0,0 +1,46 @@
+require 'integration_helper'
+require 'uri'
+
+class SmokeTest < ActionDispatch::IntegrationTest
+  setup do
+    need_javascript
+  end
+
+  def assert_visit_success(allowed=[200])
+    assert_includes(allowed, status_code,
+                    "#{current_url} returned #{status_code}, not one of " +
+                    allowed.inspect)
+  end
+
+  def all_links_in(find_spec, text_regexp=//)
+    all(find_spec + ' a').collect { |tag|
+      if tag[:href].nil? or tag[:href].empty? or (tag.text !~ text_regexp)
+        nil
+      elsif tag[:'data-remote']
+        # these don't necessarily work with format=html
+        nil
+      else
+        url = URI(tag[:href])
+        url.host.nil? ? url.path : nil
+      end
+    }.compact
+  end
+
+  test "all first-level links succeed" do
+    visit page_with_token('active_trustedclient', '/')
+    assert_visit_success
+    click_link 'notifications-menu'
+    urls = [all_links_in('nav'),
+            all_links_in('.navbar', /^Manage /)].flatten
+    seen_urls = ['/']
+    while not (url = urls.shift).nil?
+      next if seen_urls.include? url
+      visit url
+      seen_urls << url
+      assert_visit_success
+      # Uncommenting the line below lets you crawl the entire site for a
+      # more thorough test.
+      # urls += all_links_in('body')
+    end
+  end
+end
diff --git a/apps/workbench/test/integration/user_agreements_test.rb b/apps/workbench/test/integration/user_agreements_test.rb
new file mode 100644 (file)
index 0000000..16b3208
--- /dev/null
@@ -0,0 +1,27 @@
+require 'integration_helper'
+
+class UserAgreementsTest < ActionDispatch::IntegrationTest
+
+  setup do
+    need_javascript
+  end
+
+  def continuebutton_selector
+    'input[type=submit][disabled][value=Continue]'
+  end
+
+  test "cannot click continue without ticking checkbox" do
+    visit page_with_token('inactive')
+    assert_selector continuebutton_selector
+  end
+
+  test "continue button is enabled after ticking checkbox" do
+    visit page_with_token('inactive')
+    assert_selector continuebutton_selector
+    find('input[type=checkbox]').click
+    assert_no_selector continuebutton_selector
+    assert_nil(find_button('Continue')[:disabled],
+               'Continue button did not become enabled')
+  end
+
+end
diff --git a/apps/workbench/test/integration/user_manage_account_test.rb b/apps/workbench/test/integration/user_manage_account_test.rb
new file mode 100644 (file)
index 0000000..fae7e62
--- /dev/null
@@ -0,0 +1,100 @@
+require 'integration_helper'
+
+class UserManageAccountTest < ActionDispatch::IntegrationTest
+  setup do
+    need_javascript
+  end
+
+  # test manage_account page
+  def verify_manage_account user
+    if user['is_active']
+      within('.navbar-fixed-top') do
+        find('a', text: "#{user['email']}").click
+        within('.dropdown-menu') do
+          find('a', text: 'Manage account').click
+        end
+      end
+
+      # now in manage account page
+      assert page.has_text?('Virtual Machines'), 'No text - Virtual Machines'
+      assert page.has_text?('Repositories'), 'No text - Repositories'
+      assert page.has_text?('SSH Keys'), 'No text - SSH Keys'
+      assert page.has_text?('Current Token'), 'No text - Current Token'
+      assert page.has_text?('The Arvados API token is a secret key that enables the Arvados SDKs to access Arvados'), 'No text - Arvados API token'
+      add_and_verify_ssh_key
+    else  # inactive user
+      within('.navbar-fixed-top') do
+        find('a', text: "#{user['email']}").click
+        within('.dropdown-menu') do
+          assert page.has_no_link?('Manage profile'), 'Found link - Manage profile'
+        end
+      end
+    end
+  end
+
+  def add_and_verify_ssh_key
+      click_link 'Add new SSH key'
+
+      within '.modal-content' do
+        assert page.has_text?('Public Key'), 'No text - Public Key'
+        assert page.has_button?('Cancel'), 'No button - Cancel'
+        assert page.has_button?('Submit'), 'No button - Submit'
+
+        page.find_field('public_key').set 'first test with an incorrect ssh key value'
+        click_button 'Submit'
+        assert page.has_text?('Public key does not appear to be a valid ssh-rsa or dsa public key'), 'No text - Public key does not appear to be a valid'
+
+        public_key_str = api_fixture('authorized_keys')['active']['public_key']
+        page.find_field('public_key').set public_key_str
+        page.find_field('name').set 'added_in_test'
+        click_button 'Submit'
+        assert page.has_text?('Public key already exists in the database, use a different key.'), 'No text - Public key already exists'
+
+        new_key = SSHKey.generate
+        page.find_field('public_key').set new_key.ssh_public_key
+        page.find_field('name').set 'added_in_test'
+        click_button 'Submit'
+      end
+
+      # key must be added. look for it in the refreshed page
+      assert page.has_text?('added_in_test'), 'No text - added_in_test'
+  end
+
+  [
+    ['inactive', api_fixture('users')['inactive']],
+    ['inactive_uninvited', api_fixture('users')['inactive_uninvited']],
+    ['active', api_fixture('users')['active']],
+    ['admin', api_fixture('users')['admin']],
+  ].each do |token, user|
+    test "test manage account for user #{token}" do
+      visit page_with_token(token)
+      verify_manage_account user
+    end
+  end
+
+  [
+    ['inactive_but_signed_user_agreement', true],
+    ['active', false],
+  ].each do |user, notifications|
+    test "test manage account for #{user} with notifications #{notifications}" do
+      visit page_with_token(user)
+      click_link 'notifications-menu'
+      if notifications
+        assert_selector('a', text: 'Click here to set up an SSH public key for use with Arvados')
+        assert_selector('a', text: 'Click here to learn how to run an Arvados Crunch pipeline')
+        click_link('Click here to set up an SSH public key for use with Arvados')
+        assert_selector('a', text: 'Add new SSH key')
+
+        add_and_verify_ssh_key
+
+        # No more SSH notification
+        click_link 'notifications-menu'
+        assert_no_selector('a', text: 'Click here to set up an SSH public key for use with Arvados')
+        assert_selector('a', text: 'Click here to learn how to run an Arvados Crunch pipeline')
+      else
+        assert_no_selector('a', text: 'Click here to set up an SSH public key for use with Arvados')
+        assert_no_selector('a', text: 'Click here to learn how to run an Arvados Crunch pipeline')
+      end
+    end
+  end
+end
diff --git a/apps/workbench/test/integration/user_profile_test.rb b/apps/workbench/test/integration/user_profile_test.rb
new file mode 100644 (file)
index 0000000..cbd591a
--- /dev/null
@@ -0,0 +1,142 @@
+require 'integration_helper'
+
+class UserProfileTest < ActionDispatch::IntegrationTest
+  setup do
+    need_javascript
+    @user_profile_form_fields = Rails.configuration.user_profile_form_fields
+  end
+
+  teardown do
+    Rails.configuration.user_profile_form_fields = @user_profile_form_fields
+  end
+
+  def verify_homepage_with_profile user, invited, has_profile
+    profile_config = Rails.configuration.user_profile_form_fields
+
+    if !user
+      assert page.has_text?('Please log in'), 'Not found text - Please log in'
+    elsif user['is_active']
+      if profile_config && !has_profile
+        assert page.has_text?('Save profile'), 'No text - Save profile'
+        add_profile user
+      else
+        assert page.has_text?('Active pipelines'), 'Not found text - Active pipelines'
+        assert page.has_no_text?('Save profile'), 'Found text - Save profile'
+      end
+    elsif invited
+      assert page.has_text?('Please check the box below to indicate that you have read and accepted the user agreement'), 'Not found text - Please check the box below . . .'
+      assert page.has_no_text?('Save profile'), 'Found text - Save profile'
+    else
+      assert page.has_text?('Your account is inactive'), 'Not found text - Your account is inactive'
+      assert page.has_no_text?('Save profile'), 'Found text - Save profile'
+    end
+
+    within('.navbar-fixed-top') do
+      if !user
+        assert page.has_link?('Log in'), 'Not found link - Log in'
+      else
+        # my account menu
+        assert page.has_link?("#{user['email']}"), 'Not found link - email'
+        find('a', text: "#{user['email']}").click
+        within('.dropdown-menu') do
+          if user['is_active']
+            assert page.has_no_link?('Not active'), 'Found link - Not active'
+            assert page.has_no_link?('Sign agreements'), 'Found link - Sign agreements'
+
+            assert page.has_link?('Manage account'), 'No link - Manage account'
+
+            if profile_config
+              assert page.has_link?('Manage profile'), 'No link - Manage profile'
+            else
+              assert page.has_no_link?('Manage profile'), 'Found link - Manage profile'
+            end
+          end
+          assert page.has_link?('Log out'), 'No link - Log out'
+        end
+      end
+    end
+  end
+
+  # Check manage profile page and add missing profile to the user
+  def add_profile user
+    assert page.has_no_text?('My projects'), 'Found text - My projects'
+    assert page.has_no_text?('Projects shared with me'), 'Found text - Projects shared with me'
+
+    assert page.has_text?('Profile'), 'No text - Profile'
+    assert page.has_text?('First name'), 'No text - First name'
+    assert page.has_text?('Last name'), 'No text - Last name'
+    assert page.has_text?('Identity URL'), 'No text - Identity URL'
+    assert page.has_text?('Email'), 'No text - Email'
+    assert page.has_text?(user['email']), 'No text - user email'
+
+    # Using the default profile which has message and one required field
+
+    # Save profile without filling in the required field. Expect to be back in this profile page again
+    click_button "Save profile"
+    assert page.has_text?('Profile'), 'No text - Profile'
+    assert page.has_text?('First name'), 'No text - First name'
+    assert page.has_text?('Last name'), 'No text - Last name'
+    assert page.has_text?('Save profile'), 'No text - Save profile'
+
+    # This time fill in required field and then save. Expect to go to requested page after that.
+    profile_message = Rails.configuration.user_profile_form_message
+    required_field_title = ''
+    required_field_key = ''
+    profile_config = Rails.configuration.user_profile_form_fields
+    profile_config.andand.each do |entry|
+      if entry['required']
+        required_field_key = entry['key']
+        required_field_title = entry['form_field_title']
+      end
+    end
+
+    assert page.has_text? profile_message.gsub(/<.*?>/,'')
+    assert page.has_text?(required_field_title), 'No text - configured required field title'
+
+    page.find_field('user[prefs][:profile][:'+required_field_key+']').set 'value to fill required field'
+
+    click_button "Save profile"
+    # profile saved and in profile page now with success
+    assert page.has_text?('Thank you for filling in your profile'), 'No text - Thank you for filling'
+    click_link 'Back to work!'
+
+    # profile saved and in home page now
+    assert page.has_text?('Active pipelines'), 'No text - Active pipelines'
+  end
+
+  [
+    [nil, nil, false, false],
+    ['inactive', api_fixture('users')['inactive'], true, false],
+    ['inactive_uninvited', api_fixture('users')['inactive_uninvited'], false, false],
+    ['active', api_fixture('users')['active'], true, true],
+    ['admin', api_fixture('users')['admin'], true, true],
+    ['active_no_prefs', api_fixture('users')['active_no_prefs'], true, false],
+    ['active_no_prefs_profile', api_fixture('users')['active_no_prefs_profile'], true, false],
+  ].each do |token, user, invited, has_profile|
+
+    test "visit home page when profile is configured for user #{token}" do
+      # Our test config enabled profile by default. So, no need to update config
+      if !token
+        visit ('/')
+      else
+        visit page_with_token(token)
+      end
+
+      verify_homepage_with_profile user, invited, has_profile
+    end
+
+    test "visit home page when profile not configured for user #{token}" do
+      Rails.configuration.user_profile_form_fields = false
+
+      if !token
+        visit ('/')
+      else
+        visit page_with_token(token)
+      end
+
+      verify_homepage_with_profile user, invited, has_profile
+    end
+
+  end
+
+end
diff --git a/apps/workbench/test/integration/users_test.rb b/apps/workbench/test/integration/users_test.rb
new file mode 100644 (file)
index 0000000..4a45a6a
--- /dev/null
@@ -0,0 +1,207 @@
+require 'integration_helper'
+
+class UsersTest < ActionDispatch::IntegrationTest
+
+  test "login as active user but not admin" do
+    need_javascript
+    visit page_with_token('active_trustedclient')
+
+    assert page.has_no_link?('Users'), 'Found Users link for non-admin user'
+  end
+
+  test "login as admin user and verify active user data" do
+    need_javascript
+    visit page_with_token('admin_trustedclient')
+
+    # go to Users list page
+    find('#system-menu').click
+    click_link 'Users'
+
+    # check active user attributes in the list page
+    page.within(:xpath, '//tr[@data-object-uuid="zzzzz-tpzed-xurymjxw79nv3jz"]') do
+      assert (text.include? 'true false'), 'Expected is_active'
+    end
+
+    find('tr', text: 'zzzzz-tpzed-xurymjxw79nv3jz').
+      find('a', text: 'Show').
+      click
+    assert page.has_text? 'Attributes'
+    assert page.has_text? 'Advanced'
+    assert page.has_text? 'Admin'
+
+    # go to the Attributes tab
+    click_link 'Attributes'
+    assert page.has_text? 'modified_by_user_uuid'
+    page.within(:xpath, '//span[@data-name="is_active"]') do
+      assert_equal "true", text, "Expected user's is_active to be true"
+    end
+    page.within(:xpath, '//span[@data-name="is_admin"]') do
+      assert_equal "false", text, "Expected user's is_admin to be false"
+    end
+
+  end
+
+  test "create a new user" do
+    need_javascript
+
+    visit page_with_token('admin_trustedclient')
+
+    find('#system-menu').click
+    click_link 'Users'
+
+    assert page.has_text? 'zzzzz-tpzed-d9tiejq69daie8f'
+
+    click_link 'Add a new user'
+
+    within '.modal-content' do
+      find 'label', text: 'Virtual Machine'
+      fill_in "email", :with => "foo@example.com"
+      fill_in "repo_name", :with => "test_repo"
+      click_button "Submit"
+      wait_for_ajax
+    end
+
+    visit '/users'
+
+    # verify that the new user showed up in the users page and find
+    # the new user's UUID
+    new_user_uuid =
+      find('tr[data-object-uuid]', text: 'foo@example.com')['data-object-uuid']
+    assert new_user_uuid, "Expected new user uuid not found"
+
+    # go to the new user's page
+    find('tr', text: new_user_uuid).
+      find('a', text: 'Show').
+      click
+
+    assert page.has_text? 'modified_by_user_uuid'
+    page.within(:xpath, '//span[@data-name="is_active"]') do
+      assert_equal "false", text, "Expected new user's is_active to be false"
+    end
+
+    click_link 'Advanced'
+    click_link 'Metadata'
+    assert page.has_text? 'Repository: test_repo'
+    assert !(page.has_text? 'VirtualMachine:')
+  end
+
+  test "setup the active user" do
+    need_javascript
+    visit page_with_token('admin_trustedclient')
+
+    find('#system-menu').click
+    click_link 'Users'
+
+    # click on active user
+    find('tr', text: 'zzzzz-tpzed-xurymjxw79nv3jz').
+      find('a', text: 'Show').
+      click
+    user_url = page.current_url
+
+    # Setup user
+    click_link 'Admin'
+    assert page.has_text? 'As an admin, you can setup'
+
+    click_link 'Setup Active User'
+
+    within '.modal-content' do
+      find 'label', text: 'Virtual Machine'
+      fill_in "repo_name", :with => "test_repo"
+      click_button "Submit"
+    end
+
+    visit user_url
+    assert page.has_text? 'modified_by_client_uuid'
+
+    click_link 'Advanced'
+    click_link 'Metadata'
+    assert page.has_text? 'Repository: test_repo'
+    assert !(page.has_text? 'VirtualMachine:')
+
+    # Click on Setup button again and this time also choose a VM
+    click_link 'Admin'
+    click_link 'Setup Active User'
+
+    within '.modal-content' do
+      fill_in "repo_name", :with => "second_test_repo"
+      select("testvm.shell", :from => 'vm_uuid')
+      click_button "Submit"
+    end
+
+    visit user_url
+    find '#Attributes', text: 'modified_by_client_uuid'
+
+    click_link 'Advanced'
+    click_link 'Metadata'
+    assert page.has_text? 'Repository: second_test_repo'
+    assert page.has_text? 'VirtualMachine: testvm.shell'
+  end
+
+  test "unsetup active user" do
+    need_javascript
+
+    visit page_with_token('admin_trustedclient')
+
+    find('#system-menu').click
+    click_link 'Users'
+
+    # click on active user
+    find('tr', text: 'zzzzz-tpzed-xurymjxw79nv3jz').
+      find('a', text: 'Show').
+      click
+    user_url = page.current_url
+
+    # Verify that is_active is set
+    find('a,button', text: 'Attributes').click
+    assert page.has_text? 'modified_by_user_uuid'
+    page.within(:xpath, '//span[@data-name="is_active"]') do
+      assert_equal "true", text, "Expected user's is_active to be true"
+    end
+
+    # go to Admin tab
+    click_link 'Admin'
+    assert page.has_text? 'As an admin, you can deactivate and reset this user'
+
+    # unsetup user and verify all the above links are deleted
+    click_link 'Admin'
+    click_button 'Deactivate Active User'
+
+    if Capybara.current_driver == :selenium
+      sleep(0.1)
+      page.driver.browser.switch_to.alert.accept
+    else
+      # poltergeist returns true for confirm(), so we don't need to accept.
+    end
+
+    # Should now be back in the Attributes tab for the user
+    assert page.has_text? 'modified_by_user_uuid'
+    page.within(:xpath, '//span[@data-name="is_active"]') do
+      assert_equal "false", text, "Expected user's is_active to be false after unsetup"
+    end
+
+    click_link 'Advanced'
+    click_link 'Metadata'
+    assert !(page.has_text? 'Repository: test_repo')
+    assert !(page.has_text? 'Repository: second_test_repo')
+    assert !(page.has_text? 'VirtualMachine: testvm.shell')
+
+    # setup user again and verify links present
+    click_link 'Admin'
+    click_link 'Setup Active User'
+
+    within '.modal-content' do
+      fill_in "repo_name", :with => "second_test_repo"
+      select("testvm.shell", :from => 'vm_uuid')
+      click_button "Submit"
+    end
+
+    visit user_url
+    assert page.has_text? 'modified_by_client_uuid'
+
+    click_link 'Advanced'
+    click_link 'Metadata'
+    assert page.has_text? 'Repository: second_test_repo'
+    assert page.has_text? 'VirtualMachine: testvm.shell'
+  end
+
+end
diff --git a/apps/workbench/test/integration/virtual_machines_test.rb b/apps/workbench/test/integration/virtual_machines_test.rb
new file mode 100644 (file)
index 0000000..1d398a5
--- /dev/null
@@ -0,0 +1,18 @@
+require 'integration_helper'
+
+class VirtualMachinesTest < ActionDispatch::IntegrationTest
+  test "make and name a new virtual machine" do
+    need_javascript
+    visit page_with_token('admin_trustedclient')
+    find('#system-menu').click
+    click_link 'Virtual machines'
+    assert page.has_text? 'testvm.shell'
+    click_on 'Add a new virtual machine'
+    find('tr', text: 'hostname').
+      find('a[data-original-title=edit]').click
+    assert page.has_text? 'Edit hostname'
+    fill_in 'editable-text', with: 'testname'
+    click_button 'editable-submit'
+    assert page.has_text? 'testname'
+  end
+end
diff --git a/apps/workbench/test/integration/websockets_test.rb b/apps/workbench/test/integration/websockets_test.rb
new file mode 100644 (file)
index 0000000..efc2539
--- /dev/null
@@ -0,0 +1,196 @@
+require 'integration_helper'
+
+class WebsocketTest < ActionDispatch::IntegrationTest
+  setup do
+    need_selenium "to make websockets work"
+  end
+
+  test "test page" do
+    visit(page_with_token("admin", "/websockets"))
+    fill_in("websocket-message-content", :with => "Stuff")
+    click_button("Send")
+    assert_text '"status":400'
+  end
+
+  test "test live logging" do
+    visit(page_with_token("admin", "/pipeline_instances/zzzzz-d1hrv-9fm8l10i9z2kqc6"))
+    click_link("Log")
+    assert_no_text '123 hello'
+
+    api = ArvadosApiClient.new
+
+    Thread.current[:arvados_api_token] = @@API_AUTHS["admin"]['api_token']
+    api.api("logs", "", {log: {
+                object_uuid: "zzzzz-d1hrv-9fm8l10i9z2kqc6",
+                event_type: "stderr",
+                properties: {"text" => "123 hello"}}})
+    assert_text '123 hello'
+    Thread.current[:arvados_api_token] = nil
+  end
+
+
+  [["pipeline_instances", api_fixture("pipeline_instances")['pipeline_with_newer_template']['uuid']],
+   ["jobs", api_fixture("jobs")['running']['uuid']]].each do |c|
+    test "test live logging scrolling #{c[0]}" do
+
+      controller = c[0]
+      uuid = c[1]
+
+      visit(page_with_token("admin", "/#{controller}/#{uuid}"))
+      click_link("Log")
+      assert_no_text '123 hello'
+
+      api = ArvadosApiClient.new
+
+      text = ""
+      (1..1000).each do |i|
+        text << "#{i} hello\n"
+      end
+
+      Thread.current[:arvados_api_token] = @@API_AUTHS["admin"]['api_token']
+      api.api("logs", "", {log: {
+                  object_uuid: uuid,
+                  event_type: "stderr",
+                  properties: {"text" => text}}})
+      assert_text '1000 hello'
+
+      # First test that when we're already at the bottom of the page, it scrolls down
+      # when a new line is added.
+      old_top = page.evaluate_script("$('#event_log_div').scrollTop()")
+
+      api.api("logs", "", {log: {
+                  object_uuid: uuid,
+                  event_type: "stderr",
+                  properties: {"text" => "1001 hello\n"}}})
+      assert_text '1001 hello'
+
+      # Check that new value of scrollTop is greater than the old one
+      assert page.evaluate_script("$('#event_log_div').scrollTop()") > old_top
+
+      # Now scroll to 30 pixels from the top
+      page.execute_script "$('#event_log_div').scrollTop(30)"
+      assert_equal 30, page.evaluate_script("$('#event_log_div').scrollTop()")
+
+      api.api("logs", "", {log: {
+                  object_uuid: uuid,
+                  event_type: "stderr",
+                  properties: {"text" => "1002 hello\n"}}})
+      assert_text '1002 hello'
+
+      # Check that we haven't changed scroll position
+      assert_equal 30, page.evaluate_script("$('#event_log_div').scrollTop()")
+
+      Thread.current[:arvados_api_token] = nil
+    end
+  end
+
+  test "pipeline instance arv-refresh-on-log-event" do
+    Thread.current[:arvados_api_token] = @@API_AUTHS["admin"]['api_token']
+    # Do something and check that the pane reloads.
+    p = PipelineInstance.create({state: "RunningOnServer",
+                                  components: {
+                                    c1: {
+                                      script: "test_hash.py",
+                                      script_version: "1de84a854e2b440dc53bf42f8548afa4c17da332"
+                                    }
+                                  }
+                                })
+
+    visit(page_with_token("admin", "/pipeline_instances/#{p.uuid}"))
+
+    assert_text 'Active'
+    assert page.has_link? 'Pause'
+    assert_no_text 'Complete'
+    assert page.has_no_link? 'Re-run with latest'
+
+    p.state = "Complete"
+    p.save!
+
+    assert_no_text 'Active'
+    assert page.has_no_link? 'Pause'
+    assert_text 'Complete'
+    assert page.has_link? 'Re-run with latest'
+
+    Thread.current[:arvados_api_token] = nil
+  end
+
+  test "job arv-refresh-on-log-event" do
+    Thread.current[:arvados_api_token] = @@API_AUTHS["admin"]['api_token']
+    # Do something and check that the pane reloads.
+    p = Job.where(uuid: api_fixture('jobs')['running_will_be_completed']['uuid']).results.first
+
+    visit(page_with_token("admin", "/jobs/#{p.uuid}"))
+
+    assert_no_text 'complete'
+    assert_no_text 'Re-run same version'
+
+    p.state = "Complete"
+    p.save!
+
+    assert_text 'complete'
+    assert_text 'Re-run same version'
+
+    Thread.current[:arvados_api_token] = nil
+  end
+
+  test "dashboard arv-refresh-on-log-event" do
+    Thread.current[:arvados_api_token] = @@API_AUTHS["admin"]['api_token']
+
+    visit(page_with_token("admin", "/"))
+
+    assert_no_text 'test dashboard arv-refresh-on-log-event'
+
+    # Do something and check that the pane reloads.
+    p = PipelineInstance.create({state: "RunningOnServer",
+                                  name: "test dashboard arv-refresh-on-log-event",
+                                  components: {
+                                  }
+                                })
+
+    assert_text 'test dashboard arv-refresh-on-log-event'
+
+    Thread.current[:arvados_api_token] = nil
+  end
+
+  test "live log charting" do
+    uuid = api_fixture("jobs")['running']['uuid']
+
+    visit page_with_token "admin", "/jobs/#{uuid}"
+    click_link "Log"
+
+    api = ArvadosApiClient.new
+
+    # should give 45.3% or (((35.39+0.86)/10.0002)/8)*100 rounded to 1 decimal place
+    text = "2014-11-07_23:33:51 #{uuid} 31708 1 stderr crunchstat: cpu 1970.8200 user 60.2700 sys 8 cpus -- interval 10.0002 seconds 35.3900 user 0.8600 sys"
+
+    Thread.current[:arvados_api_token] = @@API_AUTHS["admin"]['api_token']
+    api.api("logs", "", {log: {
+                object_uuid: uuid,
+                event_type: "stderr",
+                properties: {"text" => text}}})
+    wait_for_ajax
+
+    # using datapoint 1 instead of datapoint 0 because there will be a "dummy" datapoint with no actual stats 10 minutes previous to the one we're looking for, for the sake of making the x-axis of the graph show a full 10 minutes of time even though there is only a single real datapoint
+    cpu_stat = page.evaluate_script("jobGraphData[1]['T1-cpu']")
+
+    assert_equal 45.3, (cpu_stat.to_f*100).round(1)
+
+    Thread.current[:arvados_api_token] = nil
+  end
+
+  test "live log charting from replayed log" do
+    uuid = api_fixture("jobs")['running']['uuid']
+
+    visit page_with_token "admin", "/jobs/#{uuid}"
+    click_link "Log"
+
+    ApiServerForTests.new.run_rake_task("replay_job_log", "test/job_logs/crunchstatshort.log,1.0,#{uuid}")
+    wait_for_ajax
+
+    # see above comment as to why we use datapoint 1 rather than 0
+    cpu_stat = page.evaluate_script("jobGraphData[1]['T1-cpu']")
+
+    assert_equal 45.3, (cpu_stat.to_f*100).round(1)
+  end
+
+end
diff --git a/apps/workbench/test/integration_helper.rb b/apps/workbench/test/integration_helper.rb
new file mode 100644 (file)
index 0000000..cb07725
--- /dev/null
@@ -0,0 +1,142 @@
+require 'test_helper'
+require 'capybara/rails'
+require 'capybara/poltergeist'
+require 'uri'
+require 'yaml'
+
+Capybara.register_driver :poltergeist do |app|
+  Capybara::Poltergeist::Driver.new app, {
+    window_size: [1200, 800],
+    phantomjs_options: ['--ignore-ssl-errors=true'],
+    inspector: true,
+  }
+end
+
+module WaitForAjax
+  Capybara.default_wait_time = 5
+  def wait_for_ajax
+    Timeout.timeout(Capybara.default_wait_time) do
+      loop until finished_all_ajax_requests?
+    end
+  end
+
+  def finished_all_ajax_requests?
+    page.evaluate_script('jQuery.active').zero?
+  end
+end
+
+module AssertDomEvent
+  # Yield the supplied block, then wait for an event to arrive at a
+  # DOM element.
+  def assert_triggers_dom_event events, target='body'
+    magic = 'received-dom-event-' + rand(2**30).to_s(36)
+    page.evaluate_script <<eos
+      $('#{target}').one('#{events}', function() {
+        $('body').addClass('#{magic}');
+      });
+eos
+    yield
+    assert_selector "body.#{magic}"
+    page.evaluate_script "$('body').removeClass('#{magic}');";
+  end
+end
+
+module HeadlessHelper
+  class HeadlessSingleton
+    def self.get
+      @headless ||= Headless.new reuse: false
+    end
+  end
+
+  Capybara.default_driver = :rack_test
+
+  def self.included base
+    base.class_eval do
+      setup do
+        Capybara.use_default_driver
+        @headless = false
+      end
+
+      teardown do
+        if @headless
+          @headless.stop
+          @headless = false
+        end
+      end
+    end
+  end
+
+  def need_selenium reason=nil
+    Capybara.current_driver = :selenium
+    unless ENV['ARVADOS_TEST_HEADFUL'] or @headless
+      @headless = HeadlessSingleton.get
+      @headless.start
+    end
+  end
+
+  def need_javascript reason=nil
+    unless Capybara.current_driver == :selenium
+      Capybara.current_driver = :poltergeist
+    end
+  end
+end
+
+class ActionDispatch::IntegrationTest
+  # Make the Capybara DSL available in all integration tests
+  include Capybara::DSL
+  include ApiFixtureLoader
+  include WaitForAjax
+  include AssertDomEvent
+  include HeadlessHelper
+
+  @@API_AUTHS = self.api_fixture('api_client_authorizations')
+
+  def page_with_token(token, path='/')
+    # Generate a page path with an embedded API token.
+    # Typical usage: visit page_with_token('token_name', page)
+    # The token can be specified by the name of an api_client_authorizations
+    # fixture, or passed as a raw string.
+    api_token = ((@@API_AUTHS.include? token) ?
+                 @@API_AUTHS[token]['api_token'] : token)
+    path_parts = path.partition("#")
+    sep = (path_parts.first.include? '?') ? '&' : '?'
+    q_string = URI.encode_www_form('api_token' => api_token)
+    path_parts.insert(1, "#{sep}#{q_string}")
+    path_parts.join("")
+  end
+
+  # Find a page element, but return false instead of raising an
+  # exception if not found. Use this with assertions to explain that
+  # the error signifies a failed test rather than an unexpected error
+  # during a testing procedure.
+  def find? *args
+    begin
+      find *args
+    rescue Capybara::ElementNotFound
+      false
+    end
+  end
+
+  @@screenshot_count = 1
+  def screenshot
+    image_file = "./tmp/workbench-fail-#{@@screenshot_count}.png"
+    begin
+      page.save_screenshot image_file
+    rescue Capybara::NotSupportedByDriverError
+      # C'est la vie.
+    else
+      puts "Saved #{image_file}"
+      @@screenshot_count += 1
+    end
+  end
+
+  teardown do
+    if not passed?
+      screenshot
+    end
+    if Capybara.current_driver == :selenium
+      page.execute_script("window.localStorage.clear()")
+    end
+    Capybara.reset_sessions!
+  end
+end
diff --git a/apps/workbench/test/performance/browsing_test.rb b/apps/workbench/test/performance/browsing_test.rb
new file mode 100644 (file)
index 0000000..f15e3ea
--- /dev/null
@@ -0,0 +1,46 @@
+# http://guides.rubyonrails.org/v3.2.13/performance_testing.html
+
+require 'test_helper'
+require 'rails/performance_test_help'
+require 'performance_test_helper'
+require 'selenium-webdriver'
+require 'headless'
+
+class BrowsingTest < WorkbenchPerformanceTest
+  self.profile_options = { :runs => 5,
+                           :metrics => [:wall_time],
+                           :output => 'tmp/performance',
+                           :formats => [:flat] }
+
+  setup do
+    need_javascript
+  end
+
+  test "home page" do
+    visit_page_with_token
+    wait_for_ajax
+    assert_text 'Dashboard'
+    assert_selector 'a', text: 'Run a pipeline'
+  end
+
+  test "search for hash" do
+    visit_page_with_token
+    wait_for_ajax
+    assert_text 'Dashboard'
+
+    within('.navbar-fixed-top') do
+      page.find_field('search').set('hash')
+      wait_for_ajax
+      page.find('.glyphicon-search').click
+    end
+
+    # In the search dialog now. Expect at least one item in the result display.
+    within '.modal-content' do
+      wait_for_ajax
+      assert_text 'All projects'
+      assert_text 'Search'
+      assert(page.has_selector?(".selectable[data-object-uuid]"))
+      click_button 'Cancel'
+    end
+  end
+end
diff --git a/apps/workbench/test/performance_test_helper.rb b/apps/workbench/test/performance_test_helper.rb
new file mode 100644 (file)
index 0000000..7d335d8
--- /dev/null
@@ -0,0 +1,32 @@
+require 'integration_helper'
+
+# Performance test can run in two different ways:
+#
+# 1. Similar to other integration tests using the command:
+#     RAILS_ENV=test bundle exec rake test:benchmark
+#
+# 2. Against a configured workbench url using "RAILS_ENV=performance".
+#     RAILS_ENV=performance bundle exec rake test:benchmark
+
+class WorkbenchPerformanceTest < ActionDispatch::PerformanceTest
+
+  # When running in "RAILS_ENV=performance" mode, uses performance
+  # config params.  In this mode, prepends workbench URL to the given
+  # path provided, and visits that page using the configured
+  # "user_token".
+  def visit_page_with_token path='/'
+    if Rails.env == 'performance'
+      token = Rails.configuration.user_token
+      workbench_url = Rails.configuration.arvados_workbench_url
+      if workbench_url.end_with? '/'
+        workbench_url = workbench_url[0, workbench_url.size-1]
+      end
+    else
+      token = 'active'
+      workbench_url = ''
+    end
+
+    visit page_with_token(token, (workbench_url + path))
+  end
+
+end
diff --git a/apps/workbench/test/test_helper.rb b/apps/workbench/test/test_helper.rb
new file mode 100644 (file)
index 0000000..2b480f9
--- /dev/null
@@ -0,0 +1,313 @@
+ENV["RAILS_ENV"] = "test" if (ENV["RAILS_ENV"] != "diagnostics" and ENV["RAILS_ENV"] != "performance")
+
+unless ENV["NO_COVERAGE_TEST"]
+  begin
+    require 'simplecov'
+    require 'simplecov-rcov'
+    class SimpleCov::Formatter::MergedFormatter
+      def format(result)
+        SimpleCov::Formatter::HTMLFormatter.new.format(result)
+        SimpleCov::Formatter::RcovFormatter.new.format(result)
+      end
+    end
+    SimpleCov.formatter = SimpleCov::Formatter::MergedFormatter
+    SimpleCov.start do
+      add_filter '/test/'
+      add_filter 'initializers/secret_token'
+    end
+  rescue Exception => e
+    $stderr.puts "SimpleCov unavailable (#{e}). Proceeding without."
+  end
+end
+
+require File.expand_path('../../config/environment', __FILE__)
+require 'rails/test_help'
+require 'mocha/mini_test'
+
+class ActiveSupport::TestCase
+  # Setup all fixtures in test/fixtures/*.(yml|csv) for all tests in
+  # alphabetical order.
+  #
+  # Note: You'll currently still have to declare fixtures explicitly
+  # in integration tests -- they do not yet inherit this setting
+  fixtures :all
+  def use_token token_name
+    auth = api_fixture('api_client_authorizations')[token_name.to_s]
+    Thread.current[:arvados_api_token] = auth['api_token']
+  end
+
+  teardown do
+    Thread.current[:arvados_api_token] = nil
+    Thread.current[:user] = nil
+    Thread.current[:reader_tokens] = nil
+    # Diagnostics suite doesn't run a server, so there's no cache to clear.
+    Rails.cache.clear unless (Rails.env == "diagnostics")
+    # Restore configuration settings changed during tests
+    $application_config.each do |k,v|
+      if k.match /^[^.]*$/
+        Rails.configuration.send (k + '='), v
+      end
+    end
+  end
+end
+
+module ApiFixtureLoader
+  def self.included(base)
+    base.extend(ClassMethods)
+  end
+
+  module ClassMethods
+    @@api_fixtures = {}
+    def api_fixture(name, *keys)
+      # Returns the data structure from the named API server test fixture.
+      @@api_fixtures[name] ||= \
+      begin
+        path = File.join(ApiServerForTests::ARV_API_SERVER_DIR,
+                         'test', 'fixtures', "#{name}.yml")
+        file = IO.read(path)
+        trim_index = file.index('# Test Helper trims the rest of the file')
+        file = file[0, trim_index] if trim_index
+        YAML.load(file)
+      end
+      keys.inject(@@api_fixtures[name]) { |hash, key| hash[key] }
+    end
+  end
+  def api_fixture(name, *keys)
+    self.class.api_fixture(name, *keys)
+  end
+
+  def find_fixture(object_class, name)
+    object_class.find(api_fixture(object_class.to_s.pluralize.underscore,
+                                  name, "uuid"))
+  end
+end
+
+class ActiveSupport::TestCase
+  include ApiFixtureLoader
+  def session_for api_client_auth_name
+    {
+      arvados_api_token: api_fixture('api_client_authorizations')[api_client_auth_name.to_s]['api_token']
+    }
+  end
+  def json_response
+    Oj.load(@response.body)
+  end
+end
+
+class ApiServerForTests
+  ARV_API_SERVER_DIR = File.expand_path('../../../../services/api', __FILE__)
+  SERVER_PID_PATH = File.expand_path('tmp/pids/wbtest-server.pid', ARV_API_SERVER_DIR)
+  WEBSOCKET_PID_PATH = File.expand_path('tmp/pids/wstest-server.pid', ARV_API_SERVER_DIR)
+  @main_process_pid = $$
+
+  def _system(*cmd)
+    $stderr.puts "_system #{cmd.inspect}"
+    Bundler.with_clean_env do
+      if not system({'RAILS_ENV' => 'test', "ARVADOS_WEBSOCKETS" => (if @websocket then "ws-only" end)}, *cmd)
+        raise RuntimeError, "#{cmd[0]} returned exit code #{$?.exitstatus}"
+      end
+    end
+  end
+
+  def make_ssl_cert
+    unless File.exists? './self-signed.key'
+      _system('openssl', 'req', '-new', '-x509', '-nodes',
+              '-out', './self-signed.pem',
+              '-keyout', './self-signed.key',
+              '-days', '3650',
+              '-subj', '/CN=localhost')
+    end
+  end
+
+  def kill_server
+    if (pid = find_server_pid)
+      $stderr.puts "Sending TERM to API server, pid #{pid}"
+      Process.kill 'TERM', pid
+    end
+  end
+
+  def find_server_pid
+    pid = nil
+    begin
+      pid = IO.read(@pidfile).to_i
+      $stderr.puts "API server is running, pid #{pid.inspect}"
+    rescue Errno::ENOENT
+    end
+    return pid
+  end
+
+  def run(args=[])
+    ::MiniTest.after_run do
+      self.kill_server
+    end
+
+    @websocket = args.include?("--websockets")
+
+    @pidfile = if @websocket
+                 WEBSOCKET_PID_PATH
+               else
+                 SERVER_PID_PATH
+               end
+
+    # Kill server left over from previous test run
+    self.kill_server
+
+    Capybara.javascript_driver = :poltergeist
+    Dir.chdir(ARV_API_SERVER_DIR) do |apidir|
+      ENV["NO_COVERAGE_TEST"] = "1"
+      if @websocket
+        _system('bundle', 'exec', 'passenger', 'start', '-d', '-p3333',
+                '--pid-file', @pidfile)
+      else
+        make_ssl_cert
+        _system('bundle', 'exec', 'rake', 'db:test:load')
+        _system('bundle', 'exec', 'rake', 'db:fixtures:load')
+        _system('bundle', 'exec', 'passenger', 'start', '-d', '-p3000',
+                '--pid-file', @pidfile,
+                '--ssl',
+                '--ssl-certificate', 'self-signed.pem',
+                '--ssl-certificate-key', 'self-signed.key')
+      end
+      timeout = Time.now.tv_sec + 10
+      good_pid = false
+      while (not good_pid) and (Time.now.tv_sec < timeout)
+        sleep 0.2
+        server_pid = find_server_pid
+        good_pid = (server_pid and
+                    (server_pid > 0) and
+                    (Process.kill(0, server_pid) rescue false))
+      end
+      if not good_pid
+        raise RuntimeError, "could not find API server Rails pid"
+      end
+    end
+  end
+
+  def run_rake_task(task_name, arg_string)
+    Dir.chdir(ARV_API_SERVER_DIR) do
+      _system('bundle', 'exec', 'rake', "#{task_name}[#{arg_string}]")
+    end
+  end
+end
+
+class ActionController::TestCase
+  setup do
+    @counter = 0
+  end
+
+  def check_counter action
+    @counter += 1
+    if @counter == 2
+      assert_equal 1, 2, "Multiple actions in controller test"
+    end
+  end
+
+  [:get, :post, :put, :patch, :delete].each do |method|
+    define_method method do |action, *args|
+      check_counter action
+      super action, *args
+    end
+  end
+end
+
+# Test classes can call reset_api_fixtures(when_to_reset,flag) to
+# override the default. Example:
+#
+# class MySuite < ActionDispatch::IntegrationTest
+#   reset_api_fixtures :after_each_test, false
+#   reset_api_fixtures :after_suite, true
+#   ...
+# end
+#
+# The default behavior is reset_api_fixtures(:after_each_test,true).
+#
+class ActiveSupport::TestCase
+
+  def self.inherited subclass
+    subclass.class_eval do
+      class << self
+        attr_accessor :want_reset_api_fixtures
+      end
+      @want_reset_api_fixtures = {
+        after_each_test: true,
+        after_suite: false,
+        before_suite: false,
+      }
+    end
+    super
+  end
+  # Existing subclasses of ActiveSupport::TestCase (ones that already
+  # existed before we set up the self.inherited hook above) will not
+  # get their own instance variable. They're not real test cases
+  # anyway, so we give them a "don't reset anywhere" stub.
+  def self.want_reset_api_fixtures
+    {}
+  end
+
+  def self.reset_api_fixtures where, t=true
+    if not want_reset_api_fixtures.has_key? where
+      raise ArgumentError, "There is no #{where.inspect} hook"
+    end
+    self.want_reset_api_fixtures[where] = t
+  end
+
+  def self.run *args
+    reset_api_fixtures_now if want_reset_api_fixtures[:before_suite]
+    result = super
+    reset_api_fixtures_now if want_reset_api_fixtures[:after_suite]
+    result
+  end
+
+  def after_teardown
+    if self.class.want_reset_api_fixtures[:after_each_test]
+      self.class.reset_api_fixtures_now
+    end
+    super
+  end
+
+  protected
+  def self.reset_api_fixtures_now
+    # Never try to reset fixtures when we're just using test
+    # infrastructure to run performance/diagnostics suites.
+    return unless Rails.env == 'test'
+
+    auth = api_fixture('api_client_authorizations')['admin_trustedclient']
+    Thread.current[:arvados_api_token] = auth['api_token']
+    ArvadosApiClient.new.api(nil, '../../database/reset', {})
+    Thread.current[:arvados_api_token] = nil
+  end
+end
+
+# If it quacks like a duck, it must be a HTTP request object.
+class RequestDuck
+  def self.host
+    "localhost"
+  end
+
+  def self.port
+    8080
+  end
+
+  def self.protocol
+    "http"
+  end
+end
+
+# Example:
+#
+# apps/workbench$ RAILS_ENV=test bundle exec irb -Ilib:test
+# > load 'test/test_helper.rb'
+# > singletest 'integration/collection_upload_test.rb', 'Upload two empty files'
+#
+def singletest test_class_file, test_name
+  load File.join('test', test_class_file)
+  Minitest.run ['-v', '-n', "test_#{test_name.gsub ' ', '_'}"]
+  Object.send(:remove_const,
+              test_class_file.gsub(/.*\/|\.rb$/, '').camelize.to_sym)
+  ::Minitest::Runnable.runnables.reject! { true }
+end
+
+if ENV["RAILS_ENV"].eql? 'test'
+  ApiServerForTests.new.run
+  ApiServerForTests.new.run ["--websockets"]
+end
diff --git a/apps/workbench/test/unit/.gitkeep b/apps/workbench/test/unit/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/workbench/test/unit/arvados_resource_list_test.rb b/apps/workbench/test/unit/arvados_resource_list_test.rb
new file mode 100644 (file)
index 0000000..a3bfbc1
--- /dev/null
@@ -0,0 +1,94 @@
+require 'test_helper'
+
+class ResourceListTest < ActiveSupport::TestCase
+
+  test 'links_for on a resource list that does not return links' do
+    use_token :active
+    results = Specimen.all
+    assert_equal [], results.links_for(api_fixture('users')['active']['uuid'])
+  end
+
+  test 'get all items by default' do
+    use_token :admin
+    a = 0
+    Collection.where(owner_uuid: 'zzzzz-j7d0g-0201collections').each do
+      a += 1
+    end
+    assert_equal 201, a
+  end
+
+  test 'prefetch all items' do
+    use_token :admin
+    a = 0
+    Collection.where(owner_uuid: 'zzzzz-j7d0g-0201collections').each do
+      a += 1
+    end
+    assert_equal 201, a
+  end
+
+  test 'get limited items' do
+    use_token :admin
+    a = 0
+    Collection.where(owner_uuid: 'zzzzz-j7d0g-0201collections').limit(51).each do
+      a += 1
+    end
+    assert_equal 51, a
+  end
+
+  test 'get limited items, limit % page_size != 0' do
+    skip "Requires server MAX_LIMIT < 200 which is not currently the default"
+
+    use_token :admin
+    max_page_size = Collection.
+      where(owner_uuid: 'zzzzz-j7d0g-0201collections').
+      limit(1000000000).
+      fetch_multiple_pages(false).
+      count
+    # Conditions necessary for this test to be valid:
+    assert_operator 200, :>, max_page_size
+    assert_operator 1, :<, max_page_size
+    # Verify that the server really sends max_page_size when asked for max_page_size+1
+    assert_equal max_page_size, Collection.
+      where(owner_uuid: 'zzzzz-j7d0g-0201collections').
+      limit(max_page_size+1).
+      fetch_multiple_pages(false).
+      results.
+      count
+    # Now that we know the max_page_size+1 is in the middle of page 2,
+    # make sure #each returns page 1 and only the requested part of
+    # page 2.
+    a = 0
+    saw_uuid = {}
+    Collection.where(owner_uuid: 'zzzzz-j7d0g-0201collections').limit(max_page_size+1).each do |item|
+      a += 1
+      saw_uuid[item.uuid] = true
+    end
+    assert_equal max_page_size+1, a
+    # Ensure no overlap between pages
+    assert_equal max_page_size+1, saw_uuid.size
+  end
+
+  test 'get single page of items' do
+    use_token :admin
+    a = 0
+    c = Collection.where(owner_uuid: 'zzzzz-j7d0g-0201collections').fetch_multiple_pages(false)
+    c.each do
+      a += 1
+    end
+
+    assert_operator a, :<, 201
+    assert_equal c.result_limit, a
+  end
+
+  test 'get empty set' do
+    use_token :admin
+    c = Collection.
+      where(owner_uuid: 'doesn-texis-tdoesntexistdoe').
+      fetch_multiple_pages(false)
+    # Important: check c.result_offset before calling c.results here.
+    assert_equal 0, c.result_offset
+    assert_equal 0, c.items_available
+    assert_empty c.results
+  end
+
+end
diff --git a/apps/workbench/test/unit/collection_test.rb b/apps/workbench/test/unit/collection_test.rb
new file mode 100644 (file)
index 0000000..e71f966
--- /dev/null
@@ -0,0 +1,74 @@
+require 'test_helper'
+
+class CollectionTest < ActiveSupport::TestCase
+  test 'recognize empty blob locator' do
+    ['d41d8cd98f00b204e9800998ecf8427e+0',
+     'd41d8cd98f00b204e9800998ecf8427e',
+     'd41d8cd98f00b204e9800998ecf8427e+0+Xyzzy'].each do |x|
+      assert_equal true, Collection.is_empty_blob_locator?(x)
+    end
+    ['d41d8cd98f00b204e9800998ecf8427e0',
+     'acbd18db4cc2f85cedef654fccc4a4d8+3',
+     'acbd18db4cc2f85cedef654fccc4a4d8+0'].each do |x|
+      assert_equal false, Collection.is_empty_blob_locator?(x)
+    end
+  end
+
+  def get_files_tree(coll_name)
+    use_token :admin
+    Collection.find(api_fixture('collections')[coll_name]['uuid']).files_tree
+  end
+
+  test "easy files_tree" do
+    files_in = lambda do |dirname|
+      (1..3).map { |n| [dirname, "file#{n}", 0] }
+    end
+    assert_equal([['.', 'dir1', nil], ['./dir1', 'subdir', nil]] +
+                 files_in['./dir1/subdir'] + files_in['./dir1'] +
+                 [['.', 'dir2', nil]] + files_in['./dir2'] + files_in['.'],
+                 get_files_tree('multilevel_collection_1'),
+                 "Collection file tree was malformed")
+  end
+
+  test "files_tree with files deep in subdirectories" do
+    # This test makes sure files_tree generates synthetic directory entries.
+    # The manifest doesn't list directories with no files.
+    assert_equal([['.', 'dir1', nil], ['./dir1', 'sub1', nil],
+                  ['./dir1/sub1', 'a', 0], ['./dir1/sub1', 'b', 0],
+                  ['.', 'dir2', nil], ['./dir2', 'sub2', nil],
+                  ['./dir2/sub2', 'c', 0], ['./dir2/sub2', 'd', 0]],
+                 get_files_tree('multilevel_collection_2'),
+                 "Collection file tree was malformed")
+  end
+
+  test "portable_data_hash never editable" do
+    refute(Collection.new.attribute_editable?("portable_data_hash", :ever))
+  end
+
+  test "admin can edit name" do
+    use_token :admin
+    assert(find_fixture(Collection, "foo_file").attribute_editable?("name"),
+           "admin not allowed to edit collection name")
+  end
+
+  test "project owner can edit name" do
+    use_token :active
+    assert(find_fixture(Collection, "foo_collection_in_aproject")
+             .attribute_editable?("name"),
+           "project owner not allowed to edit collection name")
+  end
+
+  test "project admin can edit name" do
+    use_token :subproject_admin
+    assert(find_fixture(Collection, "baz_file_in_asubproject")
+             .attribute_editable?("name"),
+           "project admin not allowed to edit collection name")
+  end
+
+  test "project viewer cannot edit name" do
+    use_token :project_viewer
+    refute(find_fixture(Collection, "foo_collection_in_aproject")
+             .attribute_editable?("name"),
+           "project viewer allowed to edit collection name")
+  end
+end
diff --git a/apps/workbench/test/unit/group_test.rb b/apps/workbench/test/unit/group_test.rb
new file mode 100644 (file)
index 0000000..4a4530c
--- /dev/null
@@ -0,0 +1,40 @@
+require 'test_helper'
+
+class GroupTest < ActiveSupport::TestCase
+  test "get contents with names" do
+    use_token :active
+    oi = Group.
+      find(api_fixture('groups')['asubproject']['uuid']).
+      contents()
+    assert_operator(0, :<, oi.count,
+                    "Expected to find some items belonging to :active user")
+    assert_operator(0, :<, oi.items_available,
+                    "Expected contents response to have items_available > 0")
+    oi_uuids = oi.collect { |i| i['uuid'] }
+
+    expect_uuid = api_fixture('specimens')['in_asubproject']['uuid']
+    assert_includes(oi_uuids, expect_uuid,
+                    "Expected '#{expect_uuid}' in asubproject's contents")
+  end
+
+  test "can select specific group columns" do
+    use_token :admin
+    Group.select(["uuid", "name"]).limit(5).each do |user|
+      assert_not_nil user.uuid
+      assert_not_nil user.name
+      assert_nil user.owner_uuid
+    end
+  end
+
+  test "project editable by its admin" do
+    use_token :subproject_admin
+    project = Group.find(api_fixture("groups")["asubproject"]["uuid"])
+    assert(project.editable?, "project not editable by admin")
+  end
+
+  test "project not editable by reader" do
+    use_token :project_viewer
+    project = Group.find(api_fixture("groups")["aproject"]["uuid"])
+    refute(project.editable?, "project editable by reader")
+  end
+end
diff --git a/apps/workbench/test/unit/helpers/api_client_authorizations_helper_test.rb b/apps/workbench/test/unit/helpers/api_client_authorizations_helper_test.rb
new file mode 100644 (file)
index 0000000..4225e04
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class ApiClientAuthorizationsHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/authorized_keys_helper_test.rb b/apps/workbench/test/unit/helpers/authorized_keys_helper_test.rb
new file mode 100644 (file)
index 0000000..ced3b29
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class AuthorizedKeysHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/collections_helper_test.rb b/apps/workbench/test/unit/helpers/collections_helper_test.rb
new file mode 100644 (file)
index 0000000..56d23c5
--- /dev/null
@@ -0,0 +1,12 @@
+require 'test_helper'
+
+class CollectionsHelperTest < ActionView::TestCase
+  test "file_path generates short names" do
+    assert_equal('foo', CollectionsHelper.file_path(['.', 'foo', 0]),
+                 "wrong result for filename in collection root")
+    assert_equal('foo/bar', CollectionsHelper.file_path(['foo', 'bar', 0]),
+                 "wrong result for filename in directory without leading .")
+    assert_equal('foo/bar', CollectionsHelper.file_path(['./foo', 'bar', 0]),
+                 "wrong result for filename in directory with leading .")
+  end
+end
diff --git a/apps/workbench/test/unit/helpers/groups_helper_test.rb b/apps/workbench/test/unit/helpers/groups_helper_test.rb
new file mode 100644 (file)
index 0000000..a591e4e
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class GroupsHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/humans_helper_test.rb b/apps/workbench/test/unit/helpers/humans_helper_test.rb
new file mode 100644 (file)
index 0000000..8c515d6
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class HumansHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/job_tasks_helper_test.rb b/apps/workbench/test/unit/helpers/job_tasks_helper_test.rb
new file mode 100644 (file)
index 0000000..f53621c
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class JobTasksHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/jobs_helper_test.rb b/apps/workbench/test/unit/helpers/jobs_helper_test.rb
new file mode 100644 (file)
index 0000000..7c4a3fd
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class JobsHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/keep_disks_helper_test.rb b/apps/workbench/test/unit/helpers/keep_disks_helper_test.rb
new file mode 100644 (file)
index 0000000..a3b064e
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class KeepDisksHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/links_helper_test.rb b/apps/workbench/test/unit/helpers/links_helper_test.rb
new file mode 100644 (file)
index 0000000..d50e16c
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class LinksHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/logs_helper_test.rb b/apps/workbench/test/unit/helpers/logs_helper_test.rb
new file mode 100644 (file)
index 0000000..c165554
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class LogsHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/nodes_helper_test.rb b/apps/workbench/test/unit/helpers/nodes_helper_test.rb
new file mode 100644 (file)
index 0000000..13011de
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class NodesHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/pipeline_instances_helper_test.rb b/apps/workbench/test/unit/helpers/pipeline_instances_helper_test.rb
new file mode 100644 (file)
index 0000000..45749cb
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class PipelineInstancesHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/pipeline_templates_helper_test.rb b/apps/workbench/test/unit/helpers/pipeline_templates_helper_test.rb
new file mode 100644 (file)
index 0000000..fb54368
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class PipelineTemplatesHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/projects_helper_test.rb b/apps/workbench/test/unit/helpers/projects_helper_test.rb
new file mode 100644 (file)
index 0000000..a591e4e
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class ProjectsHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/repositories_helper_test.rb b/apps/workbench/test/unit/helpers/repositories_helper_test.rb
new file mode 100644 (file)
index 0000000..51b6177
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class RepositoriesHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/sessions_helper_test.rb b/apps/workbench/test/unit/helpers/sessions_helper_test.rb
new file mode 100644 (file)
index 0000000..7d44e09
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class SessionsHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/specimens_helper_test.rb b/apps/workbench/test/unit/helpers/specimens_helper_test.rb
new file mode 100644 (file)
index 0000000..825af25
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class SpecimensHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/traits_helper_test.rb b/apps/workbench/test/unit/helpers/traits_helper_test.rb
new file mode 100644 (file)
index 0000000..da69c06
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class TraitsHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/user_agreements_helper_test.rb b/apps/workbench/test/unit/helpers/user_agreements_helper_test.rb
new file mode 100644 (file)
index 0000000..2577870
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class UserAgreementsHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/users_helper_test.rb b/apps/workbench/test/unit/helpers/users_helper_test.rb
new file mode 100644 (file)
index 0000000..96af37a
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class UsersHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/helpers/virtual_machines_helper_test.rb b/apps/workbench/test/unit/helpers/virtual_machines_helper_test.rb
new file mode 100644 (file)
index 0000000..03ded1c
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class VirtualMachinesHelperTest < ActionView::TestCase
+end
diff --git a/apps/workbench/test/unit/job_test.rb b/apps/workbench/test/unit/job_test.rb
new file mode 100644 (file)
index 0000000..add4c0f
--- /dev/null
@@ -0,0 +1,31 @@
+require 'test_helper'
+
+class JobTest < ActiveSupport::TestCase
+  test "admin can edit description" do
+    use_token :admin
+    assert(find_fixture(Job, "job_in_subproject")
+             .attribute_editable?("description"),
+           "admin not allowed to edit job description")
+  end
+
+  test "project owner can edit description" do
+    use_token :active
+    assert(find_fixture(Job, "job_in_subproject")
+             .attribute_editable?("description"),
+           "project owner not allowed to edit job description")
+  end
+
+  test "project admin can edit description" do
+    use_token :subproject_admin
+    assert(find_fixture(Job, "job_in_subproject")
+             .attribute_editable?("description"),
+           "project admin not allowed to edit job description")
+  end
+
+  test "project viewer cannot edit description" do
+    use_token :project_viewer
+    refute(find_fixture(Job, "job_in_subproject")
+             .attribute_editable?("description"),
+           "project viewer allowed to edit job description")
+  end
+end
diff --git a/apps/workbench/test/unit/link_test.rb b/apps/workbench/test/unit/link_test.rb
new file mode 100644 (file)
index 0000000..7636335
--- /dev/null
@@ -0,0 +1,48 @@
+require 'test_helper'
+
+class LinkTest < ActiveSupport::TestCase
+  def uuid_for(fixture_name, object_name)
+    api_fixture(fixture_name)[object_name]["uuid"]
+  end
+
+  test "active user can get permissions for owned project object" do
+    use_token :active
+    project = Group.find(uuid_for("groups", "aproject"))
+    refute_empty(Link.permissions_for(project),
+                 "no permissions found for managed project")
+  end
+
+  test "active user can get permissions for owned project by UUID" do
+    use_token :active
+    refute_empty(Link.permissions_for(uuid_for("groups", "aproject")),
+                 "no permissions found for managed project")
+  end
+
+  test "admin can get permissions for project object" do
+    use_token :admin
+    project = Group.find(uuid_for("groups", "aproject"))
+    refute_empty(Link.permissions_for(project),
+                 "no permissions found for managed project")
+  end
+
+  test "admin can get permissions for project by UUID" do
+    use_token :admin
+    refute_empty(Link.permissions_for(uuid_for("groups", "aproject")),
+                 "no permissions found for managed project")
+  end
+
+  test "project viewer can't get permissions for readable project object" do
+    use_token :project_viewer
+    project = Group.find(uuid_for("groups", "aproject"))
+    assert_raises(ArvadosApiClient::AccessForbiddenException) do
+      Link.permissions_for(project)
+    end
+  end
+
+  test "project viewer can't get permissions for readable project by UUID" do
+    use_token :project_viewer
+    assert_raises(ArvadosApiClient::AccessForbiddenException) do
+      Link.permissions_for(uuid_for("groups", "aproject"))
+    end
+  end
+end
diff --git a/apps/workbench/test/unit/pipeline_instance_test.rb b/apps/workbench/test/unit/pipeline_instance_test.rb
new file mode 100644 (file)
index 0000000..4cad6e6
--- /dev/null
@@ -0,0 +1,49 @@
+require 'test_helper'
+
+class PipelineInstanceTest < ActiveSupport::TestCase
+  def attribute_editable_for?(token_name, pi_name, attr_name, ever=nil)
+    use_token token_name
+    find_fixture(PipelineInstance, pi_name).attribute_editable?(attr_name, ever)
+  end
+
+  test "admin can edit name" do
+    assert(attribute_editable_for?(:admin, "new_pipeline_in_subproject",
+                                   "name"),
+           "admin not allowed to edit pipeline instance name")
+  end
+
+  test "project owner can edit name" do
+    assert(attribute_editable_for?(:active, "new_pipeline_in_subproject",
+                                   "name"),
+           "project owner not allowed to edit pipeline instance name")
+  end
+
+  test "project admin can edit name" do
+    assert(attribute_editable_for?(:subproject_admin,
+                                   "new_pipeline_in_subproject", "name"),
+           "project admin not allowed to edit pipeline instance name")
+  end
+
+  test "project viewer cannot edit name" do
+    refute(attribute_editable_for?(:project_viewer,
+                                   "new_pipeline_in_subproject", "name"),
+           "project viewer allowed to edit pipeline instance name")
+  end
+
+  test "name editable on completed pipeline" do
+    assert(attribute_editable_for?(:active, "has_component_with_completed_jobs",
+                                   "name"),
+           "name not editable on complete pipeline")
+  end
+
+  test "components editable on new pipeline" do
+    assert(attribute_editable_for?(:active, "new_pipeline", "components"),
+           "components not editable on new pipeline")
+  end
+
+  test "components not editable on completed pipeline" do
+    refute(attribute_editable_for?(:active, "has_component_with_completed_jobs",
+                                   "components"),
+           "components editable on completed pipeline")
+  end
+end
diff --git a/apps/workbench/test/unit/user_test.rb b/apps/workbench/test/unit/user_test.rb
new file mode 100644 (file)
index 0000000..89e95df
--- /dev/null
@@ -0,0 +1,12 @@
+require 'test_helper'
+
+class UserTest < ActiveSupport::TestCase
+  test "can select specific user columns" do
+    use_token :admin
+    User.select(["uuid", "is_active"]).limit(5).each do |user|
+      assert_not_nil user.uuid
+      assert_not_nil user.is_active
+      assert_nil user.first_name
+    end
+  end
+end
diff --git a/apps/workbench/vendor/assets/javascripts/.gitkeep b/apps/workbench/vendor/assets/javascripts/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/workbench/vendor/assets/javascripts/jquery.number.min.js b/apps/workbench/vendor/assets/javascripts/jquery.number.min.js
new file mode 100644 (file)
index 0000000..4fce02b
--- /dev/null
@@ -0,0 +1,2 @@
+/*! jQuery number 2.1.5 (c) github.com/teamdf/jquery-number | opensource.teamdf.com/license */
+(function(e){"use strict";function t(e,t){if(this.createTextRange){var n=this.createTextRange();n.collapse(true);n.moveStart("character",e);n.moveEnd("character",t-e);n.select()}else if(this.setSelectionRange){this.focus();this.setSelectionRange(e,t)}}function n(e){var t=this.value.length;e=e.toLowerCase()=="start"?"Start":"End";if(document.selection){var n=document.selection.createRange(),r,i,s;r=n.duplicate();r.expand("textedit");r.setEndPoint("EndToEnd",n);i=r.text.length-n.text.length;s=i+n.text.length;return e=="Start"?i:s}else if(typeof this["selection"+e]!="undefined"){t=this["selection"+e]}return t}var r={codes:{46:127,188:44,109:45,190:46,191:47,192:96,220:92,222:39,221:93,219:91,173:45,187:61,186:59,189:45,110:46},shifts:{96:"~",49:"!",50:"@",51:"#",52:"$",53:"%",54:"^",55:"&",56:"*",57:"(",48:")",45:"_",61:"+",91:"{",93:"}",92:"|",59:":",39:'"',44:"<",46:">",47:"?"}};e.fn.number=function(i,s,o,u){u=typeof u==="undefined"?",":u;o=typeof o==="undefined"?".":o;s=typeof s==="undefined"?0:s;var a="\\u"+("0000"+o.charCodeAt(0).toString(16)).slice(-4),f=new RegExp("[^"+a+"0-9]","g"),l=new RegExp(a,"g");if(i===true){if(this.is("input:text")){return this.on({"keydown.format":function(i){var a=e(this),f=a.data("numFormat"),l=i.keyCode?i.keyCode:i.which,c="",h=n.apply(this,["start"]),p=n.apply(this,["end"]),d="",v=false;if(r.codes.hasOwnProperty(l)){l=r.codes[l]}if(!i.shiftKey&&l>=65&&l<=90){l+=32}else if(!i.shiftKey&&l>=69&&l<=105){l-=48}else if(i.shiftKey&&r.shifts.hasOwnProperty(l)){c=r.shifts[l]}if(c=="")c=String.fromCharCode(l);if(l!=8&&l!=45&&l!=127&&c!=o&&!c.match(/[0-9]/)){var m=i.keyCode?i.keyCode:i.which;if(m==46||m==8||m==127||m==9||m==27||m==13||(m==65||m==82||m==80||m==83||m==70||m==72||m==66||m==74||m==84||m==90||m==61||m==173||m==48)&&(i.ctrlKey||i.metaKey)===true||(m==86||m==67||m==88)&&(i.ctrlKey||i.metaKey)===true||m>=35&&m<=39||m>=112&&m<=123){return}i.preventDefault();return 
false}if(h==0&&p==this.value.length||a.val()==0){if(l==8){h=p=1;this.value="";f.init=s>0?-1:0;f.c=s>0?-(s+1):0;t.apply(this,[0,0])}else if(c==o){h=p=1;this.value="0"+o+(new Array(s+1)).join("0");f.init=s>0?1:0;f.c=s>0?-(s+1):0}else if(l==45){h=p=2;this.value="-0"+o+(new Array(s+1)).join("0");f.init=s>0?1:0;f.c=s>0?-(s+1):0;t.apply(this,[2,2])}else{f.init=s>0?-1:0;f.c=s>0?-s:0}}else{f.c=p-this.value.length}f.isPartialSelection=h==p?false:true;if(s>0&&c==o&&h==this.value.length-s-1){f.c++;f.init=Math.max(0,f.init);i.preventDefault();v=this.value.length+f.c}else if(l==45&&(h!=0||this.value.indexOf("-")==0)){i.preventDefault()}else if(c==o){f.init=Math.max(0,f.init);i.preventDefault()}else if(s>0&&l==127&&h==this.value.length-s-1){i.preventDefault()}else if(s>0&&l==8&&h==this.value.length-s){i.preventDefault();f.c--;v=this.value.length+f.c}else if(s>0&&l==127&&h>this.value.length-s-1){if(this.value==="")return;if(this.value.slice(h,h+1)!="0"){d=this.value.slice(0,h)+"0"+this.value.slice(h+1);a.val(d)}i.preventDefault();v=this.value.length+f.c}else if(s>0&&l==8&&h>this.value.length-s){if(this.value==="")return;if(this.value.slice(h-1,h)!="0"){d=this.value.slice(0,h-1)+"0"+this.value.slice(h);a.val(d)}i.preventDefault();f.c--;v=this.value.length+f.c}else if(l==127&&this.value.slice(h,h+1)==u){i.preventDefault()}else if(l==8&&this.value.slice(h-1,h)==u){i.preventDefault();f.c--;v=this.value.length+f.c}else if(s>0&&h==p&&this.value.length>s+1&&h>this.value.length-s-1&&isFinite(+c)&&!i.metaKey&&!i.ctrlKey&&!i.altKey&&c.length===1){if(p===this.value.length){d=this.value.slice(0,h-1)}else{d=this.value.slice(0,h)+this.value.slice(h+1)}this.value=d;v=h}if(v!==false){t.apply(this,[v,v])}a.data("numFormat",f)},"keyup.format":function(r){var 
i=e(this),o=i.data("numFormat"),u=r.keyCode?r.keyCode:r.which,a=n.apply(this,["start"]),f=n.apply(this,["end"]),l;if(a===0&&f===0&&(u===189||u===109)){i.val("-"+i.val());a=1;o.c=1-this.value.length;o.init=1;i.data("numFormat",o);l=this.value.length+o.c;t.apply(this,[l,l])}if(this.value===""||(u<48||u>57)&&(u<96||u>105)&&u!==8&&u!==46&&u!==110)return;i.val(i.val());if(s>0){if(o.init<1){a=this.value.length-s-(o.init<0?1:0);o.c=a-this.value.length;o.init=1;i.data("numFormat",o)}else if(a>this.value.length-s&&u!=8){o.c++;i.data("numFormat",o)}}if(u==46&&!o.isPartialSelection){o.c++;i.data("numFormat",o)}l=this.value.length+o.c;t.apply(this,[l,l])},"paste.format":function(t){var n=e(this),r=t.originalEvent,i=null;if(window.clipboardData&&window.clipboardData.getData){i=window.clipboardData.getData("Text")}else if(r.clipboardData&&r.clipboardData.getData){i=r.clipboardData.getData("text/plain")}n.val(i);t.preventDefault();return false}}).each(function(){var t=e(this).data("numFormat",{c:-(s+1),decimals:s,thousands_sep:u,dec_point:o,regex_dec_num:f,regex_dec:l,init:this.value.indexOf(".")?true:false});if(this.value==="")return;t.val(t.val())})}else{return this.each(function(){var t=e(this),n=+t.text().replace(f,"").replace(l,".");t.number(!isFinite(n)?0:+n,s,o,u)})}}return this.text(e.number.apply(window,arguments))};var i=null,s=null;if(e.isPlainObject(e.valHooks.text)){if(e.isFunction(e.valHooks.text.get))i=e.valHooks.text.get;if(e.isFunction(e.valHooks.text.set))s=e.valHooks.text.set}else{e.valHooks.text={}}e.valHooks.text.get=function(t){var n=e(t),r,s,o=n.data("numFormat");if(!o){if(e.isFunction(i)){return i(t)}else{return undefined}}else{if(t.value==="")return"";r=+t.value.replace(o.regex_dec_num,"").replace(o.regex_dec,".");return(t.value.indexOf("-")===0?"-":"")+(isFinite(r)?r:0)}};e.valHooks.text.set=function(t,n){var r=e(t),i=r.data("numFormat");if(!i){if(e.isFunction(s)){return s(t,n)}else{return undefined}}else{var 
o=e.number(n,i.decimals,i.dec_point,i.thousands_sep);return t.value=o}};e.number=function(e,t,n,r){r=typeof r==="undefined"?",":r;n=typeof n==="undefined"?".":n;t=!isFinite(+t)?0:Math.abs(t);var i="\\u"+("0000"+n.charCodeAt(0).toString(16)).slice(-4);var s="\\u"+("0000"+r.charCodeAt(0).toString(16)).slice(-4);e=(e+"").replace(".",n).replace(new RegExp(s,"g"),"").replace(new RegExp(i,"g"),".").replace(new RegExp("[^0-9+-Ee.]","g"),"");var o=!isFinite(+e)?0:+e,u="",a=function(e,t){var n=Math.pow(10,t);return""+Math.round(e*n)/n};u=(t?a(o,t):""+Math.round(o)).split(".");if(u[0].length>3){u[0]=u[0].replace(/\B(?=(?:\d{3})+(?!\d))/g,r)}if((u[1]||"").length<t){u[1]=u[1]||"";u[1]+=(new Array(t-u[1].length+1)).join("0")}return u.join(n)}})(jQuery)
diff --git a/apps/workbench/vendor/assets/stylesheets/.gitkeep b/apps/workbench/vendor/assets/stylesheets/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/workbench/vendor/plugins/.gitkeep b/apps/workbench/vendor/plugins/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/by-sa-3.0.txt b/by-sa-3.0.txt
new file mode 100644 (file)
index 0000000..281c9b6
--- /dev/null
@@ -0,0 +1,297 @@
+Creative Commons Legal Code
+
+Attribution-ShareAlike 3.0 United States
+
+License
+
+THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE
+COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY
+COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS
+AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+
+BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE
+BOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS LICENSE MAY BE
+CONSIDERED TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE
+IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS.
+
+1. Definitions
+
+ a. "Collective Work" means a work, such as a periodical issue, anthology or
+    encyclopedia, in which the Work in its entirety in unmodified form, along
+    with one or more other contributions, constituting separate and independent
+    works in themselves, are assembled into a collective whole. A work that
+    constitutes a Collective Work will not be considered a Derivative Work (as
+    defined below) for the purposes of this License.
+
+ b. "Creative Commons Compatible License" means a license that is listed at
+    http://creativecommons.org/compatiblelicenses that has been approved by
+    Creative Commons as being essentially equivalent to this License,
+    including, at a minimum, because that license: (i) contains terms that have
+    the same purpose, meaning and effect as the License Elements of this
+    License; and, (ii) explicitly permits the relicensing of derivatives of
+    works made available under that license under this License or either a
+    Creative Commons unported license or a Creative Commons jurisdiction
+    license with the same License Elements as this License.
+
+ c. "Derivative Work" means a work based upon the Work or upon the Work and
+    other pre-existing works, such as a translation, musical arrangement,
+    dramatization, fictionalization, motion picture version, sound recording,
+    art reproduction, abridgment, condensation, or any other form in which the
+    Work may be recast, transformed, or adapted, except that a work that
+    constitutes a Collective Work will not be considered a Derivative Work for
+    the purpose of this License. For the avoidance of doubt, where the Work is
+    a musical composition or sound recording, the synchronization of the Work
+    in timed-relation with a moving image ("synching") will be considered a
+    Derivative Work for the purpose of this License.
+
+ d. "License Elements" means the following high-level license attributes as
+    selected by Licensor and indicated in the title of this License:
+    Attribution, ShareAlike.
+
+ e. "Licensor" means the individual, individuals, entity or entities that
+    offers the Work under the terms of this License.
+
+ f. "Original Author" means the individual, individuals, entity or entities who
+    created the Work.
+
+ g. "Work" means the copyrightable work of authorship offered under the terms
+    of this License.
+
+ h. "You" means an individual or entity exercising rights under this License
+    who has not previously violated the terms of this License with respect to
+    the Work, or who has received express permission from the Licensor to
+    exercise rights under this License despite a previous violation.
+
+2. Fair Use Rights. Nothing in this license is intended to reduce, limit, or
+restrict any rights arising from fair use, first sale or other limitations on
+the exclusive rights of the copyright owner under copyright law or other
+applicable laws.
+
+3. License Grant. Subject to the terms and conditions of this License, Licensor
+hereby grants You a worldwide, royalty-free, non-exclusive, perpetual (for the
+duration of the applicable copyright) license to exercise the rights in the
+Work as stated below:
+
+ a. to reproduce the Work, to incorporate the Work into one or more Collective
+    Works, and to reproduce the Work as incorporated in the Collective Works;
+
+ b. to create and reproduce Derivative Works provided that any such
+    Derivative Work, including any translation in any medium, takes reasonable
+    steps to clearly label, demarcate or otherwise identify that changes were
+    made to the original Work. For example, a translation could be marked "The
+    original work was translated from English to Spanish," or a modification
+    could indicate "The original work has been modified.";
+
+ c. to distribute copies or phonorecords of, display publicly, perform
+    publicly, and perform publicly by means of a digital audio transmission the
+    Work including as incorporated in Collective Works;
+
+ d. to distribute copies or phonorecords of, display publicly, perform
+    publicly, and perform publicly by means of a digital audio transmission
+    Derivative Works.
+
+ e. For the avoidance of doubt, where the Work is a musical composition:
+
+     i. Performance Royalties Under Blanket Licenses. Licensor waives the
+        exclusive right to collect, whether individually or, in the event that
+        Licensor is a member of a performance rights society (e.g. ASCAP, BMI,
+        SESAC), via that society, royalties for the public performance or
+        public digital performance (e.g. webcast) of the Work.
+
+    ii. Mechanical Rights and Statutory Royalties. Licensor waives the
+        exclusive right to collect, whether individually or via a music rights
+        agency or designated agent (e.g. Harry Fox Agency), royalties for any
+        phonorecord You create from the Work ("cover version") and distribute,
+        subject to the compulsory license created by 17 USC Section 115 of the
+        US Copyright Act (or the equivalent in other jurisdictions).
+
+ f. Webcasting Rights and Statutory Royalties. For the avoidance of doubt,
+    where the Work is a sound recording, Licensor waives the exclusive right to
+    collect, whether individually or via a performance-rights society
+    (e.g. SoundExchange), royalties for the public digital performance
+    (e.g. webcast) of the Work, subject to the compulsory license created by 17
+    USC Section 114 of the US Copyright Act (or the equivalent in other
+    jurisdictions).
+
+The above rights may be exercised in all media and formats whether now known or
+hereafter devised. The above rights include the right to make such
+modifications as are technically necessary to exercise the rights in other
+media and formats. All rights not expressly granted by Licensor are hereby
+reserved.
+
+4. Restrictions. The license granted in Section 3 above is expressly made subject to and limited by the following restrictions:
+
+ a. You may distribute, publicly display, publicly perform, or publicly
+    digitally perform the Work only under the terms of this License, and You
+    must include a copy of, or the Uniform Resource Identifier for, this
+    License with every copy or phonorecord of the Work You distribute, publicly
+    display, publicly perform, or publicly digitally perform. You may not offer
+    or impose any terms on the Work that restrict the terms of this License or
+    the ability of a recipient of the Work to exercise of the rights granted to
+    that recipient under the terms of the License. You may not sublicense the
+    Work. You must keep intact all notices that refer to this License and to
+    the disclaimer of warranties. When You distribute, publicly display,
+    publicly perform, or publicly digitally perform the Work, You may not
+    impose any technological measures on the Work that restrict the ability of
+    a recipient of the Work from You to exercise of the rights granted to that
+    recipient under the terms of the License. This Section 4(a) applies to the
+    Work as incorporated in a Collective Work, but this does not require the
+    Collective Work apart from the Work itself to be made subject to the terms
+    of this License. If You create a Collective Work, upon notice from any
+    Licensor You must, to the extent practicable, remove from the Collective
+    Work any credit as required by Section 4(c), as requested. If You create a
+    Derivative Work, upon notice from any Licensor You must, to the extent
+    practicable, remove from the Derivative Work any credit as required by
+    Section 4(c), as requested.
+
+ b. You may distribute, publicly display, publicly perform, or publicly
+    digitally perform a Derivative Work only under: (i) the terms of this
+    License; (ii) a later version of this License with the same License
+    Elements as this License; (iii) either the Creative Commons (Unported)
+    license or a Creative Commons jurisdiction license (either this or a later
+    license version) that contains the same License Elements as this License
+    (e.g. Attribution-ShareAlike 3.0 (Unported)); (iv) a Creative Commons
+    Compatible License. If you license the Derivative Work under one of the
+    licenses mentioned in (iv), you must comply with the terms of that
+    license. If you license the Derivative Work under the terms of any of the
+    licenses mentioned in (i), (ii) or (iii) (the "Applicable License"), you
+    must comply with the terms of the Applicable License generally and with the
+    following provisions: (I) You must include a copy of, or the Uniform
+    Resource Identifier for, the Applicable License with every copy or
+    phonorecord of each Derivative Work You distribute, publicly display,
+    publicly perform, or publicly digitally perform; (II) You may not offer or
+    impose any terms on the Derivative Works that restrict the terms of the
+    Applicable License or the ability of a recipient of the Work to exercise
+    the rights granted to that recipient under the terms of the Applicable
+    License; (III) You must keep intact all notices that refer to the
+    Applicable License and to the disclaimer of warranties; and, (IV) when You
+    distribute, publicly display, publicly perform, or publicly digitally
+    perform the Work, You may not impose any technological measures on the
+    Derivative Work that restrict the ability of a recipient of the Derivative
+    Work from You to exercise the rights granted to that recipient under the
+    terms of the Applicable License. This Section 4(b) applies to the
+    Derivative Work as incorporated in a Collective Work, but this does not
+    require the Collective Work apart from the Derivative Work itself to be
+    made subject to the terms of the Applicable License.
+
+ c. If You distribute, publicly display, publicly perform, or publicly
+    digitally perform the Work (as defined in Section 1 above) or any
+    Derivative Works (as defined in Section 1 above) or Collective Works (as
+    defined in Section 1 above), You must, unless a request has been made
+    pursuant to Section 4(a), keep intact all copyright notices for the Work
+    and provide, reasonable to the medium or means You are utilizing: (i) the
+    name of the Original Author (or pseudonym, if applicable) if supplied,
+    and/or (ii) if the Original Author and/or Licensor designate another party
+    or parties (e.g. a sponsor institute, publishing entity, journal) for
+    attribution ("Attribution Parties") in Licensor's copyright notice, terms
+    of service or by other reasonable means, the name of such party or parties;
+    the title of the Work if supplied; to the extent reasonably practicable,
+    the Uniform Resource Identifier, if any, that Licensor specifies to be
+    associated with the Work, unless such URI does not refer to the copyright
+    notice or licensing information for the Work; and, consistent with Section
+    3(b) in the case of a Derivative Work, a credit identifying the use of the
+    Work in the Derivative Work (e.g., "French translation of the Work by
+    Original Author," or "Screenplay based on original Work by Original
+    Author"). The credit required by this Section 4(c) may be implemented in
+    any reasonable manner; provided, however, that in the case of a Derivative
+    Work or Collective Work, at a minimum such credit will appear, if a credit
+    for all contributing authors of the Derivative Work or Collective Work
+    appears, then as part of these credits and in a manner at least as
+    prominent as the credits for the other contributing authors. For the
+    avoidance of doubt, You may only use the credit required by this Section
+    for the purpose of attribution in the manner set out above and, by
+    exercising Your rights under this License, You may not implicitly or
+    explicitly assert or imply any connection with, sponsorship or endorsement
+    by the Original Author, Licensor and/or Attribution Parties, as
+    appropriate, of You or Your use of the Work, without the separate, express
+    prior written permission of the Original Author, Licensor and/or
+    Attribution Parties.
+
+
+5. Representations, Warranties and Disclaimer
+
+UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR OFFERS
+THE WORK AS-IS AND ONLY TO THE EXTENT OF ANY RIGHTS HELD IN THE LICENSED WORK
+BY THE LICENSOR. THE LICENSOR MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY
+KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, INCLUDING,
+WITHOUT LIMITATION, WARRANTIES OF TITLE, MARKETABILITY, MERCHANTIBILITY,
+FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR
+OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, WHETHER OR NOT
+DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF IMPLIED
+WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU.
+
+6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE LAW, IN
+NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY SPECIAL,
+INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES ARISING OUT OF THIS
+LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+7. Termination
+
+ a. This License and the rights granted hereunder will terminate automatically
+    upon any breach by You of the terms of this License. Individuals or
+    entities who have received Derivative Works or Collective Works from You
+    under this License, however, will not have their licenses terminated
+    provided such individuals or entities remain in full compliance with those
+    licenses. Sections 1, 2, 5, 6, 7, and 8 will survive any termination of
+    this License.
+
+ b. Subject to the above terms and conditions, the license granted here is
+    perpetual (for the duration of the applicable copyright in the
+    Work). Notwithstanding the above, Licensor reserves the right to release
+    the Work under different license terms or to stop distributing the Work at
+    any time; provided, however that any such election will not serve to
+    withdraw this License (or any other license that has been, or is required
+    to be, granted under the terms of this License), and this License will
+    continue in full force and effect unless terminated as stated above.
+
+8. Miscellaneous
+
+ a. Each time You distribute or publicly digitally perform the Work (as defined
+    in Section 1 above) or a Collective Work (as defined in Section 1 above),
+    the Licensor offers to the recipient a license to the Work on the same
+    terms and conditions as the license granted to You under this License.
+
+ b. Each time You distribute or publicly digitally perform a Derivative Work,
+    Licensor offers to the recipient a license to the original Work on the same
+    terms and conditions as the license granted to You under this License.
+
+ c. If any provision of this License is invalid or unenforceable under
+    applicable law, it shall not affect the validity or enforceability of the
+    remainder of the terms of this License, and without further action by the
+    parties to this agreement, such provision shall be reformed to the minimum
+    extent necessary to make such provision valid and enforceable.
+
+ d. No term or provision of this License shall be deemed waived and no breach
+    consented to unless such waiver or consent shall be in writing and signed
+    by the party to be charged with such waiver or consent.
+
+ e. This License constitutes the entire agreement between the parties with
+    respect to the Work licensed here. There are no understandings, agreements
+    or representations with respect to the Work not specified here. Licensor
+    shall not be bound by any additional provisions that may appear in any
+    communication from You. This License may not be modified without the mutual
+    written agreement of the Licensor and You.
+
+Creative Commons Notice
+
+    Creative Commons is not a party to this License, and makes no warranty
+    whatsoever in connection with the Work. Creative Commons will not be liable
+    to You or any party on any legal theory for any damages whatsoever,
+    including without limitation any general, special, incidental or
+    consequential damages arising in connection to this
+    license. Notwithstanding the foregoing two (2) sentences, if Creative
+    Commons has expressly identified itself as the Licensor hereunder, it shall
+    have all rights and obligations of Licensor.
+
+    Except for the limited purpose of indicating to the public that the Work is
+    licensed under the CCPL, Creative Commons does not authorize the use by
+    either party of the trademark "Creative Commons" or any related trademark
+    or logo of Creative Commons without the prior written consent of Creative
+    Commons. Any permitted use will be in compliance with Creative Commons'
+    then-current trademark usage guidelines, as may be published on its website
+    or otherwise made available upon request from time to time. For the
+    avoidance of doubt, this trademark restriction does not form part of this
+    License.
+
+    Creative Commons may be contacted at http://creativecommons.org/.
diff --git a/crunch_scripts/GATK2-VariantFiltration b/crunch_scripts/GATK2-VariantFiltration
new file mode 100755 (executable)
index 0000000..392e14b
--- /dev/null
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+# Crunch script: run GATK2 VariantFiltration on each input VCF.
+# One task is queued per input file; each task fetches its unfiltered
+# VCF from Keep, applies a fixed set of hard filters, and stores the
+# filtered VCF back to Keep as the task output.
+
+import arvados
+import os
+import re
+
+# Fan out: queue one task per file in this job's input collection.
+arvados.job_setup.one_task_per_input_file(if_sequence=0, and_end_task=True)
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+# Unpack the GATK binary tarball (job parameter 'gatk_binary_tarball').
+gatk_path = arvados.util.tarball_extract(
+    tarball = this_job['script_parameters']['gatk_binary_tarball'],
+    path = 'gatk')
+# Extract only the reference files this tool needs from the GATK bundle.
+bundle_path = arvados.util.collection_extract(
+    collection = this_job['script_parameters']['gatk_bundle'],
+    path = 'gatk-bundle',
+    files = ['human_g1k_v37.dict', 'human_g1k_v37.fasta', 'human_g1k_v37.fasta.fai'])
+this_task_input = this_task['parameters']['input']
+
+# The task input manifest is assumed to contain exactly one VCF file
+# (one_task_per_input_file queues one file per task) -- TODO confirm.
+input_file = list(arvados.CollectionReader(this_task_input).all_files())[0]
+
+# choose vcf temporary file names
+vcf_in = os.path.join(arvados.current_task().tmpdir,
+                      os.path.basename(input_file.name()))
+# foo.vcf -> foo-filtered.vcf (name left unchanged if it has no .vcf)
+vcf_out = re.sub('(.*)\\.vcf', '\\1-filtered.vcf', vcf_in)
+
+# fetch the unfiltered data
+vcf_in_file = open(vcf_in, 'w')
+for buf in input_file.readall():
+    vcf_in_file.write(buf)
+vcf_in_file.close()
+
+# Apply hard filters; each --filterExpression/--filterName pair tags
+# failing records in the output VCF's FILTER column.
+stdoutdata, stderrdata = arvados.util.run_command(
+    ['java', '-Xmx1g',
+     '-jar', os.path.join(gatk_path,'GenomeAnalysisTK.jar'),
+     '-T', 'VariantFiltration', '--variant', vcf_in,
+     '--out', vcf_out,
+     '--filterExpression', 'QD < 2.0',
+     '--filterName', 'GATK_QD',
+     '--filterExpression', 'MQ < 40.0',
+     '--filterName', 'GATK_MQ',
+     '--filterExpression', 'FS > 60.0',
+     '--filterName', 'GATK_FS',
+     '--filterExpression', 'MQRankSum < -12.5',
+     '--filterName', 'GATK_MQRankSum',
+     '--filterExpression', 'ReadPosRankSum < -8.0',
+     '--filterName', 'GATK_ReadPosRankSum',
+     '-R', os.path.join(bundle_path, 'human_g1k_v37.fasta')],
+    cwd=arvados.current_task().tmpdir)
+
+# store the filtered data
+with open(vcf_out, 'rb') as f:
+    out = arvados.CollectionWriter()
+    while True:
+        buf = f.read()
+        if len(buf) == 0:
+            break
+        out.write(buf)
+# NOTE(review): the file is named after its data is written; presumably
+# CollectionWriter permits naming the current file until it is finished
+# -- confirm against the SDK.
+out.set_current_file_name(os.path.basename(vcf_out))
+
+this_task.set_output(out.finish())
diff --git a/crunch_scripts/GATK2-bqsr b/crunch_scripts/GATK2-bqsr
new file mode 100755 (executable)
index 0000000..82cc57e
--- /dev/null
@@ -0,0 +1,100 @@
+#!/usr/bin/env python
+# Crunch script: GATK2 base quality score recalibration (BQSR).
+# For each input BAM: run BaseRecalibrator to build a recalibration
+# table, then run PrintReads -BQSR in a forked child that streams the
+# recalibrated BAM through a pipe, and store both outputs in Keep.
+
+import os
+import re
+import arvados
+import arvados_gatk2
+import arvados_samtools
+from arvados_ipc import *
+
+class InvalidArgumentError(Exception):
+    pass
+
+# Fan out: one task per BAM file in the job's input collection.
+arvados_samtools.one_task_per_bam_file(if_sequence=0, and_end_task=True)
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+tmpdir = arvados.current_task().tmpdir
+arvados.util.clear_tmpdir()
+
+# Known-sites VCFs for BaseRecalibrator (overridable job parameter).
+known_sites_files = arvados.getjobparam(
+    'known_sites',
+    ['dbsnp_137.b37.vcf',
+     'Mills_and_1000G_gold_standard.indels.b37.vcf',
+     ])
+# Extract the reference plus each known-sites VCF and its .idx companion.
+bundle_dir = arvados.util.collection_extract(
+    collection = this_job['script_parameters']['gatk_bundle'],
+    files = [
+        'human_g1k_v37.dict',
+        'human_g1k_v37.fasta',
+        'human_g1k_v37.fasta.fai'
+        ] + known_sites_files + [v + '.idx' for v in known_sites_files],
+    path = 'gatk_bundle')
+ref_fasta_files = [os.path.join(bundle_dir, f)
+                   for f in os.listdir(bundle_dir)
+                   if re.search(r'\.fasta(\.gz)?$', f)]
+
+# Fetch this task's input collection; exactly one BAM is expected.
+input_collection = this_task['parameters']['input']
+input_dir = arvados.util.collection_extract(
+    collection = input_collection,
+    path = os.path.join(this_task.tmpdir, 'input'))
+input_bam_files = []
+for f in arvados.util.listdir_recursive(input_dir):
+    if re.search(r'\.bam$', f):
+        input_stream_name, input_file_name = os.path.split(f)
+        input_bam_files += [os.path.join(input_dir, f)]
+if len(input_bam_files) != 1:
+    raise InvalidArgumentError("Expected exactly one bam file per task.")
+
+known_sites_args = []
+for f in known_sites_files:
+    known_sites_args += ['-knownSites', os.path.join(bundle_dir, f)]
+
+recal_file = os.path.join(tmpdir, 'recal.csv')
+
+children = {}
+pipes = {}
+
+# Pass 1: build the recalibration table from the input BAM.
+arvados_gatk2.run(
+    args=[
+        '-nct', arvados_gatk2.cpus_on_this_node(),
+        '-T', 'BaseRecalibrator',
+        '-R', ref_fasta_files[0],
+        '-I', input_bam_files[0],
+        '-o', recal_file,
+        ] + known_sites_args)
+
+# Pass 2: a forked child applies the table with PrintReads, writing the
+# recalibrated BAM to the 'BQSR' pipe, which the parent reads below.
+pipe_setup(pipes, 'BQSR')
+if 0 == named_fork(children, 'BQSR'):
+    pipe_closeallbut(pipes, ('BQSR', 'w'))
+    arvados_gatk2.run(
+        args=[
+        '-T', 'PrintReads',
+        '-R', ref_fasta_files[0],
+        '-I', input_bam_files[0],
+        '-o', '/dev/fd/' + str(pipes['BQSR','w']),
+        '-BQSR', recal_file,
+        '--disable_bam_indexing',
+        ],
+        close_fds=False)
+    os._exit(0)
+# Parent: close the write end so EOF propagates when the child exits.
+# NOTE(review): pop() with default None would make os.close(None) raise
+# TypeError if the key were ever absent -- confirm the key always exists.
+os.close(pipes.pop(('BQSR','w'), None))
+
+# Output stream mirrors the input file's stream/file names.
+out = arvados.CollectionWriter()
+out.start_new_stream(input_stream_name)
+
+out.start_new_file(input_file_name + '.recal.csv')
+out.write(open(recal_file, 'rb'))
+
+# Stream the recalibrated BAM from the child into the output collection.
+out.start_new_file(input_file_name)
+while True:
+    buf = os.read(pipes['BQSR','r'], 2**20)
+    if len(buf) == 0:
+        break
+    out.write(buf)
+pipe_closeallbut(pipes)
+
+# sys is in scope via the star import from arvados_ipc -- TODO confirm.
+if waitpid_and_check_children(children):
+    this_task.set_output(out.finish())
+else:
+    sys.exit(1)
diff --git a/crunch_scripts/GATK2-merge-call b/crunch_scripts/GATK2-merge-call
new file mode 100755 (executable)
index 0000000..c584449
--- /dev/null
@@ -0,0 +1,239 @@
+#!/usr/bin/env python
+# Crunch script: merge all input BAMs into one coordinate-sorted BAM,
+# collect coverage statistics, and call variants with GATK2
+# UnifiedGenotyper.  This top section sets up inputs, streams each BAM
+# through a forked child into picard MergeSamFiles, and forks the
+# CoverageBySample statistics run.
+
+import os
+import re
+import string
+import threading
+import arvados
+import arvados_gatk2
+import arvados_picard
+from arvados_ipc import *
+
+class InvalidArgumentError(Exception):
+    pass
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+tmpdir = arvados.current_task().tmpdir
+arvados.util.clear_tmpdir()
+
+# Reference genome plus dbSNP from the GATK bundle collection.
+bundle_dir = arvados.util.collection_extract(
+    collection = this_job['script_parameters']['gatk_bundle'],
+    files = [
+        'human_g1k_v37.dict',
+        'human_g1k_v37.fasta',
+        'human_g1k_v37.fasta.fai',
+        'dbsnp_137.b37.vcf',
+        'dbsnp_137.b37.vcf.idx',
+        ],
+    path = 'gatk_bundle')
+ref_fasta_files = [os.path.join(bundle_dir, f)
+                   for f in os.listdir(bundle_dir)
+                   if re.search(r'\.fasta(\.gz)?$', f)]
+# Optional target regions: every .bed file in the 'regions' collection,
+# padded by 'region_padding' bases on each side.
+regions_args = []
+if 'regions' in this_job['script_parameters']:
+    regions_dir = arvados.util.collection_extract(
+        collection = this_job['script_parameters']['regions'],
+        path = 'regions')
+    region_padding = int(this_job['script_parameters']['region_padding'])
+    for f in os.listdir(regions_dir):
+        if re.search(r'\.bed$', f):
+            regions_args += [
+                '--intervals', os.path.join(regions_dir, f),
+                '--interval_padding', str(region_padding)
+                ]
+
+
+# Start a child process for each input file, feeding data to picard.
+
+input_child_names = []
+children = {}
+pipes = {}
+
+input_collection = this_job['script_parameters']['input']
+input_index = 0
+for s in arvados.CollectionReader(input_collection).all_streams():
+    for f in s.all_files():
+        if not re.search(r'\.bam$', f.name()):
+            continue
+        input_index += 1
+        childname = 'input-' + str(input_index)
+        input_child_names += [childname]
+        pipe_setup(pipes, childname)
+        childpid = named_fork(children, childname)
+        if childpid == 0:
+            # Child: stream this BAM from Keep into its pipe, then exit.
+            pipe_closeallbut(pipes, (childname, 'w'))
+            for s in f.readall():
+                os.write(pipes[childname, 'w'], s)
+            os.close(pipes[childname, 'w'])
+            os._exit(0)
+        sys.stderr.write("pid %d writing %s to fd %d->%d\n" %
+                         (childpid,
+                          s.name()+'/'+f.name(),
+                          pipes[childname, 'w'],
+                          pipes[childname, 'r']))
+        # Parent keeps only the read ends of all input pipes open.
+        pipe_closeallbut(pipes, *[(childname, 'r')
+                                  for childname in input_child_names])
+
+
+# Merge-sort the input files to merge.bam
+
+# Each input arrives on /dev/fd/<read-end> of its child's pipe.
+arvados_picard.run(
+    'MergeSamFiles',
+    args=[
+        'I=/dev/fd/' + str(pipes[childname, 'r'])
+        for childname in input_child_names
+        ],
+    params={
+        'o': 'merge.bam',
+        'quiet': 'true',
+        'so': 'coordinate',
+        'use_threading': 'true',
+        'create_index': 'true',
+        'validation_stringency': 'LENIENT',
+        },
+    close_fds=False,
+    )
+pipe_closeallbut(pipes)
+
+
+# Run CoverageBySample on merge.bam
+
+# Child 'GATK' writes coverage rows to the 'stats_out' pipe and its log
+# to 'stats_log'; both are consumed by reader threads defined below.
+pipe_setup(pipes, 'stats_log')
+pipe_setup(pipes, 'stats_out')
+if 0 == named_fork(children, 'GATK'):
+    pipe_closeallbut(pipes,
+                     ('stats_log', 'w'),
+                     ('stats_out', 'w'))
+    arvados_gatk2.run(
+        args=[
+            '-T', 'CoverageBySample',
+            '-R', ref_fasta_files[0],
+            '-I', 'merge.bam',
+            '-o', '/dev/fd/' + str(pipes['stats_out', 'w']),
+            '--log_to_file', '/dev/fd/' + str(pipes['stats_log', 'w']),
+            ]
+        + regions_args,
+        close_fds=False)
+    pipe_closeallbut(pipes)
+    os._exit(0)
+pipe_closeallbut(pipes, ('stats_log', 'r'), ('stats_out', 'r'))
+
+
+# Start two threads to read from CoverageBySample pipes
+
+class ExceptionPropagatingThread(threading.Thread):
+    """
+    If a subclassed thread calls _raise(e) in run(), running join() on
+    the thread will raise e in the thread that calls join().
+    """
+    def __init__(self, *args, **kwargs):
+        super(ExceptionPropagatingThread, self).__init__(*args, **kwargs)
+        # Exception recorded by the worker thread, if any.
+        self.__exception = None
+    def join(self, *args, **kwargs):
+        # Wait for the thread, then re-raise any recorded exception in
+        # the joining thread.
+        ret = super(ExceptionPropagatingThread, self).join(*args, **kwargs)
+        if self.__exception:
+            raise self.__exception
+        return ret
+    def _raise(self, exception):
+        # Called from run() in the worker thread to record a failure.
+        self.__exception = exception
+
+class StatsLogReader(ExceptionPropagatingThread):
+    """
+    Scan the GATK log stream for the total number of base pairs
+    processed ("Processing N bp from intervals").
+    """
+    def __init__(self, **kwargs):
+        super(StatsLogReader, self).__init__()
+        self.args = kwargs
+    def run(self):
+        try:
+            for logline in self.args['infile']:
+                x = re.search('Processing (\d+) bp from intervals', logline)
+                if x:
+                    self._total_bp = int(x.group(1))
+        except Exception as e:
+            self._raise(e)
+    def total_bp(self):
+        # Blocks until the log stream is exhausted.
+        # NOTE(review): raises AttributeError if the pattern never
+        # appeared in the log -- confirm GATK always prints this line.
+        self.join()
+        return self._total_bp
+# Consume the GATK log from the 'stats_log' pipe in the background.
+stats_log_thr = StatsLogReader(infile=os.fdopen(pipes.pop(('stats_log', 'r'))))
+stats_log_thr.start()
+
+class StatsOutReader(ExceptionPropagatingThread):
+    """
+    Read output of CoverageBySample and collect a histogram of
+    coverage (last column) -> number of loci (number of rows).
+    """
+    def __init__(self, **kwargs):
+        super(StatsOutReader, self).__init__()
+        self.args = kwargs
+    def run(self):
+        try:
+            hist = [0]
+            histtot = 0
+            for line in self.args['infile']:
+                try:
+                    # Coverage is the last whitespace-separated column.
+                    i = int(string.split(line)[-1])
+                except ValueError:
+                    continue
+                if i >= 1:
+                    # Grow the histogram as deeper coverage appears.
+                    if len(hist) <= i:
+                        hist.extend([0 for x in range(1+i-len(hist))])
+                    hist[i] += 1
+                    histtot += 1
+            # Loci never reported have coverage 0: total targeted bases
+            # (from the log reader) minus the rows counted above.
+            hist[0] = stats_log_thr.total_bp() - histtot
+            self._histogram = hist
+        except Exception as e:
+            self._raise(e)
+    def histogram(self):
+        # Blocks until the output stream is exhausted.
+        self.join()
+        return self._histogram
+# Consume CoverageBySample rows from 'stats_out' in the background.
+stats_out_thr = StatsOutReader(infile=os.fdopen(pipes.pop(('stats_out', 'r'))))
+stats_out_thr.start()
+
+
+# Run UnifiedGenotyper on merge.bam
+
+# Variant calling over the merged BAM; extra annotations (-A) and any
+# job-supplied GATK2_UnifiedGenotyper_args are appended.
+arvados_gatk2.run(
+    args=[
+        '-nt', arvados_gatk2.cpus_on_this_node(),
+        '-T', 'UnifiedGenotyper',
+        '-R', ref_fasta_files[0],
+        '-I', 'merge.bam',
+        '-o', os.path.join(tmpdir, 'out.vcf'),
+        '--dbsnp', os.path.join(bundle_dir, 'dbsnp_137.b37.vcf'),
+        '-metrics', 'UniGenMetrics',
+        '-A', 'DepthOfCoverage',
+        '-A', 'AlleleBalance',
+        '-A', 'QualByDepth',
+        '-A', 'HaplotypeScore',
+        '-A', 'MappingQualityRankSumTest',
+        '-A', 'ReadPosRankSumTest',
+        '-A', 'FisherStrand',
+        '-glm', 'both',
+        ]
+    + regions_args
+    + arvados.getjobparam('GATK2_UnifiedGenotyper_args',[]))
+
+# Copy the output VCF file to Keep
+
+out = arvados.CollectionWriter()
+out.start_new_stream()
+out.start_new_file('out.vcf')
+out.write(open(os.path.join(tmpdir, 'out.vcf'), 'rb'))
+
+
+# Write statistics to Keep
+
+# Each CSV row is: minimum coverage i, number of loci with coverage
+# >= i, and that count as a percentage of all targeted bases.
+out.start_new_file('mincoverage_nlocus.csv')
+sofar = 0
+hist = stats_out_thr.histogram()
+total_bp = stats_log_thr.total_bp()
+for i in range(len(hist)):
+    out.write("%d,%d,%f\n" %
+              (i,
+               total_bp - sofar,
+               100.0 * (total_bp - sofar) / total_bp))
+    sofar += hist[i]
+
+# Claim success (and set the task output) only if every forked child
+# exited cleanly; sys comes from the arvados_ipc star import.
+if waitpid_and_check_children(children):
+    this_task.set_output(out.finish())
+else:
+    sys.exit(1)
diff --git a/crunch_scripts/GATK2-realign b/crunch_scripts/GATK2-realign
new file mode 100755 (executable)
index 0000000..be45828
--- /dev/null
@@ -0,0 +1,160 @@
+#!/usr/bin/env python
+# Crunch script: GATK2 local indel realignment, one BAM per task.
+# Runs RealignerTargetCreator, then streams IndelRealigner output
+# through a pipeline of forked children that simultaneously write the
+# realigned BAM to Keep and build + store its .bai index.  The task
+# output is the concatenation of both children's manifests.
+
+import os
+import re
+import arvados
+import arvados_gatk2
+import arvados_picard
+import arvados_samtools
+from arvados_ipc import *
+
+class InvalidArgumentError(Exception):
+    pass
+
+# Fan out: one task per BAM file in the job's input collection.
+arvados_samtools.one_task_per_bam_file(if_sequence=0, and_end_task=True)
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+tmpdir = arvados.current_task().tmpdir
+arvados.util.clear_tmpdir()
+
+# Known-sites VCFs for the realigner (overridable job parameter).
+known_sites_files = arvados.getjobparam(
+    'known_sites',
+    ['dbsnp_137.b37.vcf',
+     'Mills_and_1000G_gold_standard.indels.b37.vcf',
+     ])
+# Extract the reference plus each known-sites VCF and its .idx companion.
+bundle_dir = arvados.util.collection_extract(
+    collection = this_job['script_parameters']['gatk_bundle'],
+    files = [
+        'human_g1k_v37.dict',
+        'human_g1k_v37.fasta',
+        'human_g1k_v37.fasta.fai'
+        ] + known_sites_files + [v + '.idx' for v in known_sites_files],
+    path = 'gatk_bundle')
+ref_fasta_files = [os.path.join(bundle_dir, f)
+                   for f in os.listdir(bundle_dir)
+                   if re.search(r'\.fasta(\.gz)?$', f)]
+# Optional target regions: every .bed file in the 'regions' collection,
+# padded by 'region_padding' bases on each side.
+regions_args = []
+if 'regions' in this_job['script_parameters']:
+    regions_dir = arvados.util.collection_extract(
+        collection = this_job['script_parameters']['regions'],
+        path = 'regions')
+    region_padding = int(this_job['script_parameters']['region_padding'])
+    for f in os.listdir(regions_dir):
+        if re.search(r'\.bed$', f):
+            regions_args += [
+                '--intervals', os.path.join(regions_dir, f),
+                '--interval_padding', str(region_padding)
+                ]
+
+# Fetch this task's input collection; exactly one BAM is expected.
+input_collection = this_task['parameters']['input']
+input_dir = arvados.util.collection_extract(
+    collection = input_collection,
+    path = os.path.join(this_task.tmpdir, 'input'))
+input_bam_files = []
+for f in arvados.util.listdir_recursive(input_dir):
+    if re.search(r'\.bam$', f):
+        input_stream_name, input_file_name = os.path.split(f)
+        input_bam_files += [os.path.join(input_dir, f)]
+if len(input_bam_files) != 1:
+    raise InvalidArgumentError("Expected exactly one bam file per task.")
+
+known_sites_args = []
+for f in known_sites_files:
+    known_sites_args += ['-known', os.path.join(bundle_dir, f)]
+
+children = {}
+pipes = {}
+
+# Step 1: find the intervals that need realignment.
+arvados_gatk2.run(
+    args=[
+        '-nt', arvados_gatk2.cpus_per_task(),
+        '-T', 'RealignerTargetCreator',
+        '-R', ref_fasta_files[0],
+        '-I', input_bam_files[0],
+        '-o', os.path.join(tmpdir, 'intervals.list')
+        ] + known_sites_args + regions_args)
+
+# Step 2: child 'IndelRealigner' writes the realigned BAM into a pipe.
+pipe_setup(pipes, 'IndelRealigner')
+if 0 == named_fork(children, 'IndelRealigner'):
+    pipe_closeallbut(pipes, ('IndelRealigner', 'w'))
+    arvados_gatk2.run(
+        args=[
+        '-T', 'IndelRealigner',
+        '-R', ref_fasta_files[0],
+        '-targetIntervals', os.path.join(tmpdir, 'intervals.list'),
+        '-I', input_bam_files[0],
+        '-o', '/dev/fd/' + str(pipes['IndelRealigner','w']),
+        '--disable_bam_indexing',
+        ] + known_sites_args + regions_args,
+        close_fds=False)
+    os._exit(0)
+# Parent: close the write end so EOF propagates when the child exits.
+# NOTE(review): pop() with default None would make os.close(None) raise
+# TypeError if the key were ever absent -- confirm the key always exists.
+os.close(pipes.pop(('IndelRealigner','w'), None))
+
+# Step 3: child 'bammanifest' tees the realigned BAM into Keep (its
+# manifest is sent back over the 'bammanifest' pipe) and into the 'bam'
+# pipe for the indexing child.
+pipe_setup(pipes, 'bammanifest')
+pipe_setup(pipes, 'bam')
+if 0==named_fork(children, 'bammanifest'):
+    pipe_closeallbut(pipes,
+                     ('IndelRealigner', 'r'),
+                     ('bammanifest', 'w'),
+                     ('bam', 'w'))
+    out = arvados.CollectionWriter()
+    out.start_new_stream(input_stream_name)
+    out.start_new_file(input_file_name)
+    while True:
+        buf = os.read(pipes['IndelRealigner','r'], 2**20)
+        if len(buf) == 0:
+            break
+        os.write(pipes['bam','w'], buf)
+        out.write(buf)
+    os.write(pipes['bammanifest','w'], out.manifest_text())
+    os.close(pipes['bammanifest','w'])
+    os._exit(0)
+
+# Step 4: child 'index' builds a BAM index from the 'bam' pipe and
+# writes it to the 'index' pipe.
+pipe_setup(pipes, 'index')
+if 0==named_fork(children, 'index'):
+    pipe_closeallbut(pipes, ('bam', 'r'), ('index', 'w'))
+    arvados_picard.run(
+        'BuildBamIndex',
+        params={
+            'i': '/dev/fd/' + str(pipes['bam','r']),
+            'o': '/dev/fd/' + str(pipes['index','w']),
+            'quiet': 'true',
+            'validation_stringency': 'LENIENT'
+            },
+        close_fds=False)
+    os._exit(0)
+
+# Step 5: child 'indexmanifest' stores the .bai in Keep and sends the
+# manifest back over the 'indexmanifest' pipe.
+pipe_setup(pipes, 'indexmanifest')
+if 0==named_fork(children, 'indexmanifest'):
+    pipe_closeallbut(pipes, ('index', 'r'), ('indexmanifest', 'w'))
+    out = arvados.CollectionWriter()
+    out.start_new_stream(input_stream_name)
+    out.start_new_file(re.sub('\.bam$', '.bai', input_file_name))
+    while True:
+        buf = os.read(pipes['index','r'], 2**20)
+        if len(buf) == 0:
+            break
+        out.write(buf)
+    os.write(pipes['indexmanifest','w'], out.manifest_text())
+    os.close(pipes['indexmanifest','w'])
+    os._exit(0)
+
+# Parent: collect both manifests; their concatenation is the output.
+pipe_closeallbut(pipes, ('bammanifest', 'r'), ('indexmanifest', 'r'))
+outmanifest = ''
+for which in ['bammanifest', 'indexmanifest']:
+    with os.fdopen(pipes[which,'r'], 'rb', 2**20) as f:
+        while True:
+            buf = f.read()
+            if buf == '':
+                break
+            outmanifest += buf
+
+# Succeed only if every forked child exited cleanly.
+all_ok = True
+for (childname, pid) in children.items():
+    all_ok = all_ok and waitpid_and_check_exit(pid, childname)
+
+if all_ok:
+    this_task.set_output(outmanifest)
+else:
+    sys.exit(1)
diff --git a/crunch_scripts/arvados-bcbio-nextgen.py b/crunch_scripts/arvados-bcbio-nextgen.py
new file mode 100755 (executable)
index 0000000..9351b05
--- /dev/null
@@ -0,0 +1,142 @@
+#!/usr/bin/python
+# Crunch script: run a bcbio-nextgen FreeBayes variant-calling pipeline
+# inside the task's container image and save the pipeline's "final"
+# directory to Keep as the task output.
+
+import arvados
+import subprocess
+import crunchutil.subst as subst
+import shutil
+import os
+import sys
+import time
+
+# Task parameters override job parameters when running as a subtask.
+if len(arvados.current_task()['parameters']) > 0:
+    p = arvados.current_task()['parameters']
+else:
+    p = arvados.current_job()['script_parameters']
+
+# NOTE(review): t is assigned but never used below.
+t = arvados.current_task().tmpdir
+
+# Replace the image's galaxy config path with a writable directory.
+# NOTE(review): os.unlink implies the path is expected to be a symlink
+# or file in the image -- confirm; unlink fails on a real directory.
+os.unlink("/usr/local/share/bcbio-nextgen/galaxy")
+os.mkdir("/usr/local/share/bcbio-nextgen/galaxy")
+shutil.copy("/usr/local/share/bcbio-nextgen/config/bcbio_system.yaml", "/usr/local/share/bcbio-nextgen/galaxy")
+
+# Write the galaxy tool-data table config referencing the .loc files
+# generated below.
+with open("/usr/local/share/bcbio-nextgen/galaxy/tool_data_table_conf.xml", "w") as f:
+    f.write('''<tables>
+    <!-- Locations of indexes in the BWA mapper format -->
+    <table name="bwa_indexes" comment_char="#">
+        <columns>value, dbkey, name, path</columns>
+        <file path="tool-data/bwa_index.loc" />
+    </table>
+    <!-- Locations of indexes in the Bowtie2 mapper format -->
+    <table name="bowtie2_indexes" comment_char="#">
+        <columns>value, dbkey, name, path</columns>
+        <file path="tool-data/bowtie2_indices.loc" />
+    </table>
+    <!-- Locations of indexes in the Bowtie2 mapper format for TopHat2 to use -->
+    <table name="tophat2_indexes" comment_char="#">
+        <columns>value, dbkey, name, path</columns>
+        <file path="tool-data/bowtie2_indices.loc" />
+    </table>
+    <!-- Location of SAMTools indexes and other files -->
+    <table name="sam_fa_indexes" comment_char="#">
+        <columns>index, value, path</columns>
+        <file path="tool-data/sam_fa_indices.loc" />
+    </table>
+    <!-- Location of Picard dict file and other files -->
+    <table name="picard_indexes" comment_char="#">
+        <columns>value, dbkey, name, path</columns>
+        <file path="tool-data/picard_index.loc" />
+    </table>
+    <!-- Location of Picard dict files valid for GATK -->
+    <table name="gatk_picard_indexes" comment_char="#">
+        <columns>value, dbkey, name, path</columns>
+        <file path="tool-data/gatk_sorted_picard_index.loc" />
+    </table>
+</tables>
+''')
+
+os.mkdir("/usr/local/share/bcbio-nextgen/galaxy/tool-data")
+
+# Each .loc file maps the GRCh37 build to reference data taken from job
+# parameters; $(file ...)/$(dir ...) are expanded by crunchutil.subst.
+with open("/usr/local/share/bcbio-nextgen/galaxy/tool-data/bowtie2_indices.loc", "w") as f:
+    f.write(subst.do_substitution(p, "GRCh37\tGRCh37\tHuman (GRCh37)\t$(dir $(bowtie2_indices))\n"))
+
+with open("/usr/local/share/bcbio-nextgen/galaxy/tool-data/bwa_index.loc", "w") as f:
+    f.write(subst.do_substitution(p, "GRCh37\tGRCh37\tHuman (GRCh37)\t$(file $(bwa_index))\n"))
+
+with open("/usr/local/share/bcbio-nextgen/galaxy/tool-data/gatk_sorted_picard_index.loc", "w") as f:
+    f.write(subst.do_substitution(p, "GRCh37\tGRCh37\tHuman (GRCh37)\t$(file $(gatk_sorted_picard_index))\n"))
+
+with open("/usr/local/share/bcbio-nextgen/galaxy/tool-data/picard_index.loc", "w") as f:
+    f.write(subst.do_substitution(p, "GRCh37\tGRCh37\tHuman (GRCh37)\t$(file $(picard_index))\n"))
+
+with open("/usr/local/share/bcbio-nextgen/galaxy/tool-data/sam_fa_indices.loc", "w") as f:
+    f.write(subst.do_substitution(p, "index\tGRCh37\t$(file $(sam_fa_indices))\n"))
+
+# bcbio-nextgen project template: GATK-free FreeBayes calling, no
+# recalibration or realignment.
+with open("/tmp/crunch-job/freebayes-variant.yaml", "w") as f:
+    f.write('''
+# Template for whole genome Illumina variant calling with FreeBayes
+# This is a GATK-free pipeline without post-alignment BAM pre-processing
+# (recalibration and realignment)
+---
+details:
+  - analysis: variant2
+    genome_build: GRCh37
+    # to do multi-sample variant calling, assign samples the same metadata / batch
+    # metadata:
+    #   batch: your-arbitrary-batch-name
+    algorithm:
+      aligner: bwa
+      mark_duplicates: true
+      recalibrate: false
+      realign: false
+      variantcaller: freebayes
+      platform: illumina
+      quality_format: Standard
+      # for targetted projects, set the region
+      # variant_regions: /path/to/your.bed
+''')
+
+# Point the image's gemini_data path at the mounted job parameter.
+os.unlink("/usr/local/share/bcbio-nextgen/gemini_data")
+os.symlink(arvados.get_job_param_mount("gemini_data"), "/usr/local/share/bcbio-nextgen/gemini_data")
+
+os.chdir(arvados.current_task().tmpdir)
+
+# Generate the project config from the template and the R1/R2 fastqs.
+# NOTE(review): this rcode is overwritten below without being checked.
+rcode = subprocess.call(["bcbio_nextgen.py", "--workflow", "template", "/tmp/crunch-job/freebayes-variant.yaml", "project1",
+                         subst.do_substitution(p, "$(file $(R1))"),
+                         subst.do_substitution(p, "$(file $(R2))")])
+
+os.chdir("project1/work")
+
+os.symlink("/usr/local/share/bcbio-nextgen/galaxy/tool-data", "tool-data")
+
+# Run the pipeline with one worker per Crunch node slot.
+rcode = subprocess.call(["bcbio_nextgen.py", "../config/project1.yaml", "-n", os.environ['CRUNCH_NODE_SLOTS']])
+
+print("run-command: completed with exit code %i (%s)" % (rcode, "success" if rcode == 0 else "failed"))
+
+if rcode == 0:
+    os.chdir("../final")
+
+    print("arvados-bcbio-nextgen: the follow output files will be saved to keep:")
+
+    subprocess.call(["find", ".", "-type", "f", "-printf", "arvados-bcbio-nextgen: %12.12s %h/%f\\n"])
+
+    print("arvados-bcbio-nextgen: start writing output to keep")
+
+    # Retry the whole upload until it succeeds; transient Keep/API
+    # failures trigger another attempt after a short sleep.
+    done = False
+    api = arvados.api('v1')
+    while not done:
+        try:
+            out = arvados.CollectionWriter()
+            out.write_directory_tree(".", max_manifest_depth=0)
+            outuuid = out.finish()
+            api.job_tasks().update(uuid=arvados.current_task()['uuid'],
+                                                 body={
+                                                     'output':outuuid,
+                                                     'success': (rcode == 0),
+                                                     'progress':1.0
+                                                 }).execute()
+            done = True
+        except Exception as e:
+            print("arvados-bcbio-nextgen: caught exception: {}".format(e))
+            time.sleep(5)
+
+sys.exit(rcode)
diff --git a/crunch_scripts/arvados_bwa.py b/crunch_scripts/arvados_bwa.py
new file mode 100644 (file)
index 0000000..d4d8207
--- /dev/null
@@ -0,0 +1,111 @@
+import arvados
+import re
+import os
+import sys
+import fcntl
+import subprocess
+
+# Cached extraction/build path, set on first call to install_path().
+bwa_install_path = None
+
+def install_path():
+    """
+    Extract the bwa source tree, build the bwa binary, and return the
+    path to the source tree.
+
+    The result is cached in the module global bwa_install_path, so the
+    tarball is extracted and built at most once per process.
+    """
+    global bwa_install_path
+    if bwa_install_path:
+        return bwa_install_path
+
+    bwa_install_path = arvados.util.tarball_extract(
+        tarball = arvados.current_job()['script_parameters']['bwa_tbz'],
+        path = 'bwa')
+
+    # build "bwa" binary
+    # An exclusive lock file next to the extraction directory serializes
+    # the build across concurrent tasks sharing this node.
+    lockfile = open(os.path.split(bwa_install_path)[0] + '.bwa-make.lock',
+                    'w')
+    fcntl.flock(lockfile, fcntl.LOCK_EX)
+    arvados.util.run_command(['make', '-j16'], cwd=bwa_install_path)
+    lockfile.close()  # closing the file releases the flock
+
+    return bwa_install_path
+
+def bwa_binary():
+    """
+    Return the path to the bwa executable.
+
+    Triggers tarball extraction and build via install_path() on first
+    use.
+    """
+    return os.path.join(install_path(), 'bwa')
+
+def run(command, command_args, **kwargs):
+    """
+    Build and run the bwa binary.
+
+    command is the bwa module, e.g., "index" or "aln".
+
+    command_args is a list of additional command line arguments, e.g.,
+    ['-a', 'bwtsw', 'ref.fasta']
+
+    It is assumed that we are running in a Crunch job environment, and
+    the job's "bwa_tbz" parameter is a collection containing the bwa
+    source tree in a .tbz file.
+
+    Optional keyword arguments 'stdin' and 'stdout' are passed through
+    to arvados.util.run_command; by default stdout is redirected to
+    stderr so bwa's output lands in the task log.
+    """
+    execargs = [bwa_binary(),
+                command]
+    execargs += command_args
+    sys.stderr.write("%s.run: exec %s\n" % (__name__, str(execargs)))
+    arvados.util.run_command(
+        execargs,
+        cwd=arvados.current_task().tmpdir,
+        stderr=sys.stderr,
+        stdin=kwargs.get('stdin', subprocess.PIPE),
+        stdout=kwargs.get('stdout', sys.stderr))
+
+def one_task_per_pair_input_file(if_sequence=0, and_end_task=True):
+    """
+    Queue one task for each pair of fastq files in this job's input
+    collection.
+
+    Each new task will have two parameters, named "input_1" and
+    "input_2", each being a manifest containing a single fastq file.
+
+    A matching pair of files in the input collection is assumed to
+    have names "x_1.y" and "x_2.y".
+
+    Files in the input collection that are not part of a matched pair
+    are silently ignored.
+
+    if_sequence and and_end_task arguments have the same significance
+    as in arvados.job_setup.one_task_per_input_file().
+    """
+    # Only run the fan-out at the designated task sequence number.
+    if if_sequence != arvados.current_task()['sequence']:
+        return
+    job_input = arvados.current_job()['script_parameters']['input']
+    cr = arvados.CollectionReader(job_input)
+    # NOTE(review): all_files is collected but never used below.
+    all_files = []
+    for s in cr.all_streams():
+        all_files += list(s.all_files())
+    # Pair each "_1." file with its "_2." mate in the same stream.
+    # NOTE(review): quadratic in files per stream; acceptable for
+    # typical collection sizes.
+    for s in cr.all_streams():
+        for left_file in s.all_files():
+            left_name = left_file.name()
+            right_file = None
+            # Replace the last "_1." with "_2." to get the mate's name;
+            # files that do not contain "_1." are left unchanged and
+            # skipped by the equality test below.
+            right_name = re.sub(r'(.*_)1\.', '\g<1>2.', left_name)
+            if right_name == left_name:
+                continue
+            for f2 in s.all_files():
+                if right_name == f2.name():
+                    right_file = f2
+            if right_file != None:
+                new_task_attrs = {
+                    'job_uuid': arvados.current_job()['uuid'],
+                    'created_by_job_task_uuid': arvados.current_task()['uuid'],
+                    'sequence': if_sequence + 1,
+                    'parameters': {
+                        'input_1':left_file.as_manifest(),
+                        'input_2':right_file.as_manifest()
+                        }
+                    }
+                arvados.api().job_tasks().create(body=new_task_attrs).execute()
+    if and_end_task:
+        # Mark the fan-out task successful and stop executing it.
+        arvados.api().job_tasks().update(uuid=arvados.current_task()['uuid'],
+                                   body={'success':True}
+                                   ).execute()
+        exit(0)
diff --git a/crunch_scripts/arvados_gatk2.py b/crunch_scripts/arvados_gatk2.py
new file mode 100644 (file)
index 0000000..47951ec
--- /dev/null
@@ -0,0 +1,48 @@
+import arvados
+import re
+import os
+import sys
+import fcntl
+import subprocess
+
+gatk2_install_path = None
+
+def install_path():
+    global gatk2_install_path
+    if gatk2_install_path:
+        return gatk2_install_path
+    gatk2_install_path = arvados.util.tarball_extract(
+        tarball = arvados.current_job()['script_parameters']['gatk_tbz'],
+        path = 'gatk2')
+    return gatk2_install_path
+
+def memory_limit():
+    taskspernode = int(os.environ.get('CRUNCH_NODE_SLOTS', '1'))
+    with open('/proc/meminfo', 'r') as f:
+        ram = int(re.search(r'MemTotal:\s*(\d+)', f.read()).group(1)) / 1024
+    if taskspernode > 1:
+        ram = ram / taskspernode
+    return max(ram-700, 500)
+
+def cpus_on_this_node():
+    with open('/proc/cpuinfo', 'r') as cpuinfo:
+        return max(int(os.environ.get('SLURM_CPUS_ON_NODE', 1)),
+                   len(re.findall(r'^processor\s*:\s*\d',
+                                  cpuinfo.read(),
+                                  re.MULTILINE)))
+
+def cpus_per_task():
+    return max(1, (cpus_on_this_node()
+                   / int(os.environ.get('CRUNCH_NODE_SLOTS', 1))))
+
+def run(**kwargs):
+    kwargs.setdefault('cwd', arvados.current_task().tmpdir)
+    kwargs.setdefault('stdout', sys.stderr)
+    execargs = ['java',
+                '-Xmx%dm' % memory_limit(),
+                '-Djava.io.tmpdir=' + arvados.current_task().tmpdir,
+                '-jar', os.path.join(install_path(), 'GenomeAnalysisTK.jar')]
+    execargs += [str(arg) for arg in kwargs.pop('args', [])]
+    sys.stderr.write("%s.run: exec %s\n" % (__name__, str(execargs)))
+    return arvados.util.run_command(execargs, **kwargs)
+
diff --git a/crunch_scripts/arvados_ipc.py b/crunch_scripts/arvados_ipc.py
new file mode 100644 (file)
index 0000000..7197e97
--- /dev/null
@@ -0,0 +1,47 @@
+import os
+import re
+import sys
+import subprocess
+
+def pipe_setup(pipes, name):
+    pipes[name,'r'], pipes[name,'w'] = os.pipe()
+
+def pipe_closeallbut(pipes, *keepus):
+    for n,m in pipes.keys():
+        if (n,m) not in keepus:
+            os.close(pipes.pop((n,m), None))
+
+def named_fork(children, name):
+    children[name] = os.fork()
+    return children[name]
+
+def waitpid_and_check_children(children):
+    """
+    Given a dict of childname->pid, wait for each child process to
+    finish, and report non-zero exit status on stderr. Return True if
+    all children exited 0.
+    """
+    all_ok = True
+    for (childname, pid) in children.items():
+        # all_ok must be on RHS here -- we need to call waitpid() on
+        # every child, even if all_ok is already False.
+        all_ok = waitpid_and_check_exit(pid, childname) and all_ok
+    return all_ok
+
+def waitpid_and_check_exit(pid, childname=''):
+    """
+    Wait for a child process to finish. If it exits non-zero, report
+    exit status on stderr (mentioning the given childname) and return
+    False. If it exits zero, return True.
+    """
+    _, childstatus = os.waitpid(pid, 0)
+    exitvalue = childstatus >> 8
+    signal = childstatus & 127
+    dumpedcore = childstatus & 128
+    if childstatus != 0:
+        sys.stderr.write("%s child %d failed: exit %d signal %d core %s\n"
+                         % (childname, pid, exitvalue, signal,
+                            ('y' if dumpedcore else 'n')))
+        return False
+    return True
+
diff --git a/crunch_scripts/arvados_picard.py b/crunch_scripts/arvados_picard.py
new file mode 100644 (file)
index 0000000..de9adeb
--- /dev/null
@@ -0,0 +1,38 @@
+import arvados
+import re
+import os
+import sys
+import fcntl
+import subprocess
+
+picard_install_path = None
+
+def install_path():
+    global picard_install_path
+    if picard_install_path:
+        return picard_install_path
+    zipball = arvados.current_job()['script_parameters']['picard_zip']
+    extracted = arvados.util.zipball_extract(
+        zipball = zipball,
+        path = 'picard')
+    for f in os.listdir(extracted):
+        if (re.search(r'^picard-tools-[\d\.]+$', f) and
+            os.path.exists(os.path.join(extracted, f, '.'))):
+            picard_install_path = os.path.join(extracted, f)
+            break
+    if not picard_install_path:
+        raise Exception("picard-tools-{version} directory not found in %s" %
+                        zipball)
+    return picard_install_path
+
+def run(module, **kwargs):
+    kwargs.setdefault('cwd', arvados.current_task().tmpdir)
+    execargs = ['java',
+                '-Xmx1500m',
+                '-Djava.io.tmpdir=' + arvados.current_task().tmpdir,
+                '-jar', os.path.join(install_path(), module + '.jar')]
+    execargs += [str(arg) for arg in kwargs.pop('args', [])]
+    for key, value in kwargs.pop('params', {}).items():
+        execargs += [key.upper() + '=' + str(value)]
+    sys.stderr.write("%s.run: exec %s\n" % (__name__, str(execargs)))
+    return arvados.util.run_command(execargs, **kwargs)
diff --git a/crunch_scripts/arvados_samtools.py b/crunch_scripts/arvados_samtools.py
new file mode 100644 (file)
index 0000000..6f4d966
--- /dev/null
@@ -0,0 +1,106 @@
+import arvados
+import re
+import os
+import sys
+import fcntl
+import subprocess
+
+samtools_path = None
+
+def samtools_install_path():
+    """
+    Extract the samtools source tree, build the samtools binary, and
+    return the path to the source tree.
+    """
+    global samtools_path
+    if samtools_path:
+        return samtools_path
+    samtools_path = arvados.util.tarball_extract(
+        tarball = arvados.current_job()['script_parameters']['samtools_tgz'],
+        path = 'samtools')
+
+    # build "samtools" binary
+    lockfile = open(os.path.split(samtools_path)[0] + '.samtools-make.lock',
+                    'w')
+    fcntl.flock(lockfile, fcntl.LOCK_EX)
+    arvados.util.run_command(['make', '-j16'], cwd=samtools_path)
+    lockfile.close()
+
+    return samtools_path
+
+def samtools_binary():
+    """
+    Return the path to the samtools executable.
+    """
+    return os.path.join(samtools_install_path(), 'samtools')
+
+def run(command, command_args, **kwargs):
+    """
+    Build and run the samtools binary.
+
+    command is the samtools subcommand, e.g., "view" or "sort".
+
+    command_args is a list of additional command line arguments, e.g.,
+    ['-bt', 'ref_list.txt', '-o', 'aln.bam', 'aln.sam.gz']
+
+    It is assumed that we are running in a Crunch job environment, and
+    the job's "samtools_tgz" parameter is a collection containing the
+    samtools source tree in a .tgz file.
+    """
+    execargs = [samtools_binary(),
+                command]
+    execargs += command_args
+    sys.stderr.write("%s.run: exec %s\n" % (__name__, str(execargs)))
+    arvados.util.run_command(
+        execargs,
+        cwd=arvados.current_task().tmpdir,
+        stdin=kwargs.get('stdin', subprocess.PIPE),
+        stderr=kwargs.get('stderr', sys.stderr),
+        stdout=kwargs.get('stdout', sys.stderr))
+
+def one_task_per_bam_file(if_sequence=0, and_end_task=True):
+    """
+    Queue one task for each bam file in this job's input collection.
+
+    Each new task will have an "input" parameter: a manifest
+    containing one .bam file and (if available) the corresponding .bai
+    index file.
+
+    Files in the input collection that are not named *.bam or *.bai
+    (as well as *.bai files that do not match any .bam file present)
+    are silently ignored.
+
+    if_sequence and and_end_task arguments have the same significance
+    as in arvados.job_setup.one_task_per_input_file().
+    """
+    if if_sequence != arvados.current_task()['sequence']:
+        return
+    job_input = arvados.current_job()['script_parameters']['input']
+    cr = arvados.CollectionReader(job_input)
+    bam = {}
+    bai = {}
+    for s in cr.all_streams():
+        for f in s.all_files():
+            if re.search(r'\.bam$', f.name()):
+                bam[s.name(), f.name()] = f
+            elif re.search(r'\.bai$', f.name()):
+                bai[s.name(), f.name()] = f
+    for ((s_name, f_name), bam_f) in bam.items():
+        bai_f = bai.get((s_name, re.sub(r'bam$', 'bai', f_name)), None)
+        task_input = bam_f.as_manifest()
+        if bai_f:
+            task_input += bai_f.as_manifest()
+        new_task_attrs = {
+            'job_uuid': arvados.current_job()['uuid'],
+            'created_by_job_task_uuid': arvados.current_task()['uuid'],
+            'sequence': if_sequence + 1,
+            'parameters': {
+                'input': task_input
+                }
+            }
+        arvados.api().job_tasks().create(body=new_task_attrs).execute()
+    if and_end_task:
+        arvados.api().job_tasks().update(uuid=arvados.current_task()['uuid'],
+                                         body={'success':True}
+                                         ).execute()
+        exit(0)
diff --git a/crunch_scripts/bwa-aln b/crunch_scripts/bwa-aln
new file mode 100755 (executable)
index 0000000..89e8b3a
--- /dev/null
@@ -0,0 +1,124 @@
+#!/usr/bin/env python
+
+import arvados
+import arvados_bwa
+import arvados_samtools
+import os
+import re
+import sys
+import subprocess
+
+arvados_bwa.one_task_per_pair_input_file(if_sequence=0, and_end_task=True)
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+ref_dir = arvados.util.collection_extract(
+    collection = this_job['script_parameters']['reference_index'],
+    path = 'reference',
+    decompress = False)
+
+ref_basename = None
+for f in os.listdir(ref_dir):
+    basename = re.sub(r'\.bwt$', '', f)
+    if basename != f:
+        ref_basename = os.path.join(ref_dir, basename)
+if ref_basename == None:
+    raise Exception("Could not find *.bwt in reference collection.")
+
+tmp_dir = arvados.current_task().tmpdir
+
+class Aligner:
+    def input_filename(self):
+        for s in arvados.CollectionReader(self.collection).all_streams():
+            for f in s.all_files():
+                return f.decompressed_name()
+    def generate_input(self):
+        for s in arvados.CollectionReader(self.collection).all_streams():
+            for f in s.all_files():
+                for s in f.readall_decompressed():
+                    yield s
+    def aln(self, input_param):
+        self.collection = this_task['parameters'][input_param]
+        reads_filename = os.path.join(tmp_dir, self.input_filename())
+        aln_filename = os.path.join(tmp_dir, self.input_filename() + '.sai')
+        reads_pipe_r, reads_pipe_w = os.pipe()
+        if os.fork() == 0:
+            os.close(reads_pipe_r)
+            reads_file = open(reads_filename, 'wb')
+            for s in self.generate_input():
+                if len(s) != os.write(reads_pipe_w, s):
+                    raise Exception("short write")
+                reads_file.write(s)
+            reads_file.close()
+            os.close(reads_pipe_w)
+            sys.exit(0)
+        os.close(reads_pipe_w)
+
+        aln_file = open(aln_filename, 'wb')
+        bwa_proc = subprocess.Popen(
+            [arvados_bwa.bwa_binary(),
+             'aln', '-t', '16',
+             ref_basename,
+             '-'],
+            stdin=os.fdopen(reads_pipe_r, 'rb', 2**20),
+            stdout=aln_file)
+        aln_file.close()
+        return reads_filename, aln_filename
+
+reads_1, alignments_1 = Aligner().aln('input_1')
+reads_2, alignments_2 = Aligner().aln('input_2')
+pid1, exit1 = os.wait()
+pid2, exit2 = os.wait()
+if exit1 != 0 or exit2 != 0:
+    raise Exception("bwa aln exited non-zero (0x%x, 0x%x)" % (exit1, exit2))
+
+# output alignments in sam format to pipe
+sam_pipe_r, sam_pipe_w = os.pipe()
+sam_pid = os.fork()
+if sam_pid != 0:
+    # parent
+    os.close(sam_pipe_w)
+else:
+    # child
+    os.close(sam_pipe_r)
+    arvados_bwa.run('sampe',
+                    [ref_basename,
+                     alignments_1, alignments_2,
+                     reads_1, reads_2],
+                    stdout=os.fdopen(sam_pipe_w, 'wb', 2**20))
+    sys.exit(0)
+
+# convert sam (sam_pipe_r) to bam (bam_pipe_w)
+bam_pipe_r, bam_pipe_w = os.pipe()
+bam_pid = os.fork()
+if bam_pid != 0:
+    # parent
+    os.close(bam_pipe_w)
+    os.close(sam_pipe_r)
+else:
+    # child
+    os.close(bam_pipe_r)
+    arvados_samtools.run('view',
+                         ['-S', '-b',
+                          '-'],
+                         stdin=os.fdopen(sam_pipe_r, 'rb', 2**20),
+                         stdout=os.fdopen(bam_pipe_w, 'wb', 2**20))
+    sys.exit(0)
+
+# copy bam (bam_pipe_r) to Keep
+out_bam_filename = os.path.split(reads_1)[-1] + '.bam'
+out = arvados.CollectionWriter()
+out.start_new_stream()
+out.start_new_file(out_bam_filename)
+out.write(os.fdopen(bam_pipe_r, 'rb', 2**20))
+
+# make sure everyone exited nicely
+pid3, exit3 = os.waitpid(sam_pid, 0)
+if exit3 != 0:
+    raise Exception("bwa sampe exited non-zero (0x%x)" % exit3)
+pid4, exit4 = os.waitpid(bam_pid, 0)
+if exit4 != 0:
+    raise Exception("samtools view exited non-zero (0x%x)" % exit4)
+
+# proclaim success
+this_task.set_output(out.finish())
diff --git a/crunch_scripts/bwa-index b/crunch_scripts/bwa-index
new file mode 100755 (executable)
index 0000000..3b21549
--- /dev/null
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+
+import arvados
+import arvados_bwa
+import os
+import re
+import sys
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+ref_dir = arvados.util.collection_extract(
+    collection = this_job['script_parameters']['input'],
+    path = 'reference',
+    decompress = False)
+
+ref_fasta_files = (os.path.join(ref_dir, f)
+                   for f in os.listdir(ref_dir)
+                   if re.search(r'\.fasta(\.gz)?$', f))
+
+# build reference index
+arvados_bwa.run('index',
+                ['-a', 'bwtsw'] + list(ref_fasta_files))
+
+# move output files to new empty directory
+out_dir = os.path.join(arvados.current_task().tmpdir, 'out')
+arvados.util.run_command(['rm', '-rf', out_dir], stderr=sys.stderr)
+os.mkdir(out_dir)
+for f in os.listdir(ref_dir):
+    if re.search(r'\.(amb|ann|bwt|pac|rbwt|rpac|rsa|sa)$', f):
+        sys.stderr.write("bwa output: %s (%d)\n" %
+                         (f, os.stat(os.path.join(ref_dir, f)).st_size))
+        os.rename(os.path.join(ref_dir, f),
+                  os.path.join(out_dir, f))
+
+# store output
+out = arvados.CollectionWriter()
+out.write_directory_tree(out_dir, max_manifest_depth=0)
+this_task.set_output(out.finish())
diff --git a/crunch_scripts/collection-merge b/crunch_scripts/collection-merge
new file mode 100755 (executable)
index 0000000..ca80a82
--- /dev/null
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+
+# collection-merge
+#
+# Merge two or more collections together.  Can also be used to extract specific
+# files from a collection to produce a new collection.
+#
+# input:
+# An array of collections or collection/file paths in script_parameter["input"]
+#
+# output:
+# A manifest with the collections merged.  Duplicate file names will
+# have their contents concatenated in the order that they appear in the input
+# array.
+
+import arvados
+import md5
+import crunchutil.subst as subst
+import subprocess
+import os
+import hashlib
+
+p = arvados.current_job()['script_parameters']
+
+merged = ""
+src = []
+for c in p["input"]:
+    c = subst.do_substitution(p, c)
+    i = c.find('/')
+    if i == -1:
+        src.append(c)
+        merged += arvados.CollectionReader(c).manifest_text()
+    else:
+        src.append(c[0:i])
+        cr = arvados.CollectionReader(c[0:i])
+        j = c.rfind('/')
+        stream = c[i+1:j]
+        if stream == "":
+            stream = "."
+        fn = c[(j+1):]
+        for s in cr.all_streams():
+            if s.name() == stream:
+                if fn in s.files():
+                    merged += s.files()[fn].as_manifest()
+
+arvados.current_task().set_output(merged)
diff --git a/crunch_scripts/crunchutil/__init__.py b/crunch_scripts/crunchutil/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/crunch_scripts/crunchutil/robust_put.py b/crunch_scripts/crunchutil/robust_put.py
new file mode 100644 (file)
index 0000000..5a6d593
--- /dev/null
@@ -0,0 +1,52 @@
+import arvados
+import arvados.commands.put as put
+import os
+import logging
+import time
+
+def machine_progress(bytes_written, bytes_expected):
+    return "upload wrote {} total {}\n".format(
+        bytes_written, -1 if (bytes_expected is None) else bytes_expected)
+
+class Args(object):
+    def __init__(self, fn):
+        self.filename = None
+        self.paths = [fn]
+        self.max_manifest_depth = 0
+
+# Upload to Keep with error recovery.
+# Return a uuid or raise an exception if there are too many failures.
+def upload(source_dir, logger=None):
+    if logger is None:
+        logger = logging.getLogger("arvados")
+
+    source_dir = os.path.abspath(source_dir)
+    done = False
+    if 'TASK_WORK' in os.environ:
+        resume_cache = put.ResumeCache(os.path.join(arvados.current_task().tmpdir, "upload-output-checkpoint"))
+    else:
+        resume_cache = put.ResumeCache(put.ResumeCache.make_path(Args(source_dir)))
+    reporter = put.progress_writer(machine_progress)
+    bytes_expected = put.expected_bytes_for([source_dir])
+    backoff = 1
+    outuuid = None
+    while not done:
+        try:
+            out = put.ArvPutCollectionWriter.from_cache(resume_cache, reporter, bytes_expected)
+            out.do_queued_work()
+            out.write_directory_tree(source_dir, max_manifest_depth=0)
+            outuuid = out.finish()
+            done = True
+        except KeyboardInterrupt as e:
+            logger.critical("caught interrupt signal 2")
+            raise e
+        except Exception as e:
+            logger.exception("caught exception:")
+            backoff *= 2
+            if backoff > 256:
+                logger.critical("Too many upload failures, giving up")
+                raise e
+            else:
+                logger.warning("Sleeping for %s seconds before trying again" % backoff)
+                time.sleep(backoff)
+    return outuuid
diff --git a/crunch_scripts/crunchutil/subst.py b/crunch_scripts/crunchutil/subst.py
new file mode 100644 (file)
index 0000000..06ef6c1
--- /dev/null
@@ -0,0 +1,95 @@
+import os
+import glob
+import stat
+
+class SubstitutionError(Exception):
+    pass
+
+def search(c):
+    DEFAULT = 0
+    DOLLAR = 1
+
+    i = 0
+    state = DEFAULT
+    start = None
+    depth = 0
+    while i < len(c):
+        if c[i] == '\\':
+            i += 1
+        elif state == DEFAULT:
+            if c[i] == '$':
+                state = DOLLAR
+                if depth == 0:
+                    start = i
+            elif c[i] == ')':
+                if depth == 1:
+                    return [start, i]
+                if depth > 0:
+                    depth -= 1
+        elif state == DOLLAR:
+            if c[i] == '(':
+                depth += 1
+            state = DEFAULT
+        i += 1
+    if depth != 0:
+        raise SubstitutionError("Substitution error, mismatched parentheses {}".format(c))
+    return None
+
+def sub_file(v):
+    path = os.path.join(os.environ['TASK_KEEPMOUNT'], v)
+    st = os.stat(path)
+    if st and stat.S_ISREG(st.st_mode):
+        return path
+    else:
+        raise SubstitutionError("$(file {}) is not accessible or is not a regular file".format(path))
+
+def sub_dir(v):
+    d = os.path.dirname(v)
+    if d == '':
+        d = v
+    path = os.path.join(os.environ['TASK_KEEPMOUNT'], d)
+    st = os.stat(path)
+    if st and stat.S_ISDIR(st.st_mode):
+        return path
+    else:
+        raise SubstitutionError("$(dir {}) is not accessible or is not a directory".format(path))
+
+def sub_basename(v):
+    return os.path.splitext(os.path.basename(v))[0]
+
+def sub_glob(v):
+    l = glob.glob(v)
+    if len(l) == 0:
+        raise SubstitutionError("$(glob {}) no match fonud".format(v))
+    else:
+        return l[0]
+
+default_subs = {"file ": sub_file,
+                "dir ": sub_dir,
+                "basename ": sub_basename,
+                "glob ": sub_glob}
+
+def do_substitution(p, c, subs=default_subs):
+    while True:
+        m = search(c)
+        if m is None:
+            return c
+
+        v = do_substitution(p, c[m[0]+2 : m[1]])
+        var = True
+        for sub in subs:
+            if v.startswith(sub):
+                r = subs[sub](v[len(sub):])
+                var = False
+                break
+        if var:
+            if v in p:
+                r = p[v]
+            else:
+                raise SubstitutionError("Unknown variable or function '%s' while performing substitution on '%s'" % (v, c))
+            if r is None:
+                raise SubstitutionError("Substitution for '%s' is null while performing substitution on '%s'" % (v, c))
+            if not isinstance(r, basestring):
+                raise SubstitutionError("Substitution for '%s' must be a string while performing substitution on '%s'" % (v, c))
+
+        c = c[:m[0]] + r + c[m[1]+1:]
diff --git a/crunch_scripts/crunchutil/vwd.py b/crunch_scripts/crunchutil/vwd.py
new file mode 100644 (file)
index 0000000..3d54c9c
--- /dev/null
@@ -0,0 +1,54 @@
+import arvados
+import os
+import robust_put
+import stat
+
+# Implements "Virtual Working Directory"
+# Provides a way of emulating a shared writable directory in Keep based
+# on a "check out, edit, check in, merge" model.
+# At the moment, this only permits adding new files, applications
+# cannot modify or delete existing files.
+
+# Create a symlink tree rooted at target_dir mirroring arv-mounted
+# source_collection.  target_dir must be empty, and will be created if it
+# doesn't exist.
+def checkout(source_collection, target_dir, keepmount=None):
+    # create symlinks
+    if keepmount is None:
+        keepmount = os.environ['TASK_KEEPMOUNT']
+
+    if not os.path.exists(target_dir):
+        os.makedirs(target_dir)
+
+    l = os.listdir(target_dir)
+    if len(l) > 0:
+        raise Exception("target_dir must be empty before checkout, contains %s" % l)
+
+    stem = os.path.join(keepmount, source_collection)
+    for root, dirs, files in os.walk(os.path.join(keepmount, source_collection), topdown=True):
+        rel = root[len(stem)+1:]
+        for d in dirs:
+            os.mkdir(os.path.join(target_dir, rel, d))
+        for f in files:
+            os.symlink(os.path.join(root, f), os.path.join(target_dir, rel, f))
+
+# Delete all symlinks and check in any remaining normal files.
+# If merge == True, merge the manifest with source_collection and return a
+# CollectionReader for the combined collection.
+def checkin(source_collection, target_dir, merge=True):
+    # delete symlinks, commit directory, merge manifests and return combined
+    # collection.
+    for root, dirs, files in os.walk(target_dir):
+        for f in files:
+            s = os.lstat(os.path.join(root, f))
+            if stat.S_ISLNK(s.st_mode):
+                os.unlink(os.path.join(root, f))
+
+    uuid = robust_put.upload(target_dir)
+    if merge:
+        cr1 = arvados.CollectionReader(source_collection)
+        cr2 = arvados.CollectionReader(uuid)
+        combined = arvados.CollectionReader(cr1.manifest_text() + cr2.manifest_text())
+        return combined
+    else:
+        return arvados.CollectionReader(uuid)
diff --git a/crunch_scripts/decompress-all.py b/crunch_scripts/decompress-all.py
new file mode 100755 (executable)
index 0000000..50d11f4
--- /dev/null
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+
+#
+# decompress-all.py
+#
+# Decompress all compressed files in the collection using the "dtrx" tool and
+# produce a new collection with the contents.  Uncompressed files
+# are passed through.
+#
+# input:
+# A collection at script_parameters["input"]
+#
+# output:
+# A manifest of the uncompressed contents of the input collection.
+
+import arvados
+import re
+import subprocess
+import os
+import sys
+import crunchutil.robust_put as robust_put
+
+arvados.job_setup.one_task_per_input_file(if_sequence=0, and_end_task=True,
+                                          input_as_path=True)
+
+task = arvados.current_task()
+
+input_file = task['parameters']['input']
+
+infile_parts = re.match(r"(^[a-f0-9]{32}\+\d+)(\+\S+)*(/.*)?(/[^/]+)$", input_file)
+
+outdir = os.path.join(task.tmpdir, "output")
+os.makedirs(outdir)
+os.chdir(outdir)
+
+if infile_parts is None:
+    print >>sys.stderr, "Failed to parse input filename '%s' as a Keep file\n" % input_file
+    sys.exit(1)
+
+cr = arvados.CollectionReader(infile_parts.group(1))
+streamname = infile_parts.group(3)[1:]
+filename = infile_parts.group(4)[1:]
+
+if streamname is not None:
+    subprocess.call(["mkdir", "-p", streamname])
+    os.chdir(streamname)
+else:
+    streamname = '.'
+
+m = re.match(r'.*\.(gz|Z|bz2|tgz|tbz|zip|rar|7z|cab|deb|rpm|cpio|gem)$', arvados.get_task_param_mount('input'), re.IGNORECASE)
+
+if m is not None:
+    rc = subprocess.call(["dtrx", "-r", "-n", "-q", arvados.get_task_param_mount('input')])
+    if rc == 0:
+        task.set_output(robust_put.upload(outdir))
+    else:
+        sys.exit(rc)
+else:
+    streamreader = filter(lambda s: s.name() == streamname, cr.all_streams())[0]
+    filereader = streamreader.files()[filename]
+    task.set_output(streamname + filereader.as_manifest()[1:])
diff --git a/crunch_scripts/file-select b/crunch_scripts/file-select
new file mode 100755 (executable)
index 0000000..fb3f761
--- /dev/null
@@ -0,0 +1,15 @@
+#!/usr/bin/env python
+
+import arvados
+import os
+import re
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+this_job_input = this_job['script_parameters']['input']
+manifest_text = ""
+for f in arvados.CollectionReader(this_job_input).all_files():
+    if f.name() in this_job['script_parameters']['names']:
+        manifest_text += f.as_manifest()
+
+this_task.set_output(arvados.Keep.put(manifest_text))
diff --git a/crunch_scripts/grep b/crunch_scripts/grep
new file mode 100755 (executable)
index 0000000..65120a5
--- /dev/null
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+
+import arvados
+import re
+
+arvados.job_setup.one_task_per_input_file(if_sequence=0, and_end_task=True)
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+this_task_input = this_task['parameters']['input']
+pattern = re.compile(this_job['script_parameters']['pattern'])
+
+input_file = list(arvados.CollectionReader(this_task_input).all_files())[0]
+out = arvados.CollectionWriter()
+out.set_current_file_name(input_file.decompressed_name())
+out.set_current_stream_name(input_file.stream_name())
+for line in input_file.readlines():
+    if pattern.search(line):
+        out.write(line)
+
+this_task.set_output(out.finish())
diff --git a/crunch_scripts/hash b/crunch_scripts/hash
new file mode 100755 (executable)
index 0000000..cdd5eba
--- /dev/null
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+
+import arvados
+import hashlib
+import os
+
+arvados.job_setup.one_task_per_input_file(if_sequence=0, and_end_task=True, input_as_path=True)
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+
+if 'algorithm' in this_job['script_parameters']:
+    alg = this_job['script_parameters']['algorithm']
+else:
+    alg = 'md5'
+digestor = hashlib.new(alg)
+
+input_file = arvados.get_task_param_mount('input')
+
+with open(input_file) as f:
+    while True:
+        buf = f.read(2**20)
+        if len(buf) == 0:
+            break
+        digestor.update(buf)
+
+hexdigest = digestor.hexdigest()
+
+file_name = '/'.join(this_task['parameters']['input'].split('/')[1:])
+
+out = arvados.CollectionWriter()
+out.set_current_file_name("md5sum.txt")
+out.write("%s %s\n" % (hexdigest, file_name))
+this_task.set_output(out.finish())
diff --git a/crunch_scripts/pgp-survey-import b/crunch_scripts/pgp-survey-import
new file mode 100755 (executable)
index 0000000..4e11812
--- /dev/null
@@ -0,0 +1,116 @@
+#!/usr/bin/env python
+
+import arvados
+import string
+import json
+import UserDict
+import sys
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+this_job_input = this_job['script_parameters']['input']
+
+out = arvados.CollectionWriter()
+out.set_current_file_name("arvados_objects.json")
+out.write("[\n")
+separator = ""
+
+traits = {}
+done_bytes = 0
+done_ratio = 0
+for input_file in arvados.CollectionReader(this_job_input).all_files():
+    for line_number, line in enumerate(input_file.readlines()):
+
+        done_bytes += len(line)
+        new_done_ratio = 1.0 * done_bytes / input_file.size()
+        if line_number == 2 or new_done_ratio - done_ratio > 0.05:
+            sys.stderr.write("progress: %d%% after %d lines\n" % (int(done_ratio * 100), line_number+1))
+            done_ratio = new_done_ratio
+
+        words = string.split(string.strip(line), "\t")
+        if line_number == 0:
+            headings = words
+            for t in arvados.api('v1').traits().list(
+                where={'name':words},
+                limit=1000
+                ).execute()['items']:
+                traits[t['name']] = t
+            for i, trait_name in enumerate(words[3:], start=3):
+                # find or create trait
+                if trait_name not in traits:
+                    traits_match = arvados.api('v1').traits().list(
+                        where={'name':trait_name}
+                        ).execute()['items']
+                    if len(traits_match) > 0:
+                        traits[trait_name] = traits_match[0]
+                    else:
+                        traits[trait_name] = arvados.api('v1').traits().create(
+                            trait={'name':trait_name}).execute()
+                out.write(separator)
+                out.write(json.dumps(traits[trait_name]))
+                separator = ",\n"
+        else:
+            huID_links_match = arvados.api('v1').links().list(
+                where={'link_class':'identifier','name':words[0]}
+                ).execute()['items']
+            if len(huID_links_match) > 0:
+                human_uuid = huID_links_match[0]['head_uuid']
+            else:
+                human = arvados.api('v1').humans().create(
+                    body={}
+                    ).execute()
+                huID_link = arvados.api('v1').links().create(
+                    body={
+                        'link_class':'identifier',
+                        'name':words[0],
+                        'head_kind':'arvados#human',
+                        'head_uuid':human['uuid']
+                        }
+                    ).execute()
+                human_uuid = human['uuid']
+            human_trait = {}
+            for t in arvados.api('v1').links().list(
+                limit=10000,
+                where={
+                    'tail_uuid':human_uuid,
+                    'tail_kind':'arvados#human',
+                    'head_kind':'arvados#trait',
+                    'link_class':'human_trait',
+                    'name':'pgp-survey-response'
+                    }
+                ).execute()['items']:
+                human_trait[t['head_uuid']] = t
+            for i, trait_value in enumerate(words[3:], start=3):
+                trait_uuid = traits[headings[i]]['uuid']
+                if trait_uuid in human_trait:
+                    trait_link = human_trait[trait_uuid]
+                    if trait_link['properties']['value'] != trait_value:
+                        # update database value to match survey response
+                        trait_link['properties']['value'] = trait_value
+                        arvados.api('v1').links().update(
+                            uuid=trait_link['uuid'],
+                            body={'properties':trait_link['properties']}
+                            ).execute()
+                    out.write(",\n")
+                    out.write(json.dumps(trait_link))
+                elif trait_value == '':
+                    # nothing in database, nothing in input
+                    pass
+                else:
+                    trait_link = {
+                        'tail_uuid':human_uuid,
+                        'tail_kind':'arvados#human',
+                        'head_uuid':traits[headings[i]]['uuid'],
+                        'head_kind':'arvados#trait',
+                        'link_class':'human_trait',
+                        'name':'pgp-survey-response',
+                        'properties': { 'value': trait_value }
+                        }
+                    arvados.api('v1').links().create(
+                        body=trait_link
+                        ).execute()
+                    out.write(",\n")
+                    out.write(json.dumps(trait_link))
+
+out.write("\n]\n")
+this_task.set_output(out.finish())
diff --git a/crunch_scripts/pgp-survey-parse b/crunch_scripts/pgp-survey-parse
new file mode 100755 (executable)
index 0000000..0633ea4
--- /dev/null
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+
+# Crunch script: check out an external PGP survey parser from git at a
+# pinned revision, run its demo.py, and store the parser's stdout in Keep
+# as participant_traits.tsv.
+
+import arvados
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+# Fetch the parser source at the exact url/version given in the job's
+# script_parameters so the step is reproducible.
+parser_path = arvados.util.git_checkout(
+    url = this_job['script_parameters']['parser_url'],
+    version = this_job['script_parameters']['parser_version'],
+    path = 'parser')
+
+# Run the parser; the TSV it prints on stdout is captured (stderrdata is
+# captured too but unused).
+stdoutdata, stderrdata = arvados.util.run_command(
+    ["python", "demo.py"],
+    cwd=parser_path)
+
+# Write the captured output into a new Keep collection and make that
+# collection this task's output.
+out = arvados.CollectionWriter()
+out.write(stdoutdata)
+out.set_current_file_name('participant_traits.tsv')
+this_task.set_output(out.finish())
diff --git a/crunch_scripts/picard-gatk2-prep b/crunch_scripts/picard-gatk2-prep
new file mode 100755 (executable)
index 0000000..73b143a
--- /dev/null
@@ -0,0 +1,208 @@
+#!/usr/bin/env python
+
+# Crunch script: prepare a BAM file for GATK2 with a pipeline of Picard
+# tools (FixMateInformation -> SortSam -> ReorderSam ->
+# AddOrReplaceReadGroups), then store the resulting BAM, its .bai index,
+# and alignment summary metrics in Keep.  Each stage runs in a forked
+# child process connected by OS pipes (pipe_setup / named_fork /
+# pipe_closeallbut come from arvados_ipc).
+
+import arvados
+import os
+import re
+import sys
+import subprocess
+import arvados_picard
+from arvados_ipc import *
+
+# Queue one task per input file; this sequence-0 invocation only creates
+# the subtasks and exits.
+arvados.job_setup.one_task_per_input_file(if_sequence=0, and_end_task=True)
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+# Extract the (possibly compressed) reference collection to local disk.
+ref_dir = arvados.util.collection_extract(
+    collection = this_job['script_parameters']['reference'],
+    path = 'reference',
+    decompress = True)
+ref_fasta_files = [os.path.join(ref_dir, f)
+                   for f in os.listdir(ref_dir)
+                   if re.search(r'\.fasta(\.gz)?$', f)]
+input_collection = this_task['parameters']['input']
+
+# Record the stream/file name of the input.  The break only exits the
+# inner loop, so this effectively takes the last stream's first file;
+# assumes the collection holds a single BAM.
+for s in arvados.CollectionReader(input_collection).all_streams():
+    for f in s.all_files():
+        input_stream_name = s.name()
+        input_file_name = f.name()
+        break
+
+# Unfortunately, picard FixMateInformation cannot read from a pipe. We
+# must copy the input to a temporary file before running picard.
+input_bam_path = os.path.join(this_task.tmpdir, input_file_name)
+with open(input_bam_path, 'wb') as bam:
+    for s in arvados.CollectionReader(input_collection).all_streams():
+        for f in s.all_files():
+            # NOTE(review): the inner loop variable 's' shadows the outer
+            # stream variable 's'; harmless here but confusing.
+            for s in f.readall():
+                bam.write(s)
+
+# children: stage name -> child pid.  pipes: (stage, 'r'|'w') -> fd.
+children = {}
+pipes = {}
+
+# Stage 1: FixMateInformation reads the temp BAM, writes uncompressed BAM
+# to the 'fixmate' pipe.
+pipe_setup(pipes, 'fixmate')
+if 0==named_fork(children, 'fixmate'):
+    pipe_closeallbut(pipes, ('fixmate', 'w'))
+    arvados_picard.run(
+        'FixMateInformation',
+        params={
+            'i': input_bam_path,
+            'o': '/dev/stdout',
+            'quiet': 'true',
+            'so': 'coordinate',
+            'validation_stringency': 'LENIENT',
+            'compression_level': 0
+            },
+        stdout=os.fdopen(pipes['fixmate','w'], 'wb', 2**20))
+    os._exit(0)
+# NOTE(review): if ('fixmate','w') were ever absent, pop(..., None) would
+# hand None to os.close() and raise TypeError — the default looks unintended.
+os.close(pipes.pop(('fixmate','w'), None))
+
+# Stage 2: SortSam, fixmate pipe -> sortsam pipe.
+pipe_setup(pipes, 'sortsam')
+if 0==named_fork(children, 'sortsam'):
+    pipe_closeallbut(pipes, ('fixmate', 'r'), ('sortsam', 'w'))
+    arvados_picard.run(
+        'SortSam',
+        params={
+            'i': '/dev/stdin',
+            'o': '/dev/stdout',
+            'quiet': 'true',
+            'so': 'coordinate',
+            'validation_stringency': 'LENIENT',
+            'compression_level': 0
+            },
+        stdin=os.fdopen(pipes['fixmate','r'], 'rb', 2**20),
+        stdout=os.fdopen(pipes['sortsam','w'], 'wb', 2**20))
+    os._exit(0)
+
+# Stage 3: ReorderSam against the first reference FASTA, sortsam -> reordersam.
+pipe_setup(pipes, 'reordersam')
+if 0==named_fork(children, 'reordersam'):
+    pipe_closeallbut(pipes, ('sortsam', 'r'), ('reordersam', 'w'))
+    arvados_picard.run(
+        'ReorderSam',
+        params={
+            'i': '/dev/stdin',
+            'o': '/dev/stdout',
+            'reference': ref_fasta_files[0],
+            'quiet': 'true',
+            'validation_stringency': 'LENIENT',
+            'compression_level': 0
+            },
+        stdin=os.fdopen(pipes['sortsam','r'], 'rb', 2**20),
+        stdout=os.fdopen(pipes['reordersam','w'], 'wb', 2**20))
+    os._exit(0)
+
+# Stage 4: AddOrReplaceReadGroups with rg* values from script_parameters
+# (defaults used when absent), reordersam -> addrg.
+pipe_setup(pipes, 'addrg')
+if 0==named_fork(children, 'addrg'):
+    pipe_closeallbut(pipes, ('reordersam', 'r'), ('addrg', 'w'))
+    arvados_picard.run(
+        'AddOrReplaceReadGroups',
+        params={
+            'i': '/dev/stdin',
+            'o': '/dev/stdout',
+            'quiet': 'true',
+            'rglb': this_job['script_parameters'].get('rglb', 0),
+            'rgpl': this_job['script_parameters'].get('rgpl', 'illumina'),
+            'rgpu': this_job['script_parameters'].get('rgpu', 0),
+            'rgsm': this_job['script_parameters'].get('rgsm', 0),
+            'validation_stringency': 'LENIENT'
+            },
+        stdin=os.fdopen(pipes['reordersam','r'], 'rb', 2**20),
+        stdout=os.fdopen(pipes['addrg','w'], 'wb', 2**20))
+    os._exit(0)
+
+# Stage 5: tee the final BAM three ways — into Keep (out), to the 'bam'
+# pipe (for indexing) and to 'casm_in' (for metrics) — then send the
+# resulting manifest text back on the 'bammanifest' pipe.
+pipe_setup(pipes, 'bammanifest')
+pipe_setup(pipes, 'bam')
+pipe_setup(pipes, 'casm_in')
+if 0==named_fork(children, 'bammanifest'):
+    pipe_closeallbut(pipes,
+                     ('addrg', 'r'),
+                     ('bammanifest', 'w'),
+                     ('bam', 'w'),
+                     ('casm_in', 'w'))
+    out = arvados.CollectionWriter()
+    out.start_new_stream(input_stream_name)
+    out.start_new_file(input_file_name)
+    while True:
+        buf = os.read(pipes['addrg','r'], 2**20)
+        if len(buf) == 0:
+            break
+        os.write(pipes['bam','w'], buf)
+        os.write(pipes['casm_in','w'], buf)
+        out.write(buf)
+    os.write(pipes['bammanifest','w'], out.manifest_text())
+    os.close(pipes['bammanifest','w'])
+    os._exit(0)
+
+# Stage 6: CollectAlignmentSummaryMetrics reads/writes via /dev/fd paths,
+# so the inherited fds must stay open (close_fds=False).
+pipe_setup(pipes, 'casm')
+if 0 == named_fork(children, 'casm'):
+    pipe_closeallbut(pipes, ('casm_in', 'r'), ('casm', 'w'))
+    arvados_picard.run(
+        'CollectAlignmentSummaryMetrics',
+        params={
+            'input': '/dev/fd/' + str(pipes['casm_in','r']),
+            'output': '/dev/fd/' + str(pipes['casm','w']),
+            'reference_sequence': ref_fasta_files[0],
+            'validation_stringency': 'LENIENT',
+            },
+        close_fds=False)
+    os._exit(0)
+
+# Stage 7: BuildBamIndex, bam pipe -> index pipe.
+pipe_setup(pipes, 'index')
+if 0==named_fork(children, 'index'):
+    pipe_closeallbut(pipes, ('bam', 'r'), ('index', 'w'))
+    arvados_picard.run(
+        'BuildBamIndex',
+        params={
+            'i': '/dev/stdin',
+            'o': '/dev/stdout',
+            'quiet': 'true',
+            'validation_stringency': 'LENIENT'
+            },
+        stdin=os.fdopen(pipes['bam','r'], 'rb', 2**20),
+        stdout=os.fdopen(pipes['index','w'], 'wb', 2**20))
+    os._exit(0)
+
+# Stage 8: collect the index stream into Keep as <input>.bai and send the
+# manifest text back on the 'indexmanifest' pipe.
+pipe_setup(pipes, 'indexmanifest')
+if 0==named_fork(children, 'indexmanifest'):
+    pipe_closeallbut(pipes, ('index', 'r'), ('indexmanifest', 'w'))
+    out = arvados.CollectionWriter()
+    out.start_new_stream(input_stream_name)
+    out.start_new_file(re.sub('\.bam$', '.bai', input_file_name))
+    while True:
+        buf = os.read(pipes['index','r'], 2**20)
+        if len(buf) == 0:
+            break
+        out.write(buf)
+    os.write(pipes['indexmanifest','w'], out.manifest_text())
+    os.close(pipes['indexmanifest','w'])
+    os._exit(0)
+
+# Parent: keep only the three result pipes open so children see EOF.
+pipe_closeallbut(pipes,
+                 ('bammanifest', 'r'),
+                 ('indexmanifest', 'r'),
+                 ('casm', 'r'))
+
+outmanifest = ''
+
+# Concatenate the manifest fragments produced by the tee and index stages.
+for which in ['bammanifest', 'indexmanifest']:
+    with os.fdopen(pipes[which,'r'], 'rb', 2**20) as f:
+        while True:
+            buf = f.read()
+            if buf == '':
+                break
+            outmanifest += buf
+
+casm_out = arvados.CollectionWriter()
+casm_out.start_new_stream(input_stream_name)
+casm_out.start_new_file(input_file_name + '.casm.tsv')
+# NOTE(review): this passes a file object (not bytes) to
+# CollectionWriter.write(); confirm write() accepts file-like input.
+casm_out.write(os.fdopen(pipes.pop(('casm','r'))))
+
+outmanifest += casm_out.manifest_text()
+
+# Reap every child; success requires all stages to have exited 0.
+all_ok = True
+for (childname, pid) in children.items():
+    all_ok = all_ok and waitpid_and_check_exit(pid, childname)
+
+if all_ok:
+    this_task.set_output(outmanifest)
+else:
+    sys.exit(1)
diff --git a/crunch_scripts/pyrtg.py b/crunch_scripts/pyrtg.py
new file mode 100644 (file)
index 0000000..536bacf
--- /dev/null
@@ -0,0 +1,71 @@
+# Helpers for running Real Time Genomics (RTG) tools inside a crunch task.
+# setup() is invoked at import time (bottom of module) so callers can use
+# run_rtg() immediately.
+
+import arvados
+import re
+import os
+import sys
+
+# Absolute path of the extracted RTG distribution; filled in by setup().
+rtg_install_path = None
+
+def setup():
+    """Extract the RTG binary zipball and license named in the job's
+    script_parameters and cache the install path (idempotent)."""
+    global rtg_install_path
+    if rtg_install_path:
+        return rtg_install_path
+    rtg_path = arvados.util.zipball_extract(
+        zipball = arvados.current_job()['script_parameters']['rtg_binary_zip'],
+        path = 'rtg')
+    rtg_license_path = arvados.util.collection_extract(
+        collection = arvados.current_job()['script_parameters']['rtg_license'],
+        path = 'license',
+        decompress = False)
+
+    # symlink to rtg-license.txt
+    license_txt_path = os.path.join(rtg_license_path, 'rtg-license.txt')
+    try:
+        os.symlink(license_txt_path, os.path.join(rtg_path,'rtg-license.txt'))
+    except OSError:
+        # EAFP: the link may already exist from a previous run; only retry
+        # (and let the error propagate) if it is genuinely missing.
+        if not os.path.exists(os.path.join(rtg_path,'rtg-license.txt')):
+            os.symlink(license_txt_path, os.path.join(rtg_path,'rtg-license.txt'))
+
+    rtg_install_path = rtg_path
+    return rtg_path
+
+def run_rtg(command, output_dir, command_args, **kwargs):
+    """Run an rtg subcommand with '-o output_dir' plus command_args,
+    verify it completed, and forward its log files to stderr."""
+    global rtg_install_path
+    execargs = [os.path.join(rtg_install_path, 'rtg'),
+                command,
+                '-o', output_dir]
+    execargs += command_args
+    sys.stderr.write("run_rtg: exec %s\n" % str(execargs))
+    arvados.util.run_command(
+        execargs,
+        cwd=arvados.current_task().tmpdir,
+        stderr=sys.stderr,
+        stdout=sys.stderr)
+
+    # Exit status cannot be trusted in rtg 1.1.1.
+    assert_done(output_dir)
+
+    # Copy log files to stderr and delete them to avoid storing them
+    # in Keep with the output data.
+    for dirent in arvados.util.listdir_recursive(output_dir):
+        if is_log_file(dirent):
+            log_file = os.path.join(output_dir, dirent)
+            sys.stderr.write(' '.join(['==>', dirent, '<==\n']))
+            with open(log_file, 'rb') as f:
+                while True:
+                    buf = f.read(2**20)
+                    if len(buf) == 0:
+                        break
+                    sys.stderr.write(buf)
+            sys.stderr.write('\n') # in case log does not end in newline
+            os.unlink(log_file)
+
+def assert_done(output_dir):
+    # Sanity-check exit code.
+    done_file = os.path.join(output_dir, 'done')
+    if not os.path.exists(done_file):
+        raise Exception("rtg exited 0 but %s does not exist. abort.\n" % done_file)
+
+def is_log_file(filename):
+    # True for rtg bookkeeping files: 'progress', 'done', or '*.log'.
+    # NOTE(review): the '.' in '\S+.log' is unescaped and matches any
+    # character — presumably '\.log' was intended; confirm.
+    return re.search(r'^(.*/)?(progress|done|\S+.log)$', filename)
+
+# Resolve the RTG install as soon as the module is imported.
+setup()
diff --git a/crunch_scripts/rtg-fasta2sdf b/crunch_scripts/rtg-fasta2sdf
new file mode 100755 (executable)
index 0000000..732cf8b
--- /dev/null
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+
+# Crunch script: convert a collection of reference FASTA files to RTG's
+# SDF format (rtg format) and store the result in Keep.
+
+import arvados
+import os
+import re
+import sys
+import pyrtg
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+fasta_path = arvados.util.collection_extract(
+    collection = this_job['script_parameters']['input'],
+    path = 'fasta',
+    decompress = False)
+# Skip the '.locator' bookkeeping file left behind by collection_extract.
+fasta_files = filter(lambda f: f != '.locator', os.listdir(fasta_path))
+out_dir = os.path.join(arvados.current_task().tmpdir, 'ref-sdf')
+# Remove any stale output directory from a previous attempt.
+arvados.util.run_command(['rm', '-rf', out_dir], stderr=sys.stderr)
+
+pyrtg.run_rtg('format', out_dir,
+              map(lambda f: os.path.join(fasta_path, f), fasta_files))
+
+# Store the whole SDF directory as this task's output collection.
+out = arvados.CollectionWriter()
+out.write_directory_tree(out_dir, max_manifest_depth=0)
+this_task.set_output(out.finish())
diff --git a/crunch_scripts/rtg-fastq2sdf b/crunch_scripts/rtg-fastq2sdf
new file mode 100755 (executable)
index 0000000..25de4c1
--- /dev/null
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+
+# Crunch script: convert paired-end fastq files to RTG SDF format, then
+# split the SDF into fixed-size chunks for parallel mapping.
+
+import arvados
+import os
+import re
+import sys
+import pyrtg
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+fastq_path = arvados.util.collection_extract(
+    collection = this_job['script_parameters']['input'],
+    path = 'fastq')
+# Skip the '.locator' bookkeeping file left behind by collection_extract.
+fastq_files = filter(lambda f: f != '.locator', os.listdir(fastq_path))
+tmp_dir_base = os.path.join(arvados.current_task().tmpdir, 'tmp')
+out_dir = os.path.join(arvados.current_task().tmpdir, 'reads')
+
+# Clear any leftovers from a previous attempt.
+arvados.util.run_command(['rm', '-rf', tmp_dir_base], stderr=sys.stderr)
+arvados.util.run_command(['rm', '-rf', out_dir], stderr=sys.stderr)
+os.mkdir(tmp_dir_base)
+
+# convert fastq to sdf
+# Pair files by the *_1 / *_2 naming convention; unpaired files are
+# silently skipped.  Each pair gets its own numbered temp SDF directory.
+tmp_dirs = []
+for leftarm in fastq_files:
+    if re.search(r'_1.f(ast)?q(.gz)?$', leftarm):
+        rightarm = re.sub(r'_1(.f(ast)?q(.gz)?)$', '_2\\1', leftarm)
+        if rightarm in fastq_files:
+            tmp_dirs += ['%s/%08d' % (tmp_dir_base, len(tmp_dirs))]
+            pyrtg.run_rtg('format', tmp_dirs[-1],
+                          ['-f', 'fastq',
+                           '-q', 'sanger',
+                           '-l', os.path.join(fastq_path, leftarm),
+                           '-r', os.path.join(fastq_path, rightarm)])
+
+# split sdf
+# '-n 1500000' sets the chunk size — presumably reads per chunk; see the
+# rtg sdfsplit documentation.
+pyrtg.run_rtg('sdfsplit', out_dir,
+              ['-n', '1500000'] + tmp_dirs)
+
+# store output
+out = arvados.CollectionWriter()
+out.write_directory_tree(out_dir, max_manifest_depth=1)
+this_task.set_output(out.finish())
diff --git a/crunch_scripts/rtg-map b/crunch_scripts/rtg-map
new file mode 100755 (executable)
index 0000000..daf46f3
--- /dev/null
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+
+# Crunch script: map one SDF read chunk against an SDF reference using
+# 'rtg map'; one task is queued per input stream.
+
+import arvados
+import os
+import re
+import sys
+import pyrtg
+
+# Queue one task per input stream; this sequence-0 invocation only
+# creates the subtasks and exits.
+arvados.job_setup.one_task_per_input_stream(if_sequence=0, and_end_task=True)
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+# Extract this task's input stream into a fresh local directory.
+in_dir = os.path.join(this_task.tmpdir, 'input')
+arvados.util.run_command(['rm', '-rf', in_dir], stderr=sys.stderr)
+in_dir = arvados.util.stream_extract(
+    stream = arvados.StreamReader(this_task['parameters']['input']),
+    path = in_dir,
+    decompress = False)
+ref_dir = arvados.util.collection_extract(
+    collection = this_job['script_parameters']['reference'],
+    path = 'reference',
+    decompress = False)
+
+out_dir = os.path.join(arvados.current_task().tmpdir, 'out')
+arvados.util.run_command(['rm', '-rf', out_dir], stderr=sys.stderr)
+
+# map reads
+pyrtg.run_rtg('map', out_dir,
+              ['-i', in_dir,
+               '-t', ref_dir,
+               '-a', '2',
+               '-b', '1',
+               '--sam-rg', '@RG\\tID:NA\\tSM:NA\\tPL:ILLUMINA'])
+
+# store output
+# The second argument names the output stream after this task's input.
+out = arvados.CollectionWriter()
+out.write_directory_tree(out_dir, this_task['parameters']['input'][0], 0)
+this_task.set_output(out.finish())
diff --git a/crunch_scripts/rtg-snp b/crunch_scripts/rtg-snp
new file mode 100755 (executable)
index 0000000..67c4ac7
--- /dev/null
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+
+# Crunch script: call sequence variants with 'rtg snp' over all
+# alignments.bam files found in the input collection.
+
+import arvados
+import os
+import re
+import sys
+import pyrtg
+
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+ref_dir = arvados.util.collection_extract(
+    collection = this_job['script_parameters']['reference'],
+    path = 'reference',
+    decompress = False)
+input_dir = arvados.util.collection_extract(
+    collection = this_job['script_parameters']['input'],
+    path = 'input')
+# Collect every */alignments.bam from the extracted input tree.
+bam_files = map(lambda f: os.path.join(input_dir, f),
+                filter(lambda f: re.search(r'^(.*/)?alignments.bam$', f),
+                       arvados.util.listdir_recursive(input_dir)))
+out_dir = os.path.join(arvados.current_task().tmpdir, 'out')
+arvados.util.run_command(['rm', '-rf', out_dir], stderr=sys.stderr)
+
+# call sequence variants
+pyrtg.run_rtg('snp', out_dir,
+              ['-t', ref_dir] + bam_files)
+
+# store output
+out = arvados.CollectionWriter()
+out.write_directory_tree(out_dir, max_manifest_depth=0)
+this_task.set_output(out.finish())
diff --git a/crunch_scripts/run-command b/crunch_scripts/run-command
new file mode 100755 (executable)
index 0000000..c07debd
--- /dev/null
@@ -0,0 +1,447 @@
+#!/usr/bin/env python
+
+# Generic crunch script: expand a declarative "command" template from the
+# job/task script_parameters, run the resulting command pipeline, and
+# save the output directory to Keep.  --dry-run prints the expanded
+# command(s) locally without contacting the API.  (Python 2 throughout:
+# basestring, xrange, 0077 octal literal.)
+
+import logging
+
+logger = logging.getLogger('run-command')
+log_handler = logging.StreamHandler()
+log_handler.setFormatter(logging.Formatter("run-command: %(message)s"))
+logger.addHandler(log_handler)
+logger.setLevel(logging.INFO)
+
+import arvados
+import re
+import os
+import subprocess
+import sys
+import shutil
+import crunchutil.subst as subst
+import time
+import arvados.commands.put as put
+import signal
+import stat
+import copy
+import traceback
+import pprint
+import multiprocessing
+import crunchutil.robust_put as robust_put
+import crunchutil.vwd as vwd
+import argparse
+import json
+import tempfile
+import errno
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--dry-run', action='store_true')
+parser.add_argument('--script-parameters', type=str, default="{}")
+args = parser.parse_args()
+
+# Files created below should be private to this task.
+os.umask(0077)
+
+if not args.dry_run:
+    api = arvados.api('v1')
+    t = arvados.current_task().tmpdir
+    os.chdir(arvados.current_task().tmpdir)
+    os.mkdir("tmpdir")
+    os.mkdir("output")
+
+    # Run the user command with the output directory as cwd.
+    os.chdir("output")
+
+    outdir = os.getcwd()
+
+    # taskp stays None for the sequence-0 task, which (when task.foreach
+    # is used) only spawns subtasks and never runs a command itself.
+    taskp = None
+    jobp = arvados.current_job()['script_parameters']
+    if len(arvados.current_task()['parameters']) > 0:
+        taskp = arvados.current_task()['parameters']
+else:
+    # Dry run: fake the environment so $(job.uuid) etc. still resolve.
+    outdir = "/tmp"
+    jobp = json.loads(args.script_parameters)
+    os.environ['JOB_UUID'] = 'zzzzz-8i9sb-1234567890abcde'
+    os.environ['TASK_UUID'] = 'zzzzz-ot0gb-1234567890abcde'
+    os.environ['CRUNCH_SRC'] = '/tmp/crunche-src'
+    if 'TASK_KEEPMOUNT' not in os.environ:
+        os.environ['TASK_KEEPMOUNT'] = '/keep'
+
+# Symlinks to remove before saving output (never appended to in this
+# version; kept for the cleanup loop at the bottom).
+links = []
+
+# Substitution callbacks registered below; each receives the raw
+# substitution text ('v', unused) and returns the replacement string.
+def sub_tmpdir(v):
+    return os.path.join(arvados.current_task().tmpdir, 'tmpdir')
+
+def sub_outdir(v):
+    return outdir
+
+def sub_cores(v):
+     return str(multiprocessing.cpu_count())
+
+def sub_jobid(v):
+     return os.environ['JOB_UUID']
+
+def sub_taskid(v):
+     return os.environ['TASK_UUID']
+
+def sub_jobsrc(v):
+     return os.environ['CRUNCH_SRC']
+
+# Make $(task.tmpdir), $(task.outdir), etc. available in templates.
+subst.default_subs["task.tmpdir"] = sub_tmpdir
+subst.default_subs["task.outdir"] = sub_outdir
+subst.default_subs["job.srcdir"] = sub_jobsrc
+subst.default_subs["node.cores"] = sub_cores
+subst.default_subs["job.uuid"] = sub_jobid
+subst.default_subs["task.uuid"] = sub_taskid
+
+class SigHandler(object):
+    """Forward a received signal to every subprocess and remember it, so
+    the main loop can distinguish 'killed by signal' from normal exit."""
+    def __init__(self):
+        # Last signal number forwarded, or None if none was received.
+        self.sig = None
+
+    def send_signal(self, subprocesses, signum):
+        for sp in subprocesses:
+            sp.send_signal(signum)
+        self.sig = signum
+
+# http://rightfootin.blogspot.com/2006/09/more-on-python-flatten.html
+def flatten(l, ltypes=(list, tuple)):
+    """Flatten arbitrarily nested lists/tuples into a single sequence of
+    the original outermost type."""
+    ltype = type(l)
+    l = list(l)
+    i = 0
+    while i < len(l):
+        while isinstance(l[i], ltypes):
+            if not l[i]:
+                # Drop empty nested sequences entirely.
+                l.pop(i)
+                i -= 1
+                break
+            else:
+                # Splice the nested sequence in place and re-examine i.
+                l[i:i + 1] = l[i]
+        i += 1
+    return ltype(l)
+
+def add_to_group(gr, match):
+    """Bucket match.group(0) under the tuple of its subgroup values."""
+    m = match.groups()
+    if m not in gr:
+        gr[m] = []
+    gr[m].append(match.group(0))
+
+class EvaluationError(Exception):
+    """Raised when a command template cannot be expanded."""
+    pass
+
+# Return the name of variable ('var') that will take on each value in 'items'
+# when performing an inner substitution
+def var_items(p, c, key):
+    """Return (variable-name-or-None, items) for the list-context function
+    'key' of template node 'c', evaluated in parameter scope 'p'."""
+    if key not in c:
+        # NOTE(review): the message says 'p' but the membership test is
+        # against 'c' — the wording looks wrong.
+        raise EvaluationError("'%s' was expected in 'p' but is missing" % key)
+
+    if "var" in c:
+        if not isinstance(c["var"], basestring):
+            raise EvaluationError("Value of 'var' must be a string")
+        # Var specifies the variable name for inner parameter substitution
+        return (c["var"], get_items(p, c[key]))
+    else:
+        # The component function ('key') value is a list, so return the list
+        # directly with no parameter selected.
+        if isinstance(c[key], list):
+            return (None, get_items(p, c[key]))
+        elif isinstance(c[key], basestring):
+            # check if c[key] is a string that looks like a parameter
+            m = re.match("^\$\((.*)\)$", c[key])
+            if m and m.group(1) in p:
+                return (m.group(1), get_items(p, c[key]))
+            else:
+                # backwards compatible, foreach specifies bare parameter name to use
+                return (c[key], get_items(p, p[c[key]]))
+        else:
+            raise EvaluationError("Value of '%s' must be a string or list" % key)
+
+# "p" is the parameter scope, "c" is the item to be expanded.
+# If "c" is a dict, apply function expansion.
+# If "c" is a list, recursively expand each item and return a new list.
+# If "c" is a string, apply parameter substitution
+def expand_item(p, c):
+    if isinstance(c, dict):
+        if "foreach" in c and "command" in c:
+            # Expand a command template for each item in the specified user
+            # parameter
+            var, items = var_items(p, c, "foreach")
+            if var is None:
+                raise EvaluationError("Must specify 'var' in foreach")
+            r = []
+            for i in items:
+                # Shallow-copy the scope so each iteration binds its own var.
+                params = copy.copy(p)
+                params[var] = i
+                r.append(expand_item(params, c["command"]))
+            return r
+        elif "list" in c and "index" in c and "command" in c:
+            # extract a single item from a list
+            var, items = var_items(p, c, "list")
+            if var is None:
+                raise EvaluationError("Must specify 'var' in list")
+            params = copy.copy(p)
+            params[var] = items[int(c["index"])]
+            return expand_item(params, c["command"])
+        elif "regex" in c:
+            pattern = re.compile(c["regex"])
+            if "filter" in c:
+                # filter list so that it only includes items that match a
+                # regular expression
+                _, items = var_items(p, c, "filter")
+                return [i for i in items if pattern.match(i)]
+            elif "group" in c:
+                # generate a list of lists, where items are grouped on common
+                # subexpression match
+                _, items = var_items(p, c, "group")
+                groups = {}
+                for i in items:
+                    match = pattern.match(i)
+                    if match:
+                        add_to_group(groups, match)
+                return [groups[k] for k in groups]
+            elif "extract" in c:
+                # generate a list of lists, where items are split by
+                # subexpression match
+                _, items = var_items(p, c, "extract")
+                r = []
+                for i in items:
+                    match = pattern.match(i)
+                    if match:
+                        r.append(list(match.groups()))
+                return r
+            # A 'regex' without filter/group/extract falls through to the
+            # EvaluationError below.
+        elif "batch" in c and "size" in c:
+            # generate a list of lists, where items are split into a batch size
+            _, items = var_items(p, c, "batch")
+            sz = int(c["size"])
+            r = []
+            for j in xrange(0, len(items), sz):
+                r.append(items[j:j+sz])
+            return r
+        raise EvaluationError("Missing valid list context function")
+    elif isinstance(c, list):
+        return [expand_item(p, arg) for arg in c]
+    elif isinstance(c, basestring):
+        # A bare "$(name)" referring to a known parameter expands to that
+        # parameter's (recursively expanded) value; anything else goes
+        # through ordinary string substitution.
+        m = re.match("^\$\((.*)\)$", c)
+        if m and m.group(1) in p:
+            return expand_item(p, p[m.group(1)])
+        else:
+            return subst.do_substitution(p, c)
+    else:
+        raise EvaluationError("expand_item() unexpected parameter type %s" % type(c))
+
+# Evaluate in a list context
+# "p" is the parameter scope, "value" will be evaluated
+# if "value" is a list after expansion, return that
+# if "value" is a path to a directory, return a list consisting of each entry in the directory
+# if "value" is a path to a file, return a list consisting of each line of the file
+def get_items(p, value):
+    value = expand_item(p, value)
+    if isinstance(value, list):
+        return value
+    elif isinstance(value, basestring):
+        # os.stat raises OSError for a nonexistent path, so mode is never
+        # actually None — the 'is not None' test below is always true.
+        mode = os.stat(value).st_mode
+        # NOTE(review): 'prefix' is computed but never used.
+        prefix = value[len(os.environ['TASK_KEEPMOUNT'])+1:]
+        if mode is not None:
+            if stat.S_ISDIR(mode):
+                items = [os.path.join(value, l) for l in os.listdir(value)]
+            elif stat.S_ISREG(mode):
+                with open(value) as f:
+                    items = [line.rstrip("\r\n") for line in f]
+            # NOTE(review): if the path is neither a directory nor a
+            # regular file, 'items' is unbound here (NameError) — confirm.
+            return items
+    raise EvaluationError("get_items did not yield a list")
+
+# Construct the cross product of all values of each variable listed in fvars
+def recursive_foreach(params, fvars):
+    """Create one job task (or, in dry-run, one log line) per combination
+    of values of the remaining foreach variables."""
+    var = fvars[0]
+    fvars = fvars[1:]
+    items = get_items(params, params[var])
+    logger.info("parallelizing on %s with items %s" % (var, items))
+    if items is not None:
+        for i in items:
+            # Fresh shallow copy so each recursion level binds its own value.
+            params = copy.copy(params)
+            params[var] = i
+            if len(fvars) > 0:
+                recursive_foreach(params, fvars)
+            else:
+                if not args.dry_run:
+                    # Leaf combination: queue a sequence-1 subtask carrying
+                    # the fully-bound parameter set.
+                    arvados.api().job_tasks().create(body={
+                        'job_uuid': arvados.current_job()['uuid'],
+                        'created_by_job_task_uuid': arvados.current_task()['uuid'],
+                        'sequence': 1,
+                        'parameters': params
+                    }).execute()
+                else:
+                    if isinstance(params["command"][0], list):
+                        for c in params["command"]:
+                            logger.info(flatten(expand_item(params, c)))
+                    else:
+                        logger.info(flatten(expand_item(params, params["command"])))
+    else:
+        # NOTE(review): get_items raises rather than returning None, so
+        # this branch appears unreachable.
+        logger.error("parameter %s with value %s in task.foreach yielded no items" % (var, params[var]))
+        sys.exit(1)
+
+# Phase 1: if task.foreach is present and this is the spawner task
+# (sequence 0, or any dry run), fan out subtasks and exit.
+try:
+    if "task.foreach" in jobp:
+        if args.dry_run or arvados.current_task()['sequence'] == 0:
+            # This is the first task to start the other tasks and exit
+            fvars = jobp["task.foreach"]
+            if isinstance(fvars, basestring):
+                fvars = [fvars]
+            if not isinstance(fvars, list) or len(fvars) == 0:
+                logger.error("value of task.foreach must be a string or non-empty list")
+                sys.exit(1)
+            recursive_foreach(jobp, jobp["task.foreach"])
+            if not args.dry_run:
+                if "task.vwd" in jobp:
+                    # Set output of the first task to the base vwd collection so it
+                    # will be merged with output fragments from the other tasks by
+                    # crunch.
+                    arvados.current_task().set_output(subst.do_substitution(jobp, jobp["task.vwd"]))
+                else:
+                    arvados.current_task().set_output(None)
+            sys.exit(0)
+    else:
+        # This is the only task so taskp/jobp are the same
+        taskp = jobp
+except Exception as e:
+    logger.exception("caught exception")
+    logger.error("job parameters were:")
+    logger.error(pprint.pformat(jobp))
+    sys.exit(1)
+
+# Phase 2: build the concrete command pipeline from the template and
+# resolve stdin/stdout redirections.
+try:
+    if not args.dry_run:
+        if "task.vwd" in taskp:
+            # Populate output directory with symlinks to files in collection
+            vwd.checkout(subst.do_substitution(taskp, taskp["task.vwd"]), outdir)
+
+        if "task.cwd" in taskp:
+            os.chdir(subst.do_substitution(taskp, taskp["task.cwd"]))
+
+    # cmd is a list of argv lists; more than one entry means a shell-style
+    # pipeline (stage i's stdout feeds stage i+1's stdin).
+    cmd = []
+    if isinstance(taskp["command"][0], list):
+        for c in taskp["command"]:
+            cmd.append(flatten(expand_item(taskp, c)))
+    else:
+        cmd.append(flatten(expand_item(taskp, taskp["command"])))
+
+    if "task.stdin" in taskp:
+        stdinname = subst.do_substitution(taskp, taskp["task.stdin"])
+        if not args.dry_run:
+            stdinfile = open(stdinname, "rb")
+
+    if "task.stdout" in taskp:
+        stdoutname = subst.do_substitution(taskp, taskp["task.stdout"])
+        if not args.dry_run:
+            stdoutfile = open(stdoutname, "wb")
+
+    logger.info("{}{}{}".format(' | '.join([' '.join(c) for c in cmd]), (" < " + stdinname) if stdinname is not None else "", (" > " + stdoutname) if stdoutname is not None else ""))
+
+    # Dry run stops after printing the expanded pipeline.
+    if args.dry_run:
+        sys.exit(0)
+except subst.SubstitutionError as e:
+    logger.error(str(e))
+    logger.error("task parameters were:")
+    logger.error(pprint.pformat(taskp))
+    sys.exit(1)
+except Exception as e:
+    logger.exception("caught exception")
+    logger.error("task parameters were:")
+    logger.error(pprint.pformat(taskp))
+    sys.exit(1)
+
+# Phase 3: launch the pipeline and wait for all stages.
+# rcode holds the return codes produced by each subprocess
+rcode = {}
+try:
+    subprocesses = []
+    close_streams = []
+    if stdinfile:
+        close_streams.append(stdinfile)
+    next_stdin = stdinfile
+
+    for i in xrange(len(cmd)):
+        if i == len(cmd)-1:
+            # this is the last command in the pipeline, so its stdout should go to stdoutfile
+            next_stdout = stdoutfile
+        else:
+            # this is an intermediate command in the pipeline, so its stdout should go to a pipe
+            next_stdout = subprocess.PIPE
+
+        sp = subprocess.Popen(cmd[i], shell=False, stdin=next_stdin, stdout=next_stdout)
+
+        # Need to close the FDs on our side so that subcommands will get SIGPIPE if the
+        # consuming process ends prematurely.
+        if sp.stdout:
+            close_streams.append(sp.stdout)
+
+        # Send this processes's stdout to to the next process's stdin
+        next_stdin = sp.stdout
+
+        subprocesses.append(sp)
+
+    # File descriptors have been handed off to the subprocesses, so close them here.
+    for s in close_streams:
+        s.close()
+
+    # Set up signal handling
+    sig = SigHandler()
+
+    # Forward terminate signals to the subprocesses.
+    signal.signal(signal.SIGINT, lambda signum, frame: sig.send_signal(subprocesses, signum))
+    signal.signal(signal.SIGTERM, lambda signum, frame: sig.send_signal(subprocesses, signum))
+    signal.signal(signal.SIGQUIT, lambda signum, frame: sig.send_signal(subprocesses, signum))
+
+    # NOTE(review): 'active' is never read.
+    active = 1
+    pids = set([s.pid for s in subprocesses])
+    while len(pids) > 0:
+        (pid, status) = os.wait()
+        pids.discard(pid)
+        if not taskp.get("task.ignore_rcode"):
+            # NOTE(review): 'status >> 8' is the raw os.wait() encoding;
+            # os.WEXITSTATUS(status) would express this intent directly.
+            rcode[pid] = (status >> 8)
+        else:
+            rcode[pid] = 0
+
+    if sig.sig is not None:
+        logger.critical("terminating on signal %s" % sig.sig)
+        sys.exit(2)
+    else:
+        for i in xrange(len(cmd)):
+            r = rcode[subprocesses[i].pid]
+            logger.info("%s completed with exit code %i (%s)" % (cmd[i][0], r, "success" if r == 0 else "failed"))
+
+except Exception as e:
+    logger.exception("caught exception")
+
+# restore default signal handlers.
+signal.signal(signal.SIGINT, signal.SIG_DFL)
+signal.signal(signal.SIGTERM, signal.SIG_DFL)
+signal.signal(signal.SIGQUIT, signal.SIG_DFL)
+
+# Remove bookkeeping symlinks so they are not uploaded with the output.
+for l in links:
+    os.unlink(l)
+
+logger.info("the following output files will be saved to keep:")
+
+subprocess.call(["find", ".", "-type", "f", "-printf", "run-command: %12.12s %h/%f\\n"], stdout=sys.stderr)
+
+logger.info("start writing output to keep")
+
+# Phase 4: save outdir to Keep — via the virtual working directory helper
+# when task.vwd is set, otherwise with a plain robust upload.
+if "task.vwd" in taskp:
+    if "task.foreach" in jobp:
+        # This is a subtask, so don't merge with the original collection, that will happen at the end
+        outcollection = vwd.checkin(subst.do_substitution(taskp, taskp["task.vwd"]), outdir, merge=False).manifest_text()
+    else:
+        # Just a single task, so do merge with the original collection
+        outcollection = vwd.checkin(subst.do_substitution(taskp, taskp["task.vwd"]), outdir, merge=True).manifest_text()
+else:
+    outcollection = robust_put.upload(outdir, logger)
+
+# Success if we ran any subprocess, and they all exited 0.
+success = rcode and all(status == 0 for status in rcode.itervalues())
+
+# 'api' exists here because the dry-run path always exits earlier.
+api.job_tasks().update(uuid=arvados.current_task()['uuid'],
+                                     body={
+                                         'output': outcollection,
+                                         'success': success,
+                                         'progress':1.0
+                                     }).execute()
+
+sys.exit(0 if success else 1)
diff --git a/crunch_scripts/split-fastq.py b/crunch_scripts/split-fastq.py
new file mode 100755 (executable)
index 0000000..17aabf2
--- /dev/null
@@ -0,0 +1,129 @@
+#!/usr/bin/python
+
+import arvados
+import re
+import hashlib
+import string
+
+api = arvados.api('v1')
+
+piece = 0
+manifest_text = ""
+
+# Look for paired reads
+
+inp = arvados.CollectionReader(arvados.getjobparam('reads'))
+
+manifest_list = []
+
+chunking = False #arvados.getjobparam('chunking')
+
+def nextline(reader, start):
+    n = -1
+    while True:
+        r = reader.readfrom(start, 128)
+        if r == '':
+            break
+        n = string.find(r, "\n")
+        if n > -1:
+            break
+        else:
+            start += 128
+    return n
+
+# Chunk a fastq into approximately 64 MiB chunks.  Requires that the input data
+# be decompressed ahead of time, such as using decompress-all.py.  Generates a
+# new manifest, but doesn't actually move any data around.  Handles paired
+# reads by ensuring that each chunk of a pair gets the same number of records.
+#
+# This works, but in practice is so slow that potential gains in alignment
+# performance are lost in the prep time, which is why it is currently disabled.
+#
+# A better algorithm would seek to a file position a bit less than the desired
+# chunk size and then scan ahead for the next record, making sure that record
+# was matched by the read pair.
+def splitfastq(p):
+    for i in xrange(0, len(p)):
+        p[i]["start"] = 0
+        p[i]["end"] = 0
+
+    count = 0
+    recordsize = [0, 0]
+
+    global piece
+    finish = False
+    while not finish:
+        for i in xrange(0, len(p)):
+            recordsize[i] = 0
+
+        # read next 4 lines
+        for i in xrange(0, len(p)):
+            for ln in xrange(0, 4):
+                r = nextline(p[i]["reader"], p[i]["end"]+recordsize[i])
+                if r == -1:
+                    finish = True
+                    break
+                recordsize[i] += (r+1)
+
+        splitnow = finish
+        for i in xrange(0, len(p)):
+            if ((p[i]["end"] - p[i]["start"]) + recordsize[i]) >= (64*1024*1024):
+                splitnow = True
+
+        if splitnow:
+            for i in xrange(0, len(p)):
+                global manifest_list
+                print >>sys.stderr, "Finish piece ./_%s/%s (%s %s)" % (piece, p[i]["reader"].name(), p[i]["start"], p[i]["end"])
+                manifest = []
+                manifest.extend(["./_" + str(piece)])
+                manifest.extend([d[arvados.LOCATOR] for d in p[i]["reader"]._stream._data_locators])
+                manifest.extend(["{}:{}:{}".format(seg[arvados.LOCATOR]+seg[arvados.OFFSET], seg[arvados.SEGMENTSIZE], p[i]["reader"].name().replace(' ', '\\040')) for seg in arvados.locators_and_ranges(p[i]["reader"].segments, p[i]["start"], p[i]["end"] - p[i]["start"])])
+                manifest_list.append(manifest)
+                p[i]["start"] = p[i]["end"]
+            piece += 1
+        else:
+            for i in xrange(0, len(p)):
+                p[i]["end"] += recordsize[i]
+            count += 1
+            if count % 10000 == 0:
+                print >>sys.stderr, "Record %s at %s" % (count, p[i]["end"])
+
+prog = re.compile(r'(.*?)(_[12])?\.fastq(\.gz)?$')
+
+# Look for fastq files
+for s in inp.all_streams():
+    for f in s.all_files():
+        name_pieces = prog.match(f.name())
+        if name_pieces is not None:
+            if s.name() != ".":
+                # The downstream tool (run-command) only iterates over the top
+                # level of directories so if there are fastq files in
+                # directories in the input, the choice is either to forget
+                # there are directories (which might lead to name conflicts) or
+                # just fail.
+                print >>sys.stderr, "fastq must be at the root of the collection"
+                sys.exit(1)
+
+            p = None
+            if name_pieces.group(2) is not None:
+                if name_pieces.group(2) == "_1":
+                    p = [{}, {}]
+                    p[0]["reader"] = s.files()[name_pieces.group(0)]
+                    p[1]["reader"] = s.files()[name_pieces.group(1) + "_2.fastq" + (name_pieces.group(3) if name_pieces.group(3) else '')]
+            else:
+                p = [{}]
+                p[0]["reader"] = s.files()[name_pieces.group(0)]
+
+            if p is not None:
+                if chunking:
+                    splitfastq(p)
+                else:
+                    for i in xrange(0, len(p)):
+                        m = p[i]["reader"].as_manifest().split()
+                        m[0] = "./_" + str(piece)
+                        manifest_list.append(m)
+                    piece += 1
+
+manifest_text = "\n".join(" ".join(m) for m in manifest_list) + "\n"
+
+arvados.current_task().set_output(manifest_text)
diff --git a/doc/Gemfile b/doc/Gemfile
new file mode 100644 (file)
index 0000000..9ee5f58
--- /dev/null
@@ -0,0 +1,6 @@
+source 'https://rubygems.org'
+
+gem 'zenweb'
+gem 'liquid'
+gem 'RedCloth'
+gem 'colorize'
diff --git a/doc/Gemfile.lock b/doc/Gemfile.lock
new file mode 100644 (file)
index 0000000..344a0a8
--- /dev/null
@@ -0,0 +1,34 @@
+GEM
+  remote: https://rubygems.org/
+  specs:
+    RedCloth (4.2.9)
+    coderay (1.1.0)
+    colorize (0.6.0)
+    kramdown (1.3.1)
+    less (1.2.21)
+      mutter (>= 0.4.2)
+      treetop (>= 1.4.2)
+    liquid (2.6.1)
+    makerakeworkwell (1.0.3)
+      rake (>= 0.9.2, < 11)
+    mutter (0.5.3)
+    polyglot (0.3.3)
+    rake (10.1.1)
+    treetop (1.4.15)
+      polyglot
+      polyglot (>= 0.3.1)
+    zenweb (3.3.1)
+      coderay (~> 1.0)
+      kramdown (~> 1.0)
+      less (~> 1.2)
+      makerakeworkwell (~> 1.0)
+      rake (>= 0.9, < 11)
+
+PLATFORMS
+  ruby
+
+DEPENDENCIES
+  RedCloth
+  colorize
+  liquid
+  zenweb
diff --git a/doc/README.textile b/doc/README.textile
new file mode 100644 (file)
index 0000000..9d58dfe
--- /dev/null
@@ -0,0 +1,69 @@
+h1. Arvados documentation
+
+This is the source code for "doc.arvados.org":http://doc.arvados.org.
+
+Here's how to build the HTML pages locally so you can preview your updates before you commit and push.
+
+Additional information is available on the "'Documentation' page on the Arvados wiki":https://arvados.org/projects/arvados/wiki/Documentation.
+
+h2. Install dependencies
+
+<pre>
+arvados/doc$ bundle install
+</pre>
+
+h2. Generate HTML pages
+
+<pre>
+arvados/doc$ rake
+</pre>
+
+Alternately, to make the documentation browsable on the local filesystem:
+
+<pre>
+arvados/doc$ rake generate baseurl=$PWD/.site
+</pre>
+
+h2. Run linkchecker
+
+If you have "Linkchecker":http://wummel.github.io/linkchecker/ installed on
+your system, you can run it against the documentation:
+
+<pre>
+arvados/doc$ rake linkchecker baseurl=file://$PWD/.site
+</pre>
+
+Please note that this will regenerate your $PWD/.site directory.
+
+h2. Preview HTML pages
+
+<pre>
+arvados/doc$ rake run
+[2014-03-10 09:03:41] INFO  WEBrick 1.3.1
+[2014-03-10 09:03:41] INFO  ruby 2.1.1 (2014-02-24) [x86_64-linux]
+[2014-03-10 09:03:41] INFO  WEBrick::HTTPServer#start: pid=8926 port=8000
+</pre>
+
+Preview the rendered pages at "http://localhost:8000":http://localhost:8000.
+
+h2. Publish HTML pages inside Workbench
+
+(or some other web site)
+
+You can set @baseurl@ (the URL prefix for all internal links), @arvados_api_host@ and @arvados_workbench_host@ without changing @_config.yml@:
+
+<pre>
+arvados/doc$ rake generate baseurl=/doc arvados_api_host=xyzzy.arvadosapi.com
+</pre>
+
+Make the docs appear at {workbench_host}/doc by creating a symbolic link in Workbench's @public@ directory, pointing to the generated HTML tree.
+
+<pre>
+arvados/doc$ ln -sn ../../../doc/.site ../apps/workbench/public/doc
+</pre>
+
+h2. Delete generated files
+
+<pre>
+arvados/doc$ rake realclean
+</pre>
diff --git a/doc/Rakefile b/doc/Rakefile
new file mode 100644 (file)
index 0000000..811ca67
--- /dev/null
@@ -0,0 +1,45 @@
+#!/usr/bin/env rake
+
+require "rubygems"
+require "colorize"
+
+task :generate => [ :realclean, 'sdk/python/arvados/index.html' ] do
+  vars = ['baseurl', 'arvados_api_host', 'arvados_workbench_host']
+  vars.each do |v|
+    if ENV[v]
+      website.config.h[v] = ENV[v]
+    end
+  end
+end
+
+file "sdk/python/arvados/index.html" do |t|
+  `which epydoc`
+  if $? == 0
+    `epydoc --html --parse-only -o sdk/python/arvados ../sdk/python/arvados/`
+  else
+    puts "Warning: epydoc not found, Python documentation will not be generated".colorize(:light_red)
+  end
+end
+
+task :linkchecker => [ :generate ] do
+  Dir.chdir(".site") do
+    `which linkchecker`
+    if $? == 0
+      system "linkchecker index.html --ignore-url='!file://'" or exit $?.exitstatus
+    else
+      puts "Warning: linkchecker not found, skipping run".colorize(:light_red)
+    end
+  end
+end
+
+task :clean do
+  rm_rf "sdk/python/arvados"
+end
+
+require "zenweb/tasks"
+load "zenweb-textile.rb"
+load "zenweb-liquid.rb"
+
+task :extra_wirings do
+  $website.pages["sdk/python/python.html.textile.liquid"].depends_on("sdk/python/arvados/index.html")
+end
diff --git a/doc/_config.yml b/doc/_config.yml
new file mode 100644 (file)
index 0000000..af5160f
--- /dev/null
@@ -0,0 +1,146 @@
+# baseurl is the location of the generated site from the browser's
+# perspective (e.g., http://doc.arvados.org or
+# file:///tmp/arvados/doc/.site). To make docs show up inside
+# workbench, use /doc here and add a symlink at
+# apps/workbench/public/doc pointing to ../../../doc/.site
+# You can also set these on the command line:
+# $ rake generate baseurl=/example arvados_api_host=example.com
+
+baseurl:
+arvados_api_host: localhost
+arvados_workbench_host: localhost
+
+exclude: ["Rakefile", "tmp", "vendor"]
+
+navbar:
+  userguide:
+    - Getting Started:
+      - user/index.html.textile.liquid
+      - user/getting_started/community.html.textile.liquid
+    - Run a pipeline using Workbench:
+      - user/getting_started/workbench.html.textile.liquid
+      - user/tutorials/tutorial-pipeline-workbench.html.textile.liquid
+    - Access an Arvados virtual machine:
+      - user/getting_started/ssh-access-unix.html.textile.liquid
+      - user/getting_started/ssh-access-windows.html.textile.liquid
+      - user/getting_started/check-environment.html.textile.liquid
+      - user/reference/api-tokens.html.textile.liquid
+    - Working with data sets:
+      - user/tutorials/tutorial-keep.html.textile.liquid
+      - user/tutorials/tutorial-keep-get.html.textile.liquid
+      - user/tutorials/tutorial-keep-mount.html.textile.liquid
+      - user/topics/keep.html.textile.liquid
+    - Run a pipeline on the command line:
+      - user/topics/running-pipeline-command-line.html.textile.liquid
+      - user/topics/arv-run.html.textile.liquid
+    - Develop a new pipeline:
+      - user/tutorials/intro-crunch.html.textile.liquid
+      - user/tutorials/running-external-program.html.textile.liquid
+      - user/tutorials/tutorial-firstscript.html.textile.liquid
+      - user/tutorials/tutorial-submit-job.html.textile.liquid
+      - user/topics/tutorial-parallel.html.textile.liquid
+      - user/topics/arv-docker.html.textile.liquid
+    - Reference:
+      - user/topics/run-command.html.textile.liquid
+      - user/reference/job-pipeline-ref.html.textile.liquid
+      - user/examples/crunch-examples.html.textile.liquid
+    - Query the metadata database:
+      - user/topics/tutorial-trait-search.html.textile.liquid
+    - Arvados License:
+      - user/copying/copying.html.textile.liquid
+      - user/copying/agpl-3.0.html
+      - user/copying/LICENSE-2.0.html
+      - user/copying/by-sa-3.0.html
+  sdk:
+    - Overview:
+      - sdk/index.html.textile.liquid
+    - Python:
+      - sdk/python/sdk-python.html.textile.liquid
+      - sdk/python/python.html.textile.liquid
+      - sdk/python/crunch-utility-libraries.html.textile.liquid
+    - Perl:
+      - sdk/perl/index.html.textile.liquid
+    - Ruby:
+      - sdk/ruby/index.html.textile.liquid
+    - Java:
+      - sdk/java/index.html.textile.liquid
+    - Go:
+      - sdk/go/index.html.textile.liquid
+    - CLI:
+      - sdk/cli/index.html.textile.liquid
+      - sdk/cli/install.html.textile.liquid
+      - sdk/cli/reference.html.textile.liquid
+      - sdk/cli/subcommands.html.textile.liquid
+  api:
+    - Concepts:
+      - api/index.html.textile.liquid
+      - api/authentication.html.textile.liquid
+      - api/methods.html.textile.liquid
+      - api/resources.html.textile.liquid
+      - api/crunch-scripts.html.textile.liquid
+      - api/permission-model.html.textile.liquid
+    - API Methods:
+      - api/methods/api_client_authorizations.html.textile.liquid
+      - api/methods/api_clients.html.textile.liquid
+      - api/methods/authorized_keys.html.textile.liquid
+      - api/methods/collections.html.textile.liquid
+      - api/methods/groups.html.textile.liquid
+      - api/methods/humans.html.textile.liquid
+      - api/methods/jobs.html.textile.liquid
+      - api/methods/job_tasks.html.textile.liquid
+      - api/methods/keep_disks.html.textile.liquid
+      - api/methods/keep_services.html.textile.liquid
+      - api/methods/links.html.textile.liquid
+      - api/methods/logs.html.textile.liquid
+      - api/methods/nodes.html.textile.liquid
+      - api/methods/pipeline_instances.html.textile.liquid
+      - api/methods/pipeline_templates.html.textile.liquid
+      - api/methods/repositories.html.textile.liquid
+      - api/methods/specimens.html.textile.liquid
+      - api/methods/traits.html.textile.liquid
+      - api/methods/users.html.textile.liquid
+      - api/methods/virtual_machines.html.textile.liquid
+    - Schema:
+      - api/schema/ApiClientAuthorization.html.textile.liquid
+      - api/schema/ApiClient.html.textile.liquid
+      - api/schema/AuthorizedKey.html.textile.liquid
+      - api/schema/Collection.html.textile.liquid
+      - api/schema/Group.html.textile.liquid
+      - api/schema/Human.html.textile.liquid
+      - api/schema/Job.html.textile.liquid
+      - api/schema/JobTask.html.textile.liquid
+      - api/schema/KeepDisk.html.textile.liquid
+      - api/schema/KeepService.html.textile.liquid
+      - api/schema/Link.html.textile.liquid
+      - api/schema/Log.html.textile.liquid
+      - api/schema/Node.html.textile.liquid
+      - api/schema/PipelineInstance.html.textile.liquid
+      - api/schema/PipelineTemplate.html.textile.liquid
+      - api/schema/Repository.html.textile.liquid
+      - api/schema/Specimen.html.textile.liquid
+      - api/schema/Trait.html.textile.liquid
+      - api/schema/User.html.textile.liquid
+      - api/schema/VirtualMachine.html.textile.liquid
+  admin:
+    - Admin:
+      - admin/index.html.md.liquid
+      - admin/cheat_sheet.html.textile.liquid
+  installguide:
+    - Overview:
+      - install/index.html.textile.liquid
+    - Docker:
+      - install/install-docker.html.textile.liquid
+    - Manual installation:
+      - install/install-manual-overview.html.textile.liquid
+      - install/install-manual-prerequisites.html.textile.liquid
+      - install/install-api-server.html.textile.liquid
+      - install/install-workbench-app.html.textile.liquid
+      - install/install-shell-server.html.textile.liquid
+      - install/create-standard-objects.html.textile.liquid
+      - install/install-keepstore.html.textile.liquid
+      - install/install-keepproxy.html.textile.liquid
+      - install/install-crunch-dispatch.html.textile.liquid
+      - install/install-compute-node.html.textile.liquid
+    - Software prerequisites:
+      - install/install-manual-prerequisites-ruby.html.textile.liquid
+      - install/install-sso.html.textile.liquid
diff --git a/doc/_includes/_0_filter_py.liquid b/doc/_includes/_0_filter_py.liquid
new file mode 100644 (file)
index 0000000..831e1b8
--- /dev/null
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+# Import the Arvados sdk module
+import arvados
+
+# Get information about the task from the environment
+this_task = arvados.current_task()
+
+this_task_input = arvados.current_job()['script_parameters']['input']
+
+# Create the object access to the collection referred to in the input
+collection = arvados.CollectionReader(this_task_input)
+
+# Create an object to write a new collection as output
+out = arvados.CollectionWriter()
+
+# Create a new file in the output collection
+with out.open('0-filter.txt') as out_file:
+    # Iterate over every input file in the input collection
+    for input_file in collection.all_files():
+        # Output every line in the file that starts with '0'
+        out_file.writelines(line for line in input_file if line.startswith('0'))
+
+# Commit the output to Keep.
+output_locator = out.finish()
+
+# Use the resulting locator as the output for this task.
+this_task.set_output(output_locator)
+
+# Done!
diff --git a/doc/_includes/_alert-incomplete.liquid b/doc/_includes/_alert-incomplete.liquid
new file mode 100644 (file)
index 0000000..972ba15
--- /dev/null
@@ -0,0 +1,5 @@
+<div class="alert alert-block alert-info">
+  <button type="button" class="close" data-dismiss="alert">&times;</button>
+  <h4>Hi!</h4>
+  <p>This section is incomplete. Please be patient with us as we fill in the blanks &mdash; or <a href="https://arvados.org/projects/arvados/wiki/Documentation#Contributing">contribute to the documentation project.</a></p>
+</div>
diff --git a/doc/_includes/_alert_stub.liquid b/doc/_includes/_alert_stub.liquid
new file mode 100644 (file)
index 0000000..4cc2498
--- /dev/null
@@ -0,0 +1,5 @@
+<div class="alert alert-block alert-info">
+  <button type="button" class="close" data-dismiss="alert">&times;</button>
+  <h4>Hi!</h4>
+  <p>This section is incomplete. Please be patient with us as we fill in the blanks &mdash; or <A href="https://arvados.org/projects/arvados/wiki/Documentation#Contributing">contribute to the documentation project.</A></p>
+</div>
diff --git a/doc/_includes/_concurrent_hash_script_py.liquid b/doc/_includes/_concurrent_hash_script_py.liquid
new file mode 100644 (file)
index 0000000..691ed56
--- /dev/null
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+
+import hashlib
+import os
+import arvados
+
+# Jobs consist of one or more tasks.  A task is a single invocation of
+# a crunch script.
+
+# Get the current task
+this_task = arvados.current_task()
+
+# Tasks have a sequence number for ordering.  All tasks
+# with the current sequence number must finish successfully
+# before tasks in the next sequence are started.
+# The first task has sequence number 0
+if this_task['sequence'] == 0:
+    # Get the "input" field from "script_parameters" on the task object
+    job_input = arvados.current_job()['script_parameters']['input']
+
+    # Create a collection reader to read the input
+    cr = arvados.CollectionReader(job_input)
+
+    # Loop over each stream in the collection (a stream is a subset of
+    # files that logically represents a directory)
+    for s in cr.all_streams():
+
+        # Loop over each file in the stream
+        for f in s.all_files():
+
+            # Synthesize a manifest for just this file
+            task_input = f.as_manifest()
+
+            # Set attributes for a new task:
+            # 'job_uuid' the job that this task is part of
+            # 'created_by_job_task_uuid' this task that is creating the new task
+            # 'sequence' the sequence number of the new task
+            # 'parameters' the parameters to be passed to the new task
+            new_task_attrs = {
+                'job_uuid': arvados.current_job()['uuid'],
+                'created_by_job_task_uuid': arvados.current_task()['uuid'],
+                'sequence': 1,
+                'parameters': {
+                    'input':task_input
+                    }
+                }
+
+            # Ask the Arvados API server to create a new task, running the same
+            # script as the parent task specified in 'created_by_job_task_uuid'
+            arvados.api().job_tasks().create(body=new_task_attrs).execute()
+
+    # Now tell the Arvados API server that this task executed successfully,
+    # even though it doesn't have any output.
+    this_task.set_output(None)
+else:
+    # The task sequence was not 0, so it must be a parallel worker task
+    # created by the first task
+
+    # Instead of getting "input" from the "script_parameters" field of
+    # the job object, we get it from the "parameters" field of the
+    # task object
+    this_task_input = this_task['parameters']['input']
+
+    collection = arvados.CollectionReader(this_task_input)
+
+    # There should only be one file in the collection, so get the
+    # first one from the all files iterator.
+    input_file = next(collection.all_files())
+    output_path = os.path.normpath(os.path.join(input_file.stream_name(),
+                                                input_file.name))
+
+    # Everything after this is the same as the first tutorial.
+    digestor = hashlib.new('md5')
+    for buf in input_file.readall():
+        digestor.update(buf)
+
+    out = arvados.CollectionWriter()
+    with out.open('md5sum.txt') as out_file:
+        out_file.write("{} {}\n".format(digestor.hexdigest(), output_path))
+
+    this_task.set_output(out.finish())
+
+# Done!
diff --git a/doc/_includes/_example_docker.liquid b/doc/_includes/_example_docker.liquid
new file mode 100644 (file)
index 0000000..9486ad7
--- /dev/null
@@ -0,0 +1,28 @@
+{
+    "name": "Example using R in a custom Docker image",
+    "components": {
+        "Rscript": {
+            "script": "run-command",
+            "script_version": "master",
+            "repository": "arvados",
+            "script_parameters": {
+                "command": [
+                    "Rscript",
+                    "$(glob $(file $(myscript))/*.r)",
+                    "$(glob $(dir $(mydata))/*.csv)"
+                ],
+                "myscript": {
+                    "required": true,
+                    "dataclass": "Collection"
+                },
+                "mydata": {
+                    "required": true,
+                    "dataclass": "Collection"
+                }
+            },
+            "runtime_constraints": {
+                "docker_image": "arvados/jobs-with-r"
+            }
+        }
+    }
+}
diff --git a/doc/_includes/_example_sdk_go.liquid b/doc/_includes/_example_sdk_go.liquid
new file mode 100644 (file)
index 0000000..08124e6
--- /dev/null
@@ -0,0 +1,109 @@
+package main
+
+
+// *******************
+// Import the modules.
+//
+// Our examples don't use keepclient, but they do use fmt and log to
+// display output.
+
+import (
+       "fmt"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "log"
+)
+
+func main() {
+
+
+       // ********************************
+       // Set up an API client user agent.
+       //
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       if err != nil {
+               log.Fatalf("Error setting up arvados client %s", err.Error())
+       }
+
+
+       // *****************************************
+       // Print the full name of the current user.
+       //
+
+       type user struct {
+               Uuid     string `json:"uuid"`
+               FullName string `json:"full_name"`
+       }
+
+       var u user
+       err = arv.Call("GET", "users", "", "current", nil, &u)
+
+       if err != nil {
+               log.Fatalf("error querying current user: %s", err.Error())
+       }
+
+       log.Printf("Logged in as %s (uuid %s)", u.FullName, u.Uuid)
+
+
+       // ********************************************************
+       // Print all fields from the first five collections returned.
+       //
+       // Note that some fields are not returned by default and have to be
+       // requested. See below for an example.
+
+       var results map[string]interface{}
+
+       params := arvadosclient.Dict{"limit": 5}
+
+       err = arv.List("collections", params, &results)
+       if err != nil {
+               log.Fatalf("error querying collections: %s", err.Error())
+       }
+
+       printArvadosResults(results)
+
+
+       // *********************************************************
+       // Print some fields from the first two collections returned.
+       //
+       // We also print manifest_text, which has to be explicitly requested.
+       //
+
+       collection_fields_wanted := []string{"manifest_text", "owner_uuid", "uuid"}
+       params = arvadosclient.Dict{"limit": 2, "select": collection_fields_wanted}
+
+       err = arv.List("collections", params, &results)
+       if err != nil {
+               log.Fatalf("error querying collections: %s", err.Error())
+       }
+
+       printArvadosResults(results)
+}
+
+
+// A helper method which will print out a result map returned by
+// arvadosclient.
+func printArvadosResults(results map[string]interface{}) {
+       for key, value := range results {
+               // "items", if it exists, holds a map.
+               // So we print it prettily below.
+               if key != "items" {
+                       fmt.Println(key, ":", value)
+               }
+       }
+
+       if value, ok := results["items"]; ok {
+               items := value.([]interface{})
+               for index, item := range items {
+                       fmt.Println("===========  ", index, "  ===========")
+                       item_map := item.(map[string]interface{})
+                       if len(item_map) == 0 {
+                               fmt.Println("item", index, ": empty map")
+                       } else {
+                               for k, v := range item_map {
+                                       fmt.Println(index, k, ":", v)
+                               }
+                       }
+               }
+       }
+}
diff --git a/doc/_includes/_example_sdk_go_imports.liquid b/doc/_includes/_example_sdk_go_imports.liquid
new file mode 100644 (file)
index 0000000..fe2cfca
--- /dev/null
@@ -0,0 +1,4 @@
+import (
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+)
diff --git a/doc/_includes/_navbar_left.liquid b/doc/_includes/_navbar_left.liquid
new file mode 100644 (file)
index 0000000..c97023f
--- /dev/null
@@ -0,0 +1,18 @@
+<div class="col-sm-3">
+  <div class="well">
+    <ol class="nav nav-list">
+      {% for section in site.navbar[page.navsection] %}
+      {% for entry in section %}
+      <li><span class="nav-header">{{ entry[0] }}</span>
+       <ol class="nav nav-list">
+          {% for item in entry[1] %}        
+          {% assign p = site.pages[item] %}
+          <li {% if p.url == page.url %} class="active activesubnav" {% elsif p.title == page.subnavsection %} class="activesubnav" {% endif %}>
+            <a href="{{ site.baseurl }}{{ p.url }}">{{ p.title }}</a></li>
+          {% endfor %}
+        </ol>
+        {% endfor %}
+        {% endfor %}
+    </ol>
+  </div>
+</div>
diff --git a/doc/_includes/_navbar_top.liquid b/doc/_includes/_navbar_top.liquid
new file mode 100644 (file)
index 0000000..9123893
--- /dev/null
@@ -0,0 +1,23 @@
+<div class="navbar navbar-default navbar-fixed-top">
+  <div class="container-fluid">
+    <div class="navbar-header">
+      <button type="button" class="navbar-toggle" data-toggle="collapse" data-target="#bs-navbar-collapse">
+        <span class="sr-only">Toggle navigation</span>
+        <span class="icon-bar"></span>
+        <span class="icon-bar"></span>
+        <span class="icon-bar"></span>
+      </button>
+      <a class="navbar-brand" href="{{ site.baseurl }}/">Arvados Docs</a>
+    </div>
+    <div class="collapse navbar-collapse" id="bs-navbar-collapse">
+      <ul class="nav navbar-nav">
+        <li {% if page.navsection == 'userguide' %} class="active" {% endif %}><a href="{{ site.baseurl }}/user/index.html">User&nbsp;Guide</a></li>
+        <li {% if page.navsection == 'sdk' %} class="active" {% endif %}><a href="{{ site.baseurl }}/sdk/index.html">SDK&nbsp;Reference</a></li>
+        <li {% if page.navsection == 'api' %} class="active" {% endif %}><a href="{{ site.baseurl }}/api/index.html">API&nbsp;Reference</a></li>
+        <li {% if page.navsection == 'adminguide' %} class="active" {% endif %}><a href="{{ site.baseurl }}/admin/index.html">Admin Guide</a></li>
+        <li {% if page.navsection == 'installguide' %} class="active" {% endif %}><a href="{{ site.baseurl }}/install/index.html">Install Guide</a></li>
+        <li><a href="https://arvados.org/" style="padding-left: 2em">arvados.org&nbsp;&raquo;</a></li>
+      </ul>
+    </div>
+  </div>
+</div>
diff --git a/doc/_includes/_notebox_begin.liquid b/doc/_includes/_notebox_begin.liquid
new file mode 100644 (file)
index 0000000..eb00110
--- /dev/null
@@ -0,0 +1,3 @@
+<div class="alert alert-block alert-info">
+  <button type="button" class="close" data-dismiss="alert">&times;</button>
+  <h4>Note:</h4>
diff --git a/doc/_includes/_notebox_end.liquid b/doc/_includes/_notebox_end.liquid
new file mode 100644 (file)
index 0000000..04f5b84
--- /dev/null
@@ -0,0 +1 @@
+</div>
diff --git a/doc/_includes/_run_command_foreach_example.liquid b/doc/_includes/_run_command_foreach_example.liquid
new file mode 100644 (file)
index 0000000..3fb754f
--- /dev/null
@@ -0,0 +1,40 @@
+{
+    "name":"run-command example pipeline",
+    "components":{
+        "bwa-mem": {
+            "script": "run-command",
+            "script_version": "master",
+            "repository": "arvados",
+            "script_parameters": {
+                "command": [
+                    "bwa",
+                    "mem",
+                    "-t",
+                    "$(node.cores)",
+                    "$(glob $(dir $(reference_collection))/*.fasta)",
+                    {
+                        "foreach": "read_pair",
+                        "command": "$(read_pair)"
+                    }
+                ],
+                "task.stdout": "$(basename $(glob $(dir $(sample))/*_1.fastq)).sam",
+                "task.foreach": ["sample_subdir", "read_pair"],
+                "reference_collection": {
+                    "required": true,
+                    "dataclass": "Collection"
+                },
+                "sample": {
+                    "required": true,
+                    "dataclass": "Collection"
+                },
+                "sample_subdir": "$(dir $(sample))",
+                "read_pair": {
+                    "value": {
+                        "group": "sample_subdir",
+                        "regex": "(.*)_[12]\\.fastq(\\.gz)?$"
+                    }
+                }
+            }
+        }
+    }
+}
diff --git a/doc/_includes/_run_command_simple_example.liquid b/doc/_includes/_run_command_simple_example.liquid
new file mode 100644 (file)
index 0000000..abd0071
--- /dev/null
@@ -0,0 +1,30 @@
+{
+    "name":"run-command example pipeline",
+    "components":{
+        "bwa-mem": {
+            "script": "run-command",
+            "script_version": "master",
+            "repository": "arvados",
+            "script_parameters": {
+                "command": [
+                    "bwa",
+                    "mem",
+                    "-t",
+                    "$(node.cores)",
+                    "$(glob $(dir $(reference_collection))/*.fasta)",
+                    "$(glob $(dir $(sample))/*_1.fastq)",
+                    "$(glob $(dir $(sample))/*_2.fastq)"
+                ],
+                "task.stdout": "$(basename $(glob $(dir $(sample))/*_1.fastq)).sam",
+                "reference_collection": {
+                    "required": true,
+                    "dataclass": "Collection"
+                },
+                "sample": {
+                    "required": true,
+                    "dataclass": "Collection"
+                }
+            }
+        }
+    }
+}
diff --git a/doc/_includes/_run_md5sum_py.liquid b/doc/_includes/_run_md5sum_py.liquid
new file mode 100644 (file)
index 0000000..46152f1
--- /dev/null
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+
+import arvados
+
+# Automatically parallelize this job by running one task per file.
+arvados.job_setup.one_task_per_input_file(if_sequence=0, and_end_task=True,
+                                          input_as_path=True)
+
+# Get the input file for the task
+input_file = arvados.get_task_param_mount('input')
+
+# Run the external 'md5sum' program on the input file
+stdoutdata, stderrdata = arvados.util.run_command(['md5sum', input_file])
+
+# Save the standard output (stdoutdata) to "md5sum.txt" in the output collection
+out = arvados.CollectionWriter()
+with out.open('md5sum.txt') as out_file:
+    out_file.write(stdoutdata)
+arvados.current_task().set_output(out.finish())
diff --git a/doc/_includes/_skip_sso_server_install.liquid b/doc/_includes/_skip_sso_server_install.liquid
new file mode 100644 (file)
index 0000000..a5c1511
--- /dev/null
@@ -0,0 +1,6 @@
+<div class="alert alert-block alert-info">
+  <button type="button" class="close" data-dismiss="alert">&times;</button>
+  <h4>Note!</h4>
+  <p>The SSO server codebase currently uses OpenID 2.0 to talk to Google's authentication service. Google <a href="https://developers.google.com/accounts/docs/OpenID2">has deprecated that protocol</a>. This means that new clients will not be allowed to talk to Google's authentication services anymore over OpenID 2.0, and they will phase out the use of OpenID 2.0 completely in the coming months. We are working on upgrading the SSO server codebase to a newer protocol. That work should be complete by the end of November 2014. In the meantime, anyone is free to use the existing Curoverse SSO server for any local Arvados installation. Instructions to do so are provided on the "API server":install-api-server.html page.</p>
+  <p><strong>Recommendation: skip this step</strong></p>
+</div>
diff --git a/doc/_includes/_ssh_addkey.liquid b/doc/_includes/_ssh_addkey.liquid
new file mode 100644 (file)
index 0000000..3770635
--- /dev/null
@@ -0,0 +1,26 @@
+
+You may now proceed to "adding your key to the Arvados Workbench.":#workbench
+
+h1(#workbench). Adding your key to Arvados Workbench
+
+h3. From the Workbench dashboard
+
+If you have no SSH keys registered, there should be a notification asking you to provide your SSH public key.  In the Workbench top navigation menu, look for a dropdown menu with your email address in upper right corner. It will have an icon such as <span class="badge badge-alert">1</span> (the number indicates there are new notifications).  Click on this icon and a dropdown menu should appear with a message asking you to add your public key.  Paste your public key into the text area provided and click on the check button to submit the key.  You are now ready to "log into an Arvados VM":#login.
+
+h3. Alternate way to add SSH keys
+
+Click on the link with your _email address_ in the upper right corner to access the user settings menu, and click on the menu item *Manage account* to go to the account management page.
+
+On the *Manage account* page, click on the button <span class="btn btn-primary">*+* Add new SSH key</span> button in the upper right corner of the page in the SSH Keys panel.
+
+This will open a popup as shown in this screenshot:
+
+!{{ site.baseurl }}/images/ssh-adding-public-key.png!
+
+Paste the public key that you copied to the clipboard in the previous section into the popup text box labeled *Public Key* and click on the <span class="btn btn-primary">Submit</span> button to save it. This should refresh the Manage account page with the fingerprint of the public key that you just added in the SSH Keys panel.  You are now ready to "log into an Arvados VM":#login.
+
+h1(#login). Using SSH to log into an Arvados VM
+
+To see a list of virtual machines that you have access to and determine the name and login information, click on the link with your _email address_ in the upper right corner and click on the menu item *Manage account* to go to the account management page. On this page, you will see a *Virtual Machines* panel, which lists the virtual machines you can access. The *hostname* column lists the name of each available VM.  The *logins* column will have a list of comma separated values of the form @you@. In this guide the hostname will be *_shell_* and the login will be *_you_*.  Replace these with your hostname and login name as appropriate.
+
+
diff --git a/doc/_includes/_ssh_intro.liquid b/doc/_includes/_ssh_intro.liquid
new file mode 100644 (file)
index 0000000..64ceb0e
--- /dev/null
@@ -0,0 +1,7 @@
+
+Arvados requires a public SSH key in order to securely log in to an Arvados VM instance, or to access an Arvados Git repository. The three sections below help you get started:
+
+# "Getting your SSH key":#gettingkey
+# "Adding your key to Arvados Workbench":#workbench
+# "Using SSH to log into an Arvados VM instance":#login
+
diff --git a/doc/_includes/_tutorial_bwa_sortsam_pipeline.liquid b/doc/_includes/_tutorial_bwa_sortsam_pipeline.liquid
new file mode 100644 (file)
index 0000000..c2be9e2
--- /dev/null
@@ -0,0 +1,65 @@
+{
+    "name": "Tutorial align using bwa mem and SortSam",
+    "components": {
+        "bwa-mem": {
+            "script": "run-command",
+            "script_version": "master",
+            "repository": "arvados",
+            "script_parameters": {
+                "command": [
+                    "bwa",
+                    "mem",
+                    "-t",
+                    "$(node.cores)",
+                    "-R",
+                    "@RG\\tID:group_id\\tPL:illumina\\tSM:sample_id",
+                    "$(glob $(dir $(reference_collection))/*.fasta)",
+                    "$(glob $(dir $(sample))/*_1.fastq)",
+                    "$(glob $(dir $(sample))/*_2.fastq)"
+                ],
+                "reference_collection": {
+                    "required": true,
+                    "dataclass": "Collection"
+                },
+                "sample": {
+                    "required": true,
+                    "dataclass": "Collection"
+                },
+                "stdout": "$(basename $(glob $(dir $(sample))/*_1.fastq)).sam"
+            },
+            "runtime_constraints": {
+                "docker_image": "arvados/jobs-java-bwa-samtools"
+            }
+        },
+        "SortSam": {
+            "script": "run-command",
+            "script_version": "847459b3c257aba65df3e0cbf6777f7148542af2",
+            "repository": "arvados",
+            "script_parameters": {
+                "command": [
+                    "java",
+                    "-Xmx4g",
+                    "-Djava.io.tmpdir=$(tmpdir)",
+                    "-jar",
+                    "$(dir $(picard))/SortSam.jar",
+                    "CREATE_INDEX=True",
+                    "SORT_ORDER=coordinate",
+                    "VALIDATION_STRINGENCY=LENIENT",
+                    "INPUT=$(glob $(dir $(input))/*.sam)",
+                    "OUTPUT=$(basename $(glob $(dir $(input))/*.sam)).sort.bam"
+                ],
+                "input": {
+                    "output_of": "bwa-mem"
+                },
+                "picard": {
+                    "required": true,
+                    "dataclass": "Collection",
+                    "default": "88447c464574ad7f79e551070043f9a9+1970"
+                }
+            },
+            "runtime_constraints": {
+                "docker_image": "arvados/jobs-java-bwa-samtools"
+            }
+        }
+    }
+}
diff --git a/doc/_includes/_tutorial_expectations.liquid b/doc/_includes/_tutorial_expectations.liquid
new file mode 100644 (file)
index 0000000..a371d24
--- /dev/null
@@ -0,0 +1,3 @@
+{% include 'notebox_begin' %}
+This tutorial assumes either that you are logged into an Arvados VM instance (instructions for "Unix":{{site.baseurl}}/user/getting_started/ssh-access-unix.html#login or "Windows":{{site.baseurl}}/user/getting_started/ssh-access-windows.html#login) or you have installed the Arvados "Command line SDK":{{site.baseurl}}/sdk/cli/install.html and "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html on your workstation and have a "working environment.":{{site.baseurl}}/user/getting_started/check-environment.html
+{% include 'notebox_end' %}
diff --git a/doc/_includes/_tutorial_hash_script_py.liquid b/doc/_includes/_tutorial_hash_script_py.liquid
new file mode 100644 (file)
index 0000000..ede2809
--- /dev/null
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+
+import hashlib      # Import the hashlib module to compute MD5.
+import os           # Import the os module for basic path manipulation
+import arvados      # Import the Arvados sdk module
+
+# Automatically parallelize this job by running one task per file.
+# This means that if the input consists of many files, each file will
+# be processed in parallel on different nodes enabling the job to
+# be completed quicker.
+arvados.job_setup.one_task_per_input_file(if_sequence=0, and_end_task=True,
+                                          input_as_path=True)
+
+# Get object representing the current task
+this_task = arvados.current_task()
+
+# Create the message digest object that will compute the MD5 hash
+digestor = hashlib.new('md5')
+
+# Get the input file for the task
+input_id, input_path = this_task['parameters']['input'].split('/', 1)
+
+# Open the input collection
+input_collection = arvados.CollectionReader(input_id)
+
+# Open the input file for reading
+with input_collection.open(input_path) as input_file:
+    for buf in input_file.readall():  # Iterate the file's data blocks
+        digestor.update(buf)          # Update the MD5 hash object
+
+# Write a new collection as output
+out = arvados.CollectionWriter()
+
+# Write an output file with one line: the MD5 value and input path
+with out.open('md5sum.txt') as out_file:
+    out_file.write("{} {}/{}\n".format(digestor.hexdigest(), input_id,
+                                       os.path.normpath(input_path)))
+
+# Commit the output to Keep.
+output_locator = out.finish()
+
+# Use the resulting locator as the output for this task.
+this_task.set_output(output_locator)
+
+# Done!
diff --git a/doc/_includes/_tutorial_submit_job.liquid b/doc/_includes/_tutorial_submit_job.liquid
new file mode 100644 (file)
index 0000000..57063b3
--- /dev/null
@@ -0,0 +1,19 @@
+{
+  "name":"My md5 pipeline",
+  "components":{
+    "do_hash":{
+      "repository":"$USER",
+      "script":"hash.py",
+      "script_version":"master",
+      "runtime_constraints":{
+        "docker_image":"arvados/jobs-java-bwa-samtools"
+      },
+      "script_parameters":{
+        "input":{
+          "required": true,
+          "dataclass": "Collection"
+        }
+      }
+    }
+  }
+}
diff --git a/doc/_includes/_webring.liquid b/doc/_includes/_webring.liquid
new file mode 100644 (file)
index 0000000..7cb9468
--- /dev/null
@@ -0,0 +1,29 @@
+{% assign n = 0 %}
+{% assign prev = "" %}
+{% assign nx = 0 %}
+{% for section in site.navbar[page.navsection] %}
+  {% for entry in section %}
+    {% for item in entry[1] %}        
+      {% assign p = site.pages[item] %}
+      {% if nx == 1 %}
+        <hr>
+        {% if prev != "" %}
+          <a href="{{ site.baseurl }}{{ prev.url }}" class="pull-left">Previous: {{ prev.title }}</a>
+        {% endif %}
+        <a href="{{ site.baseurl }}{{ p.url }}" class="pull-right">Next: {{ p.title }}</a>
+        {% assign nx = 0 %}
+        {% assign n = 1 %}
+      {% endif %}
+      {% if p.url == page.url %}
+        {% assign nx = 1 %}
+      {% else %}
+        {% assign prev = p %}
+      {% endif %}
+    {% endfor %}
+  {% endfor %}
+{% endfor %}
+{% if n == 0 && prev != "" %}
+  <hr>
+  <a href="{{ site.baseurl }}{{ prev.url }}" class="pull-left">Previous: {{ prev.title }}</a>
+  {% assign n = 1 %}
+{% endif %}
\ No newline at end of file
diff --git a/doc/_layouts/default.html.liquid b/doc/_layouts/default.html.liquid
new file mode 100644 (file)
index 0000000..88da01b
--- /dev/null
@@ -0,0 +1,111 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8">
+    <title>{% unless page.title == "Arvados | Documentation" %} Arvados | Documentation | {% endunless %}{{ page.title }}</title>
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <meta name="description" content="">
+    <meta name="author" content="">
+    <link rel="icon" href="{{ site.baseurl }}/images/favicon.ico" type="image/x-icon">
+    <link rel="shortcut icon" href="{{ site.baseurl }}/images/favicon.ico" type="image/x-icon">
+    <link href="{{ site.baseurl }}/css/bootstrap.css" rel="stylesheet">
+    <link href="{{ site.baseurl }}/css/nav-list.css" rel="stylesheet">
+    <link href="{{ site.baseurl }}/css/badges.css" rel="stylesheet">
+    <link href="{{ site.baseurl }}/css/code.css" rel="stylesheet">
+    <link href="{{ site.baseurl }}/css/font-awesome.css" rel="stylesheet">
+    <style>
+      html {
+      height:100%;
+      }
+      body {
+      padding-top: 61px;
+      height: 90%; /* If calc() is not supported */
+      height: calc(100% - 46px); /* Sets the body full height minus the padding for the menu bar */
+      }
+      @media (max-width: 979px) {
+      div.frontpagehero {
+      margin-left: -20px;
+      margin-right: -20px;
+      padding-left: 20px;
+      }
+      }
+      .sidebar-nav {
+        padding: 9px 0;
+      }
+      .section-block {
+      background: #eeeeee;
+      padding: 1em;
+      -webkit-border-radius: 12px;
+      -moz-border-radius: 12px;
+      border-radius: 12px;
+      margin: 0 2em;
+      }
+      .row-fluid :first-child .section-block {
+      margin-left: 0;
+      }
+      .row-fluid :last-child .section-block {
+      margin-right: 0;
+      }
+      .rarr {
+      font-size: 1.5em;
+      }
+      .darr {
+      font-size: 4em;
+      text-align: center;
+      margin-bottom: 1em;
+      }
+      :target {
+      padding-top: 61px;
+      margin-top: -61px;
+      }
+    </style>
+
+    <!-- HTML5 shim, for IE6-8 support of HTML5 elements -->
+    <!--[if lt IE 9]>
+        <script src="../assets/js/html5shiv.js"></script>
+        <![endif]-->
+  </head>
+  <body class="nopad">
+    {% include 'navbar_top' %}
+
+    {% if page.navsection == 'top' or page.no_nav_left %}
+    {{ content }}
+    {% else %}
+
+    <div class="container-fluid">
+      <div class="row">
+        {% include 'navbar_left' %}
+        <div class="col-sm-9">
+          <h1>{{ page.title }}</h1>
+          {{ content }}
+          {% include 'webring' %}
+        </div>
+      </div>
+
+      <div style="height: 2em"></div>
+
+    </div>
+    {% endif %}
+    <script src="{{ site.baseurl }}/js/jquery.min.js"></script>
+    <script src="{{ site.baseurl }}/js/bootstrap.min.js"></script>
+    <script>
+  (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
+  (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
+  m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
+  })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
+
+  ga('create', 'UA-40055979-1', 'arvados.org');
+  ga('send', 'pageview');
+
+    </script>
+
+<p style="text-align: center"><small>
+The content of this documentation is licensed under the
+<a href="{{ site.baseurl }}/user/copying/by-sa-3.0.html">Creative
+  Commons Attribution-Share Alike 3.0 United States</a> licence.<br>
+Code samples in this documentation are licensed under the
+<a href="{{ site.baseurl }}/user/copying/LICENSE-2.0.html">Apache License, Version 2.0.</a></small>
+</p>
+
+  </body>
+</html>
diff --git a/doc/admin/cheat_sheet.html.textile.liquid b/doc/admin/cheat_sheet.html.textile.liquid
new file mode 100644 (file)
index 0000000..83fa5e8
--- /dev/null
@@ -0,0 +1,68 @@
+---
+layout: default
+navsection: admin
+title: Cheat Sheet
+...
+
+
+
+h3. CLI setup
+
+<pre>
+ARVADOS_API_HOST={{ site.arvados_api_host }}
+ARVADOS_API_TOKEN=1234567890qwertyuiopasdfghjklzxcvbnm1234567890zzzz
+</pre>
+
+h3. Create VM
+
+<pre>
+arv virtual_machine create --virtual-machine '{"hostname":"xxxxxxxchangeme.example.com"}'
+</pre>
+
+h3. Activate user
+
+<pre>
+user_uuid=xxxxxxxchangeme
+
+arv user update --uuid "$user_uuid" --user '{"is_active":true}'
+</pre>
+
+h3. User &rarr; VM
+
+Give @$user_uuid@ permission to log in to @$vm_uuid@ as @$target_username@
+
+<pre>
+user_uuid=xxxxxxxchangeme
+vm_uuid=xxxxxxxchangeme
+target_username=xxxxxxxchangeme
+
+read -rd $'\000' newlink <<EOF; arv link create --link "$newlink"
+{
+"tail_uuid":"$user_uuid",
+"head_uuid":"$vm_uuid",
+"link_class":"permission",
+"name":"can_login",
+"properties":{"username":"$target_username"}
+}
+EOF
+</pre>
+
+h3. User &rarr; repo
+
+Give @$user_uuid@ permission to commit to @$repo_uuid@ as @$repo_username@
+
+<pre>
+user_uuid=xxxxxxxchangeme
+repo_uuid=xxxxxxxchangeme
+repo_username=xxxxxxxchangeme
+
+read -rd $'\000' newlink <<EOF; arv link create --link "$newlink"
+{
+"tail_uuid":"$user_uuid",
+"head_uuid":"$repo_uuid",
+"link_class":"permission",
+"name":"can_write",
+"properties":{"username":"$repo_username"}
+}
+EOF
+</pre>
diff --git a/doc/admin/index.html.md.liquid b/doc/admin/index.html.md.liquid
new file mode 100644 (file)
index 0000000..f0ed3ad
--- /dev/null
@@ -0,0 +1,14 @@
+---
+layout: default
+navsection: admin
+title: Overview
+...
+
+{% include 'alert_stub' %}
+
+# Administration Overview
+
+Unlike other users, administrators
+
+* can see and modify all user-created objects regardless of permission settings
+* can directly create and modify system objects like Nodes and ApiClients
diff --git a/doc/api/authentication.html.textile.liquid b/doc/api/authentication.html.textile.liquid
new file mode 100644 (file)
index 0000000..cbf7553
--- /dev/null
@@ -0,0 +1,40 @@
+---
+layout: default
+navsection: api
+navmenu: Concepts
+title: Authentication
+
+...
+
+
+
+Every API request (except the authentication API itself) includes an @access_token@ parameter.
+
+table(table table-bordered table-condensed).
+|Name|Type|Description|
+|access_token|string|Access token returned by OAuth 2.0 authorization procedure|
+
+Many resources contain "actor" attributes like @modified_by@.  An @access_token@ uniquely identifies a client (application or project) and an end-user.
+
+table(table table-bordered table-condensed).
+|Name|Type|Description|
+|modified_by_client_uuid|string|ID of API client|
+|modified_by_user_uuid|string|ID of authenticated user|
+
+h2. Authorizing a client application
+
+The Arvados API uses the "OAuth 2.0 protocol":http://tools.ietf.org/html/draft-ietf-oauth-v2-22 for authentication and authorization.
+
+h3. Register your client application
+
+Before an application can run on an Arvados cloud, it needs to be registered with the cloud. 
+
+That registration yields a @client_id@ and a @client_secret@. 
+
+h3. Obtain an access code
+
+A client obtains an access code by means of a standard OAuth 2.0 flow. The access code is granted to it by an authorized user. The client requests one or more scopes, which translate to a set of requested permissions (reading, writing, etc). Unless the access is to be short-lived, a refresh token is also granted to the application.
+
+h3. Refresh the access code (optional)
+
+Access codes have a limited lifetime. A refresh token allows an application to request a new access token.
diff --git a/doc/api/crunch-scripts.html.textile.liquid b/doc/api/crunch-scripts.html.textile.liquid
new file mode 100644 (file)
index 0000000..98634cd
--- /dev/null
@@ -0,0 +1,46 @@
+---
+layout: default
+navsection: api
+navmenu: Concepts
+title: Crunch scripts
+
+...
+
+h2. Crunch scripts
+
+A crunch script is responsible for completing a single JobTask. In doing so, it will:
+
+* (optionally) read some input from Keep
+* (optionally) store some output in Keep
+* (optionally) create some new JobTasks and add them to the current Job
+* (optionally) update the current JobTask record with the "output" attribute set to a Keep locator or a fragment of a manifest
+* update the current JobTask record with the "success" attribute set to True
+
+A task's context is provided in environment variables.
+
+table(table table-bordered table-condensed).
+|Environment variable|Description|
+|@JOB_UUID@|UUID of the current "Job":schema/Job.html|
+|@TASK_UUID@|UUID of the current "JobTask":schema/JobTask.html|
+|@ARVADOS_API_HOST@|Hostname and port number of API server|
+|@ARVADOS_API_TOKEN@|Authentication token to use with API calls made by the current task|
+
+The crunch script typically uses the Python SDK (or another suitable client library / SDK) to connect to the Arvados service and retrieve the rest of the details about the current job and task.
+
+The Python SDK has some shortcuts for common operations.
+
+In general, a crunch script can access information about the current job and task like this:
+
+<pre>
+import arvados
+import os
+import sys
+
+job = arvados.api().jobs().get(uuid=os.environ['JOB_UUID']).execute()
+sys.stderr.write("script_parameters['foo'] == %s"
+                 % job['script_parameters']['foo'])
+
+task = arvados.api().job_tasks().get(uuid=os.environ['TASK_UUID']).execute()
+sys.stderr.write("current task sequence number is %d"
+                 % task['sequence'])
+</pre>
+
diff --git a/doc/api/index.html.textile.liquid b/doc/api/index.html.textile.liquid
new file mode 100644 (file)
index 0000000..81b2c1c
--- /dev/null
@@ -0,0 +1,50 @@
+---
+layout: default
+navsection: api
+title: API Reference
+
+...
+
+
+
+h2. Concepts
+
+* Each API uses the same "authentication mechanism":authentication.html.
+* Resources in requests and responses adhere to a "common structure":resources.html.
+* API transactions use common "REST methods":methods.html.
+* API transactions are subject to a "permission model":permission-model.html.
+* "Job tasks":schema/JobTask.html use some special API features.
+
+h2. Resources
+
+h3. Generic Resources
+
+* "Collection":schema/Collection.html
+* "Job":schema/Job.html
+* "JobTask":schema/JobTask.html
+* "Link":schema/Link.html
+* "Log":schema/Log.html
+* "PipelineTemplate":schema/PipelineTemplate.html
+* "PipelineInstance":schema/PipelineInstance.html
+* "Group":schema/Group.html
+* "Human":schema/Human.html
+* "Specimen":schema/Specimen.html
+* "Trait":schema/Trait.html
+* "User":schema/User.html
+
+h3. Authentication
+
+These Arvados resources govern authorization and "authentication":authentication.html:
+
+* "ApiClient":schema/ApiClient.html
+* "ApiClientAuthorization":schema/ApiClientAuthorization.html
+* "AuthorizedKey":schema/AuthorizedKey.html
+
+h3. Arvados Infrastructure
+
+These resources govern the Arvados infrastructure itself: Git repositories, Keep disks, active nodes, etc.
+
+* "KeepDisk":schema/KeepDisk.html
+* "Node":schema/Node.html
+* "Repository":schema/Repository.html
+* "VirtualMachine":schema/VirtualMachine.html
diff --git a/doc/api/methods.html.textile.liquid b/doc/api/methods.html.textile.liquid
new file mode 100644 (file)
index 0000000..2d530d1
--- /dev/null
@@ -0,0 +1,102 @@
+---
+layout: default
+navsection: api
+navmenu: Concepts
+title: REST methods
+
+...
+
+
+
+(using Group as an example)
+
+h2(#index). Index, list, search
+
+<pre>
+GET https://{{ site.arvados_api_host }}/arvados/v1/groups?filters=[["owner_uuid","=","xyzzy-tpzed-a4lcehql0dv2u25"]]
+
+POST https://{{ site.arvados_api_host }}/arvados/v1/groups
+_method=GET
+filters=[["owner_uuid","=","xyzzy-tpzed-a4lcehql0dv2u25"]]
+</pre>
+
+&rarr; Group resource list
+
+table(table table-bordered table-condensed).
+|*Parameter name*|*Value*|*Description*|
+|limit   |integer|Maximum number of resources to return.|
+|offset  |integer|Skip the first 'offset' resources that match the given filter conditions.|
+|filters |array  |Conditions for selecting resources to return (see below).|
+|order   |array  |Attributes to use as sort keys to determine the order resources are returned, each optionally followed by @asc@ or @desc@ to indicate ascending or descending order.
+Example: @["head_uuid asc","modified_at desc"]@
+Default: @["created_at desc"]@|
+|select  |array  |Set of attributes to include in the response.
+Example: @["head_uuid","tail_uuid"]@
+Default: all available attributes, minus "manifest_text" in the case of collections.|
+|distinct|boolean|@true@: (default) do not return duplicate objects
+@false@: permitted to return duplicates|
+
+h3. Filters
+
+The value of the @filters@ parameter is an array of conditions. The @list@ method returns only the resources that satisfy all of the given conditions. In other words, the conjunction @AND@ is implicit.
+
+Each condition is expressed as an array with three elements: @[attribute, operator, operand]@.
+
+table(table table-bordered table-condensed).
+|_. Index|_. Element|_. Type|_. Description|_. Examples|
+|0|attribute|string|Name of the attribute to compare (or "any" to return resources with any matching attribute)|@script_version@, @head_uuid@, @any@|
+|1|operator|string|Comparison operator|@>@, @>=@, @like@, @not in@|
+|2|operand|string, array, or null|Value to compare with the resource attribute|@"d00220fb%"@, @"1234"@, @["foo","bar"]@, @nil@|
+
+The following operators are available.
+
+table(table table-bordered table-condensed).
+|_. Operator|_. Operand type|_. Example|
+|@<@, @<=@, @>=@, @>@, @like@, @ilike@|string|@["script_version","like","d00220fb%"]@|
+|@=@, @!=@|string or null|@["tail_uuid","=","xyzzy-j7d0g-fffffffffffffff"]@
+@["tail_uuid","!=",null]@|
+|@in@, @not in@|array of strings|@["script_version","in",["master","d00220fb38d4b85ca8fc28a8151702a2b9d1dec5"]]@|
+|@is_a@|string|@["head_uuid","is_a","arvados#pipelineInstance"]@|
+
+h2. Create
+
+<pre>
+POST https://{{ site.arvados_api_host }}/arvados/v1/groups
+group={"name":"fresh new group"}
+</pre>
+
+&rarr; Group resource
+
+h2. Delete
+
+<pre>
+DELETE https://{{ site.arvados_api_host }}/arvados/v1/groups/xyzzy-ldvyl-vyydjeplwaa6emg
+</pre>
+
+&rarr; Group resource
+
+h2. Update
+
+<pre>
+PUT https://{{ site.arvados_api_host }}/arvados/v1/groups/xyzzy-ldvyl-vyydjeplwaa6emg
+group={"uuid":"xyzzy-ldvyl-vyydjeplwaa6emg", "name":"Important group"}
+</pre>
+
+&rarr; Group resource
+
+<pre>
+PUT https://{{ site.arvados_api_host }}/arvados/v1/groups/xyzzy-ldvyl-vyydjeplwaa6emg
+group[uuid]=xyzzy-ldvyl-vyydjeplwaa6emg
+group[name]=Important group
+</pre>
+
+&rarr; Group resource
+
+More appropriate (but not yet implemented):
+
+<pre>
+PATCH https://{{ site.arvados_api_host }}/arvados/v1/groups/xyzzy-ldvyl-vyydjeplwaa6emg
+group={"uuid":"xyzzy-ldvyl-vyydjeplwaa6emg", "name":"Important group"}
+</pre>
+
+&rarr; Group resource
diff --git a/doc/api/methods/api_client_authorizations.html.textile.liquid b/doc/api/methods/api_client_authorizations.html.textile.liquid
new file mode 100644 (file)
index 0000000..7af9711
--- /dev/null
@@ -0,0 +1,78 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "api_client_authorizations"
+
+...
+
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/api_client_authorizations@
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+
+h2. create
+
+Create a new ApiClientAuthorization.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|api_client_authorization|object||query||
+
+h2. create_system_auth
+
+create_system_auth api_client_authorizations
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|api_client_id|integer||query||
+|scopes|array||query||
+
+h2. delete
+
+Delete an existing ApiClientAuthorization.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the ApiClientAuthorization in question.|path||
+
+h2. get
+
+Gets an ApiClientAuthorization's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the ApiClientAuthorization in question.|path||
+
+h2. list
+
+List api_client_authorizations.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|limit|integer (default 100)|Maximum number of api_client_authorizations to return.|query||
+|order|string|Order in which to return matching api_client_authorizations.|query||
+|filters|array|Conditions for filtering api_client_authorizations.|query||
+
+h2. update
+
+Update attributes of an existing ApiClientAuthorization.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the ApiClientAuthorization in question.|path||
+|api_client_authorization|object||query||
diff --git a/doc/api/methods/api_clients.html.textile.liquid b/doc/api/methods/api_clients.html.textile.liquid
new file mode 100644 (file)
index 0000000..056cc30
--- /dev/null
@@ -0,0 +1,66 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "api_clients"
+
+...
+
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/api_clients@
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h2. create
+
+Create a new ApiClient.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|api_client|object||query||
+
+h2. delete
+
+Delete an existing ApiClient.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the ApiClient in question.|path||
+
+h2. get
+
+Gets an ApiClient's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the ApiClient in question.|path||
+
+h2. list
+
+List api_clients.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|limit|integer (default 100)|Maximum number of api_clients to return.|query||
+|order|string|Order in which to return matching api_clients.|query||
+|filters|array|Conditions for filtering api_clients.|query||
+
+h2. update
+
+Update attributes of an existing ApiClient.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the ApiClient in question.|path||
+|api_client|object||query||
diff --git a/doc/api/methods/authorized_keys.html.textile.liquid b/doc/api/methods/authorized_keys.html.textile.liquid
new file mode 100644 (file)
index 0000000..9727c57
--- /dev/null
@@ -0,0 +1,67 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "authorized_keys"
+
+...
+
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/authorized_keys@
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+
+h2. create
+
+Create a new AuthorizedKey.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|authorized_key|object||query||
+
+h2. delete
+
+Delete an existing AuthorizedKey.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the AuthorizedKey in question.|path||
+
+h2. get
+
+Gets an AuthorizedKey's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the AuthorizedKey in question.|path||
+
+h2. list
+
+List authorized_keys.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|limit|integer (default 100)|Maximum number of authorized_keys to return.|query||
+|order|string|Order in which to return matching authorized_keys.|query||
+|filters|array|Conditions for filtering authorized_keys.|query||
+
+h2. update
+
+Update attributes of an existing AuthorizedKey.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the AuthorizedKey in question.|path||
+|authorized_key|object||query||
diff --git a/doc/api/methods/collections.html.textile.liquid b/doc/api/methods/collections.html.textile.liquid
new file mode 100644 (file)
index 0000000..8760fe8
--- /dev/null
@@ -0,0 +1,69 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "collections"
+
+...
+
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/collections@
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h2. create
+
+Create a new Collection.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|collection|object||query||
+
+h2. delete
+
+Delete an existing Collection.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Collection in question.|path||
+
+h2. get
+
+Gets a Collection's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Collection in question.|path||
+
+h2. list
+
+List collections.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|limit|integer (default 100)|Maximum number of collections to return.|query||
+|order|string|Order in which to return matching collections.|query||
+|filters|array|Conditions for filtering collections.|query||
+|select|array|Data fields to return in the result list.|query|@["uuid", "manifest_text"]@|
+
+N.B.: Because adding access tokens to manifests can be computationally expensive, the @manifest_text@ field is not included in results by default.  If you need it, pass a @select@ parameter that includes @manifest_text@.
+
+h2. update
+
+Update attributes of an existing Collection.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Collection in question.|path||
+|collection|object||query||
diff --git a/doc/api/methods/groups.html.textile.liquid b/doc/api/methods/groups.html.textile.liquid
new file mode 100644 (file)
index 0000000..478662e
--- /dev/null
@@ -0,0 +1,92 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "groups"
+
+...
+
+
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/groups@
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+
+h2. contents
+
+Retrieve a list of items which are associated with the given group by ownership (i.e., the group owns the item) or a "name" link (i.e., a "name" link referencing the item).
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the group in question.|path||
+|include_linked|boolean|If false, results will only include items whose @owner_uuid@ attribute is the specified group. If true, results will additionally include items for which a "name" link exists.|path|{white-space:nowrap}. @false@ (default)
+@true@|
+
+If @include_linked@ is @true@, the @"links"@ field in the response will contain the "name" links referencing the objects in the @"items"@ field.
+
+h2. create
+
+Create a new Group.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|group|object||query||
+
+h2. delete
+
+Delete an existing Group.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Group in question.|path||
+
+h2. get
+
+Gets a Group's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Group in question.|path||
+
+h2. list
+
+List groups.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|limit|integer (default 100)|Maximum number of groups to return.|query||
+|order|string|Order in which to return matching groups.|query||
+|filters|array|Conditions for filtering groups.|query||
+
+h2. show
+
+show groups
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string||path||
+
+h2. update
+
+Update attributes of an existing Group.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Group in question.|path||
+|group|object||query||
diff --git a/doc/api/methods/humans.html.textile.liquid b/doc/api/methods/humans.html.textile.liquid
new file mode 100644 (file)
index 0000000..1d8c13e
--- /dev/null
@@ -0,0 +1,67 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "humans"
+
+...
+
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/humans@
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+
+h2. create
+
+Create a new Human.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|human|object||query||
+
+h2. delete
+
+Delete an existing Human.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Human in question.|path||
+
+h2. get
+
+Gets a Human's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Human in question.|path||
+
+h2. list
+
+List humans.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|limit|integer (default 100)|Maximum number of humans to return.|query||
+|order|string|Order in which to return matching humans.|query||
+|filters|array|Conditions for filtering humans.|query||
+
+h2. update
+
+Update attributes of an existing Human.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Human in question.|path||
+|human|object||query||
diff --git a/doc/api/methods/job_tasks.html.textile.liquid b/doc/api/methods/job_tasks.html.textile.liquid
new file mode 100644 (file)
index 0000000..7b040d8
--- /dev/null
@@ -0,0 +1,67 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "job_tasks"
+
+...
+
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/job_tasks@
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+
+h2. create
+
+Create a new JobTask.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|job_task|object||query||
+
+h2. delete
+
+Delete an existing JobTask.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the JobTask in question.|path||
+
+h2. get
+
+Gets a JobTask's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the JobTask in question.|path||
+
+h2. list
+
+List job_tasks.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|limit|integer (default 100)|Maximum number of job_tasks to return.|query||
+|order|string|Order in which to return matching job_tasks.|query||
+|filters|array|Conditions for filtering job_tasks.|query||
+
+h2. update
+
+Update attributes of an existing JobTask.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the JobTask in question.|path||
+|job_task|object||query||
diff --git a/doc/api/methods/jobs.html.textile.liquid b/doc/api/methods/jobs.html.textile.liquid
new file mode 100644 (file)
index 0000000..ac68129
--- /dev/null
@@ -0,0 +1,226 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "jobs"
+
+...
+
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/jobs@
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h2. cancel
+
+Cancel a job that is queued or running.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string||path||
+
+h2(#create). create
+
+Create a new Job.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|job|object|See "Job resource":{{site.baseurl}}/api/schema/Job.html|request body||
+|minimum_script_version |string     |Git branch, tag, or commit hash specifying the minimum acceptable script version (earliest ancestor) to consider when deciding whether to re-use a past job.[1]|query|@"c3e86c9"@|
+|exclude_script_versions|array of strings|Git commit branches, tags, or hashes to exclude when deciding whether to re-use a past job.|query|@["8f03c71","8f03c71"]@
+@["badtag1","badtag2"]@|
+|filters|array|Conditions to find Jobs to reuse.|query||
+|find_or_create         |boolean    |Before creating, look for an existing job that has identical script, script_version, and script_parameters to those in the present job, has nondeterministic=false, and did not fail (it could be queued, running, or completed). If such a job exists, respond with the existing job instead of submitting a new one.|query|@false@|
+
+When a job is submitted to the queue using the **create** method, the @script_version@ attribute is updated to a full 40-character Git commit hash based on the current content of the specified repository. If @script_version@ cannot be resolved, the job submission is rejected.
+
+fn1. See the "note about specifying Git commits on the Job resource page":{{site.baseurl}}/api/schema/Job.html#script_version for more detail.
+
+h3. Specialized filters
+
+Special filter operations are available for specific Job columns.
+
+* @script_version@ @in git@ @REFSPEC@, @arvados_sdk_version@ @in git@ @REFSPEC@<br>Resolve @REFSPEC@ to a list of Git commits, and match jobs with a @script_version@ or @arvados_sdk_version@ in that list.  When creating a job and filtering @script_version@, the search will find commits between @REFSPEC@ and the submitted job's @script_version@; all other searches will find commits between @REFSPEC@ and HEAD.  This list may include parallel branches if there is more than one path between @REFSPEC@ and the end commit in the graph.  Use @not in@ or @not in git@ filters (below) to blacklist specific commits.
+
+* @script_version@ @not in git@ @REFSPEC@, @arvados_sdk_version@ @not in git@ @REFSPEC@<br>Resolve @REFSPEC@ to a list of Git commits, and match jobs with a @script_version@ or @arvados_sdk_version@ not in that list.
+
+* @docker_image_locator@ @in docker@ @SEARCH@<br>@SEARCH@ can be a Docker image hash, a repository name, or a repository name and tag separated by a colon (@:@).  The server will find collections that contain a Docker image that match that search criteria, then match jobs with a @docker_image_locator@ in that list.
+
+* @docker_image_locator@ @not in docker@ @SEARCH@<br>Negate the @in docker@ filter.
+
+h3. Reusing jobs
+
+Because Arvados records the exact version of the script, input parameters, and runtime environment that was used to run the job, if the script is deterministic (meaning that the same code version is guaranteed to produce the same outputs from the same inputs) then it is possible to re-use the results of past jobs, and avoid re-running the computation to save time.  Arvados uses the following algorithm to determine if a past job can be re-used:
+
+notextile. <div class="spaced-out">
+
+# If @find_or_create@ is false or omitted, create a new job and skip the rest of these steps.
+# If @filters@ are specified, find jobs that match those filters.  Filters *must* be specified to limit the @repository@ and @script@ attributes.  An error is returned if they are missing.
+# If @filters@ are not specified, find jobs with the same @repository@ and @script@, with a @script_version@ between @minimum_script_version@ and @script_version@ (excluding @exclude_script_versions@), and a @docker_image_locator@ with the latest Collection that matches the submitted job's @docker_image@ constraint.  If the submitted job includes an @arvados_sdk_version@ constraint, jobs must have an @arvados_sdk_version@ between that refspec and HEAD to be found.
+# If the found jobs include a completed job, and all found completed jobs have consistent output, return one of them.  Which specific job is returned is undefined.
+# If the found jobs only include incomplete jobs, return one of them.  Which specific job is returned is undefined.
+# If no job has been returned so far, create and return a new job.
+
+</div>
+
+h3. Examples
+
+Run the script "crunch_scripts/hash.py" in the repository "you" using the "master" commit.  Arvados should re-use a previous job if the script_version of the previous job is the same as the current "master" commit. This works irrespective of whether the previous job was submitted using the name "master", a different branch name or tag indicating the same commit, a SHA-1 commit hash, etc.
+
+<notextile><pre>
+{
+  "job": {
+    "script": "hash.py",
+    "repository": "<b>you</b>",
+    "script_version": "master",
+    "script_parameters": {
+      "input": "c1bad4b39ca5a924e481008009d94e32+210"
+    }
+  },
+  "find_or_create": true
+}
+</pre></notextile>
+
+Run using exactly the version "d00220fb38d4b85ca8fc28a8151702a2b9d1dec5". Arvados should re-use a previous job if the "script_version" of that job is also "d00220fb38d4b85ca8fc28a8151702a2b9d1dec5".
+
+<notextile><pre>
+{
+  "job": {
+    "script": "hash.py",
+    "repository": "<b>you</b>",
+    "script_version": "d00220fb38d4b85ca8fc28a8151702a2b9d1dec5",
+    "script_parameters": {
+      "input": "c1bad4b39ca5a924e481008009d94e32+210"
+    }
+  },
+  "find_or_create": true
+}
+</pre></notextile>
+
+Arvados should re-use a previous job if the "script_version" of the previous job is between "earlier_version_tag" and the "master" commit (inclusive), but not the commit indicated by "blacklisted_version_tag". If there are no previous jobs matching these criteria, run the job using the "master" commit.
+
+<notextile><pre>
+{
+  "job": {
+    "script": "hash.py",
+    "repository": "<b>you</b>",
+    "script_version": "master",
+    "script_parameters": {
+      "input": "c1bad4b39ca5a924e481008009d94e32+210"
+    }
+  },
+  "minimum_script_version": "earlier_version_tag",
+  "exclude_script_versions": ["blacklisted_version_tag"],
+  "find_or_create": true
+}
+</pre></notextile>
+
+The same behavior, using filters:
+
+<notextile><pre>
+{
+  "job": {
+    "script": "hash.py",
+    "repository": "<b>you</b>",
+    "script_version": "master",
+    "script_parameters": {
+      "input": "c1bad4b39ca5a924e481008009d94e32+210"
+    }
+  },
+  "filters": [["script", "=", "hash.py"],
+              ["repository", "=", "<b>you</b>"],
+              ["script_version", "in git", "earlier_version_tag"],
+              ["script_version", "not in git", "blacklisted_version_tag"]],
+  "find_or_create": true
+}
+</pre></notextile>
+
+Run the script "crunch_scripts/monte-carlo.py" in the repository "you" using the current "master" commit. Because it is marked as "nondeterministic", this job will not be considered as a suitable candidate for future job submissions that use the "find_or_create" feature.
+
+<notextile><pre>
+{
+  "job": {
+    "script": "monte-carlo.py",
+    "repository": "<b>you</b>",
+    "script_version": "master",
+    "nondeterministic": true,
+    "script_parameters": {
+      "input": "c1bad4b39ca5a924e481008009d94e32+210"
+    }
+  }
+}
+</pre></notextile>
+
+h2. delete
+
+Delete an existing Job.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Job in question.|path||
+
+h2. get
+
+Gets a Job's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Job in question.|path||
+
+h2. list
+
+List jobs.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|limit|integer (default 100)|Maximum number of jobs to return.|query||
+|order|string|Order in which to return matching jobs.|query||
+|filters|array|Conditions for filtering jobs.|query||
+
+See the create method documentation for more information about Job-specific filters.
+
+h2. log_tail_follow
+
+log_tail_follow jobs
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string||path||
+|buffer_size|integer (default 8192)||query||
+
+h2. queue
+
+Get the current job queue.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|order|string||query||
+|filters|array||query||
+
+This method is equivalent to the "list method":#list, except that the results are restricted to queued jobs (i.e., jobs that have not yet been started or cancelled) and order defaults to queue priority.
+
+h2. update
+
+Update attributes of an existing Job.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Job in question.|path||
+|job|object||query||
diff --git a/doc/api/methods/keep_disks.html.textile.liquid b/doc/api/methods/keep_disks.html.textile.liquid
new file mode 100644 (file)
index 0000000..5179ccc
--- /dev/null
@@ -0,0 +1,82 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "keep_disks"
+
+...
+
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/keep_disks@
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h2. create
+
+Create a new KeepDisk.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|keep_disk|object||query||
+
+h2. delete
+
+Delete an existing KeepDisk.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the KeepDisk in question.|path||
+
+h2. get
+
+Gets a KeepDisk's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the KeepDisk in question.|path||
+
+h2. list
+
+List keep_disks.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|limit|integer (default 100)|Maximum number of keep_disks to return.|query||
+|order|string|Order in which to return matching keep_disks.|query||
+|filters|array|Conditions for filtering keep_disks.|query||
+
+h2. ping
+
+ping keep_disks
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|ping_secret|string||query||
+{background:#ccffcc}.|service_port|string||query||
+{background:#ccffcc}.|service_ssl_flag|string||query||
+|filesystem_uuid|string||query||
+|node_uuid|string||query||
+|service_host|string||query||
+|uuid|string||query||
+
+h2. update
+
+Update attributes of an existing KeepDisk.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the KeepDisk in question.|path||
+|keep_disk|object||query||
diff --git a/doc/api/methods/keep_services.html.textile.liquid b/doc/api/methods/keep_services.html.textile.liquid
new file mode 100644 (file)
index 0000000..da6818b
--- /dev/null
@@ -0,0 +1,75 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "keep_services"
+
+...
+
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/keep_services@
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h2. accessible
+
+Get a list of keep services that are accessible to the requesting client.  This
+is context-sensitive, for example providing the list of actual Keep servers
+when inside the cluster, but providing a proxy service if the client contacts
+Arvados from outside the cluster.
+
+Takes no arguments.
+
+h2. create
+
+Create a new KeepService.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|keep_service|object||query||
+
+h2. delete
+
+Delete an existing KeepService.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the KeepService in question.|path||
+
+h2. get
+
+Gets a KeepService's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the KeepService in question.|path||
+
+h2. list
+
+List keep_services.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|limit|integer (default 100)|Maximum number of keep_services to return.|query||
+|order|string|Order in which to return matching keep_services.|query||
+|filters|array|Conditions for filtering keep_services.|query||
+
+h2. update
+
+Update attributes of an existing KeepService.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the KeepService in question.|path||
+|keep_service|object||query||
diff --git a/doc/api/methods/links.html.textile.liquid b/doc/api/methods/links.html.textile.liquid
new file mode 100644 (file)
index 0000000..3c0bdf3
--- /dev/null
@@ -0,0 +1,76 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "links"
+
+...
+
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/links@
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+h2. create
+
+Create a new Link.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|link|object||query||
+
+h2. delete
+
+Delete an existing Link.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Link in question.|path||
+
+h2. get
+
+Gets a Link's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Link in question.|path||
+
+h2. list
+
+List links.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|limit|integer (default 100)|Maximum number of links to return.|query||
+|order|string|Order in which to return matching links.|query||
+|filters|array|Conditions for filtering links.|query||
+
+h2. render_not_found
+
+render_not_found links
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|a|string||path||
+
+h2. update
+
+Update attributes of an existing Link.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Link in question.|path||
+|link|object||query||
diff --git a/doc/api/methods/logs.html.textile.liquid b/doc/api/methods/logs.html.textile.liquid
new file mode 100644 (file)
index 0000000..c5895d7
--- /dev/null
@@ -0,0 +1,67 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "logs"
+
+...
+
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/logs@
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+
+h2. create
+
+Create a new log entry.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|log|object||query||
+
+h2. delete
+
+Delete an existing log entry. This method can only be used by privileged (system administrator) users.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the log entry in question.|path||
+
+h2. get
+
+Retrieve a log entry.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the log entry in question.|path||
+
+h2. list
+
+List log entries.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|limit|integer (default 100)|Maximum number of log entries to return.|query||
+|order|string|Order in which to return matching log entries.|query||
+|filters|array|Conditions for filtering log entries.|query||
+
+h2. update
+
+Update attributes of an existing log entry. This method can only be used by privileged (system administrator) users.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the log entry in question.|path||
+|log|object||query||
diff --git a/doc/api/methods/nodes.html.textile.liquid b/doc/api/methods/nodes.html.textile.liquid
new file mode 100644 (file)
index 0000000..7aa5896
--- /dev/null
@@ -0,0 +1,80 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "nodes"
+
+...
+
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/nodes@
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+
+h2. create
+
+Create a new Node.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|node|object||query||
+
+h2. delete
+
+Delete an existing Node.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Node in question.|path||
+
+h2. get
+
+Gets a Node's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Node in question.|path||
+
+h2. list
+
+List nodes.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|limit|integer (default 100)|Maximum number of nodes to return.|query||
+|order|string|Order in which to return matching nodes.|query||
+|filters|array|Conditions for filtering nodes.|query||
+
+h2. ping
+
+ping nodes
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|ping_secret|string||query||
+{background:#ccffcc}.|uuid|string||path||
+
+h2. update
+
+Update attributes of an existing Node.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Node in question.|path||
+|node|object||query||
+
+To remove a node's job assignment, update the node object's @job_uuid@ to null.
diff --git a/doc/api/methods/pipeline_instances.html.textile.liquid b/doc/api/methods/pipeline_instances.html.textile.liquid
new file mode 100644 (file)
index 0000000..d637a69
--- /dev/null
@@ -0,0 +1,67 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "pipeline_instances"
+
+...
+
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/pipeline_instances@
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+
+h2. create
+
+Create a new PipelineInstance.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|pipeline_instance|object||query||
+
+h2. delete
+
+Delete an existing PipelineInstance.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the PipelineInstance in question.|path||
+
+h2. get
+
+Gets a PipelineInstance's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the PipelineInstance in question.|path||
+
+h2. list
+
+List pipeline_instances.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|limit|integer (default 100)|Maximum number of pipeline_instances to return.|query||
+|order|string|Order in which to return matching pipeline_instances.|query||
+|filters|array|Conditions for filtering pipeline_instances.|query||
+
+h2. update
+
+Update attributes of an existing PipelineInstance.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the PipelineInstance in question.|path||
+|pipeline_instance|object||query||
diff --git a/doc/api/methods/pipeline_templates.html.textile.liquid b/doc/api/methods/pipeline_templates.html.textile.liquid
new file mode 100644 (file)
index 0000000..06684cc
--- /dev/null
@@ -0,0 +1,67 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "pipeline_templates"
+
+...
+
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/pipeline_templates@
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+
+h2. create
+
+Create a new PipelineTemplate.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|pipeline_template|object||query||
+
+h2. delete
+
+Delete an existing PipelineTemplate.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the PipelineTemplate in question.|path||
+
+h2. get
+
+Gets a PipelineTemplate's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the PipelineTemplate in question.|path||
+
+h2. list
+
+List pipeline_templates.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|limit|integer (default 100)|Maximum number of pipeline_templates to return.|query||
+|order|string|Order in which to return matching pipeline_templates.|query||
+|filters|array|Conditions for filtering pipeline_templates.|query||
+
+h2. update
+
+Update attributes of an existing PipelineTemplate.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the PipelineTemplate in question.|path||
+|pipeline_template|object||query||
diff --git a/doc/api/methods/repositories.html.textile.liquid b/doc/api/methods/repositories.html.textile.liquid
new file mode 100644 (file)
index 0000000..7bd8dd9
--- /dev/null
@@ -0,0 +1,76 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "repositories"
+
+...
+
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/repositories@
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+
+h2. create
+
+Create a new Repository.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|repository|object||query||
+
+h2. delete
+
+Delete an existing Repository.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Repository in question.|path||
+
+h2. get
+
+Gets a Repository's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Repository in question.|path||
+
+h2. get_all_permissions
+
+get_all_permissions repositories
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+
+h2. list
+
+List repositories.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|limit|integer (default 100)|Maximum number of repositories to return.|query||
+|order|string|Order in which to return matching repositories.|query||
+|filters|array|Conditions for filtering repositories.|query||
+
+h2. update
+
+Update attributes of an existing Repository.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Repository in question.|path||
+|repository|object||query||
diff --git a/doc/api/methods/specimens.html.textile.liquid b/doc/api/methods/specimens.html.textile.liquid
new file mode 100644 (file)
index 0000000..6737c9b
--- /dev/null
@@ -0,0 +1,67 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "specimens"
+
+...
+
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/specimens@
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+
+h2. create
+
+Create a new Specimen.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|specimen|object||query||
+
+h2. delete
+
+Delete an existing Specimen.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Specimen in question.|path||
+
+h2. get
+
+Gets a Specimen's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Specimen in question.|path||
+
+h2. list
+
+List specimens.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|limit|integer (default 100)|Maximum number of specimens to return.|query||
+|order|string|Order in which to return matching specimens.|query||
+|filters|array|Conditions for filtering specimens.|query||
+
+h2. update
+
+Update attributes of an existing Specimen.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Specimen in question.|path||
+|specimen|object||query||
diff --git a/doc/api/methods/traits.html.textile.liquid b/doc/api/methods/traits.html.textile.liquid
new file mode 100644 (file)
index 0000000..9b19a08
--- /dev/null
@@ -0,0 +1,67 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "traits"
+
+...
+
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/traits@
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+
+h2. create
+
+Create a new Trait.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|trait|object||query||
+
+h2. delete
+
+Delete an existing Trait.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Trait in question.|path||
+
+h2. get
+
+Gets a Trait's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Trait in question.|path||
+
+h2. list
+
+List traits.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|limit|integer (default 100)|Maximum number of traits to return.|query||
+|order|string|Order in which to return matching traits.|query||
+|filters|array|Conditions for filtering traits.|query||
+
+h2. update
+
+Update attributes of an existing Trait.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the Trait in question.|path||
+|trait|object||query||
diff --git a/doc/api/methods/users.html.textile.liquid b/doc/api/methods/users.html.textile.liquid
new file mode 100644 (file)
index 0000000..33f884b
--- /dev/null
@@ -0,0 +1,101 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "users"
+
+...
+
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/users@
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+
+h2. create
+
+Create a new User.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|user|object||query||
+
+h2. current
+
+current users
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+
+h2. delete
+
+Delete an existing User.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the User in question.|path||
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string||path||
+
+h2. get
+
+Gets a User's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the User in question.|path||
+
+h2. list
+
+List users.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|limit|integer (default 100)|Maximum number of users to return.|query||
+|order|string|Order in which to return matching users.|query||
+|filters|array|Conditions for filtering users.|query||
+
+h2. show
+
+show users
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string||path||
+
+h2. system
+
+system users
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+
+h2. update
+
+Update attributes of an existing User.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the User in question.|path||
+|user|object||query||
diff --git a/doc/api/methods/virtual_machines.html.textile.liquid b/doc/api/methods/virtual_machines.html.textile.liquid
new file mode 100644 (file)
index 0000000..390abdc
--- /dev/null
@@ -0,0 +1,96 @@
+---
+layout: default
+navsection: api
+navmenu: API Methods
+title: "virtual_machines"
+
+...
+
+See "REST methods for working with Arvados resources":{{site.baseurl}}/api/methods.html
+
+API endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/virtual_machines@
+
+Required arguments are displayed in %{background:#ccffcc}green%.
+
+
+h2. create
+
+Create a new VirtualMachine.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|virtual_machine|object||query||
+
+h2. delete
+
+Delete an existing VirtualMachine.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the VirtualMachine in question.|path||
+
+h2. get
+
+Gets a VirtualMachine's metadata by UUID.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the VirtualMachine in question.|path||
+
+h2(#logins). logins
+
+Get a list of SSH keys and account names that should be able to log in to a given virtual machine.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string||path||
+
+The response is a "resource list":{{site.baseurl}}/api/resources.html#resourceList with @kind@ set to @"arvados#HashList"@. Each item is a hash with the following keys:
+
+table(table table-bordered table-condensed).
+|_. Key|_. Value type|_. Description|_. Example|
+|username|string|Name of the Unix login account to which the user should be able to log in|@"jsmith"@|
+|hostname|string|Hostname of the virtual machine|@"shell.xyzzy.arvadosapi.com"@|
+|public_key|string|SSH public key|@"ssh-rsa AAAAB3NzaC1yc2E..."@|
+|user_uuid|string|UUID of the user who should be able to log in|@"xyzzy-tpzed-mv4d7dy7n91te11"@|
+|virtual_machine_uuid|string|UUID of the "VirtualMachine resource":{{site.baseurl}}/api/schema/VirtualMachine.html|@"xyzzy-2x53u-kvszmclnbjuv8xc"@|
+|authorized_key_uuid|string|UUID of the "AuthorizedKey resource":{{site.baseurl}}/api/schema/AuthorizedKey.html|@"xyzzy-fngyi-v9p0cyfmjxbio64"@|
+
+h2. get_all_logins
+
+Get a list, for every virtual machine in the system, of SSH keys and account names that should be able to log in.
+
+Arguments: none.
+
+The response has the same format as the response to the "logins method":#logins above.
+
+h2. list
+
+List virtual_machines.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+|limit|integer (default 100)|Maximum number of virtual_machines to return.|query||
+|order|string|Order in which to return matching virtual_machines.|query||
+|filters|array|Conditions for filtering virtual_machines.|query||
+
+h2. update
+
+Update attributes of an existing VirtualMachine.
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+{background:#ccffcc}.|uuid|string|The UUID of the VirtualMachine in question.|path||
+|virtual_machine|object||query||
diff --git a/doc/api/permission-model.html.textile.liquid b/doc/api/permission-model.html.textile.liquid
new file mode 100644 (file)
index 0000000..8b085ee
--- /dev/null
@@ -0,0 +1,125 @@
+---
+layout: default
+navsection: api
+navmenu: Concepts
+title: "Permission model"
+
+...
+
+
+
+Each API transaction (read, write, create, etc.) is done on behalf of a person.
+
+* An end user, via a web app
+* The owner of an installed app
+
+A user (person) is permitted to act on an object if there is a path (series of permission Links) from the acting user to the object in which:
+
+* Every intervening object is a Group, and
+* Every intervening permission Link allows the current action
+
+Special case: A permission path can also include intervening User objects if the links _to_ the Users are "can_manage" links.
+
+Each object has exactly one _owner_, which can be either a User or a Group.
+
+* If the owner of X is A, then A is permitted to do any action on X.
+
+h3. Tokens
+
+An authorization token is issued at a user's request, and supplied to an API client using some suitable mechanism (_e.g._, cookie or application config file for a web app; environment variable or .rc-file for a CLI app).
+
+A user can have multiple valid tokens at a given time.  At the user's option, a token can be restricted to a combination of
+
+* API client program
+* time interval
+* transaction type
+
+h3. System pseudo-user
+
+A privileged user account exists for the use of built-in Arvados system components.  This user manages system-wide shared objects which can't really be "owned" by any particular user, like
+
+* Jobs and job steps (because a given job can be "wanted" by multiple users)
+* Provenance metadata (because no user should be able to modify this directly)
+* Storage metadata like
+** redundancy verified as N&times; at time Y
+** contents of collections A and B are identical
+
+The system pseudo-user's uuid is @{siteprefix}-tpzed-000000000000000@.
+
+h2. Example scenarios
+
+h3. 1. Private objects
+
+Alfred stores 3 data Collections in Keep and adds them to a new Group.
+
+The Collections and the Group can only be seen by Alfred, administrators, and the system user.
+
+The data in the Collections can only be retrieved by Alfred, administrators, and the system user.
+
+h3. 2. Public objects
+
+George creates a "PGP public data" Group, and grants "read" permission to all users.
+
+* ...by adding a Link: "All users" Group _can_read_&rarr; "PGP public data" Group
+
+George stores 4 data Collections in Keep and adds them to the "PGP public data" Group
+
+* ...by adding a Link: Group _can_read_&rarr; Collection
+
+Anyone who can connect to Arvados can log in with a Google/OpenID account and read the data.
+
+h3. 3. Group-managed objects
+
+Three lab members are working together on a project. All Specimens, Links, Jobs, etc. can be modified by any of the three lab members. _Other_ lab members, who are not working on this project, can view but not modify these objects.
+
+h3. 4. Group-level administrator
+
+The Ashton Lab administrator, Alison, manages user accounts within her lab. She can enable and disable accounts, and exercise any permission that her lab members have.
+
+George has read-only access to the same set of accounts. This lets him see things like user activity and resource usage reports, without worrying about accidentally messing up anyone's data.
+
+table(table table-bordered table-condensed).
+|Tail                   |Permission     |Head                      |Effect|
+|Group: Ashton Lab Admin|can_manage     |User: Lab Member 1        |Lab member 1 is in this administrative group|
+|Group: Ashton Lab Admin|can_manage     |User: Lab Member 2        |Lab member 2 is in this administrative group|
+|Group: Ashton Lab Admin|can_manage     |User: Lab Member 3        |Lab member 3 is in this administrative group|
+|Group: Ashton Lab Admin|can_manage     |User: Alison              |Alison is in this administrative group|
+|Group: Ashton Lab Admin|can_manage     |User: George              |George is in this administrative group|
+|Alison                 |can_manage     |Group: Ashton Lab Admin   |Alison can do everything the above lab members can do|
+|George                 |can_read       |Group: Ashton Lab Admin   |George can read everything the above lab members can read|
+
+h3. 5. Segregated roles
+
+Granwyth, at the Hulatberi Lab, sets up a Factory Robot which uses a hosted Arvados site to do work for the Hulatberi Lab.
+
+Frank uploads a data Collection using Factory Robot's upload interface.  Factory Robot sets data owner to Hulatberi Lab.
+
+Factory Robot processes the data using a pipeline.
+
+Factory Robot grants permission for anyone in the Ingeborg Lab (a Hulatberi Lab customer) to read the output of the pipeline, as well as the pipeline invocation details.  (Say, Ingeborg and Jill.)
+
+During and after processing, all members of the Hulatberi Lab (_e.g._, Mike) can inspect pipeline progress, read source/intermediate/output data, and modify objects.
+
+Possible encoding:
+
+table(table table-bordered table-condensed).
+|Tail           |Permission     |Head                      |Effect|
+|Frank          |(none)         |                          |Factory Robot uses only its own credentials during upload|
+|Granwyth       |can_manage     |User:    Factory Robot    |can revoke tokens, view activity... (needed?)|
+|Granwyth       |can_manage     |Group: Hulatberi Lab    |can grant group-write permission to Factory Robot|
+|Factory Robot  |can_write      |Group: Hulatberi Lab    |can add data, pipelines, jobs, etc. to the Lab group|
+|Mike           |can_write      |Group: Hulatberi Lab    |can edit/annotate/delete objects that belong to the Lab|
+
+h3. Actions governed by permissions
+
+table(table table-bordered table-condensed).
+|_Action_|_Permissions needed_|
+|Retrieve data from Keep|can_read (system-wide?)|
+|Store data in Keep|can_write (system-wide?)|
+|Add a Collection to Arvados|can_write (system-wide?)|
+|Run a job|can_run (system-wide?)|
+|See progress/result of a job|can_read (on job)|
+|Give group permissions to a user/group|can_manage (on group)|
+|Revoke group permissions from a user/group|can_manage (on group)|
+|Change owner of an object|can_manage (on object)|
+|Add an object to a group|can_write (on group)|
diff --git a/doc/api/resources.html.textile.liquid b/doc/api/resources.html.textile.liquid
new file mode 100644 (file)
index 0000000..a93b4ac
--- /dev/null
@@ -0,0 +1,49 @@
+---
+layout: default
+navsection: api
+navmenu: Concepts
+title: Resources
+
+...
+
+
+
+This page describes the common attributes of Arvados resources.
+
+The list of resource types is on the "front page of the API Reference":./.
+
+h2. Object IDs
+
+Object IDs are alphanumeric strings, unique across all installations (each installation has a unique prefix to prevent collisions).
+
+h2(#resource). Attributes of resources
+
+table(table table-bordered table-condensed).
+|*Attribute*|*Type*|*Description*|*Example*|
+|uuid|string|universally unique object identifier|@mk2qn-4zz18-w3anr2hk2wgfpuo@|
+|href|string|a URL that can be used to address this resource||
+|kind|string|@arvados#{resource_type}@|@arvados#collection@|
+|etag|string|The ETag[1] of the resource|@1xlmizzjq7wro3dlb2dirf505@|
+|self_link|string|||
+|owner_uuid|string|UUID of owner (typically User or Project)|@mk2qn-tpzed-a4lcehql0dv2u25@|
+|created_at|datetime|When resource was created|@2013-01-21T22:17:39Z@|
+|modified_by_client_uuid|string|API client software which most recently modified the resource|@mk2qn-ozdt8-vq8l5qkzj7pr7h7@|
+|modified_by_user_uuid|string|Authenticated user, on whose behalf the client was acting when modifying the resource|@mk2qn-tpzed-a4lcehql0dv2u25@|
+|modified_at|datetime|When resource was last modified|@2013-01-25T22:29:32Z@|
+
+h2(#resourceList). Attributes of resource lists
+
+table(table table-bordered table-condensed).
+|*Attribute*|*Type*|*Description*|*Example*|
+|kind|string|@arvados#{resource_type}List@|@arvados#projectList@|
+|etag|string|The ETag[1] of the resource list|@cd3o1wi9sf934saajykawrz2e@|
+|self_link|string|||
+|next_page_token|string|||
+|next_link|string|||
+|items[]|list|List of resources||
+
+
+h2. ETags
+
+fn1. Each response includes an ETag, a string which changes when the resource changes.  Clients can use this to check whether a resource has changed since they last retrieved it.  If a previous ETag is provided along with a request, and the resource has not changed since, the server may return a "not modified" response.
+
diff --git a/doc/api/schema/ApiClient.html.textile.liquid b/doc/api/schema/ApiClient.html.textile.liquid
new file mode 100644 (file)
index 0000000..0cda1af
--- /dev/null
@@ -0,0 +1,24 @@
+---
+layout: default
+navsection: api
+navmenu: Schema
+title: ApiClient
+
+...
+
+
+An **ApiClient** represents a client program that can issue requests to the API server.
+
+h2. Methods
+
+See "api_clients":{{site.baseurl}}/api/methods/api_clients.html
+
+h2. Resource
+
+Each ApiClient has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|name|string|||
+|url_prefix|string|||
+|is_trusted|boolean|Trusted by users to handle their API tokens (ApiClientAuthorizations).||
diff --git a/doc/api/schema/ApiClientAuthorization.html.textile.liquid b/doc/api/schema/ApiClientAuthorization.html.textile.liquid
new file mode 100644 (file)
index 0000000..dc9614a
--- /dev/null
@@ -0,0 +1,29 @@
+---
+layout: default
+navsection: api
+navmenu: Schema
+title: ApiClientAuthorization
+
+...
+
+An **ApiClientAuthorization** represents an API client's authorization to make API requests on a user's behalf.
+
+h2. Methods
+
+See "api_client_authorizations":{{site.baseurl}}/api/methods/api_client_authorizations.html
+
+h2. Resource
+
+An ApiClientAuthorization is not a generic Arvados resource.  The full list of properties that belong to an ApiClientAuthorization is:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|api_token|string|||
+|api_client_id|integer|||
+|user_id|integer|||
+|created_by_ip_address|string|||
+|last_used_by_ip_address|string|||
+|last_used_at|datetime|||
+|expires_at|datetime|||
+|default_owner_uuid|string|||
+|scopes|array|||
diff --git a/doc/api/schema/AuthorizedKey.html.textile.liquid b/doc/api/schema/AuthorizedKey.html.textile.liquid
new file mode 100644 (file)
index 0000000..d9f4354
--- /dev/null
@@ -0,0 +1,24 @@
+---
+layout: default
+navsection: api
+navmenu: Schema
+title: AuthorizedKey
+...
+
+An **AuthorizedKey** represents the public part of an SSH authentication key which can be used to authorize transactions on behalf of the user.
+
+h2. Methods
+
+See "authorized_keys":{{site.baseurl}}/api/methods/authorized_keys.html
+
+h2. Resource
+
+Each AuthorizedKey has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|name|string|||
+|key_type|string|||
+|authorized_user_uuid|string|||
+|public_key|text|||
+|expires_at|datetime|||
diff --git a/doc/api/schema/Collection.html.textile.liquid b/doc/api/schema/Collection.html.textile.liquid
new file mode 100644 (file)
index 0000000..69a8dc3
--- /dev/null
@@ -0,0 +1,40 @@
+---
+layout: default
+navsection: api
+navmenu: Schema
+title: Collection
+
+...
+
+Note: This resource concerns indexing, usage accounting, and integrity checks for data stored in Arvados. Reading and writing the data _per se_ is achieved by the "Keep":{{site.baseurl}}/user/tutorials/tutorial-keep.html storage system.
+
+h2. Methods
+
+See "collections":{{site.baseurl}}/api/methods/collections.html
+
+h3. Conditions of creating a Collection
+
+The @uuid@ and @manifest_text@ attributes must be provided when creating a Collection. The cryptographic digest of the supplied @manifest_text@ must match the supplied @uuid@.
+
+h3. Side effects of creating a Collection
+
+Referenced data can be protected from garbage collection. See the section about "resources" links on the "Links":Link.html page.
+
+Data can be shared with other users via the Arvados permission model.
+
+Clients can request checks of data integrity and storage redundancy.
+
+h2. Resource
+
+Each collection has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|locator|string|||
+|portable_data_hash|string|||
+|name|string|||
+|redundancy|number|||
+|redundancy_confirmed_by_client_uuid|string|API client||
+|redundancy_confirmed_at|datetime|||
+|redundancy_confirmed_as|number|||
+|manifest_text|text|||
diff --git a/doc/api/schema/Group.html.textile.liquid b/doc/api/schema/Group.html.textile.liquid
new file mode 100644 (file)
index 0000000..2bf67eb
--- /dev/null
@@ -0,0 +1,25 @@
+---
+layout: default
+navsection: api
+navmenu: Schema
+title: Group
+
+...
+
+A **Group** represents a set of objects. Groups allow you to organize content, define user roles, and apply permissions to sets of objects.
+
+h2. Methods
+
+See "groups":{{site.baseurl}}/api/methods/groups.html
+
+h2. Resource
+
+Each Group has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|name|string|||
+|group_class|string|Type of group. This does not affect behavior, but determines how the group is presented in the user interface. For example, @project@ indicates that the group should be displayed by Workbench and arv-mount as a project for organizing and naming objects.|@"project"@
+null|
+|description|text|||
+|writable_by|array|List of UUID strings identifying Users and other Groups that have write permission for this Group.  Only users who are allowed to administer the Group will receive a full list.  Other users will receive a partial list that includes the Group's owner_uuid and (if applicable) their own user UUID.||
diff --git a/doc/api/schema/Human.html.textile.liquid b/doc/api/schema/Human.html.textile.liquid
new file mode 100644 (file)
index 0000000..361e619
--- /dev/null
@@ -0,0 +1,19 @@
+---
+layout: default
+navsection: api
+navmenu: Schema
+title: Human
+
+...
+
+h2. Methods
+
+See "humans":{{site.baseurl}}/api/methods/humans.html
+
+h2. Resource
+
+Each Human has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|properties|hash|||
diff --git a/doc/api/schema/Job.html.textile.liquid b/doc/api/schema/Job.html.textile.liquid
new file mode 100644 (file)
index 0000000..80f5de6
--- /dev/null
@@ -0,0 +1,64 @@
+---
+layout: default
+navsection: api
+navmenu: Schema
+title: Job
+
+...
+
+Applications submit compute jobs when:
+* Provenance is important, i.e., it is worth recording how the output was produced; or
+* Computation time is significant; or
+* The job management features are convenient (failure detection/recovery, regression testing, etc).
+
+h2. Methods
+
+See "jobs":{{site.baseurl}}/api/methods/jobs.html
+
+h2. Resource
+
+Each job has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Notes|
+|script|string|The filename of the job script.|This program will be invoked by Crunch for each job task. It is given as a path to an executable file, relative to the @/crunch_scripts@ directory in the Git tree specified by the _repository_ and _script_version_ attributes.|
+|script_parameters|hash|The input parameters for the job.|Conventionally, one of the parameters is called @"input"@. Typically, some parameter values are collection UUIDs. Ultimately, though, the significance of parameters is left entirely up to the script itself.|
+|repository|string|Git repository|Given as the name of a locally hosted Git repository.|
+|script_version|string|Git commit|During a **create** transaction, this is the Git branch, tag, or hash supplied by the client. Before the job starts, Arvados updates it to the full 40-character SHA-1 hash of the commit used by the job.
+See "Specifying Git versions":#script_version below for more detail about acceptable ways to specify a commit.|
+|cancelled_by_client_uuid|string|API client ID|Is null if job has not been cancelled|
+|cancelled_by_user_uuid|string|Authenticated user ID|Is null if job has not been cancelled|
+|cancelled_at|datetime|When job was cancelled|Is null if job has not been cancelled|
+|started_at|datetime|When job started running|Is null if job has not [yet] started|
+|finished_at|datetime|When job finished running|Is null if job has not [yet] finished|
+|running|boolean|Whether the job is running||
+|success|boolean|Whether the job indicated successful completion|Is null if job has not finished|
+|is_locked_by_uuid|string|UUID of the user who has locked this job|Is null if job is not locked. The system user locks the job when starting the job, in order to prevent job attributes from being altered.|
+|node_uuids|array|List of UUID strings for node objects that have been assigned to this job||
+|log|string|Collection UUID|Is null if the job has not finished. After the job runs, the given collection contains a text file with log messages provided by the @arv-crunch-job@ task scheduler as well as the standard error streams provided by the task processes.|
+|tasks_summary|hash|Summary of task completion states.|Example: @{"done":0,"running":4,"todo":2,"failed":0}@|
+|output|string|Collection UUID|Is null if the job has not finished.|
+|nondeterministic|boolean|The job is expected to produce different results if run more than once.|If true, this job will not be considered as a candidate for automatic re-use when submitting subsequent identical jobs.|
+|submit_id|string|Unique ID provided by client when job was submitted|Optional. This can be used by a client to make the "jobs.create":{{site.baseurl}}/api/methods/jobs.html#create method idempotent.|
+|priority|string|||
+|arvados_sdk_version|string|Git commit hash that specifies the SDK version to use from the Arvados repository|This is set by searching the Arvados repository for a match for the arvados_sdk_version runtime constraint.|
+|docker_image_locator|string|Portable data hash of the collection that contains the Docker image to use|This is set by searching readable collections for a match for the docker_image runtime constraint.|
+|runtime_constraints|hash|Constraints that must be satisfied by the job/task scheduler in order to run the job.|See below.|
+
+h3(#script_version). Specifying Git versions
+
+The script_version attribute and arvados_sdk_version runtime constraint are typically given as a branch, tag, or commit hash, but there are many more ways to specify a Git commit. The "specifying revisions" section of the "gitrevisions manual page":http://git-scm.com/docs/gitrevisions.html has a definitive list. Arvados accepts Git versions in any format listed there that names a single commit (not a tree, a blob, or a range of commits). However, some kinds of names can be expected to resolve differently in Arvados than they do in your local repository. For example, <code>HEAD@{1}</code> refers to the local reflog, and @origin/master@ typically refers to a remote branch: neither is likely to work as desired if given as a Git version.
+
+h3. Runtime constraints
+
+table(table table-bordered table-condensed).
+|_. Key|_. Type|_. Description|_. Implemented|
+|arvados_sdk_version|string|The Git version of the SDKs to use from the Arvados git repository.  See "Specifying Git versions":#script_version for more detail about acceptable ways to specify a commit.  If you use this, you must also specify a @docker_image@ constraint (see below).  In order to install the Python SDK successfully, Crunch must be able to find and run virtualenv inside the container.|&#10003;|
+|docker_image|string|The Docker image that this Job needs to run.  If specified, Crunch will create a Docker container from this image, and run the Job's script inside that.  The Keep mount and work directories will be available as volumes inside this container.  The image must be uploaded to Arvados using @arv keep docker@.  You may specify the image in any format that Docker accepts, such as @arvados/jobs@, @debian:latest@, or the Docker image id.  Alternatively, you may specify the UUID or portable data hash of the image Collection, returned by @arv keep docker@.|&#10003;|
+|min_nodes|integer||&#10003;|
+|max_nodes|integer|||
+|min_cores_per_node|integer|Require that each node assigned to this Job have the specified number of CPU cores|&#10003;|
+|min_ram_mb_per_node|integer|Require that each node assigned to this Job have the specified amount of real memory (in MiB)|&#10003;|
+|min_scratch_mb_per_node|integer|Require that each node assigned to this Job have the specified amount of scratch storage available (in MiB)|&#10003;|
+|max_tasks_per_node|integer|Maximum simultaneous tasks on a single node|&#10003;|
+|min_ram_per_task|integer|Minimum real memory (KiB) per task||
diff --git a/doc/api/schema/JobTask.html.textile.liquid b/doc/api/schema/JobTask.html.textile.liquid
new file mode 100644 (file)
index 0000000..fbd4343
--- /dev/null
@@ -0,0 +1,47 @@
+---
+layout: default
+navsection: api
+navmenu: Schema
+title: JobTask
+
+...
+
+A Job Task is a well-defined, independently-computable portion of a "Job":Job.html.
+
+Job tasks are created two ways:
+* When a job starts, it is seeded with a job task with @sequence=0@ and an empty @parameters{}@ list.
+* Job task A can create additional job tasks B, C, D, which will belong to the same job. Tasks B, C, D will not be performed until job task A is complete. If job task A fails, tasks B, C, D will be deleted.
+
+Job tasks have particular update semantics:
+* Progress reporting: A job task should only be <code>PATCH</code>ed by a worker process which has been dispatched to work on that task and is reporting progress or completion status &mdash; and by the job manager itself.
+* Completion: When a job task process terminates, the task is considered complete only if its most recent @PATCH@ transaction had @progress=1.0@ and @success=true@.
+* Temporary failure: If a job task process terminates without updating @success@ to @true@ or @false@, it is assumed that the task failed but is worth re-attempting (at a different time, on a different node, etc).
+
+
+h2. Methods
+
+See "job_tasks":{{site.baseurl}}/api/methods/job_tasks.html
+
+h2. Resource
+
+Each JobTask has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|sequence|integer|Execution sequence.
+A step cannot be run until all steps with lower sequence numbers have completed.
+Job steps with the same sequence number can be run in any order.||
+|parameters|hash|||
+|output|text|||
+|progress|float|||
+|success|boolean|Is null if the task has neither completed successfully nor failed permanently.||
+
+The following attributes should not be updated by anyone other than the job manager:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Notes|
+|qsequence|integer|Order of arrival|0-based|
+|job_uuid|string|||
+|created_by_job_task_uuid|string|||
+
+
diff --git a/doc/api/schema/KeepDisk.html.textile.liquid b/doc/api/schema/KeepDisk.html.textile.liquid
new file mode 100644 (file)
index 0000000..c128c3e
--- /dev/null
@@ -0,0 +1,31 @@
+---
+layout: default
+navsection: api
+navmenu: Schema
+title: KeepDisk
+
+...
+
+A **KeepDisk** is a filesystem volume used by a Keep storage server to store data blocks.
+
+h2. Methods
+
+See "keep_disks":{{site.baseurl}}/api/methods/keep_disks.html
+
+h2. Resource
+
+Each KeepDisk has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|ping_secret|string|||
+|node_uuid|string|||
+|filesystem_uuid|string|||
+|bytes_total|integer|||
+|bytes_free|integer|||
+|is_readable|boolean|||
+|is_writable|boolean|||
+|last_read_at|datetime|||
+|last_write_at|datetime|||
+|last_ping_at|datetime|||
+|keep_service_uuid|string|||
diff --git a/doc/api/schema/KeepService.html.textile.liquid b/doc/api/schema/KeepService.html.textile.liquid
new file mode 100644 (file)
index 0000000..ac1d974
--- /dev/null
@@ -0,0 +1,24 @@
+---
+layout: default
+navsection: api
+navmenu: Schema
+title: KeepService
+
+...
+
+A **KeepService** is a service endpoint that supports the Keep protocol.
+
+h2. Methods
+
+See "keep_services":{{site.baseurl}}/api/methods/keep_services.html
+
+h2. Resource
+
+Each KeepService has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|service_host|string|||
+|service_port|integer|||
+|service_ssl_flag|boolean|||
+|service_type|string|||
\ No newline at end of file
diff --git a/doc/api/schema/Link.html.textile.liquid b/doc/api/schema/Link.html.textile.liquid
new file mode 100644 (file)
index 0000000..4abfdbc
--- /dev/null
@@ -0,0 +1,83 @@
+---
+layout: default
+navsection: api
+navmenu: Schema
+title: Link
+
+...
+
+**Links** describe relationships between Arvados objects, and from objects to primitives.
+
+Links are directional: each metadata object has a tail (the "subject" being described), class, name, properties, and head (the "object" that describes the "subject").  A Link may describe a relationship between two objects in an Arvados database: e.g. a _permission_ link between a User and a Group defines the permissions that User has to read or modify the Group.  Other Links simply represent metadata for a single object, e.g. the _identifier_ Link, in which the _name_ property represents a human-readable identifier for the object at the link's head.
+
+For links that don't make sense to share between API clients, a _link_class_ that begins with @client@ (like @client.my_app_id@ or @client.my_app_id.anythinghere@) should be used.
+
+h2. Methods
+
+See "links":{{site.baseurl}}/api/methods/links.html
+
+h2. Resource
+
+Each link has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|
+|tail_uuid|string|Object UUID at the tail (start, source, origin) of this link|
+|link_class|string|Class (see below)|
+|name|string|Link type (see below)|
+|head_uuid|string|Object UUID at the head (end, destination, target) of this link|
+|properties|hash|Additional information, expressed as a key&rarr;value hash. Key: string. Value: string, number, array, or hash.|
+
+h2. Link classes
+
+Some classes are pre-defined by convention and have standard meanings attached to names.
+
+h3. provenance
+
+table(table table-bordered table-condensed).
+|_. tail_type&rarr;head_type|_. name&rarr;head_uuid {properties}|_. Notes|
+|&rarr;Collection  |provided &rarr; _collection uuid_
+{url&rarr;http://example.com/foo.tgz, retrieved_at&rarr;1352616640.000}||
+|Job&rarr;Collection     |provided &rarr; _collection uuid_||
+|Specimen&rarr;Collection|provided &rarr; _collection uuid_||
+|Human&rarr;Specimen     |provided &rarr; _specimen uuid_||
+|Human&rarr;Collection   |provided &rarr; _collection uuid_||
+
+h3. permission
+
+table(table table-bordered table-condensed).
+|_. tail_type&rarr;head_type|_. name&rarr;head_uuid {properties}|_. Notes|
+|User&rarr;Group  |{white-space:nowrap}. can_manage &rarr; _group uuid_|The User can read, write, and control permissions on the Group itself, every object owned by the Group, and every object on which the Group has _can_manage_ permission.|
+|User&rarr;Group  |can_read &rarr; _group uuid_  |The User can retrieve the Group itself and every object that is readable by the Group.|
+|User&rarr;Job|can_write &rarr; _job uuid_  |The User can read and update the Job. (This works for all objects, not just jobs.)|
+|User&rarr;Job|can_manage &rarr; _job uuid_  |The User can read, update, and change permissions for the Job. (This works for all objects, not just jobs.)|
+|Group&rarr;Job|can_manage &rarr; _job uuid_  |Anyone with _can_manage_ permission on the Group can also read, update, and change permissions for the Job. Anyone with _can_read_ permission on the Group can read the Job. (This works for all objects, not just jobs.)|
+
+h3. resources
+
+table(table table-bordered table-condensed).
+|_. tail_type&rarr;head_type|_. name&rarr;head_uuid {properties}|_. Notes|
+|User&rarr;Collection|wants &rarr; _collection uuid_    |Determines whether data can be deleted|
+|User&rarr;Job       |wants &rarr; _job uuid_    |Determines whether a job can be cancelled|
+
+h3. tag
+
+A **tag** link describes an object using an unparsed plain text string. Tags can be used to annotate objects that are not editable, like collections and objects shared as read-only.
+
+table(table table-bordered table-condensed).
+|_. tail_type&rarr;head_type|_. name&rarr;head_uuid {properties}|
+|&rarr;Collection           | _tag name_ &rarr; _collection uuid_|
+|&rarr;Job                  | _tag name_ &rarr; _job uuid_|
+
+h3. human_trait
+
+table(table table-bordered table-condensed).
+|_. tail_type&rarr;head_type|_. name&rarr;head_uuid {properties}|_. Notes|
+|Human&rarr;Trait  |measured &rarr; _trait uuid_ {value&rarr;1.83, unit&rarr;metre, measured_at&rarr;1352616640.000}||
+
+h3. identifier
+
+table(table table-bordered table-condensed).
+|_. tail_type&rarr;head_type|_. name&rarr;head_uuid {properties}|_. Notes|
+|&rarr;Human        |hu123456 &rarr; _human uuid_||
+
diff --git a/doc/api/schema/Log.html.textile.liquid b/doc/api/schema/Log.html.textile.liquid
new file mode 100644 (file)
index 0000000..425246a
--- /dev/null
@@ -0,0 +1,33 @@
+---
+layout: default
+navsection: api
+navmenu: Schema
+title: Log
+
+...
+
+**Log** objects record events that occur in an Arvados cluster. Both user-written pipelines and the Arvados system itself may generate Log events.
+
+h2. Methods
+
+See "logs":{{site.baseurl}}/api/methods/logs.html
+
+h2. Creation
+
+Any user may create Log entries for any event they find useful. User-generated Logs have no intrinsic meaning to other users or to the Arvados system itself; it is up to each user to choose appropriate log event types and summaries for their project.
+
+h3. System Logs
+
+Arvados uses Logs to record creation, deletion, and updates of other Arvados resources.
+
+h2. Resource
+
+Each Log has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|object_uuid|string|||
+|event_at|datetime|||
+|event_type|string|A user-defined category or type for this event.|@LOGIN@|
+|summary|text|||
+|properties|hash|||
diff --git a/doc/api/schema/Node.html.textile.liquid b/doc/api/schema/Node.html.textile.liquid
new file mode 100644 (file)
index 0000000..ff9e882
--- /dev/null
@@ -0,0 +1,28 @@
+---
+layout: default
+navsection: api
+navmenu: Schema
+title: Node
+
+...
+
+A **Node** represents a host that can be used to run Crunch job tasks.
+
+h2. Methods
+
+See "nodes":{{site.baseurl}}/api/methods/nodes.html
+
+h2. Resource
+
+Each Node has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|slot_number|integer|||
+|hostname|string|||
+|domain|string|||
+|ip_address|string|||
+|job_uuid|string|The UUID of the job that this node is assigned to work on.  If you do not have permission to read the job, this will be null.||
+|first_ping_at|datetime|||
+|last_ping_at|datetime|||
+|info|hash|||
diff --git a/doc/api/schema/PipelineInstance.html.textile.liquid b/doc/api/schema/PipelineInstance.html.textile.liquid
new file mode 100644 (file)
index 0000000..75c7885
--- /dev/null
@@ -0,0 +1,26 @@
+---
+layout: default
+navsection: api
+navmenu: Schema
+title: PipelineInstance
+
+...
+
+A **PipelineInstance** is the act or record of applying a pipeline template to a specific set of inputs; generally, a pipeline instance refers to a set of jobs that have been run to satisfy the pipeline components.
+
+h2. Methods
+
+See "pipeline_instances":{{site.baseurl}}/api/methods/pipeline_instances.html
+
+h2. Resource
+
+Each PipelineInstance has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|pipeline_template_uuid|string|||
+|name|string|||
+|components|hash|||
+|success|boolean|||
+|active|boolean|||
+|properties|Hash|||
diff --git a/doc/api/schema/PipelineTemplate.html.textile.liquid b/doc/api/schema/PipelineTemplate.html.textile.liquid
new file mode 100644 (file)
index 0000000..2b215c2
--- /dev/null
@@ -0,0 +1,161 @@
+---
+layout: default
+navsection: api
+navmenu: Schema
+title: PipelineTemplate
+...
+
+The pipeline template consists of "name" and "components".
+
+table(table table-bordered table-condensed).
+|_. Attribute    |_. Type |_. Accepted values                           |_. Required|_. Description|
+|name            |string  |any                                          |yes        |The human-readable name of the pipeline template.|
+|components      |object  |JSON object containing job submission objects|yes        |The component jobs that make up the pipeline, with the component name as the key. |
+
+h3. Components
+
+The components field of the pipeline template is a JSON object which describes the individual steps that make up the pipeline.  Each component is an Arvados job submission.  "Parameters for job submissions are described on the job method page.":{{site.baseurl}}/api/methods/jobs.html#create  In addition, a component can have the following parameters:
+
+table(table table-bordered table-condensed).
+|_. Attribute    |_. Type          |_. Accepted values |_. Required|_. Description|
+|output_name     |string or boolean|string or false    |no         |If a string is provided, use this name for the output collection of this component.  If the value is false, do not create a permanent output collection (a temporary intermediate collection will still be created).  If not provided, a default name will be assigned to the output.|
+
+h3. Script parameters
+
+When used in a pipeline, each parameter in the 'script_parameters' attribute of a component job can specify that the input parameter must be supplied by the user, or the input parameter should be linked to the output of another component.  To do this, the value of the parameter should be JSON object containing one of the following attributes:
+
+table(table table-bordered table-condensed).
+|_. Attribute    |_. Type |_. Accepted values                               |_. Description|
+|default         |any     |any                                              |The default value for this parameter.|
+|required        |boolean |true or false                                    |Specifies whether the parameter is required to have a value or not.|
+|dataclass       |string  |One of 'Collection', 'File' [1], 'number', or 'text' |Data type of this parameter.|
+|search_for      |string  |any string                                       |Substring to use as a default search string when choosing inputs.|
+|output_of       |string  |the name of another component in the pipeline    |Specifies that the value of this parameter should be set to the 'output' attribute of the job that corresponds to the specified component.|
+|title           |string  |any string                                       |User friendly title to display when choosing parameter values|
+|description     |string  |any string                                       |Extended text description for describing expected/valid values for the script parameter|
+|link_name       |string  |any string                                       |User friendly name to display for the parameter value instead of the actual parameter value|
+
+The 'output_of' parameter is especially important, as this is how components are actually linked together to form a pipeline.  Component jobs that depend on the output of other components do not run until the parent job completes and has produced output.  If the parent job fails, the entire pipeline fails.
+
+fn1. The 'File' type refers to a specific file within a Keep collection in the form 'collection_hash/filename', for example '887cd41e9c613463eab2f0d885c6dd96+83/bob.txt'.
+
+The 'search_for' parameter is meaningful only when an input dataclass of type Collection or File is used. If a value is provided, this will be preloaded into the input data chooser dialog in Workbench. For example, if your input dataclass is a File and you are interested in a certain filename extension, you can preconfigure it in this attribute.
+
+h3. Examples
+
+This is a pipeline named "Filter MD5 hash values" with two components, "do_hash" and "filter".  The "input" script parameter of the "do_hash" component is required to be filled in by the user, and the expected data type is "Collection".  This also specifies that the "input" script parameter of the "filter" component is the output of "do_hash", so "filter" will not run until "do_hash" completes successfully.  When the pipeline runs, past jobs that meet the criteria described above may be substituted for either or both components to avoid redundant computation.
+
+<notextile><pre>
+{
+  "name": "Filter MD5 hash values",
+  "components": {
+    "do_hash": {
+      "script": "hash.py",
+      "repository": "<b>you</b>",
+      "script_version": "master",
+      "script_parameters": {
+        "input": {
+          "required": true,
+          "dataclass": "Collection",
+          "search_for": ".fastq.gz",
+          "title":"Please select a fastq file"
+        }
+      },
+    },
+    "filter": {
+      "script": "0-filter.py",
+      "repository": "<b>you</b>",
+      "script_version": "master",
+      "script_parameters": {
+        "input": {
+          "output_of": "do_hash"
+        }
+      },
+    }
+  }
+}
+</pre></notextile>
+
+This pipeline consists of three components.  The components "thing1" and "thing2" both depend on "cat_in_the_hat".  Once the "cat_in_the_hat" job is complete, both "thing1" and "thing2" can run in parallel, because they do not depend on each other.
+
+<notextile><pre>
+{
+  "name": "Wreck the house",
+  "components": {
+    "cat_in_the_hat": {
+      "script": "cat.py",
+      "repository": "<b>you</b>",
+      "script_version": "master",
+      "script_parameters": { }
+    },
+    "thing1": {
+      "script": "thing1.py",
+      "repository": "<b>you</b>",
+      "script_version": "master",
+      "script_parameters": {
+        "input": {
+          "output_of": "cat_in_the_hat"
+        }
+      },
+    },
+    "thing2": {
+      "script": "thing2.py",
+      "repository": "<b>you</b>",
+      "script_version": "master",
+      "script_parameters": {
+        "input": {
+          "output_of": "cat_in_the_hat"
+        }
+      },
+    },
+  }
+}
+</pre></notextile>
+
+This pipeline consists of three components.  The component "cleanup" depends on "thing1" and "thing2".  Both "thing1" and "thing2" are started immediately and can run in parallel, because they do not depend on each other, but "cleanup" cannot begin until both "thing1" and "thing2" have completed.
+
+<notextile><pre>
+{
+  "name": "Clean the house",
+  "components": {
+    "thing1": {
+      "script": "thing1.py",
+      "repository": "<b>you</b>",
+      "script_version": "master",
+      "script_parameters": { }
+    },
+    "thing2": {
+      "script": "thing2.py",
+      "repository": "<b>you</b>",
+      "script_version": "master",
+      "script_parameters": { }
+    },
+    "cleanup": {
+      "script": "cleanup.py",
+      "repository": "<b>you</b>",
+      "script_version": "master",
+      "script_parameters": {
+        "mess1": {
+          "output_of": "thing1"
+        },
+        "mess2": {
+          "output_of": "thing2"
+        }
+      }
+    }
+  }
+}
+</pre></notextile>
+
+h2. Methods
+
+See "pipeline_templates":{{site.baseurl}}/api/methods/pipeline_templates.html
+
+h2. Resource
+
+Each PipelineTemplate has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|name|string|||
+|components|hash|||
diff --git a/doc/api/schema/Repository.html.textile.liquid b/doc/api/schema/Repository.html.textile.liquid
new file mode 100644 (file)
index 0000000..0308f7d
--- /dev/null
@@ -0,0 +1,23 @@
+---
+layout: default
+navsection: api
+navmenu: Schema
+title: Repository
+
+...
+
+A **Repository** represents a git repository hosted in an Arvados installation.
+
+h2. Methods
+
+See "repositories":{{site.baseurl}}/api/methods/repositories.html
+
+h2. Resource
+
+Each Repository has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|name|string|||
+|fetch_url|string|||
+|push_url|string|||
diff --git a/doc/api/schema/Specimen.html.textile.liquid b/doc/api/schema/Specimen.html.textile.liquid
new file mode 100644 (file)
index 0000000..1a7e483
--- /dev/null
@@ -0,0 +1,22 @@
+---
+layout: default
+navsection: api
+navmenu: Schema
+title: Specimen
+
+...
+
+A **Specimen** represents a tissue sample or similar material obtained from a human that has some biomedical significance or interest.
+
+h2. Methods
+
+See "specimens":{{site.baseurl}}/api/methods/specimens.html
+
+h2. Resource
+
+Each Specimen has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|material|string|||
+|properties|hash|||
diff --git a/doc/api/schema/Trait.html.textile.liquid b/doc/api/schema/Trait.html.textile.liquid
new file mode 100644 (file)
index 0000000..80c74ab
--- /dev/null
@@ -0,0 +1,22 @@
+---
+layout: default
+navsection: api
+navmenu: Schema
+title: Trait
+
+...
+
+A **Trait** represents a measured or observed characteristic of a human.
+
+h2. Methods
+
+See "traits":{{site.baseurl}}/api/methods/traits.html
+
+h2. Resource
+
+Each Trait has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|name|string|||
+|properties|hash|||
diff --git a/doc/api/schema/User.html.textile.liquid b/doc/api/schema/User.html.textile.liquid
new file mode 100644 (file)
index 0000000..9a1b056
--- /dev/null
@@ -0,0 +1,29 @@
+---
+layout: default
+navsection: api
+navmenu: Schema
+title: User
+
+...
+
+A **User** represents a person who interacts with Arvados via an ApiClient.
+
+h2. Methods
+
+See "users":{{site.baseurl}}/api/methods/users.html
+
+h2. Resource
+
+Each User has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|email|string|||
+|first_name|string|||
+|last_name|string|||
+|identity_url|string|||
+|is_admin|boolean|||
+|prefs|hash|||
+|default_owner_uuid|string|||
+|is_active|boolean|||
+|writable_by|array|List of UUID strings identifying Groups and other Users that can modify this User object.  This will include the user's owner_uuid and, for administrators and users requesting their own User object, the requesting user's UUID.||
diff --git a/doc/api/schema/VirtualMachine.html.textile.liquid b/doc/api/schema/VirtualMachine.html.textile.liquid
new file mode 100644 (file)
index 0000000..1c6a4b6
--- /dev/null
@@ -0,0 +1,21 @@
+---
+layout: default
+navsection: api
+navmenu: Schema
+title: VirtualMachine
+
+...
+
+A **VirtualMachine** represents a network host, running within an Arvados installation, on which Arvados users are given login accounts.
+
+h2. Methods
+
+See "virtual_machines":{{site.baseurl}}/api/methods/virtual_machines.html
+
+h2. Resource
+
+Each VirtualMachine has, in addition to the usual "attributes of Arvados resources":{{site.baseurl}}/api/resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+|hostname|string|||
diff --git a/doc/css/badges.css b/doc/css/badges.css
new file mode 100644 (file)
index 0000000..82c4ab0
--- /dev/null
@@ -0,0 +1,28 @@
+/* Colors
+ * Contextual variations of badges
+ * Bootstrap 3.0 removed contexts for badges, we re-introduce them, based on what is done for labels
+ */
+
+.badge.badge-error {
+  background-color: #b94a48;
+}
+
+.badge.badge-warning {
+  background-color: #f89406;
+}
+
+.badge.badge-success {
+  background-color: #468847;
+}
+
+.badge.badge-info {
+  background-color: #3a87ad;
+}
+
+.badge.badge-inverse {
+  background-color: #333333;
+}
+
+.badge.badge-alert {
+    background: red;
+}
diff --git a/doc/css/bootstrap-theme.css b/doc/css/bootstrap-theme.css
new file mode 100644 (file)
index 0000000..11fcc9b
--- /dev/null
@@ -0,0 +1,347 @@
+/*!
+ * Bootstrap v3.1.0 (http://getbootstrap.com)
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ */
+
+.btn-default,
+.btn-primary,
+.btn-success,
+.btn-info,
+.btn-warning,
+.btn-danger {
+  text-shadow: 0 -1px 0 rgba(0, 0, 0, .2);
+  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 1px rgba(0, 0, 0, .075);
+          box-shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 1px rgba(0, 0, 0, .075);
+}
+.btn-default:active,
+.btn-primary:active,
+.btn-success:active,
+.btn-info:active,
+.btn-warning:active,
+.btn-danger:active,
+.btn-default.active,
+.btn-primary.active,
+.btn-success.active,
+.btn-info.active,
+.btn-warning.active,
+.btn-danger.active {
+  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);
+          box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);
+}
+.btn:active,
+.btn.active {
+  background-image: none;
+}
+.btn-default {
+  text-shadow: 0 1px 0 #fff;
+  background-image: -webkit-linear-gradient(top, #fff 0%, #e0e0e0 100%);
+  background-image:         linear-gradient(to bottom, #fff 0%, #e0e0e0 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);
+  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
+  background-repeat: repeat-x;
+  border-color: #dbdbdb;
+  border-color: #ccc;
+}
+.btn-default:hover,
+.btn-default:focus {
+  background-color: #e0e0e0;
+  background-position: 0 -15px;
+}
+.btn-default:active,
+.btn-default.active {
+  background-color: #e0e0e0;
+  border-color: #dbdbdb;
+}
+.btn-primary {
+  background-image: -webkit-linear-gradient(top, #428bca 0%, #2d6ca2 100%);
+  background-image:         linear-gradient(to bottom, #428bca 0%, #2d6ca2 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff2d6ca2', GradientType=0);
+  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
+  background-repeat: repeat-x;
+  border-color: #2b669a;
+}
+.btn-primary:hover,
+.btn-primary:focus {
+  background-color: #2d6ca2;
+  background-position: 0 -15px;
+}
+.btn-primary:active,
+.btn-primary.active {
+  background-color: #2d6ca2;
+  border-color: #2b669a;
+}
+.btn-success {
+  background-image: -webkit-linear-gradient(top, #5cb85c 0%, #419641 100%);
+  background-image:         linear-gradient(to bottom, #5cb85c 0%, #419641 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);
+  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
+  background-repeat: repeat-x;
+  border-color: #3e8f3e;
+}
+.btn-success:hover,
+.btn-success:focus {
+  background-color: #419641;
+  background-position: 0 -15px;
+}
+.btn-success:active,
+.btn-success.active {
+  background-color: #419641;
+  border-color: #3e8f3e;
+}
+.btn-info {
+  background-image: -webkit-linear-gradient(top, #5bc0de 0%, #2aabd2 100%);
+  background-image:         linear-gradient(to bottom, #5bc0de 0%, #2aabd2 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0);
+  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
+  background-repeat: repeat-x;
+  border-color: #28a4c9;
+}
+.btn-info:hover,
+.btn-info:focus {
+  background-color: #2aabd2;
+  background-position: 0 -15px;
+}
+.btn-info:active,
+.btn-info.active {
+  background-color: #2aabd2;
+  border-color: #28a4c9;
+}
+.btn-warning {
+  background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #eb9316 100%);
+  background-image:         linear-gradient(to bottom, #f0ad4e 0%, #eb9316 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);
+  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
+  background-repeat: repeat-x;
+  border-color: #e38d13;
+}
+.btn-warning:hover,
+.btn-warning:focus {
+  background-color: #eb9316;
+  background-position: 0 -15px;
+}
+.btn-warning:active,
+.btn-warning.active {
+  background-color: #eb9316;
+  border-color: #e38d13;
+}
+.btn-danger {
+  background-image: -webkit-linear-gradient(top, #d9534f 0%, #c12e2a 100%);
+  background-image:         linear-gradient(to bottom, #d9534f 0%, #c12e2a 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);
+  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
+  background-repeat: repeat-x;
+  border-color: #b92c28;
+}
+.btn-danger:hover,
+.btn-danger:focus {
+  background-color: #c12e2a;
+  background-position: 0 -15px;
+}
+.btn-danger:active,
+.btn-danger.active {
+  background-color: #c12e2a;
+  border-color: #b92c28;
+}
+.thumbnail,
+.img-thumbnail {
+  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, .075);
+          box-shadow: 0 1px 2px rgba(0, 0, 0, .075);
+}
+.dropdown-menu > li > a:hover,
+.dropdown-menu > li > a:focus {
+  background-color: #e8e8e8;
+  background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);
+  background-image:         linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);
+  background-repeat: repeat-x;
+}
+.dropdown-menu > .active > a,
+.dropdown-menu > .active > a:hover,
+.dropdown-menu > .active > a:focus {
+  background-color: #357ebd;
+  background-image: -webkit-linear-gradient(top, #428bca 0%, #357ebd 100%);
+  background-image:         linear-gradient(to bottom, #428bca 0%, #357ebd 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', GradientType=0);
+  background-repeat: repeat-x;
+}
+.navbar-default {
+  background-image: -webkit-linear-gradient(top, #fff 0%, #f8f8f8 100%);
+  background-image:         linear-gradient(to bottom, #fff 0%, #f8f8f8 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);
+  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
+  background-repeat: repeat-x;
+  border-radius: 4px;
+  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 5px rgba(0, 0, 0, .075);
+          box-shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 5px rgba(0, 0, 0, .075);
+}
+.navbar-default .navbar-nav > .active > a {
+  background-image: -webkit-linear-gradient(top, #ebebeb 0%, #f3f3f3 100%);
+  background-image:         linear-gradient(to bottom, #ebebeb 0%, #f3f3f3 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff3f3f3', GradientType=0);
+  background-repeat: repeat-x;
+  -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, .075);
+          box-shadow: inset 0 3px 9px rgba(0, 0, 0, .075);
+}
+.navbar-brand,
+.navbar-nav > li > a {
+  text-shadow: 0 1px 0 rgba(255, 255, 255, .25);
+}
+.navbar-inverse {
+  background-image: -webkit-linear-gradient(top, #3c3c3c 0%, #222 100%);
+  background-image:         linear-gradient(to bottom, #3c3c3c 0%, #222 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);
+  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
+  background-repeat: repeat-x;
+}
+.navbar-inverse .navbar-nav > .active > a {
+  background-image: -webkit-linear-gradient(top, #222 0%, #282828 100%);
+  background-image:         linear-gradient(to bottom, #222 0%, #282828 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff222222', endColorstr='#ff282828', GradientType=0);
+  background-repeat: repeat-x;
+  -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, .25);
+          box-shadow: inset 0 3px 9px rgba(0, 0, 0, .25);
+}
+.navbar-inverse .navbar-brand,
+.navbar-inverse .navbar-nav > li > a {
+  text-shadow: 0 -1px 0 rgba(0, 0, 0, .25);
+}
+.navbar-static-top,
+.navbar-fixed-top,
+.navbar-fixed-bottom {
+  border-radius: 0;
+}
+.alert {
+  text-shadow: 0 1px 0 rgba(255, 255, 255, .2);
+  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .25), 0 1px 2px rgba(0, 0, 0, .05);
+          box-shadow: inset 0 1px 0 rgba(255, 255, 255, .25), 0 1px 2px rgba(0, 0, 0, .05);
+}
+.alert-success {
+  background-image: -webkit-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%);
+  background-image:         linear-gradient(to bottom, #dff0d8 0%, #c8e5bc 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);
+  background-repeat: repeat-x;
+  border-color: #b2dba1;
+}
+.alert-info {
+  background-image: -webkit-linear-gradient(top, #d9edf7 0%, #b9def0 100%);
+  background-image:         linear-gradient(to bottom, #d9edf7 0%, #b9def0 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);
+  background-repeat: repeat-x;
+  border-color: #9acfea;
+}
+.alert-warning {
+  background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%);
+  background-image:         linear-gradient(to bottom, #fcf8e3 0%, #f8efc0 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);
+  background-repeat: repeat-x;
+  border-color: #f5e79e;
+}
+.alert-danger {
+  background-image: -webkit-linear-gradient(top, #f2dede 0%, #e7c3c3 100%);
+  background-image:         linear-gradient(to bottom, #f2dede 0%, #e7c3c3 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);
+  background-repeat: repeat-x;
+  border-color: #dca7a7;
+}
+.progress {
+  background-image: -webkit-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%);
+  background-image:         linear-gradient(to bottom, #ebebeb 0%, #f5f5f5 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);
+  background-repeat: repeat-x;
+}
+.progress-bar {
+  background-image: -webkit-linear-gradient(top, #428bca 0%, #3071a9 100%);
+  background-image:         linear-gradient(to bottom, #428bca 0%, #3071a9 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3071a9', GradientType=0);
+  background-repeat: repeat-x;
+}
+.progress-bar-success {
+  background-image: -webkit-linear-gradient(top, #5cb85c 0%, #449d44 100%);
+  background-image:         linear-gradient(to bottom, #5cb85c 0%, #449d44 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);
+  background-repeat: repeat-x;
+}
+.progress-bar-info {
+  background-image: -webkit-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);
+  background-image:         linear-gradient(to bottom, #5bc0de 0%, #31b0d5 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0);
+  background-repeat: repeat-x;
+}
+.progress-bar-warning {
+  background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);
+  background-image:         linear-gradient(to bottom, #f0ad4e 0%, #ec971f 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);
+  background-repeat: repeat-x;
+}
+.progress-bar-danger {
+  background-image: -webkit-linear-gradient(top, #d9534f 0%, #c9302c 100%);
+  background-image:         linear-gradient(to bottom, #d9534f 0%, #c9302c 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);
+  background-repeat: repeat-x;
+}
+.list-group {
+  border-radius: 4px;
+  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, .075);
+          box-shadow: 0 1px 2px rgba(0, 0, 0, .075);
+}
+.list-group-item.active,
+.list-group-item.active:hover,
+.list-group-item.active:focus {
+  text-shadow: 0 -1px 0 #3071a9;
+  background-image: -webkit-linear-gradient(top, #428bca 0%, #3278b3 100%);
+  background-image:         linear-gradient(to bottom, #428bca 0%, #3278b3 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3278b3', GradientType=0);
+  background-repeat: repeat-x;
+  border-color: #3278b3;
+}
+.panel {
+  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, .05);
+          box-shadow: 0 1px 2px rgba(0, 0, 0, .05);
+}
+.panel-default > .panel-heading {
+  background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);
+  background-image:         linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);
+  background-repeat: repeat-x;
+}
+.panel-primary > .panel-heading {
+  background-image: -webkit-linear-gradient(top, #428bca 0%, #357ebd 100%);
+  background-image:         linear-gradient(to bottom, #428bca 0%, #357ebd 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', GradientType=0);
+  background-repeat: repeat-x;
+}
+.panel-success > .panel-heading {
+  background-image: -webkit-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%);
+  background-image:         linear-gradient(to bottom, #dff0d8 0%, #d0e9c6 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);
+  background-repeat: repeat-x;
+}
+.panel-info > .panel-heading {
+  background-image: -webkit-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%);
+  background-image:         linear-gradient(to bottom, #d9edf7 0%, #c4e3f3 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);
+  background-repeat: repeat-x;
+}
+.panel-warning > .panel-heading {
+  background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%);
+  background-image:         linear-gradient(to bottom, #fcf8e3 0%, #faf2cc 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);
+  background-repeat: repeat-x;
+}
+.panel-danger > .panel-heading {
+  background-image: -webkit-linear-gradient(top, #f2dede 0%, #ebcccc 100%);
+  background-image:         linear-gradient(to bottom, #f2dede 0%, #ebcccc 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);
+  background-repeat: repeat-x;
+}
+.well {
+  background-image: -webkit-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%);
+  background-image:         linear-gradient(to bottom, #e8e8e8 0%, #f5f5f5 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);
+  background-repeat: repeat-x;
+  border-color: #dcdcdc;
+  -webkit-box-shadow: inset 0 1px 3px rgba(0, 0, 0, .05), 0 1px 0 rgba(255, 255, 255, .1);
+          box-shadow: inset 0 1px 3px rgba(0, 0, 0, .05), 0 1px 0 rgba(255, 255, 255, .1);
+}
+/*# sourceMappingURL=bootstrap-theme.css.map */
diff --git a/doc/css/bootstrap-theme.css.map b/doc/css/bootstrap-theme.css.map
new file mode 100644 (file)
index 0000000..29c1319
--- /dev/null
@@ -0,0 +1 @@
+{"version":3,"sources":["less/theme.less","less/mixins.less"],"names":[],"mappings":"AAeA;AACA;AACA;AACA;AACA;AACA;EACE,wCAAA;ECqGA,2FAAA;EACQ,mFAAA;;ADjGR,YAAC;AAAD,YAAC;AAAD,YAAC;AAAD,SAAC;AAAD,YAAC;AAAD,WAAC;AACD,YAAC;AAAD,YAAC;AAAD,YAAC;AAAD,SAAC;AAAD,YAAC;AAAD,WAAC;EC+FD,wDAAA;EACQ,gDAAA;;ADpER,IAAC;AACD,IAAC;EACC,sBAAA;;AAKJ;EC8PI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EAEA,sHAAA;EAoCF,mEAAA;ED/TA,2BAAA;EACA,qBAAA;EAyB2C,yBAAA;EAA2B,kBAAA;;AAvBtE,YAAC;AACD,YAAC;EACC,yBAAA;EACA,4BAAA;;AAGF,YAAC;AACD,YAAC;EACC,yBAAA;EACA,qBAAA;;AAeJ;EC6PI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EAEA,sHAAA;EAoCF,mEAAA;ED/TA,2BAAA;EACA,qBAAA;;AAEA,YAAC;AACD,YAAC;EACC,yBAAA;EACA,4BAAA;;AAGF,YAAC;AACD,YAAC;EACC,yBAAA;EACA,qBAAA;;AAgBJ;EC4PI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EAEA,sHAAA;EAoCF,mEAAA;ED/TA,2BAAA;EACA,qBAAA;;AAEA,YAAC;AACD,YAAC;EACC,yBAAA;EACA,4BAAA;;AAGF,YAAC;AACD,YAAC;EACC,yBAAA;EACA,qBAAA;;AAiBJ;EC2PI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EAEA,sHAAA;EAoCF,mEAAA;ED/TA,2BAAA;EACA,qBAAA;;AAEA,SAAC;AACD,SAAC;EACC,yBAAA;EACA,4BAAA;;AAGF,SAAC;AACD,SAAC;EACC,yBAAA;EACA,qBAAA;;AAkBJ;EC0PI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EAEA,sHAAA;EAoCF,mEAAA;ED/TA,2BAAA;EACA,qBAAA;;AAEA,YAAC;AACD,YAAC;EACC,yBAAA;EACA,4BAAA;;AAGF,YAAC;AACD,YAAC;EACC,yBAAA;EACA,qBAAA;;AAmBJ;ECyPI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EAEA,sHAAA;EAoCF,mEAAA;ED/TA,2BAAA;EACA,qBAAA;;AAEA,WAAC;AACD,WAAC;EACC,yBAAA;EACA,4BAAA;;AAGF,WAAC;AACD,WAAC;EACC,yBAAA;EACA,qBAAA;;AA2BJ;AACA;EC8CE,kDAAA;EACQ,0CAAA;;ADrCV,cAAe,KAAK,IAAG;AACvB,cAAe,KAAK,IAAG;ECqOnB,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;EDtOF,yBAAA;;AAEF,cAAe,UAAU;AACzB,cAAe,UAAU,IAAG;AAC5B,cAAe,UAAU,IAAG;EC+NxB,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;EDhOF,yBAAA;;AAUF;ECmNI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;EAoCF,mEAAA;EDvPA,kBAAA;ECcA,2FAAA;EACQ,mFAAA;;ADlBV,eAOE,YAAY,UAAU;EC4MpB,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;EArMF,wDAAA;EACQ,gDAAA;;ADNV;AACA,WAAY,KAAK;EACf,8CAAA;;AAIF;ECiMI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAl
B;EACA,2BAAA;EACA,sHAAA;EAoCF,mEAAA;;ADxOF,eAIE,YAAY,UAAU;EC6LpB,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;EArMF,uDAAA;EACQ,+CAAA;;ADAV,eASE;AATF,eAUE,YAAY,KAAK;EACf,yCAAA;;AAKJ;AACA;AACA;EACE,gBAAA;;AAUF;EACE,6CAAA;EC/BA,0FAAA;EACQ,kFAAA;;AD0CV;ECuJI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;ED9JF,qBAAA;;AAKF;ECsJI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;ED9JF,qBAAA;;AAMF;ECqJI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;ED9JF,qBAAA;;AAOF;ECoJI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;ED9JF,qBAAA;;AAgBF;EC2II,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;;ADpIJ;ECiII,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;;ADnIJ;ECgII,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;;ADlIJ;EC+HI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;;ADjIJ;EC8HI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;;ADhIJ;EC6HI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;;ADxHJ;EACE,kBAAA;EC9EA,kDAAA;EACQ,0CAAA;;ADgFV,gBAAgB;AAChB,gBAAgB,OAAO;AACvB,gBAAgB,OAAO;EACrB,6BAAA;EC8GE,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;ED/GF,qBAAA;;AAUF;EChGE,iDAAA;EACQ,yCAAA;;ADyGV,cAAe;ECwFX,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;;AD1FJ,cAAe;ECuFX,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;;ADzFJ,cAAe;ECsFX,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;;ADxFJ,WAAY;ECqFR,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;;ADvFJ,cAAe;ECoFX,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;;ADtFJ,aAAc;ECmFV,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;;AD9EJ;EC2EI,kBAAkB,sDAAlB;EACA,kBAAkB,oDAAlB;EACA,2BAAA;EACA,sHAAA;ED5EF,qBAAA;ECzHA,yFAAA;EACQ,iFAAA","sourcesContent":["\n//\n// Load core variables and mixins\n// --------------------------------------------------\n\n@import \"variables.less\";\n@import \"mixins.less\";\n\n\n\n//\n// Buttons\n// --------------------------------------------------\n\n// Common 
styles\n.btn-default,\n.btn-primary,\n.btn-success,\n.btn-info,\n.btn-warning,\n.btn-danger {\n  text-shadow: 0 -1px 0 rgba(0,0,0,.2);\n  @shadow: inset 0 1px 0 rgba(255,255,255,.15), 0 1px 1px rgba(0,0,0,.075);\n  .box-shadow(@shadow);\n\n  // Reset the shadow\n  &:active,\n  &.active {\n    .box-shadow(inset 0 3px 5px rgba(0,0,0,.125));\n  }\n}\n\n// Mixin for generating new styles\n.btn-styles(@btn-color: #555) {\n  #gradient > .vertical(@start-color: @btn-color; @end-color: darken(@btn-color, 12%));\n  .reset-filter(); // Disable gradients for IE9 because filter bleeds through rounded corners\n  background-repeat: repeat-x;\n  border-color: darken(@btn-color, 14%);\n\n  &:hover,\n  &:focus  {\n    background-color: darken(@btn-color, 12%);\n    background-position: 0 -15px;\n  }\n\n  &:active,\n  &.active {\n    background-color: darken(@btn-color, 12%);\n    border-color: darken(@btn-color, 14%);\n  }\n}\n\n// Common styles\n.btn {\n  // Remove the gradient for the pressed/active state\n  &:active,\n  &.active {\n    background-image: none;\n  }\n}\n\n// Apply the mixin to the buttons\n.btn-default { .btn-styles(@btn-default-bg); text-shadow: 0 1px 0 #fff; border-color: #ccc; }\n.btn-primary { .btn-styles(@btn-primary-bg); }\n.btn-success { .btn-styles(@btn-success-bg); }\n.btn-info    { .btn-styles(@btn-info-bg); }\n.btn-warning { .btn-styles(@btn-warning-bg); }\n.btn-danger  { .btn-styles(@btn-danger-bg); }\n\n\n\n//\n// Images\n// --------------------------------------------------\n\n.thumbnail,\n.img-thumbnail {\n  .box-shadow(0 1px 2px rgba(0,0,0,.075));\n}\n\n\n\n//\n// Dropdowns\n// --------------------------------------------------\n\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n  #gradient > .vertical(@start-color: @dropdown-link-hover-bg; @end-color: darken(@dropdown-link-hover-bg, 5%));\n  background-color: darken(@dropdown-link-hover-bg, 5%);\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu 
> .active > a:focus {\n  #gradient > .vertical(@start-color: @dropdown-link-active-bg; @end-color: darken(@dropdown-link-active-bg, 5%));\n  background-color: darken(@dropdown-link-active-bg, 5%);\n}\n\n\n\n//\n// Navbar\n// --------------------------------------------------\n\n// Default navbar\n.navbar-default {\n  #gradient > .vertical(@start-color: lighten(@navbar-default-bg, 10%); @end-color: @navbar-default-bg);\n  .reset-filter(); // Remove gradient in IE<10 to fix bug where dropdowns don't get triggered\n  border-radius: @navbar-border-radius;\n  @shadow: inset 0 1px 0 rgba(255,255,255,.15), 0 1px 5px rgba(0,0,0,.075);\n  .box-shadow(@shadow);\n\n  .navbar-nav > .active > a {\n    #gradient > .vertical(@start-color: darken(@navbar-default-bg, 5%); @end-color: darken(@navbar-default-bg, 2%));\n    .box-shadow(inset 0 3px 9px rgba(0,0,0,.075));\n  }\n}\n.navbar-brand,\n.navbar-nav > li > a {\n  text-shadow: 0 1px 0 rgba(255,255,255,.25);\n}\n\n// Inverted navbar\n.navbar-inverse {\n  #gradient > .vertical(@start-color: lighten(@navbar-inverse-bg, 10%); @end-color: @navbar-inverse-bg);\n  .reset-filter(); // Remove gradient in IE<10 to fix bug where dropdowns don't get triggered\n\n  .navbar-nav > .active > a {\n    #gradient > .vertical(@start-color: @navbar-inverse-bg; @end-color: lighten(@navbar-inverse-bg, 2.5%));\n    .box-shadow(inset 0 3px 9px rgba(0,0,0,.25));\n  }\n\n  .navbar-brand,\n  .navbar-nav > li > a {\n    text-shadow: 0 -1px 0 rgba(0,0,0,.25);\n  }\n}\n\n// Undo rounded corners in static and fixed navbars\n.navbar-static-top,\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n  border-radius: 0;\n}\n\n\n\n//\n// Alerts\n// --------------------------------------------------\n\n// Common styles\n.alert {\n  text-shadow: 0 1px 0 rgba(255,255,255,.2);\n  @shadow: inset 0 1px 0 rgba(255,255,255,.25), 0 1px 2px rgba(0,0,0,.05);\n  .box-shadow(@shadow);\n}\n\n// Mixin for generating new styles\n.alert-styles(@color) {\n  #gradient > 
.vertical(@start-color: @color; @end-color: darken(@color, 7.5%));\n  border-color: darken(@color, 15%);\n}\n\n// Apply the mixin to the alerts\n.alert-success    { .alert-styles(@alert-success-bg); }\n.alert-info       { .alert-styles(@alert-info-bg); }\n.alert-warning    { .alert-styles(@alert-warning-bg); }\n.alert-danger     { .alert-styles(@alert-danger-bg); }\n\n\n\n//\n// Progress bars\n// --------------------------------------------------\n\n// Give the progress background some depth\n.progress {\n  #gradient > .vertical(@start-color: darken(@progress-bg, 4%); @end-color: @progress-bg)\n}\n\n// Mixin for generating new styles\n.progress-bar-styles(@color) {\n  #gradient > .vertical(@start-color: @color; @end-color: darken(@color, 10%));\n}\n\n// Apply the mixin to the progress bars\n.progress-bar            { .progress-bar-styles(@progress-bar-bg); }\n.progress-bar-success    { .progress-bar-styles(@progress-bar-success-bg); }\n.progress-bar-info       { .progress-bar-styles(@progress-bar-info-bg); }\n.progress-bar-warning    { .progress-bar-styles(@progress-bar-warning-bg); }\n.progress-bar-danger     { .progress-bar-styles(@progress-bar-danger-bg); }\n\n\n\n//\n// List groups\n// --------------------------------------------------\n\n.list-group {\n  border-radius: @border-radius-base;\n  .box-shadow(0 1px 2px rgba(0,0,0,.075));\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n  text-shadow: 0 -1px 0 darken(@list-group-active-bg, 10%);\n  #gradient > .vertical(@start-color: @list-group-active-bg; @end-color: darken(@list-group-active-bg, 7.5%));\n  border-color: darken(@list-group-active-border, 7.5%);\n}\n\n\n\n//\n// Panels\n// --------------------------------------------------\n\n// Common styles\n.panel {\n  .box-shadow(0 1px 2px rgba(0,0,0,.05));\n}\n\n// Mixin for generating new styles\n.panel-heading-styles(@color) {\n  #gradient > .vertical(@start-color: @color; @end-color: darken(@color, 5%));\n}\n\n// 
Apply the mixin to the panel headings only\n.panel-default > .panel-heading   { .panel-heading-styles(@panel-default-heading-bg); }\n.panel-primary > .panel-heading   { .panel-heading-styles(@panel-primary-heading-bg); }\n.panel-success > .panel-heading   { .panel-heading-styles(@panel-success-heading-bg); }\n.panel-info > .panel-heading      { .panel-heading-styles(@panel-info-heading-bg); }\n.panel-warning > .panel-heading   { .panel-heading-styles(@panel-warning-heading-bg); }\n.panel-danger > .panel-heading    { .panel-heading-styles(@panel-danger-heading-bg); }\n\n\n\n//\n// Wells\n// --------------------------------------------------\n\n.well {\n  #gradient > .vertical(@start-color: darken(@well-bg, 5%); @end-color: @well-bg);\n  border-color: darken(@well-bg, 10%);\n  @shadow: inset 0 1px 3px rgba(0,0,0,.05), 0 1px 0 rgba(255,255,255,.1);\n  .box-shadow(@shadow);\n}\n","//\n// Mixins\n// --------------------------------------------------\n\n\n// Utilities\n// -------------------------\n\n// Clearfix\n// Source: http://nicolasgallagher.com/micro-clearfix-hack/\n//\n// For modern browsers\n// 1. The space content is one way to avoid an Opera bug when the\n//    contenteditable attribute is included anywhere else in the document.\n//    Otherwise it causes space to appear at the top and bottom of elements\n//    that are clearfixed.\n// 2. 
The use of `table` rather than `block` is only necessary if using\n//    `:before` to contain the top-margins of child elements.\n.clearfix() {\n  &:before,\n  &:after {\n    content: \" \"; // 1\n    display: table; // 2\n  }\n  &:after {\n    clear: both;\n  }\n}\n\n// WebKit-style focus\n.tab-focus() {\n  // Default\n  outline: thin dotted;\n  // WebKit\n  outline: 5px auto -webkit-focus-ring-color;\n  outline-offset: -2px;\n}\n\n// Center-align a block level element\n.center-block() {\n  display: block;\n  margin-left: auto;\n  margin-right: auto;\n}\n\n// Sizing shortcuts\n.size(@width; @height) {\n  width: @width;\n  height: @height;\n}\n.square(@size) {\n  .size(@size; @size);\n}\n\n// Placeholder text\n.placeholder(@color: @input-color-placeholder) {\n  &:-moz-placeholder            { color: @color; } // Firefox 4-18\n  &::-moz-placeholder           { color: @color;   // Firefox 19+\n                                  opacity: 1; } // See https://github.com/twbs/bootstrap/pull/11526\n  &:-ms-input-placeholder       { color: @color; } // Internet Explorer 10+\n  &::-webkit-input-placeholder  { color: @color; } // Safari and Chrome\n}\n\n// Text overflow\n// Requires inline-block or block for proper styling\n.text-overflow() {\n  overflow: hidden;\n  text-overflow: ellipsis;\n  white-space: nowrap;\n}\n\n// CSS image replacement\n//\n// Heads up! v3 launched with with only `.hide-text()`, but per our pattern for\n// mixins being reused as classes with the same name, this doesn't hold up. As\n// of v3.0.1 we have added `.text-hide()` and deprecated `.hide-text()`. 
Note\n// that we cannot chain the mixins together in Less, so they are repeated.\n//\n// Source: https://github.com/h5bp/html5-boilerplate/commit/aa0396eae757\n\n// Deprecated as of v3.0.1 (will be removed in v4)\n.hide-text() {\n  font: ~\"0/0\" a;\n  color: transparent;\n  text-shadow: none;\n  background-color: transparent;\n  border: 0;\n}\n// New mixin to use as of v3.0.1\n.text-hide() {\n  .hide-text();\n}\n\n\n\n// CSS3 PROPERTIES\n// --------------------------------------------------\n\n// Single side border-radius\n.border-top-radius(@radius) {\n  border-top-right-radius: @radius;\n   border-top-left-radius: @radius;\n}\n.border-right-radius(@radius) {\n  border-bottom-right-radius: @radius;\n     border-top-right-radius: @radius;\n}\n.border-bottom-radius(@radius) {\n  border-bottom-right-radius: @radius;\n   border-bottom-left-radius: @radius;\n}\n.border-left-radius(@radius) {\n  border-bottom-left-radius: @radius;\n     border-top-left-radius: @radius;\n}\n\n// Drop shadows\n//\n// Note: Deprecated `.box-shadow()` as of v3.1.0 since all of Bootstrap's\n//   supported browsers that have box shadow capabilities now support the\n//   standard `box-shadow` property.\n.box-shadow(@shadow) {\n  -webkit-box-shadow: @shadow; // iOS <4.3 & Android <4.1\n          box-shadow: @shadow;\n}\n\n// Transitions\n.transition(@transition) {\n  -webkit-transition: @transition;\n          transition: @transition;\n}\n.transition-property(@transition-property) {\n  -webkit-transition-property: @transition-property;\n          transition-property: @transition-property;\n}\n.transition-delay(@transition-delay) {\n  -webkit-transition-delay: @transition-delay;\n          transition-delay: @transition-delay;\n}\n.transition-duration(@transition-duration) {\n  -webkit-transition-duration: @transition-duration;\n          transition-duration: @transition-duration;\n}\n.transition-transform(@transition) {\n  -webkit-transition: -webkit-transform @transition;\n     
-moz-transition: -moz-transform @transition;\n       -o-transition: -o-transform @transition;\n          transition: transform @transition;\n}\n\n// Transformations\n.rotate(@degrees) {\n  -webkit-transform: rotate(@degrees);\n      -ms-transform: rotate(@degrees); // IE9 only\n          transform: rotate(@degrees);\n}\n.scale(@ratio; @ratio-y...) {\n  -webkit-transform: scale(@ratio, @ratio-y);\n      -ms-transform: scale(@ratio, @ratio-y); // IE9 only\n          transform: scale(@ratio, @ratio-y);\n}\n.translate(@x; @y) {\n  -webkit-transform: translate(@x, @y);\n      -ms-transform: translate(@x, @y); // IE9 only\n          transform: translate(@x, @y);\n}\n.skew(@x; @y) {\n  -webkit-transform: skew(@x, @y);\n      -ms-transform: skewX(@x) skewY(@y); // See https://github.com/twbs/bootstrap/issues/4885; IE9+\n          transform: skew(@x, @y);\n}\n.translate3d(@x; @y; @z) {\n  -webkit-transform: translate3d(@x, @y, @z);\n          transform: translate3d(@x, @y, @z);\n}\n\n.rotateX(@degrees) {\n  -webkit-transform: rotateX(@degrees);\n      -ms-transform: rotateX(@degrees); // IE9 only\n          transform: rotateX(@degrees);\n}\n.rotateY(@degrees) {\n  -webkit-transform: rotateY(@degrees);\n      -ms-transform: rotateY(@degrees); // IE9 only\n          transform: rotateY(@degrees);\n}\n.perspective(@perspective) {\n  -webkit-perspective: @perspective;\n     -moz-perspective: @perspective;\n          perspective: @perspective;\n}\n.perspective-origin(@perspective) {\n  -webkit-perspective-origin: @perspective;\n     -moz-perspective-origin: @perspective;\n          perspective-origin: @perspective;\n}\n.transform-origin(@origin) {\n  -webkit-transform-origin: @origin;\n     -moz-transform-origin: @origin;\n      -ms-transform-origin: @origin; // IE9 only\n          transform-origin: @origin;\n}\n\n// Animations\n.animation(@animation) {\n  -webkit-animation: @animation;\n          animation: @animation;\n}\n.animation-name(@name) {\n  -webkit-animation-name: 
@name;\n          animation-name: @name;\n}\n.animation-duration(@duration) {\n  -webkit-animation-duration: @duration;\n          animation-duration: @duration;\n}\n.animation-timing-function(@timing-function) {\n  -webkit-animation-timing-function: @timing-function;\n          animation-timing-function: @timing-function;\n}\n.animation-delay(@delay) {\n  -webkit-animation-delay: @delay;\n          animation-delay: @delay;\n}\n.animation-iteration-count(@iteration-count) {\n  -webkit-animation-iteration-count: @iteration-count;\n          animation-iteration-count: @iteration-count;\n}\n.animation-direction(@direction) {\n  -webkit-animation-direction: @direction;\n          animation-direction: @direction;\n}\n\n// Backface visibility\n// Prevent browsers from flickering when using CSS 3D transforms.\n// Default value is `visible`, but can be changed to `hidden`\n.backface-visibility(@visibility){\n  -webkit-backface-visibility: @visibility;\n     -moz-backface-visibility: @visibility;\n          backface-visibility: @visibility;\n}\n\n// Box sizing\n.box-sizing(@boxmodel) {\n  -webkit-box-sizing: @boxmodel;\n     -moz-box-sizing: @boxmodel;\n          box-sizing: @boxmodel;\n}\n\n// User select\n// For selecting text on the page\n.user-select(@select) {\n  -webkit-user-select: @select;\n     -moz-user-select: @select;\n      -ms-user-select: @select; // IE10+\n       -o-user-select: @select;\n          user-select: @select;\n}\n\n// Resize anything\n.resizable(@direction) {\n  resize: @direction; // Options: horizontal, vertical, both\n  overflow: auto; // Safari fix\n}\n\n// CSS3 Content Columns\n.content-columns(@column-count; @column-gap: @grid-gutter-width) {\n  -webkit-column-count: @column-count;\n     -moz-column-count: @column-count;\n          column-count: @column-count;\n  -webkit-column-gap: @column-gap;\n     -moz-column-gap: @column-gap;\n          column-gap: @column-gap;\n}\n\n// Optional hyphenation\n.hyphens(@mode: auto) {\n  word-wrap: 
break-word;\n  -webkit-hyphens: @mode;\n     -moz-hyphens: @mode;\n      -ms-hyphens: @mode; // IE10+\n       -o-hyphens: @mode;\n          hyphens: @mode;\n}\n\n// Opacity\n.opacity(@opacity) {\n  opacity: @opacity;\n  // IE8 filter\n  @opacity-ie: (@opacity * 100);\n  filter: ~\"alpha(opacity=@{opacity-ie})\";\n}\n\n\n\n// GRADIENTS\n// --------------------------------------------------\n\n#gradient {\n\n  // Horizontal gradient, from left to right\n  //\n  // Creates two color stops, start and end, by specifying a color and position for each color stop.\n  // Color stops are not available in IE9 and below.\n  .horizontal(@start-color: #555; @end-color: #333; @start-percent: 0%; @end-percent: 100%) {\n    background-image: -webkit-linear-gradient(left, color-stop(@start-color @start-percent), color-stop(@end-color @end-percent)); // Safari 5.1-6, Chrome 10+\n    background-image:  linear-gradient(to right, @start-color @start-percent, @end-color @end-percent); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n    background-repeat: repeat-x;\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=1)\",argb(@start-color),argb(@end-color))); // IE9 and down\n  }\n\n  // Vertical gradient, from top to bottom\n  //\n  // Creates two color stops, start and end, by specifying a color and position for each color stop.\n  // Color stops are not available in IE9 and below.\n  .vertical(@start-color: #555; @end-color: #333; @start-percent: 0%; @end-percent: 100%) {\n    background-image: -webkit-linear-gradient(top, @start-color @start-percent, @end-color @end-percent);  // Safari 5.1-6, Chrome 10+\n    background-image: linear-gradient(to bottom, @start-color @start-percent, @end-color @end-percent); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n    background-repeat: repeat-x;\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', 
endColorstr='%d', GradientType=0)\",argb(@start-color),argb(@end-color))); // IE9 and down\n  }\n\n  .directional(@start-color: #555; @end-color: #333; @deg: 45deg) {\n    background-repeat: repeat-x;\n    background-image: -webkit-linear-gradient(@deg, @start-color, @end-color); // Safari 5.1-6, Chrome 10+\n    background-image: linear-gradient(@deg, @start-color, @end-color); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n  }\n  .horizontal-three-colors(@start-color: #00b3ee; @mid-color: #7a43b6; @color-stop: 50%; @end-color: #c3325f) {\n    background-image: -webkit-linear-gradient(left, @start-color, @mid-color @color-stop, @end-color);\n    background-image: linear-gradient(to right, @start-color, @mid-color @color-stop, @end-color);\n    background-repeat: no-repeat;\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=1)\",argb(@start-color),argb(@end-color))); // IE9 and down, gets no color-stop at all for proper fallback\n  }\n  .vertical-three-colors(@start-color: #00b3ee; @mid-color: #7a43b6; @color-stop: 50%; @end-color: #c3325f) {\n    background-image: -webkit-linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n    background-image: linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n    background-repeat: no-repeat;\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=0)\",argb(@start-color),argb(@end-color))); // IE9 and down, gets no color-stop at all for proper fallback\n  }\n  .radial(@inner-color: #555; @outer-color: #333) {\n    background-image: -webkit-radial-gradient(circle, @inner-color, @outer-color);\n    background-image: radial-gradient(circle, @inner-color, @outer-color);\n    background-repeat: no-repeat;\n  }\n  .striped(@color: rgba(255,255,255,.15); @angle: 45deg) {\n    background-image: -webkit-linear-gradient(@angle, @color 25%, transparent 25%, transparent 
50%, @color 50%, @color 75%, transparent 75%, transparent);\n    background-image: linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n  }\n}\n\n// Reset filters for IE\n//\n// When you need to remove a gradient background, do not forget to use this to reset\n// the IE filter for IE9 and below.\n.reset-filter() {\n  filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(enabled = false)\"));\n}\n\n\n\n// Retina images\n//\n// Short retina mixin for setting background-image and -size\n\n.img-retina(@file-1x; @file-2x; @width-1x; @height-1x) {\n  background-image: url(\"@{file-1x}\");\n\n  @media\n  only screen and (-webkit-min-device-pixel-ratio: 2),\n  only screen and (   min--moz-device-pixel-ratio: 2),\n  only screen and (     -o-min-device-pixel-ratio: 2/1),\n  only screen and (        min-device-pixel-ratio: 2),\n  only screen and (                min-resolution: 192dpi),\n  only screen and (                min-resolution: 2dppx) {\n    background-image: url(\"@{file-2x}\");\n    background-size: @width-1x @height-1x;\n  }\n}\n\n\n// Responsive image\n//\n// Keep images from scaling beyond the width of their parents.\n\n.img-responsive(@display: block) {\n  display: @display;\n  max-width: 100%; // Part 1: Set a maximum relative to the parent\n  height: auto; // Part 2: Scale the height according to the width, otherwise you get stretching\n}\n\n\n// COMPONENT MIXINS\n// --------------------------------------------------\n\n// Horizontal dividers\n// -------------------------\n// Dividers (basically an hr) within dropdowns and nav lists\n.nav-divider(@color: #e5e5e5) {\n  height: 1px;\n  margin: ((@line-height-computed / 2) - 1) 0;\n  overflow: hidden;\n  background-color: @color;\n}\n\n// Panels\n// -------------------------\n.panel-variant(@border; @heading-text-color; @heading-bg-color; @heading-border) {\n  border-color: @border;\n\n  & > .panel-heading {\n    color: 
@heading-text-color;\n    background-color: @heading-bg-color;\n    border-color: @heading-border;\n\n    + .panel-collapse .panel-body {\n      border-top-color: @border;\n    }\n  }\n  & > .panel-footer {\n    + .panel-collapse .panel-body {\n      border-bottom-color: @border;\n    }\n  }\n}\n\n// Alerts\n// -------------------------\n.alert-variant(@background; @border; @text-color) {\n  background-color: @background;\n  border-color: @border;\n  color: @text-color;\n\n  hr {\n    border-top-color: darken(@border, 5%);\n  }\n  .alert-link {\n    color: darken(@text-color, 10%);\n  }\n}\n\n// Tables\n// -------------------------\n.table-row-variant(@state; @background) {\n  // Exact selectors below required to override `.table-striped` and prevent\n  // inheritance to nested tables.\n  .table > thead > tr,\n  .table > tbody > tr,\n  .table > tfoot > tr {\n    > td.@{state},\n    > th.@{state},\n    &.@{state} > td,\n    &.@{state} > th {\n      background-color: @background;\n    }\n  }\n\n  // Hover states for `.table-hover`\n  // Note: this is not available for cells or rows within `thead` or `tfoot`.\n  .table-hover > tbody > tr {\n    > td.@{state}:hover,\n    > th.@{state}:hover,\n    &.@{state}:hover > td,\n    &.@{state}:hover > th {\n      background-color: darken(@background, 5%);\n    }\n  }\n}\n\n// List Groups\n// -------------------------\n.list-group-item-variant(@state; @background; @color) {\n  .list-group-item-@{state} {\n    color: @color;\n    background-color: @background;\n\n    a& {\n      color: @color;\n\n      .list-group-item-heading { color: inherit; }\n\n      &:hover,\n      &:focus {\n        color: @color;\n        background-color: darken(@background, 5%);\n      }\n      &.active,\n      &.active:hover,\n      &.active:focus {\n        color: #fff;\n        background-color: @color;\n        border-color: @color;\n      }\n    }\n  }\n}\n\n// Button variants\n// -------------------------\n// Easily pump out default styles, as 
well as :hover, :focus, :active,\n// and disabled options for all buttons\n.button-variant(@color; @background; @border) {\n  color: @color;\n  background-color: @background;\n  border-color: @border;\n\n  &:hover,\n  &:focus,\n  &:active,\n  &.active,\n  .open .dropdown-toggle& {\n    color: @color;\n    background-color: darken(@background, 8%);\n        border-color: darken(@border, 12%);\n  }\n  &:active,\n  &.active,\n  .open .dropdown-toggle& {\n    background-image: none;\n  }\n  &.disabled,\n  &[disabled],\n  fieldset[disabled] & {\n    &,\n    &:hover,\n    &:focus,\n    &:active,\n    &.active {\n      background-color: @background;\n          border-color: @border;\n    }\n  }\n\n  .badge {\n    color: @background;\n    background-color: @color;\n  }\n}\n\n// Button sizes\n// -------------------------\n.button-size(@padding-vertical; @padding-horizontal; @font-size; @line-height; @border-radius) {\n  padding: @padding-vertical @padding-horizontal;\n  font-size: @font-size;\n  line-height: @line-height;\n  border-radius: @border-radius;\n}\n\n// Pagination\n// -------------------------\n.pagination-size(@padding-vertical; @padding-horizontal; @font-size; @border-radius) {\n  > li {\n    > a,\n    > span {\n      padding: @padding-vertical @padding-horizontal;\n      font-size: @font-size;\n    }\n    &:first-child {\n      > a,\n      > span {\n        .border-left-radius(@border-radius);\n      }\n    }\n    &:last-child {\n      > a,\n      > span {\n        .border-right-radius(@border-radius);\n      }\n    }\n  }\n}\n\n// Labels\n// -------------------------\n.label-variant(@color) {\n  background-color: @color;\n  &[href] {\n    &:hover,\n    &:focus {\n      background-color: darken(@color, 10%);\n    }\n  }\n}\n\n// Contextual backgrounds\n// -------------------------\n.bg-variant(@color) {\n  background-color: @color;\n  a&:hover {\n    background-color: darken(@color, 10%);\n  }\n}\n\n// Typography\n// 
-------------------------\n.text-emphasis-variant(@color) {\n  color: @color;\n  a&:hover {\n    color: darken(@color, 10%);\n  }\n}\n\n// Navbar vertical align\n// -------------------------\n// Vertically center elements in the navbar.\n// Example: an element has a height of 30px, so write out `.navbar-vertical-align(30px);` to calculate the appropriate top margin.\n.navbar-vertical-align(@element-height) {\n  margin-top: ((@navbar-height - @element-height) / 2);\n  margin-bottom: ((@navbar-height - @element-height) / 2);\n}\n\n// Progress bars\n// -------------------------\n.progress-bar-variant(@color) {\n  background-color: @color;\n  .progress-striped & {\n    #gradient > .striped();\n  }\n}\n\n// Responsive utilities\n// -------------------------\n// More easily include all the states for responsive-utilities.less.\n.responsive-visibility() {\n  display: block !important;\n  table&  { display: table; }\n  tr&     { display: table-row !important; }\n  th&,\n  td&     { display: table-cell !important; }\n}\n\n.responsive-invisibility() {\n    &,\n  tr&,\n  th&,\n  td& { display: none !important; }\n}\n\n\n// Grid System\n// -----------\n\n// Centered container element\n.container-fixed() {\n  margin-right: auto;\n  margin-left: auto;\n  padding-left:  (@grid-gutter-width / 2);\n  padding-right: (@grid-gutter-width / 2);\n  &:extend(.clearfix all);\n}\n\n// Creates a wrapper for a series of columns\n.make-row(@gutter: @grid-gutter-width) {\n  margin-left:  (@gutter / -2);\n  margin-right: (@gutter / -2);\n  &:extend(.clearfix all);\n}\n\n// Generate the extra small columns\n.make-xs-column(@columns; @gutter: @grid-gutter-width) {\n  position: relative;\n  float: left;\n  width: percentage((@columns / @grid-columns));\n  min-height: 1px;\n  padding-left:  (@gutter / 2);\n  padding-right: (@gutter / 2);\n}\n.make-xs-column-offset(@columns) {\n  @media (min-width: @screen-xs-min) {\n    margin-left: percentage((@columns / @grid-columns));\n  
}\n}\n.make-xs-column-push(@columns) {\n  @media (min-width: @screen-xs-min) {\n    left: percentage((@columns / @grid-columns));\n  }\n}\n.make-xs-column-pull(@columns) {\n  @media (min-width: @screen-xs-min) {\n    right: percentage((@columns / @grid-columns));\n  }\n}\n\n\n// Generate the small columns\n.make-sm-column(@columns; @gutter: @grid-gutter-width) {\n  position: relative;\n  min-height: 1px;\n  padding-left:  (@gutter / 2);\n  padding-right: (@gutter / 2);\n\n  @media (min-width: @screen-sm-min) {\n    float: left;\n    width: percentage((@columns / @grid-columns));\n  }\n}\n.make-sm-column-offset(@columns) {\n  @media (min-width: @screen-sm-min) {\n    margin-left: percentage((@columns / @grid-columns));\n  }\n}\n.make-sm-column-push(@columns) {\n  @media (min-width: @screen-sm-min) {\n    left: percentage((@columns / @grid-columns));\n  }\n}\n.make-sm-column-pull(@columns) {\n  @media (min-width: @screen-sm-min) {\n    right: percentage((@columns / @grid-columns));\n  }\n}\n\n\n// Generate the medium columns\n.make-md-column(@columns; @gutter: @grid-gutter-width) {\n  position: relative;\n  min-height: 1px;\n  padding-left:  (@gutter / 2);\n  padding-right: (@gutter / 2);\n\n  @media (min-width: @screen-md-min) {\n    float: left;\n    width: percentage((@columns / @grid-columns));\n  }\n}\n.make-md-column-offset(@columns) {\n  @media (min-width: @screen-md-min) {\n    margin-left: percentage((@columns / @grid-columns));\n  }\n}\n.make-md-column-push(@columns) {\n  @media (min-width: @screen-md-min) {\n    left: percentage((@columns / @grid-columns));\n  }\n}\n.make-md-column-pull(@columns) {\n  @media (min-width: @screen-md-min) {\n    right: percentage((@columns / @grid-columns));\n  }\n}\n\n\n// Generate the large columns\n.make-lg-column(@columns; @gutter: @grid-gutter-width) {\n  position: relative;\n  min-height: 1px;\n  padding-left:  (@gutter / 2);\n  padding-right: (@gutter / 2);\n\n  @media (min-width: @screen-lg-min) {\n    float: left;\n  
  width: percentage((@columns / @grid-columns));\n  }\n}\n.make-lg-column-offset(@columns) {\n  @media (min-width: @screen-lg-min) {\n    margin-left: percentage((@columns / @grid-columns));\n  }\n}\n.make-lg-column-push(@columns) {\n  @media (min-width: @screen-lg-min) {\n    left: percentage((@columns / @grid-columns));\n  }\n}\n.make-lg-column-pull(@columns) {\n  @media (min-width: @screen-lg-min) {\n    right: percentage((@columns / @grid-columns));\n  }\n}\n\n\n// Framework grid generation\n//\n// Used only by Bootstrap to generate the correct number of grid classes given\n// any value of `@grid-columns`.\n\n.make-grid-columns() {\n  // Common styles for all sizes of grid columns, widths 1-12\n  .col(@index) when (@index = 1) { // initial\n    @item: ~\".col-xs-@{index}, .col-sm-@{index}, .col-md-@{index}, .col-lg-@{index}\";\n    .col((@index + 1), @item);\n  }\n  .col(@index, @list) when (@index =< @grid-columns) { // general; \"=<\" isn't a typo\n    @item: ~\".col-xs-@{index}, .col-sm-@{index}, .col-md-@{index}, .col-lg-@{index}\";\n    .col((@index + 1), ~\"@{list}, @{item}\");\n  }\n  .col(@index, @list) when (@index > @grid-columns) { // terminal\n    @{list} {\n      position: relative;\n      // Prevent columns from collapsing when empty\n      min-height: 1px;\n      // Inner gutter via padding\n      padding-left:  (@grid-gutter-width / 2);\n      padding-right: (@grid-gutter-width / 2);\n    }\n  }\n  .col(1); // kickstart it\n}\n\n.make-grid-columns-float(@class) {\n  .col(@index) when (@index = 1) { // initial\n    @item: ~\".col-@{class}-@{index}\";\n    .col((@index + 1), @item);\n  }\n  .col(@index, @list) when (@index =< @grid-columns) { // general\n    @item: ~\".col-@{class}-@{index}\";\n    .col((@index + 1), ~\"@{list}, @{item}\");\n  }\n  .col(@index, @list) when (@index > @grid-columns) { // terminal\n    @{list} {\n      float: left;\n    }\n  }\n  .col(1); // kickstart it\n}\n\n.calc-grid(@index, @class, @type) when (@type = width) 
and (@index > 0) {\n  .col-@{class}-@{index} {\n    width: percentage((@index / @grid-columns));\n  }\n}\n.calc-grid(@index, @class, @type) when (@type = push) {\n  .col-@{class}-push-@{index} {\n    left: percentage((@index / @grid-columns));\n  }\n}\n.calc-grid(@index, @class, @type) when (@type = pull) {\n  .col-@{class}-pull-@{index} {\n    right: percentage((@index / @grid-columns));\n  }\n}\n.calc-grid(@index, @class, @type) when (@type = offset) {\n  .col-@{class}-offset-@{index} {\n    margin-left: percentage((@index / @grid-columns));\n  }\n}\n\n// Basic looping in LESS\n.make-grid(@index, @class, @type) when (@index >= 0) {\n  .calc-grid(@index, @class, @type);\n  // next iteration\n  .make-grid((@index - 1), @class, @type);\n}\n\n\n// Form validation states\n//\n// Used in forms.less to generate the form validation CSS for warnings, errors,\n// and successes.\n\n.form-control-validation(@text-color: #555; @border-color: #ccc; @background-color: #f5f5f5) {\n  // Color the label and help text\n  .help-block,\n  .control-label,\n  .radio,\n  .checkbox,\n  .radio-inline,\n  .checkbox-inline  {\n    color: @text-color;\n  }\n  // Set the border and box shadow on specific inputs to match\n  .form-control {\n    border-color: @border-color;\n    .box-shadow(inset 0 1px 1px rgba(0,0,0,.075)); // Redeclare so transitions work\n    &:focus {\n      border-color: darken(@border-color, 10%);\n      @shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 6px lighten(@border-color, 20%);\n      .box-shadow(@shadow);\n    }\n  }\n  // Set validation states also for addons\n  .input-group-addon {\n    color: @text-color;\n    border-color: @border-color;\n    background-color: @background-color;\n  }\n  // Optional feedback icon\n  .form-control-feedback {\n    color: @text-color;\n  }\n}\n\n// Form control focus state\n//\n// Generate a customized focus state and for any input with the specified color,\n// which defaults to the `@input-focus-border` variable.\n//\n// We highly 
encourage you to not customize the default value, but instead use\n// this to tweak colors on an as-needed basis. This aesthetic change is based on\n// WebKit's default styles, but applicable to a wider range of browsers. Its\n// usability and accessibility should be taken into account with any change.\n//\n// Example usage: change the default blue border and shadow to white for better\n// contrast against a dark gray background.\n\n.form-control-focus(@color: @input-border-focus) {\n  @color-rgba: rgba(red(@color), green(@color), blue(@color), .6);\n  &:focus {\n    border-color: @color;\n    outline: 0;\n    .box-shadow(~\"inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px @{color-rgba}\");\n  }\n}\n\n// Form control sizing\n//\n// Relative text size, padding, and border-radii changes for form controls. For\n// horizontal sizing, wrap controls in the predefined grid classes. `<select>`\n// element gets special love because it's special, and that's a fact!\n\n.input-size(@input-height; @padding-vertical; @padding-horizontal; @font-size; @line-height; @border-radius) {\n  height: @input-height;\n  padding: @padding-vertical @padding-horizontal;\n  font-size: @font-size;\n  line-height: @line-height;\n  border-radius: @border-radius;\n\n  select& {\n    height: @input-height;\n    line-height: @input-height;\n  }\n\n  textarea&,\n  select[multiple]& {\n    height: auto;\n  }\n}\n"]}
\ No newline at end of file
diff --git a/doc/css/bootstrap-theme.min.css b/doc/css/bootstrap-theme.min.css
new file mode 100644 (file)
index 0000000..cff38df
--- /dev/null
@@ -0,0 +1,7 @@
+/*!
+ * Bootstrap v3.1.0 (http://getbootstrap.com)
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ */
+
+.btn-default,.btn-primary,.btn-success,.btn-info,.btn-warning,.btn-danger{text-shadow:0 -1px 0 rgba(0,0,0,.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 1px rgba(0,0,0,.075)}.btn-default:active,.btn-primary:active,.btn-success:active,.btn-info:active,.btn-warning:active,.btn-danger:active,.btn-default.active,.btn-primary.active,.btn-success.active,.btn-info.active,.btn-warning.active,.btn-danger.active{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn:active,.btn.active{background-image:none}.btn-default{background-image:-webkit-linear-gradient(top,#fff 0,#e0e0e0 100%);background-image:linear-gradient(to bottom,#fff 0,#e0e0e0 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#dbdbdb;text-shadow:0 1px 0 #fff;border-color:#ccc}.btn-default:hover,.btn-default:focus{background-color:#e0e0e0;background-position:0 -15px}.btn-default:active,.btn-default.active{background-color:#e0e0e0;border-color:#dbdbdb}.btn-primary{background-image:-webkit-linear-gradient(top,#428bca 0,#2d6ca2 100%);background-image:linear-gradient(to bottom,#428bca 0,#2d6ca2 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff2d6ca2', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#2b669a}.btn-primary:hover,.btn-primary:focus{background-color:#2d6ca2;background-position:0 -15px}.btn-primary:active,.btn-primary.active{background-color:#2d6ca2;border-color:#2b669a}.btn-success{background-image:-webkit-linear-gradient(top,#5cb85c 0,#419641 100%);background-image:linear-gradient(to bottom,#5cb85c 0,#419641 
100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#3e8f3e}.btn-success:hover,.btn-success:focus{background-color:#419641;background-position:0 -15px}.btn-success:active,.btn-success.active{background-color:#419641;border-color:#3e8f3e}.btn-info{background-image:-webkit-linear-gradient(top,#5bc0de 0,#2aabd2 100%);background-image:linear-gradient(to bottom,#5bc0de 0,#2aabd2 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#28a4c9}.btn-info:hover,.btn-info:focus{background-color:#2aabd2;background-position:0 -15px}.btn-info:active,.btn-info.active{background-color:#2aabd2;border-color:#28a4c9}.btn-warning{background-image:-webkit-linear-gradient(top,#f0ad4e 0,#eb9316 100%);background-image:linear-gradient(to bottom,#f0ad4e 0,#eb9316 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#e38d13}.btn-warning:hover,.btn-warning:focus{background-color:#eb9316;background-position:0 -15px}.btn-warning:active,.btn-warning.active{background-color:#eb9316;border-color:#e38d13}.btn-danger{background-image:-webkit-linear-gradient(top,#d9534f 0,#c12e2a 100%);background-image:linear-gradient(to bottom,#d9534f 0,#c12e2a 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);background-repeat:repeat-x;border-color:#b92c28}.btn-danger:hover,.btn-danger:focus{background-color:#c12e2a;background-position:0 
-15px}.btn-danger:active,.btn-danger.active{background-color:#c12e2a;border-color:#b92c28}.thumbnail,.img-thumbnail{-webkit-box-shadow:0 1px 2px rgba(0,0,0,.075);box-shadow:0 1px 2px rgba(0,0,0,.075)}.dropdown-menu>li>a:hover,.dropdown-menu>li>a:focus{background-image:-webkit-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);background-color:#e8e8e8}.dropdown-menu>.active>a,.dropdown-menu>.active>a:hover,.dropdown-menu>.active>a:focus{background-image:-webkit-linear-gradient(top,#428bca 0,#357ebd 100%);background-image:linear-gradient(to bottom,#428bca 0,#357ebd 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', GradientType=0);background-color:#357ebd}.navbar-default{background-image:-webkit-linear-gradient(top,#fff 0,#f8f8f8 100%);background-image:linear-gradient(to bottom,#fff 0,#f8f8f8 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);border-radius:4px;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 5px rgba(0,0,0,.075);box-shadow:inset 0 1px 0 rgba(255,255,255,.15),0 1px 5px rgba(0,0,0,.075)}.navbar-default .navbar-nav>.active>a{background-image:-webkit-linear-gradient(top,#ebebeb 0,#f3f3f3 100%);background-image:linear-gradient(to bottom,#ebebeb 0,#f3f3f3 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff3f3f3', GradientType=0);-webkit-box-shadow:inset 0 3px 9px rgba(0,0,0,.075);box-shadow:inset 0 3px 9px rgba(0,0,0,.075)}.navbar-brand,.navbar-nav>li>a{text-shadow:0 1px 0 
rgba(255,255,255,.25)}.navbar-inverse{background-image:-webkit-linear-gradient(top,#3c3c3c 0,#222 100%);background-image:linear-gradient(to bottom,#3c3c3c 0,#222 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.navbar-inverse .navbar-nav>.active>a{background-image:-webkit-linear-gradient(top,#222 0,#282828 100%);background-image:linear-gradient(to bottom,#222 0,#282828 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff222222', endColorstr='#ff282828', GradientType=0);-webkit-box-shadow:inset 0 3px 9px rgba(0,0,0,.25);box-shadow:inset 0 3px 9px rgba(0,0,0,.25)}.navbar-inverse .navbar-brand,.navbar-inverse .navbar-nav>li>a{text-shadow:0 -1px 0 rgba(0,0,0,.25)}.navbar-static-top,.navbar-fixed-top,.navbar-fixed-bottom{border-radius:0}.alert{text-shadow:0 1px 0 rgba(255,255,255,.2);-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.25),0 1px 2px rgba(0,0,0,.05);box-shadow:inset 0 1px 0 rgba(255,255,255,.25),0 1px 2px rgba(0,0,0,.05)}.alert-success{background-image:-webkit-linear-gradient(top,#dff0d8 0,#c8e5bc 100%);background-image:linear-gradient(to bottom,#dff0d8 0,#c8e5bc 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);border-color:#b2dba1}.alert-info{background-image:-webkit-linear-gradient(top,#d9edf7 0,#b9def0 100%);background-image:linear-gradient(to bottom,#d9edf7 0,#b9def0 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);border-color:#9acfea}.alert-warning{background-image:-webkit-linear-gradient(top,#fcf8e3 0,#f8efc0 100%);background-image:linear-gradient(to bottom,#fcf8e3 0,#f8efc0 
100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);border-color:#f5e79e}.alert-danger{background-image:-webkit-linear-gradient(top,#f2dede 0,#e7c3c3 100%);background-image:linear-gradient(to bottom,#f2dede 0,#e7c3c3 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);border-color:#dca7a7}.progress{background-image:-webkit-linear-gradient(top,#ebebeb 0,#f5f5f5 100%);background-image:linear-gradient(to bottom,#ebebeb 0,#f5f5f5 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0)}.progress-bar{background-image:-webkit-linear-gradient(top,#428bca 0,#3071a9 100%);background-image:linear-gradient(to bottom,#428bca 0,#3071a9 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3071a9', GradientType=0)}.progress-bar-success{background-image:-webkit-linear-gradient(top,#5cb85c 0,#449d44 100%);background-image:linear-gradient(to bottom,#5cb85c 0,#449d44 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0)}.progress-bar-info{background-image:-webkit-linear-gradient(top,#5bc0de 0,#31b0d5 100%);background-image:linear-gradient(to bottom,#5bc0de 0,#31b0d5 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0)}.progress-bar-warning{background-image:-webkit-linear-gradient(top,#f0ad4e 0,#ec971f 100%);background-image:linear-gradient(to bottom,#f0ad4e 0,#ec971f 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', 
GradientType=0)}.progress-bar-danger{background-image:-webkit-linear-gradient(top,#d9534f 0,#c9302c 100%);background-image:linear-gradient(to bottom,#d9534f 0,#c9302c 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0)}.list-group{border-radius:4px;-webkit-box-shadow:0 1px 2px rgba(0,0,0,.075);box-shadow:0 1px 2px rgba(0,0,0,.075)}.list-group-item.active,.list-group-item.active:hover,.list-group-item.active:focus{text-shadow:0 -1px 0 #3071a9;background-image:-webkit-linear-gradient(top,#428bca 0,#3278b3 100%);background-image:linear-gradient(to bottom,#428bca 0,#3278b3 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3278b3', GradientType=0);border-color:#3278b3}.panel{-webkit-box-shadow:0 1px 2px rgba(0,0,0,.05);box-shadow:0 1px 2px rgba(0,0,0,.05)}.panel-default>.panel-heading{background-image:-webkit-linear-gradient(top,#f5f5f5 0,#e8e8e8 100%);background-image:linear-gradient(to bottom,#f5f5f5 0,#e8e8e8 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0)}.panel-primary>.panel-heading{background-image:-webkit-linear-gradient(top,#428bca 0,#357ebd 100%);background-image:linear-gradient(to bottom,#428bca 0,#357ebd 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', GradientType=0)}.panel-success>.panel-heading{background-image:-webkit-linear-gradient(top,#dff0d8 0,#d0e9c6 100%);background-image:linear-gradient(to bottom,#dff0d8 0,#d0e9c6 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0)}.panel-info>.panel-heading{background-image:-webkit-linear-gradient(top,#d9edf7 0,#c4e3f3 
100%);background-image:linear-gradient(to bottom,#d9edf7 0,#c4e3f3 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0)}.panel-warning>.panel-heading{background-image:-webkit-linear-gradient(top,#fcf8e3 0,#faf2cc 100%);background-image:linear-gradient(to bottom,#fcf8e3 0,#faf2cc 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0)}.panel-danger>.panel-heading{background-image:-webkit-linear-gradient(top,#f2dede 0,#ebcccc 100%);background-image:linear-gradient(to bottom,#f2dede 0,#ebcccc 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0)}.well{background-image:-webkit-linear-gradient(top,#e8e8e8 0,#f5f5f5 100%);background-image:linear-gradient(to bottom,#e8e8e8 0,#f5f5f5 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);border-color:#dcdcdc;-webkit-box-shadow:inset 0 1px 3px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 3px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1)}
\ No newline at end of file
diff --git a/doc/css/bootstrap.css b/doc/css/bootstrap.css
new file mode 100644 (file)
index 0000000..16b635c
--- /dev/null
@@ -0,0 +1,5835 @@
+/*!
+ * Bootstrap v3.1.0 (http://getbootstrap.com)
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ */
+
+/*! normalize.css v3.0.0 | MIT License | git.io/normalize */
+html {
+  font-family: sans-serif;
+  -webkit-text-size-adjust: 100%;
+      -ms-text-size-adjust: 100%;
+}
+body {
+  margin: 0;
+}
+article,
+aside,
+details,
+figcaption,
+figure,
+footer,
+header,
+hgroup,
+main,
+nav,
+section,
+summary {
+  display: block;
+}
+audio,
+canvas,
+progress,
+video {
+  display: inline-block;
+  vertical-align: baseline;
+}
+audio:not([controls]) {
+  display: none;
+  height: 0;
+}
+[hidden],
+template {
+  display: none;
+}
+a {
+  background: transparent;
+}
+a:active,
+a:hover {
+  outline: 0;
+}
+abbr[title] {
+  border-bottom: 1px dotted;
+}
+b,
+strong {
+  font-weight: bold;
+}
+dfn {
+  font-style: italic;
+}
+h1 {
+  margin: .67em 0;
+  font-size: 2em;
+}
+mark {
+  color: #000;
+  background: #ff0;
+}
+small {
+  font-size: 80%;
+}
+sub,
+sup {
+  position: relative;
+  font-size: 75%;
+  line-height: 0;
+  vertical-align: baseline;
+}
+sup {
+  top: -.5em;
+}
+sub {
+  bottom: -.25em;
+}
+img {
+  border: 0;
+}
+svg:not(:root) {
+  overflow: hidden;
+}
+figure {
+  margin: 1em 40px;
+}
+hr {
+  height: 0;
+  -moz-box-sizing: content-box;
+       box-sizing: content-box;
+}
+pre {
+  overflow: auto;
+}
+code,
+kbd,
+pre,
+samp {
+  font-family: monospace, monospace;
+  font-size: 1em;
+}
+button,
+input,
+optgroup,
+select,
+textarea {
+  margin: 0;
+  font: inherit;
+  color: inherit;
+}
+button {
+  overflow: visible;
+}
+button,
+select {
+  text-transform: none;
+}
+button,
+html input[type="button"],
+input[type="reset"],
+input[type="submit"] {
+  -webkit-appearance: button;
+  cursor: pointer;
+}
+button[disabled],
+html input[disabled] {
+  cursor: default;
+}
+button::-moz-focus-inner,
+input::-moz-focus-inner {
+  padding: 0;
+  border: 0;
+}
+input {
+  line-height: normal;
+}
+input[type="checkbox"],
+input[type="radio"] {
+  box-sizing: border-box;
+  padding: 0;
+}
+input[type="number"]::-webkit-inner-spin-button,
+input[type="number"]::-webkit-outer-spin-button {
+  height: auto;
+}
+input[type="search"] {
+  -webkit-box-sizing: content-box;
+     -moz-box-sizing: content-box;
+          box-sizing: content-box;
+  -webkit-appearance: textfield;
+}
+input[type="search"]::-webkit-search-cancel-button,
+input[type="search"]::-webkit-search-decoration {
+  -webkit-appearance: none;
+}
+fieldset {
+  padding: .35em .625em .75em;
+  margin: 0 2px;
+  border: 1px solid #c0c0c0;
+}
+legend {
+  padding: 0;
+  border: 0;
+}
+textarea {
+  overflow: auto;
+}
+optgroup {
+  font-weight: bold;
+}
+table {
+  border-spacing: 0;
+  border-collapse: collapse;
+}
+td,
+th {
+  padding: 0;
+}
+@media print {
+  * {
+    color: #000 !important;
+    text-shadow: none !important;
+    background: transparent !important;
+    box-shadow: none !important;
+  }
+  a,
+  a:visited {
+    text-decoration: underline;
+  }
+  a[href]:after {
+    content: " (" attr(href) ")";
+  }
+  abbr[title]:after {
+    content: " (" attr(title) ")";
+  }
+  a[href^="javascript:"]:after,
+  a[href^="#"]:after {
+    content: "";
+  }
+  pre,
+  blockquote {
+    border: 1px solid #999;
+
+    page-break-inside: avoid;
+  }
+  thead {
+    display: table-header-group;
+  }
+  tr,
+  img {
+    page-break-inside: avoid;
+  }
+  img {
+    max-width: 100% !important;
+  }
+  p,
+  h2,
+  h3 {
+    orphans: 3;
+    widows: 3;
+  }
+  h2,
+  h3 {
+    page-break-after: avoid;
+  }
+  select {
+    background: #fff !important;
+  }
+  .navbar {
+    display: none;
+  }
+  .table td,
+  .table th {
+    background-color: #fff !important;
+  }
+  .btn > .caret,
+  .dropup > .btn > .caret {
+    border-top-color: #000 !important;
+  }
+  .label {
+    border: 1px solid #000;
+  }
+  .table {
+    border-collapse: collapse !important;
+  }
+  .table-bordered th,
+  .table-bordered td {
+    border: 1px solid #ddd !important;
+  }
+}
+* {
+  -webkit-box-sizing: border-box;
+     -moz-box-sizing: border-box;
+          box-sizing: border-box;
+}
+*:before,
+*:after {
+  -webkit-box-sizing: border-box;
+     -moz-box-sizing: border-box;
+          box-sizing: border-box;
+}
+html {
+  font-size: 62.5%;
+
+  -webkit-tap-highlight-color: rgba(0, 0, 0, 0);
+}
+body {
+  font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
+  font-size: 14px;
+  line-height: 1.428571429;
+  color: #333;
+  background-color: #fff;
+}
+input,
+button,
+select,
+textarea {
+  font-family: inherit;
+  font-size: inherit;
+  line-height: inherit;
+}
+a {
+  color: #428bca;
+  text-decoration: none;
+}
+a:hover,
+a:focus {
+  color: #2a6496;
+  text-decoration: underline;
+}
+a:focus {
+  outline: thin dotted;
+  outline: 5px auto -webkit-focus-ring-color;
+  outline-offset: -2px;
+}
+figure {
+  margin: 0;
+}
+img {
+  vertical-align: middle;
+}
+.img-responsive {
+  display: block;
+  max-width: 100%;
+  height: auto;
+}
+.img-rounded {
+  border-radius: 6px;
+}
+.img-thumbnail {
+  display: inline-block;
+  max-width: 100%;
+  height: auto;
+  padding: 4px;
+  line-height: 1.428571429;
+  background-color: #fff;
+  border: 1px solid #ddd;
+  border-radius: 4px;
+  -webkit-transition: all .2s ease-in-out;
+          transition: all .2s ease-in-out;
+}
+.img-circle {
+  border-radius: 50%;
+}
+hr {
+  margin-top: 20px;
+  margin-bottom: 20px;
+  border: 0;
+  border-top: 1px solid #eee;
+}
+.sr-only {
+  position: absolute;
+  width: 1px;
+  height: 1px;
+  padding: 0;
+  margin: -1px;
+  overflow: hidden;
+  clip: rect(0, 0, 0, 0);
+  border: 0;
+}
+h1,
+h2,
+h3,
+h4,
+h5,
+h6,
+.h1,
+.h2,
+.h3,
+.h4,
+.h5,
+.h6 {
+  font-family: inherit;
+  font-weight: 500;
+  line-height: 1.1;
+  color: inherit;
+}
+h1 small,
+h2 small,
+h3 small,
+h4 small,
+h5 small,
+h6 small,
+.h1 small,
+.h2 small,
+.h3 small,
+.h4 small,
+.h5 small,
+.h6 small,
+h1 .small,
+h2 .small,
+h3 .small,
+h4 .small,
+h5 .small,
+h6 .small,
+.h1 .small,
+.h2 .small,
+.h3 .small,
+.h4 .small,
+.h5 .small,
+.h6 .small {
+  font-weight: normal;
+  line-height: 1;
+  color: #999;
+}
+h1,
+.h1,
+h2,
+.h2,
+h3,
+.h3 {
+  margin-top: 20px;
+  margin-bottom: 10px;
+}
+h1 small,
+.h1 small,
+h2 small,
+.h2 small,
+h3 small,
+.h3 small,
+h1 .small,
+.h1 .small,
+h2 .small,
+.h2 .small,
+h3 .small,
+.h3 .small {
+  font-size: 65%;
+}
+h4,
+.h4,
+h5,
+.h5,
+h6,
+.h6 {
+  margin-top: 10px;
+  margin-bottom: 10px;
+}
+h4 small,
+.h4 small,
+h5 small,
+.h5 small,
+h6 small,
+.h6 small,
+h4 .small,
+.h4 .small,
+h5 .small,
+.h5 .small,
+h6 .small,
+.h6 .small {
+  font-size: 75%;
+}
+h1,
+.h1 {
+  font-size: 36px;
+}
+h2,
+.h2 {
+  font-size: 30px;
+}
+h3,
+.h3 {
+  font-size: 24px;
+}
+h4,
+.h4 {
+  font-size: 18px;
+}
+h5,
+.h5 {
+  font-size: 14px;
+}
+h6,
+.h6 {
+  font-size: 12px;
+}
+p {
+  margin: 0 0 10px;
+}
+.lead {
+  margin-bottom: 20px;
+  font-size: 16px;
+  font-weight: 200;
+  line-height: 1.4;
+}
+@media (min-width: 768px) {
+  .lead {
+    font-size: 21px;
+  }
+}
+small,
+.small {
+  font-size: 85%;
+}
+cite {
+  font-style: normal;
+}
+.text-left {
+  text-align: left;
+}
+.text-right {
+  text-align: right;
+}
+.text-center {
+  text-align: center;
+}
+.text-justify {
+  text-align: justify;
+}
+.text-muted {
+  color: #999;
+}
+.text-primary {
+  color: #428bca;
+}
+a.text-primary:hover {
+  color: #3071a9;
+}
+.text-success {
+  color: #3c763d;
+}
+a.text-success:hover {
+  color: #2b542c;
+}
+.text-info {
+  color: #31708f;
+}
+a.text-info:hover {
+  color: #245269;
+}
+.text-warning {
+  color: #8a6d3b;
+}
+a.text-warning:hover {
+  color: #66512c;
+}
+.text-danger {
+  color: #a94442;
+}
+a.text-danger:hover {
+  color: #843534;
+}
+.bg-primary {
+  color: #fff;
+  background-color: #428bca;
+}
+a.bg-primary:hover {
+  background-color: #3071a9;
+}
+.bg-success {
+  background-color: #dff0d8;
+}
+a.bg-success:hover {
+  background-color: #c1e2b3;
+}
+.bg-info {
+  background-color: #d9edf7;
+}
+a.bg-info:hover {
+  background-color: #afd9ee;
+}
+.bg-warning {
+  background-color: #fcf8e3;
+}
+a.bg-warning:hover {
+  background-color: #f7ecb5;
+}
+.bg-danger {
+  background-color: #f2dede;
+}
+a.bg-danger:hover {
+  background-color: #e4b9b9;
+}
+.page-header {
+  padding-bottom: 9px;
+  margin: 40px 0 20px;
+  border-bottom: 1px solid #eee;
+}
+ul,
+ol {
+  margin-top: 0;
+  margin-bottom: 10px;
+}
+ul ul,
+ol ul,
+ul ol,
+ol ol {
+  margin-bottom: 0;
+}
+.list-unstyled {
+  padding-left: 0;
+  list-style: none;
+}
+.list-inline {
+  padding-left: 0;
+  list-style: none;
+}
+.list-inline > li {
+  display: inline-block;
+  padding-right: 5px;
+  padding-left: 5px;
+}
+.list-inline > li:first-child {
+  padding-left: 0;
+}
+dl {
+  margin-top: 0;
+  margin-bottom: 20px;
+}
+dt,
+dd {
+  line-height: 1.428571429;
+}
+dt {
+  font-weight: bold;
+}
+dd {
+  margin-left: 0;
+}
+@media (min-width: 768px) {
+  .dl-horizontal dt {
+    float: left;
+    width: 160px;
+    overflow: hidden;
+    clear: left;
+    text-align: right;
+    text-overflow: ellipsis;
+    white-space: nowrap;
+  }
+  .dl-horizontal dd {
+    margin-left: 180px;
+  }
+}
+abbr[title],
+abbr[data-original-title] {
+  cursor: help;
+  border-bottom: 1px dotted #999;
+}
+.initialism {
+  font-size: 90%;
+  text-transform: uppercase;
+}
+blockquote {
+  padding: 10px 20px;
+  margin: 0 0 20px;
+  font-size: 17.5px;
+  border-left: 5px solid #eee;
+}
+blockquote p:last-child,
+blockquote ul:last-child,
+blockquote ol:last-child {
+  margin-bottom: 0;
+}
+blockquote footer,
+blockquote small,
+blockquote .small {
+  display: block;
+  font-size: 80%;
+  line-height: 1.428571429;
+  color: #999;
+}
+blockquote footer:before,
+blockquote small:before,
+blockquote .small:before {
+  content: '\2014 \00A0';
+}
+.blockquote-reverse,
+blockquote.pull-right {
+  padding-right: 15px;
+  padding-left: 0;
+  text-align: right;
+  border-right: 5px solid #eee;
+  border-left: 0;
+}
+.blockquote-reverse footer:before,
+blockquote.pull-right footer:before,
+.blockquote-reverse small:before,
+blockquote.pull-right small:before,
+.blockquote-reverse .small:before,
+blockquote.pull-right .small:before {
+  content: '';
+}
+.blockquote-reverse footer:after,
+blockquote.pull-right footer:after,
+.blockquote-reverse small:after,
+blockquote.pull-right small:after,
+.blockquote-reverse .small:after,
+blockquote.pull-right .small:after {
+  content: '\00A0 \2014';
+}
+blockquote:before,
+blockquote:after {
+  content: "";
+}
+address {
+  margin-bottom: 20px;
+  font-style: normal;
+  line-height: 1.428571429;
+}
+code,
+kbd,
+pre,
+samp {
+  font-family: Menlo, Monaco, Consolas, "Courier New", monospace;
+}
+code {
+  padding: 2px 4px;
+  font-size: 90%;
+  color: #c7254e;
+  white-space: nowrap;
+  background-color: #f9f2f4;
+  border-radius: 4px;
+}
+kbd {
+  padding: 2px 4px;
+  font-size: 90%;
+  color: #fff;
+  background-color: #333;
+  border-radius: 3px;
+  box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .25);
+}
+pre {
+  display: block;
+  padding: 9.5px;
+  margin: 0 0 10px;
+  font-size: 13px;
+  line-height: 1.428571429;
+  color: #333;
+  word-break: break-all;
+  word-wrap: break-word;
+  background-color: #f5f5f5;
+  border: 1px solid #ccc;
+  border-radius: 4px;
+}
+pre code {
+  padding: 0;
+  font-size: inherit;
+  color: inherit;
+  white-space: pre-wrap;
+  background-color: transparent;
+  border-radius: 0;
+}
+.pre-scrollable {
+  max-height: 340px;
+  overflow-y: scroll;
+}
+.container {
+  padding-right: 15px;
+  padding-left: 15px;
+  margin-right: auto;
+  margin-left: auto;
+}
+@media (min-width: 768px) {
+  .container {
+    width: 750px;
+  }
+}
+@media (min-width: 992px) {
+  .container {
+    width: 970px;
+  }
+}
+@media (min-width: 1200px) {
+  .container {
+    width: 1170px;
+  }
+}
+.container-fluid {
+  padding-right: 15px;
+  padding-left: 15px;
+  margin-right: auto;
+  margin-left: auto;
+}
+.row {
+  margin-right: -15px;
+  margin-left: -15px;
+}
+.col-xs-1, .col-sm-1, .col-md-1, .col-lg-1, .col-xs-2, .col-sm-2, .col-md-2, .col-lg-2, .col-xs-3, .col-sm-3, .col-md-3, .col-lg-3, .col-xs-4, .col-sm-4, .col-md-4, .col-lg-4, .col-xs-5, .col-sm-5, .col-md-5, .col-lg-5, .col-xs-6, .col-sm-6, .col-md-6, .col-lg-6, .col-xs-7, .col-sm-7, .col-md-7, .col-lg-7, .col-xs-8, .col-sm-8, .col-md-8, .col-lg-8, .col-xs-9, .col-sm-9, .col-md-9, .col-lg-9, .col-xs-10, .col-sm-10, .col-md-10, .col-lg-10, .col-xs-11, .col-sm-11, .col-md-11, .col-lg-11, .col-xs-12, .col-sm-12, .col-md-12, .col-lg-12 {
+  position: relative;
+  min-height: 1px;
+  padding-right: 15px;
+  padding-left: 15px;
+}
+.col-xs-1, .col-xs-2, .col-xs-3, .col-xs-4, .col-xs-5, .col-xs-6, .col-xs-7, .col-xs-8, .col-xs-9, .col-xs-10, .col-xs-11, .col-xs-12 {
+  float: left;
+}
+.col-xs-12 {
+  width: 100%;
+}
+.col-xs-11 {
+  width: 91.66666666666666%;
+}
+.col-xs-10 {
+  width: 83.33333333333334%;
+}
+.col-xs-9 {
+  width: 75%;
+}
+.col-xs-8 {
+  width: 66.66666666666666%;
+}
+.col-xs-7 {
+  width: 58.333333333333336%;
+}
+.col-xs-6 {
+  width: 50%;
+}
+.col-xs-5 {
+  width: 41.66666666666667%;
+}
+.col-xs-4 {
+  width: 33.33333333333333%;
+}
+.col-xs-3 {
+  width: 25%;
+}
+.col-xs-2 {
+  width: 16.666666666666664%;
+}
+.col-xs-1 {
+  width: 8.333333333333332%;
+}
+.col-xs-pull-12 {
+  right: 100%;
+}
+.col-xs-pull-11 {
+  right: 91.66666666666666%;
+}
+.col-xs-pull-10 {
+  right: 83.33333333333334%;
+}
+.col-xs-pull-9 {
+  right: 75%;
+}
+.col-xs-pull-8 {
+  right: 66.66666666666666%;
+}
+.col-xs-pull-7 {
+  right: 58.333333333333336%;
+}
+.col-xs-pull-6 {
+  right: 50%;
+}
+.col-xs-pull-5 {
+  right: 41.66666666666667%;
+}
+.col-xs-pull-4 {
+  right: 33.33333333333333%;
+}
+.col-xs-pull-3 {
+  right: 25%;
+}
+.col-xs-pull-2 {
+  right: 16.666666666666664%;
+}
+.col-xs-pull-1 {
+  right: 8.333333333333332%;
+}
+.col-xs-pull-0 {
+  right: 0;
+}
+.col-xs-push-12 {
+  left: 100%;
+}
+.col-xs-push-11 {
+  left: 91.66666666666666%;
+}
+.col-xs-push-10 {
+  left: 83.33333333333334%;
+}
+.col-xs-push-9 {
+  left: 75%;
+}
+.col-xs-push-8 {
+  left: 66.66666666666666%;
+}
+.col-xs-push-7 {
+  left: 58.333333333333336%;
+}
+.col-xs-push-6 {
+  left: 50%;
+}
+.col-xs-push-5 {
+  left: 41.66666666666667%;
+}
+.col-xs-push-4 {
+  left: 33.33333333333333%;
+}
+.col-xs-push-3 {
+  left: 25%;
+}
+.col-xs-push-2 {
+  left: 16.666666666666664%;
+}
+.col-xs-push-1 {
+  left: 8.333333333333332%;
+}
+.col-xs-push-0 {
+  left: 0;
+}
+.col-xs-offset-12 {
+  margin-left: 100%;
+}
+.col-xs-offset-11 {
+  margin-left: 91.66666666666666%;
+}
+.col-xs-offset-10 {
+  margin-left: 83.33333333333334%;
+}
+.col-xs-offset-9 {
+  margin-left: 75%;
+}
+.col-xs-offset-8 {
+  margin-left: 66.66666666666666%;
+}
+.col-xs-offset-7 {
+  margin-left: 58.333333333333336%;
+}
+.col-xs-offset-6 {
+  margin-left: 50%;
+}
+.col-xs-offset-5 {
+  margin-left: 41.66666666666667%;
+}
+.col-xs-offset-4 {
+  margin-left: 33.33333333333333%;
+}
+.col-xs-offset-3 {
+  margin-left: 25%;
+}
+.col-xs-offset-2 {
+  margin-left: 16.666666666666664%;
+}
+.col-xs-offset-1 {
+  margin-left: 8.333333333333332%;
+}
+.col-xs-offset-0 {
+  margin-left: 0;
+}
+@media (min-width: 768px) {
+  .col-sm-1, .col-sm-2, .col-sm-3, .col-sm-4, .col-sm-5, .col-sm-6, .col-sm-7, .col-sm-8, .col-sm-9, .col-sm-10, .col-sm-11, .col-sm-12 {
+    float: left;
+  }
+  .col-sm-12 {
+    width: 100%;
+  }
+  .col-sm-11 {
+    width: 91.66666666666666%;
+  }
+  .col-sm-10 {
+    width: 83.33333333333334%;
+  }
+  .col-sm-9 {
+    width: 75%;
+  }
+  .col-sm-8 {
+    width: 66.66666666666666%;
+  }
+  .col-sm-7 {
+    width: 58.333333333333336%;
+  }
+  .col-sm-6 {
+    width: 50%;
+  }
+  .col-sm-5 {
+    width: 41.66666666666667%;
+  }
+  .col-sm-4 {
+    width: 33.33333333333333%;
+  }
+  .col-sm-3 {
+    width: 25%;
+  }
+  .col-sm-2 {
+    width: 16.666666666666664%;
+  }
+  .col-sm-1 {
+    width: 8.333333333333332%;
+  }
+  .col-sm-pull-12 {
+    right: 100%;
+  }
+  .col-sm-pull-11 {
+    right: 91.66666666666666%;
+  }
+  .col-sm-pull-10 {
+    right: 83.33333333333334%;
+  }
+  .col-sm-pull-9 {
+    right: 75%;
+  }
+  .col-sm-pull-8 {
+    right: 66.66666666666666%;
+  }
+  .col-sm-pull-7 {
+    right: 58.333333333333336%;
+  }
+  .col-sm-pull-6 {
+    right: 50%;
+  }
+  .col-sm-pull-5 {
+    right: 41.66666666666667%;
+  }
+  .col-sm-pull-4 {
+    right: 33.33333333333333%;
+  }
+  .col-sm-pull-3 {
+    right: 25%;
+  }
+  .col-sm-pull-2 {
+    right: 16.666666666666664%;
+  }
+  .col-sm-pull-1 {
+    right: 8.333333333333332%;
+  }
+  .col-sm-pull-0 {
+    right: 0;
+  }
+  .col-sm-push-12 {
+    left: 100%;
+  }
+  .col-sm-push-11 {
+    left: 91.66666666666666%;
+  }
+  .col-sm-push-10 {
+    left: 83.33333333333334%;
+  }
+  .col-sm-push-9 {
+    left: 75%;
+  }
+  .col-sm-push-8 {
+    left: 66.66666666666666%;
+  }
+  .col-sm-push-7 {
+    left: 58.333333333333336%;
+  }
+  .col-sm-push-6 {
+    left: 50%;
+  }
+  .col-sm-push-5 {
+    left: 41.66666666666667%;
+  }
+  .col-sm-push-4 {
+    left: 33.33333333333333%;
+  }
+  .col-sm-push-3 {
+    left: 25%;
+  }
+  .col-sm-push-2 {
+    left: 16.666666666666664%;
+  }
+  .col-sm-push-1 {
+    left: 8.333333333333332%;
+  }
+  .col-sm-push-0 {
+    left: 0;
+  }
+  .col-sm-offset-12 {
+    margin-left: 100%;
+  }
+  .col-sm-offset-11 {
+    margin-left: 91.66666666666666%;
+  }
+  .col-sm-offset-10 {
+    margin-left: 83.33333333333334%;
+  }
+  .col-sm-offset-9 {
+    margin-left: 75%;
+  }
+  .col-sm-offset-8 {
+    margin-left: 66.66666666666666%;
+  }
+  .col-sm-offset-7 {
+    margin-left: 58.333333333333336%;
+  }
+  .col-sm-offset-6 {
+    margin-left: 50%;
+  }
+  .col-sm-offset-5 {
+    margin-left: 41.66666666666667%;
+  }
+  .col-sm-offset-4 {
+    margin-left: 33.33333333333333%;
+  }
+  .col-sm-offset-3 {
+    margin-left: 25%;
+  }
+  .col-sm-offset-2 {
+    margin-left: 16.666666666666664%;
+  }
+  .col-sm-offset-1 {
+    margin-left: 8.333333333333332%;
+  }
+  .col-sm-offset-0 {
+    margin-left: 0;
+  }
+}
+@media (min-width: 992px) {
+  .col-md-1, .col-md-2, .col-md-3, .col-md-4, .col-md-5, .col-md-6, .col-md-7, .col-md-8, .col-md-9, .col-md-10, .col-md-11, .col-md-12 {
+    float: left;
+  }
+  .col-md-12 {
+    width: 100%;
+  }
+  .col-md-11 {
+    width: 91.66666666666666%;
+  }
+  .col-md-10 {
+    width: 83.33333333333334%;
+  }
+  .col-md-9 {
+    width: 75%;
+  }
+  .col-md-8 {
+    width: 66.66666666666666%;
+  }
+  .col-md-7 {
+    width: 58.333333333333336%;
+  }
+  .col-md-6 {
+    width: 50%;
+  }
+  .col-md-5 {
+    width: 41.66666666666667%;
+  }
+  .col-md-4 {
+    width: 33.33333333333333%;
+  }
+  .col-md-3 {
+    width: 25%;
+  }
+  .col-md-2 {
+    width: 16.666666666666664%;
+  }
+  .col-md-1 {
+    width: 8.333333333333332%;
+  }
+  .col-md-pull-12 {
+    right: 100%;
+  }
+  .col-md-pull-11 {
+    right: 91.66666666666666%;
+  }
+  .col-md-pull-10 {
+    right: 83.33333333333334%;
+  }
+  .col-md-pull-9 {
+    right: 75%;
+  }
+  .col-md-pull-8 {
+    right: 66.66666666666666%;
+  }
+  .col-md-pull-7 {
+    right: 58.333333333333336%;
+  }
+  .col-md-pull-6 {
+    right: 50%;
+  }
+  .col-md-pull-5 {
+    right: 41.66666666666667%;
+  }
+  .col-md-pull-4 {
+    right: 33.33333333333333%;
+  }
+  .col-md-pull-3 {
+    right: 25%;
+  }
+  .col-md-pull-2 {
+    right: 16.666666666666664%;
+  }
+  .col-md-pull-1 {
+    right: 8.333333333333332%;
+  }
+  .col-md-pull-0 {
+    right: 0;
+  }
+  .col-md-push-12 {
+    left: 100%;
+  }
+  .col-md-push-11 {
+    left: 91.66666666666666%;
+  }
+  .col-md-push-10 {
+    left: 83.33333333333334%;
+  }
+  .col-md-push-9 {
+    left: 75%;
+  }
+  .col-md-push-8 {
+    left: 66.66666666666666%;
+  }
+  .col-md-push-7 {
+    left: 58.333333333333336%;
+  }
+  .col-md-push-6 {
+    left: 50%;
+  }
+  .col-md-push-5 {
+    left: 41.66666666666667%;
+  }
+  .col-md-push-4 {
+    left: 33.33333333333333%;
+  }
+  .col-md-push-3 {
+    left: 25%;
+  }
+  .col-md-push-2 {
+    left: 16.666666666666664%;
+  }
+  .col-md-push-1 {
+    left: 8.333333333333332%;
+  }
+  .col-md-push-0 {
+    left: 0;
+  }
+  .col-md-offset-12 {
+    margin-left: 100%;
+  }
+  .col-md-offset-11 {
+    margin-left: 91.66666666666666%;
+  }
+  .col-md-offset-10 {
+    margin-left: 83.33333333333334%;
+  }
+  .col-md-offset-9 {
+    margin-left: 75%;
+  }
+  .col-md-offset-8 {
+    margin-left: 66.66666666666666%;
+  }
+  .col-md-offset-7 {
+    margin-left: 58.333333333333336%;
+  }
+  .col-md-offset-6 {
+    margin-left: 50%;
+  }
+  .col-md-offset-5 {
+    margin-left: 41.66666666666667%;
+  }
+  .col-md-offset-4 {
+    margin-left: 33.33333333333333%;
+  }
+  .col-md-offset-3 {
+    margin-left: 25%;
+  }
+  .col-md-offset-2 {
+    margin-left: 16.666666666666664%;
+  }
+  .col-md-offset-1 {
+    margin-left: 8.333333333333332%;
+  }
+  .col-md-offset-0 {
+    margin-left: 0;
+  }
+}
+@media (min-width: 1200px) {
+  .col-lg-1, .col-lg-2, .col-lg-3, .col-lg-4, .col-lg-5, .col-lg-6, .col-lg-7, .col-lg-8, .col-lg-9, .col-lg-10, .col-lg-11, .col-lg-12 {
+    float: left;
+  }
+  .col-lg-12 {
+    width: 100%;
+  }
+  .col-lg-11 {
+    width: 91.66666666666666%;
+  }
+  .col-lg-10 {
+    width: 83.33333333333334%;
+  }
+  .col-lg-9 {
+    width: 75%;
+  }
+  .col-lg-8 {
+    width: 66.66666666666666%;
+  }
+  .col-lg-7 {
+    width: 58.333333333333336%;
+  }
+  .col-lg-6 {
+    width: 50%;
+  }
+  .col-lg-5 {
+    width: 41.66666666666667%;
+  }
+  .col-lg-4 {
+    width: 33.33333333333333%;
+  }
+  .col-lg-3 {
+    width: 25%;
+  }
+  .col-lg-2 {
+    width: 16.666666666666664%;
+  }
+  .col-lg-1 {
+    width: 8.333333333333332%;
+  }
+  .col-lg-pull-12 {
+    right: 100%;
+  }
+  .col-lg-pull-11 {
+    right: 91.66666666666666%;
+  }
+  .col-lg-pull-10 {
+    right: 83.33333333333334%;
+  }
+  .col-lg-pull-9 {
+    right: 75%;
+  }
+  .col-lg-pull-8 {
+    right: 66.66666666666666%;
+  }
+  .col-lg-pull-7 {
+    right: 58.333333333333336%;
+  }
+  .col-lg-pull-6 {
+    right: 50%;
+  }
+  .col-lg-pull-5 {
+    right: 41.66666666666667%;
+  }
+  .col-lg-pull-4 {
+    right: 33.33333333333333%;
+  }
+  .col-lg-pull-3 {
+    right: 25%;
+  }
+  .col-lg-pull-2 {
+    right: 16.666666666666664%;
+  }
+  .col-lg-pull-1 {
+    right: 8.333333333333332%;
+  }
+  .col-lg-pull-0 {
+    right: 0;
+  }
+  .col-lg-push-12 {
+    left: 100%;
+  }
+  .col-lg-push-11 {
+    left: 91.66666666666666%;
+  }
+  .col-lg-push-10 {
+    left: 83.33333333333334%;
+  }
+  .col-lg-push-9 {
+    left: 75%;
+  }
+  .col-lg-push-8 {
+    left: 66.66666666666666%;
+  }
+  .col-lg-push-7 {
+    left: 58.333333333333336%;
+  }
+  .col-lg-push-6 {
+    left: 50%;
+  }
+  .col-lg-push-5 {
+    left: 41.66666666666667%;
+  }
+  .col-lg-push-4 {
+    left: 33.33333333333333%;
+  }
+  .col-lg-push-3 {
+    left: 25%;
+  }
+  .col-lg-push-2 {
+    left: 16.666666666666664%;
+  }
+  .col-lg-push-1 {
+    left: 8.333333333333332%;
+  }
+  .col-lg-push-0 {
+    left: 0;
+  }
+  .col-lg-offset-12 {
+    margin-left: 100%;
+  }
+  .col-lg-offset-11 {
+    margin-left: 91.66666666666666%;
+  }
+  .col-lg-offset-10 {
+    margin-left: 83.33333333333334%;
+  }
+  .col-lg-offset-9 {
+    margin-left: 75%;
+  }
+  .col-lg-offset-8 {
+    margin-left: 66.66666666666666%;
+  }
+  .col-lg-offset-7 {
+    margin-left: 58.333333333333336%;
+  }
+  .col-lg-offset-6 {
+    margin-left: 50%;
+  }
+  .col-lg-offset-5 {
+    margin-left: 41.66666666666667%;
+  }
+  .col-lg-offset-4 {
+    margin-left: 33.33333333333333%;
+  }
+  .col-lg-offset-3 {
+    margin-left: 25%;
+  }
+  .col-lg-offset-2 {
+    margin-left: 16.666666666666664%;
+  }
+  .col-lg-offset-1 {
+    margin-left: 8.333333333333332%;
+  }
+  .col-lg-offset-0 {
+    margin-left: 0;
+  }
+}
+table {
+  max-width: 100%;
+  background-color: transparent;
+}
+th {
+  text-align: left;
+}
+.table {
+  width: 100%;
+  margin-bottom: 20px;
+}
+.table > thead > tr > th,
+.table > tbody > tr > th,
+.table > tfoot > tr > th,
+.table > thead > tr > td,
+.table > tbody > tr > td,
+.table > tfoot > tr > td {
+  padding: 8px;
+  line-height: 1.428571429;
+  vertical-align: top;
+  border-top: 1px solid #ddd;
+}
+.table > thead > tr > th {
+  vertical-align: bottom;
+  border-bottom: 2px solid #ddd;
+}
+.table > caption + thead > tr:first-child > th,
+.table > colgroup + thead > tr:first-child > th,
+.table > thead:first-child > tr:first-child > th,
+.table > caption + thead > tr:first-child > td,
+.table > colgroup + thead > tr:first-child > td,
+.table > thead:first-child > tr:first-child > td {
+  border-top: 0;
+}
+.table > tbody + tbody {
+  border-top: 2px solid #ddd;
+}
+.table .table {
+  background-color: #fff;
+}
+.table-condensed > thead > tr > th,
+.table-condensed > tbody > tr > th,
+.table-condensed > tfoot > tr > th,
+.table-condensed > thead > tr > td,
+.table-condensed > tbody > tr > td,
+.table-condensed > tfoot > tr > td {
+  padding: 5px;
+}
+.table-bordered {
+  border: 1px solid #ddd;
+}
+.table-bordered > thead > tr > th,
+.table-bordered > tbody > tr > th,
+.table-bordered > tfoot > tr > th,
+.table-bordered > thead > tr > td,
+.table-bordered > tbody > tr > td,
+.table-bordered > tfoot > tr > td {
+  border: 1px solid #ddd;
+}
+.table-bordered > thead > tr > th,
+.table-bordered > thead > tr > td {
+  border-bottom-width: 2px;
+}
+.table-striped > tbody > tr:nth-child(odd) > td,
+.table-striped > tbody > tr:nth-child(odd) > th {
+  background-color: #f9f9f9;
+}
+.table-hover > tbody > tr:hover > td,
+.table-hover > tbody > tr:hover > th {
+  background-color: #f5f5f5;
+}
+table col[class*="col-"] {
+  position: static;
+  display: table-column;
+  float: none;
+}
+table td[class*="col-"],
+table th[class*="col-"] {
+  position: static;
+  display: table-cell;
+  float: none;
+}
+.table > thead > tr > td.active,
+.table > tbody > tr > td.active,
+.table > tfoot > tr > td.active,
+.table > thead > tr > th.active,
+.table > tbody > tr > th.active,
+.table > tfoot > tr > th.active,
+.table > thead > tr.active > td,
+.table > tbody > tr.active > td,
+.table > tfoot > tr.active > td,
+.table > thead > tr.active > th,
+.table > tbody > tr.active > th,
+.table > tfoot > tr.active > th {
+  background-color: #f5f5f5;
+}
+.table-hover > tbody > tr > td.active:hover,
+.table-hover > tbody > tr > th.active:hover,
+.table-hover > tbody > tr.active:hover > td,
+.table-hover > tbody > tr.active:hover > th {
+  background-color: #e8e8e8;
+}
+.table > thead > tr > td.success,
+.table > tbody > tr > td.success,
+.table > tfoot > tr > td.success,
+.table > thead > tr > th.success,
+.table > tbody > tr > th.success,
+.table > tfoot > tr > th.success,
+.table > thead > tr.success > td,
+.table > tbody > tr.success > td,
+.table > tfoot > tr.success > td,
+.table > thead > tr.success > th,
+.table > tbody > tr.success > th,
+.table > tfoot > tr.success > th {
+  background-color: #dff0d8;
+}
+.table-hover > tbody > tr > td.success:hover,
+.table-hover > tbody > tr > th.success:hover,
+.table-hover > tbody > tr.success:hover > td,
+.table-hover > tbody > tr.success:hover > th {
+  background-color: #d0e9c6;
+}
+.table > thead > tr > td.info,
+.table > tbody > tr > td.info,
+.table > tfoot > tr > td.info,
+.table > thead > tr > th.info,
+.table > tbody > tr > th.info,
+.table > tfoot > tr > th.info,
+.table > thead > tr.info > td,
+.table > tbody > tr.info > td,
+.table > tfoot > tr.info > td,
+.table > thead > tr.info > th,
+.table > tbody > tr.info > th,
+.table > tfoot > tr.info > th {
+  background-color: #d9edf7;
+}
+.table-hover > tbody > tr > td.info:hover,
+.table-hover > tbody > tr > th.info:hover,
+.table-hover > tbody > tr.info:hover > td,
+.table-hover > tbody > tr.info:hover > th {
+  background-color: #c4e3f3;
+}
+.table > thead > tr > td.warning,
+.table > tbody > tr > td.warning,
+.table > tfoot > tr > td.warning,
+.table > thead > tr > th.warning,
+.table > tbody > tr > th.warning,
+.table > tfoot > tr > th.warning,
+.table > thead > tr.warning > td,
+.table > tbody > tr.warning > td,
+.table > tfoot > tr.warning > td,
+.table > thead > tr.warning > th,
+.table > tbody > tr.warning > th,
+.table > tfoot > tr.warning > th {
+  background-color: #fcf8e3;
+}
+.table-hover > tbody > tr > td.warning:hover,
+.table-hover > tbody > tr > th.warning:hover,
+.table-hover > tbody > tr.warning:hover > td,
+.table-hover > tbody > tr.warning:hover > th {
+  background-color: #faf2cc;
+}
+.table > thead > tr > td.danger,
+.table > tbody > tr > td.danger,
+.table > tfoot > tr > td.danger,
+.table > thead > tr > th.danger,
+.table > tbody > tr > th.danger,
+.table > tfoot > tr > th.danger,
+.table > thead > tr.danger > td,
+.table > tbody > tr.danger > td,
+.table > tfoot > tr.danger > td,
+.table > thead > tr.danger > th,
+.table > tbody > tr.danger > th,
+.table > tfoot > tr.danger > th {
+  background-color: #f2dede;
+}
+.table-hover > tbody > tr > td.danger:hover,
+.table-hover > tbody > tr > th.danger:hover,
+.table-hover > tbody > tr.danger:hover > td,
+.table-hover > tbody > tr.danger:hover > th {
+  background-color: #ebcccc;
+}
+@media (max-width: 767px) {
+  .table-responsive {
+    width: 100%;
+    margin-bottom: 15px;
+    overflow-x: scroll;
+    overflow-y: hidden;
+    -webkit-overflow-scrolling: touch;
+    -ms-overflow-style: -ms-autohiding-scrollbar;
+    border: 1px solid #ddd;
+  }
+  .table-responsive > .table {
+    margin-bottom: 0;
+  }
+  .table-responsive > .table > thead > tr > th,
+  .table-responsive > .table > tbody > tr > th,
+  .table-responsive > .table > tfoot > tr > th,
+  .table-responsive > .table > thead > tr > td,
+  .table-responsive > .table > tbody > tr > td,
+  .table-responsive > .table > tfoot > tr > td {
+    white-space: nowrap;
+  }
+  .table-responsive > .table-bordered {
+    border: 0;
+  }
+  .table-responsive > .table-bordered > thead > tr > th:first-child,
+  .table-responsive > .table-bordered > tbody > tr > th:first-child,
+  .table-responsive > .table-bordered > tfoot > tr > th:first-child,
+  .table-responsive > .table-bordered > thead > tr > td:first-child,
+  .table-responsive > .table-bordered > tbody > tr > td:first-child,
+  .table-responsive > .table-bordered > tfoot > tr > td:first-child {
+    border-left: 0;
+  }
+  .table-responsive > .table-bordered > thead > tr > th:last-child,
+  .table-responsive > .table-bordered > tbody > tr > th:last-child,
+  .table-responsive > .table-bordered > tfoot > tr > th:last-child,
+  .table-responsive > .table-bordered > thead > tr > td:last-child,
+  .table-responsive > .table-bordered > tbody > tr > td:last-child,
+  .table-responsive > .table-bordered > tfoot > tr > td:last-child {
+    border-right: 0;
+  }
+  .table-responsive > .table-bordered > tbody > tr:last-child > th,
+  .table-responsive > .table-bordered > tfoot > tr:last-child > th,
+  .table-responsive > .table-bordered > tbody > tr:last-child > td,
+  .table-responsive > .table-bordered > tfoot > tr:last-child > td {
+    border-bottom: 0;
+  }
+}
+fieldset {
+  min-width: 0;
+  padding: 0;
+  margin: 0;
+  border: 0;
+}
+legend {
+  display: block;
+  width: 100%;
+  padding: 0;
+  margin-bottom: 20px;
+  font-size: 21px;
+  line-height: inherit;
+  color: #333;
+  border: 0;
+  border-bottom: 1px solid #e5e5e5;
+}
+label {
+  display: inline-block;
+  margin-bottom: 5px;
+  font-weight: bold;
+}
+input[type="search"] {
+  -webkit-box-sizing: border-box;
+     -moz-box-sizing: border-box;
+          box-sizing: border-box;
+}
+input[type="radio"],
+input[type="checkbox"] {
+  margin: 4px 0 0;
+  margin-top: 1px \9;
+  /* IE8-9 */
+  line-height: normal;
+}
+input[type="file"] {
+  display: block;
+}
+input[type="range"] {
+  display: block;
+  width: 100%;
+}
+select[multiple],
+select[size] {
+  height: auto;
+}
+input[type="file"]:focus,
+input[type="radio"]:focus,
+input[type="checkbox"]:focus {
+  outline: thin dotted;
+  outline: 5px auto -webkit-focus-ring-color;
+  outline-offset: -2px;
+}
+output {
+  display: block;
+  padding-top: 7px;
+  font-size: 14px;
+  line-height: 1.428571429;
+  color: #555;
+}
+.form-control {
+  display: block;
+  width: 100%;
+  height: 34px;
+  padding: 6px 12px;
+  font-size: 14px;
+  line-height: 1.428571429;
+  color: #555;
+  background-color: #fff;
+  background-image: none;
+  border: 1px solid #ccc;
+  border-radius: 4px;
+  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+  -webkit-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;
+          transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;
+}
+.form-control:focus {
+  border-color: #66afe9;
+  outline: 0;
+  -webkit-box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, .6);
+          box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, .6);
+}
+.form-control:-moz-placeholder {
+  color: #999;
+}
+.form-control::-moz-placeholder {
+  color: #999;
+  opacity: 1;
+}
+.form-control:-ms-input-placeholder {
+  color: #999;
+}
+.form-control::-webkit-input-placeholder {
+  color: #999;
+}
+.form-control[disabled],
+.form-control[readonly],
+fieldset[disabled] .form-control {
+  cursor: not-allowed;
+  background-color: #eee;
+  opacity: 1;
+}
+textarea.form-control {
+  height: auto;
+}
+input[type="date"] {
+  line-height: 34px;
+}
+.form-group {
+  margin-bottom: 15px;
+}
+.radio,
+.checkbox {
+  display: block;
+  min-height: 20px;
+  padding-left: 20px;
+  margin-top: 10px;
+  margin-bottom: 10px;
+}
+.radio label,
+.checkbox label {
+  display: inline;
+  font-weight: normal;
+  cursor: pointer;
+}
+.radio input[type="radio"],
+.radio-inline input[type="radio"],
+.checkbox input[type="checkbox"],
+.checkbox-inline input[type="checkbox"] {
+  float: left;
+  margin-left: -20px;
+}
+.radio + .radio,
+.checkbox + .checkbox {
+  margin-top: -5px;
+}
+.radio-inline,
+.checkbox-inline {
+  display: inline-block;
+  padding-left: 20px;
+  margin-bottom: 0;
+  font-weight: normal;
+  vertical-align: middle;
+  cursor: pointer;
+}
+.radio-inline + .radio-inline,
+.checkbox-inline + .checkbox-inline {
+  margin-top: 0;
+  margin-left: 10px;
+}
+input[type="radio"][disabled],
+input[type="checkbox"][disabled],
+.radio[disabled],
+.radio-inline[disabled],
+.checkbox[disabled],
+.checkbox-inline[disabled],
+fieldset[disabled] input[type="radio"],
+fieldset[disabled] input[type="checkbox"],
+fieldset[disabled] .radio,
+fieldset[disabled] .radio-inline,
+fieldset[disabled] .checkbox,
+fieldset[disabled] .checkbox-inline {
+  cursor: not-allowed;
+}
+.input-sm {
+  height: 30px;
+  padding: 5px 10px;
+  font-size: 12px;
+  line-height: 1.5;
+  border-radius: 3px;
+}
+select.input-sm {
+  height: 30px;
+  line-height: 30px;
+}
+textarea.input-sm,
+select[multiple].input-sm {
+  height: auto;
+}
+.input-lg {
+  height: 46px;
+  padding: 10px 16px;
+  font-size: 18px;
+  line-height: 1.33;
+  border-radius: 6px;
+}
+select.input-lg {
+  height: 46px;
+  line-height: 46px;
+}
+textarea.input-lg,
+select[multiple].input-lg {
+  height: auto;
+}
+.has-feedback {
+  position: relative;
+}
+.has-feedback .form-control {
+  padding-right: 42.5px;
+}
+.has-feedback .form-control-feedback {
+  position: absolute;
+  top: 25px;
+  right: 0;
+  display: block;
+  width: 34px;
+  height: 34px;
+  line-height: 34px;
+  text-align: center;
+}
+.has-success .help-block,
+.has-success .control-label,
+.has-success .radio,
+.has-success .checkbox,
+.has-success .radio-inline,
+.has-success .checkbox-inline {
+  color: #3c763d;
+}
+.has-success .form-control {
+  border-color: #3c763d;
+  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+}
+.has-success .form-control:focus {
+  border-color: #2b542c;
+  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #67b168;
+          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #67b168;
+}
+.has-success .input-group-addon {
+  color: #3c763d;
+  background-color: #dff0d8;
+  border-color: #3c763d;
+}
+.has-success .form-control-feedback {
+  color: #3c763d;
+}
+.has-warning .help-block,
+.has-warning .control-label,
+.has-warning .radio,
+.has-warning .checkbox,
+.has-warning .radio-inline,
+.has-warning .checkbox-inline {
+  color: #8a6d3b;
+}
+.has-warning .form-control {
+  border-color: #8a6d3b;
+  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+}
+.has-warning .form-control:focus {
+  border-color: #66512c;
+  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #c0a16b;
+          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #c0a16b;
+}
+.has-warning .input-group-addon {
+  color: #8a6d3b;
+  background-color: #fcf8e3;
+  border-color: #8a6d3b;
+}
+.has-warning .form-control-feedback {
+  color: #8a6d3b;
+}
+.has-error .help-block,
+.has-error .control-label,
+.has-error .radio,
+.has-error .checkbox,
+.has-error .radio-inline,
+.has-error .checkbox-inline {
+  color: #a94442;
+}
+.has-error .form-control {
+  border-color: #a94442;
+  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);
+}
+.has-error .form-control:focus {
+  border-color: #843534;
+  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #ce8483;
+          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #ce8483;
+}
+.has-error .input-group-addon {
+  color: #a94442;
+  background-color: #f2dede;
+  border-color: #a94442;
+}
+.has-error .form-control-feedback {
+  color: #a94442;
+}
+.form-control-static {
+  margin-bottom: 0;
+}
+.help-block {
+  display: block;
+  margin-top: 5px;
+  margin-bottom: 10px;
+  color: #737373;
+}
+@media (min-width: 768px) {
+  .form-inline .form-group {
+    display: inline-block;
+    margin-bottom: 0;
+    vertical-align: middle;
+  }
+  .form-inline .form-control {
+    display: inline-block;
+    width: auto;
+    vertical-align: middle;
+  }
+  .form-inline .control-label {
+    margin-bottom: 0;
+    vertical-align: middle;
+  }
+  .form-inline .radio,
+  .form-inline .checkbox {
+    display: inline-block;
+    padding-left: 0;
+    margin-top: 0;
+    margin-bottom: 0;
+    vertical-align: middle;
+  }
+  .form-inline .radio input[type="radio"],
+  .form-inline .checkbox input[type="checkbox"] {
+    float: none;
+    margin-left: 0;
+  }
+  .form-inline .has-feedback .form-control-feedback {
+    top: 0;
+  }
+}
+.form-horizontal .control-label,
+.form-horizontal .radio,
+.form-horizontal .checkbox,
+.form-horizontal .radio-inline,
+.form-horizontal .checkbox-inline {
+  padding-top: 7px;
+  margin-top: 0;
+  margin-bottom: 0;
+}
+.form-horizontal .radio,
+.form-horizontal .checkbox {
+  min-height: 27px;
+}
+.form-horizontal .form-group {
+  margin-right: -15px;
+  margin-left: -15px;
+}
+.form-horizontal .form-control-static {
+  padding-top: 7px;
+}
+@media (min-width: 768px) {
+  .form-horizontal .control-label {
+    text-align: right;
+  }
+}
+.form-horizontal .has-feedback .form-control-feedback {
+  top: 0;
+  right: 15px;
+}
+.btn {
+  display: inline-block;
+  padding: 6px 12px;
+  margin-bottom: 0;
+  font-size: 14px;
+  font-weight: normal;
+  line-height: 1.428571429;
+  text-align: center;
+  white-space: nowrap;
+  vertical-align: middle;
+  cursor: pointer;
+  -webkit-user-select: none;
+     -moz-user-select: none;
+      -ms-user-select: none;
+       -o-user-select: none;
+          user-select: none;
+  background-image: none;
+  border: 1px solid transparent;
+  border-radius: 4px;
+}
+.btn:focus {
+  outline: thin dotted;
+  outline: 5px auto -webkit-focus-ring-color;
+  outline-offset: -2px;
+}
+.btn:hover,
+.btn:focus {
+  color: #333;
+  text-decoration: none;
+}
+.btn:active,
+.btn.active {
+  background-image: none;
+  outline: 0;
+  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);
+          box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);
+}
+.btn.disabled,
+.btn[disabled],
+fieldset[disabled] .btn {
+  pointer-events: none;
+  cursor: not-allowed;
+  filter: alpha(opacity=65);
+  -webkit-box-shadow: none;
+          box-shadow: none;
+  opacity: .65;
+}
+.btn-default {
+  color: #333;
+  background-color: #fff;
+  border-color: #ccc;
+}
+.btn-default:hover,
+.btn-default:focus,
+.btn-default:active,
+.btn-default.active,
+.open .dropdown-toggle.btn-default {
+  color: #333;
+  background-color: #ebebeb;
+  border-color: #adadad;
+}
+.btn-default:active,
+.btn-default.active,
+.open .dropdown-toggle.btn-default {
+  background-image: none;
+}
+.btn-default.disabled,
+.btn-default[disabled],
+fieldset[disabled] .btn-default,
+.btn-default.disabled:hover,
+.btn-default[disabled]:hover,
+fieldset[disabled] .btn-default:hover,
+.btn-default.disabled:focus,
+.btn-default[disabled]:focus,
+fieldset[disabled] .btn-default:focus,
+.btn-default.disabled:active,
+.btn-default[disabled]:active,
+fieldset[disabled] .btn-default:active,
+.btn-default.disabled.active,
+.btn-default[disabled].active,
+fieldset[disabled] .btn-default.active {
+  background-color: #fff;
+  border-color: #ccc;
+}
+.btn-default .badge {
+  color: #fff;
+  background-color: #333;
+}
+.btn-primary {
+  color: #fff;
+  background-color: #428bca;
+  border-color: #357ebd;
+}
+.btn-primary:hover,
+.btn-primary:focus,
+.btn-primary:active,
+.btn-primary.active,
+.open .dropdown-toggle.btn-primary {
+  color: #fff;
+  background-color: #3276b1;
+  border-color: #285e8e;
+}
+.btn-primary:active,
+.btn-primary.active,
+.open .dropdown-toggle.btn-primary {
+  background-image: none;
+}
+.btn-primary.disabled,
+.btn-primary[disabled],
+fieldset[disabled] .btn-primary,
+.btn-primary.disabled:hover,
+.btn-primary[disabled]:hover,
+fieldset[disabled] .btn-primary:hover,
+.btn-primary.disabled:focus,
+.btn-primary[disabled]:focus,
+fieldset[disabled] .btn-primary:focus,
+.btn-primary.disabled:active,
+.btn-primary[disabled]:active,
+fieldset[disabled] .btn-primary:active,
+.btn-primary.disabled.active,
+.btn-primary[disabled].active,
+fieldset[disabled] .btn-primary.active {
+  background-color: #428bca;
+  border-color: #357ebd;
+}
+.btn-primary .badge {
+  color: #428bca;
+  background-color: #fff;
+}
+.btn-success {
+  color: #fff;
+  background-color: #5cb85c;
+  border-color: #4cae4c;
+}
+.btn-success:hover,
+.btn-success:focus,
+.btn-success:active,
+.btn-success.active,
+.open .dropdown-toggle.btn-success {
+  color: #fff;
+  background-color: #47a447;
+  border-color: #398439;
+}
+.btn-success:active,
+.btn-success.active,
+.open .dropdown-toggle.btn-success {
+  background-image: none;
+}
+.btn-success.disabled,
+.btn-success[disabled],
+fieldset[disabled] .btn-success,
+.btn-success.disabled:hover,
+.btn-success[disabled]:hover,
+fieldset[disabled] .btn-success:hover,
+.btn-success.disabled:focus,
+.btn-success[disabled]:focus,
+fieldset[disabled] .btn-success:focus,
+.btn-success.disabled:active,
+.btn-success[disabled]:active,
+fieldset[disabled] .btn-success:active,
+.btn-success.disabled.active,
+.btn-success[disabled].active,
+fieldset[disabled] .btn-success.active {
+  background-color: #5cb85c;
+  border-color: #4cae4c;
+}
+.btn-success .badge {
+  color: #5cb85c;
+  background-color: #fff;
+}
+.btn-info {
+  color: #fff;
+  background-color: #5bc0de;
+  border-color: #46b8da;
+}
+.btn-info:hover,
+.btn-info:focus,
+.btn-info:active,
+.btn-info.active,
+.open .dropdown-toggle.btn-info {
+  color: #fff;
+  background-color: #39b3d7;
+  border-color: #269abc;
+}
+.btn-info:active,
+.btn-info.active,
+.open .dropdown-toggle.btn-info {
+  background-image: none;
+}
+.btn-info.disabled,
+.btn-info[disabled],
+fieldset[disabled] .btn-info,
+.btn-info.disabled:hover,
+.btn-info[disabled]:hover,
+fieldset[disabled] .btn-info:hover,
+.btn-info.disabled:focus,
+.btn-info[disabled]:focus,
+fieldset[disabled] .btn-info:focus,
+.btn-info.disabled:active,
+.btn-info[disabled]:active,
+fieldset[disabled] .btn-info:active,
+.btn-info.disabled.active,
+.btn-info[disabled].active,
+fieldset[disabled] .btn-info.active {
+  background-color: #5bc0de;
+  border-color: #46b8da;
+}
+.btn-info .badge {
+  color: #5bc0de;
+  background-color: #fff;
+}
+.btn-warning {
+  color: #fff;
+  background-color: #f0ad4e;
+  border-color: #eea236;
+}
+.btn-warning:hover,
+.btn-warning:focus,
+.btn-warning:active,
+.btn-warning.active,
+.open .dropdown-toggle.btn-warning {
+  color: #fff;
+  background-color: #ed9c28;
+  border-color: #d58512;
+}
+.btn-warning:active,
+.btn-warning.active,
+.open .dropdown-toggle.btn-warning {
+  background-image: none;
+}
+.btn-warning.disabled,
+.btn-warning[disabled],
+fieldset[disabled] .btn-warning,
+.btn-warning.disabled:hover,
+.btn-warning[disabled]:hover,
+fieldset[disabled] .btn-warning:hover,
+.btn-warning.disabled:focus,
+.btn-warning[disabled]:focus,
+fieldset[disabled] .btn-warning:focus,
+.btn-warning.disabled:active,
+.btn-warning[disabled]:active,
+fieldset[disabled] .btn-warning:active,
+.btn-warning.disabled.active,
+.btn-warning[disabled].active,
+fieldset[disabled] .btn-warning.active {
+  background-color: #f0ad4e;
+  border-color: #eea236;
+}
+.btn-warning .badge {
+  color: #f0ad4e;
+  background-color: #fff;
+}
+.btn-danger {
+  color: #fff;
+  background-color: #d9534f;
+  border-color: #d43f3a;
+}
+.btn-danger:hover,
+.btn-danger:focus,
+.btn-danger:active,
+.btn-danger.active,
+.open .dropdown-toggle.btn-danger {
+  color: #fff;
+  background-color: #d2322d;
+  border-color: #ac2925;
+}
+.btn-danger:active,
+.btn-danger.active,
+.open .dropdown-toggle.btn-danger {
+  background-image: none;
+}
+.btn-danger.disabled,
+.btn-danger[disabled],
+fieldset[disabled] .btn-danger,
+.btn-danger.disabled:hover,
+.btn-danger[disabled]:hover,
+fieldset[disabled] .btn-danger:hover,
+.btn-danger.disabled:focus,
+.btn-danger[disabled]:focus,
+fieldset[disabled] .btn-danger:focus,
+.btn-danger.disabled:active,
+.btn-danger[disabled]:active,
+fieldset[disabled] .btn-danger:active,
+.btn-danger.disabled.active,
+.btn-danger[disabled].active,
+fieldset[disabled] .btn-danger.active {
+  background-color: #d9534f;
+  border-color: #d43f3a;
+}
+.btn-danger .badge {
+  color: #d9534f;
+  background-color: #fff;
+}
+.btn-link {
+  font-weight: normal;
+  color: #428bca;
+  cursor: pointer;
+  border-radius: 0;
+}
+.btn-link,
+.btn-link:active,
+.btn-link[disabled],
+fieldset[disabled] .btn-link {
+  background-color: transparent;
+  -webkit-box-shadow: none;
+          box-shadow: none;
+}
+.btn-link,
+.btn-link:hover,
+.btn-link:focus,
+.btn-link:active {
+  border-color: transparent;
+}
+.btn-link:hover,
+.btn-link:focus {
+  color: #2a6496;
+  text-decoration: underline;
+  background-color: transparent;
+}
+.btn-link[disabled]:hover,
+fieldset[disabled] .btn-link:hover,
+.btn-link[disabled]:focus,
+fieldset[disabled] .btn-link:focus {
+  color: #999;
+  text-decoration: none;
+}
+.btn-lg {
+  padding: 10px 16px;
+  font-size: 18px;
+  line-height: 1.33;
+  border-radius: 6px;
+}
+.btn-sm {
+  padding: 5px 10px;
+  font-size: 12px;
+  line-height: 1.5;
+  border-radius: 3px;
+}
+.btn-xs {
+  padding: 1px 5px;
+  font-size: 12px;
+  line-height: 1.5;
+  border-radius: 3px;
+}
+.btn-block {
+  display: block;
+  width: 100%;
+  padding-right: 0;
+  padding-left: 0;
+}
+.btn-block + .btn-block {
+  margin-top: 5px;
+}
+input[type="submit"].btn-block,
+input[type="reset"].btn-block,
+input[type="button"].btn-block {
+  width: 100%;
+}
+.fade {
+  opacity: 0;
+  -webkit-transition: opacity .15s linear;
+          transition: opacity .15s linear;
+}
+.fade.in {
+  opacity: 1;
+}
+.collapse {
+  display: none;
+}
+.collapse.in {
+  display: block;
+}
+.collapsing {
+  position: relative;
+  height: 0;
+  overflow: hidden;
+  -webkit-transition: height .35s ease;
+          transition: height .35s ease;
+}
+@font-face {
+  font-family: 'Glyphicons Halflings';
+
+  src: url('../fonts/glyphicons-halflings-regular.eot');
+  src: url('../fonts/glyphicons-halflings-regular.eot?#iefix') format('embedded-opentype'), url('../fonts/glyphicons-halflings-regular.woff') format('woff'), url('../fonts/glyphicons-halflings-regular.ttf') format('truetype'), url('../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular') format('svg');
+}
+.glyphicon {
+  position: relative;
+  top: 1px;
+  display: inline-block;
+  font-family: 'Glyphicons Halflings';
+  font-style: normal;
+  font-weight: normal;
+  line-height: 1;
+
+  -webkit-font-smoothing: antialiased;
+  -moz-osx-font-smoothing: grayscale;
+}
+.glyphicon-asterisk:before {
+  content: "\2a";
+}
+.glyphicon-plus:before {
+  content: "\2b";
+}
+.glyphicon-euro:before {
+  content: "\20ac";
+}
+.glyphicon-minus:before {
+  content: "\2212";
+}
+.glyphicon-cloud:before {
+  content: "\2601";
+}
+.glyphicon-envelope:before {
+  content: "\2709";
+}
+.glyphicon-pencil:before {
+  content: "\270f";
+}
+.glyphicon-glass:before {
+  content: "\e001";
+}
+.glyphicon-music:before {
+  content: "\e002";
+}
+.glyphicon-search:before {
+  content: "\e003";
+}
+.glyphicon-heart:before {
+  content: "\e005";
+}
+.glyphicon-star:before {
+  content: "\e006";
+}
+.glyphicon-star-empty:before {
+  content: "\e007";
+}
+.glyphicon-user:before {
+  content: "\e008";
+}
+.glyphicon-film:before {
+  content: "\e009";
+}
+.glyphicon-th-large:before {
+  content: "\e010";
+}
+.glyphicon-th:before {
+  content: "\e011";
+}
+.glyphicon-th-list:before {
+  content: "\e012";
+}
+.glyphicon-ok:before {
+  content: "\e013";
+}
+.glyphicon-remove:before {
+  content: "\e014";
+}
+.glyphicon-zoom-in:before {
+  content: "\e015";
+}
+.glyphicon-zoom-out:before {
+  content: "\e016";
+}
+.glyphicon-off:before {
+  content: "\e017";
+}
+.glyphicon-signal:before {
+  content: "\e018";
+}
+.glyphicon-cog:before {
+  content: "\e019";
+}
+.glyphicon-trash:before {
+  content: "\e020";
+}
+.glyphicon-home:before {
+  content: "\e021";
+}
+.glyphicon-file:before {
+  content: "\e022";
+}
+.glyphicon-time:before {
+  content: "\e023";
+}
+.glyphicon-road:before {
+  content: "\e024";
+}
+.glyphicon-download-alt:before {
+  content: "\e025";
+}
+.glyphicon-download:before {
+  content: "\e026";
+}
+.glyphicon-upload:before {
+  content: "\e027";
+}
+.glyphicon-inbox:before {
+  content: "\e028";
+}
+.glyphicon-play-circle:before {
+  content: "\e029";
+}
+.glyphicon-repeat:before {
+  content: "\e030";
+}
+.glyphicon-refresh:before {
+  content: "\e031";
+}
+.glyphicon-list-alt:before {
+  content: "\e032";
+}
+.glyphicon-lock:before {
+  content: "\e033";
+}
+.glyphicon-flag:before {
+  content: "\e034";
+}
+.glyphicon-headphones:before {
+  content: "\e035";
+}
+.glyphicon-volume-off:before {
+  content: "\e036";
+}
+.glyphicon-volume-down:before {
+  content: "\e037";
+}
+.glyphicon-volume-up:before {
+  content: "\e038";
+}
+.glyphicon-qrcode:before {
+  content: "\e039";
+}
+.glyphicon-barcode:before {
+  content: "\e040";
+}
+.glyphicon-tag:before {
+  content: "\e041";
+}
+.glyphicon-tags:before {
+  content: "\e042";
+}
+.glyphicon-book:before {
+  content: "\e043";
+}
+.glyphicon-bookmark:before {
+  content: "\e044";
+}
+.glyphicon-print:before {
+  content: "\e045";
+}
+.glyphicon-camera:before {
+  content: "\e046";
+}
+.glyphicon-font:before {
+  content: "\e047";
+}
+.glyphicon-bold:before {
+  content: "\e048";
+}
+.glyphicon-italic:before {
+  content: "\e049";
+}
+.glyphicon-text-height:before {
+  content: "\e050";
+}
+.glyphicon-text-width:before {
+  content: "\e051";
+}
+.glyphicon-align-left:before {
+  content: "\e052";
+}
+.glyphicon-align-center:before {
+  content: "\e053";
+}
+.glyphicon-align-right:before {
+  content: "\e054";
+}
+.glyphicon-align-justify:before {
+  content: "\e055";
+}
+.glyphicon-list:before {
+  content: "\e056";
+}
+.glyphicon-indent-left:before {
+  content: "\e057";
+}
+.glyphicon-indent-right:before {
+  content: "\e058";
+}
+.glyphicon-facetime-video:before {
+  content: "\e059";
+}
+.glyphicon-picture:before {
+  content: "\e060";
+}
+.glyphicon-map-marker:before {
+  content: "\e062";
+}
+.glyphicon-adjust:before {
+  content: "\e063";
+}
+.glyphicon-tint:before {
+  content: "\e064";
+}
+.glyphicon-edit:before {
+  content: "\e065";
+}
+.glyphicon-share:before {
+  content: "\e066";
+}
+.glyphicon-check:before {
+  content: "\e067";
+}
+.glyphicon-move:before {
+  content: "\e068";
+}
+.glyphicon-step-backward:before {
+  content: "\e069";
+}
+.glyphicon-fast-backward:before {
+  content: "\e070";
+}
+.glyphicon-backward:before {
+  content: "\e071";
+}
+.glyphicon-play:before {
+  content: "\e072";
+}
+.glyphicon-pause:before {
+  content: "\e073";
+}
+.glyphicon-stop:before {
+  content: "\e074";
+}
+.glyphicon-forward:before {
+  content: "\e075";
+}
+.glyphicon-fast-forward:before {
+  content: "\e076";
+}
+.glyphicon-step-forward:before {
+  content: "\e077";
+}
+.glyphicon-eject:before {
+  content: "\e078";
+}
+.glyphicon-chevron-left:before {
+  content: "\e079";
+}
+.glyphicon-chevron-right:before {
+  content: "\e080";
+}
+.glyphicon-plus-sign:before {
+  content: "\e081";
+}
+.glyphicon-minus-sign:before {
+  content: "\e082";
+}
+.glyphicon-remove-sign:before {
+  content: "\e083";
+}
+.glyphicon-ok-sign:before {
+  content: "\e084";
+}
+.glyphicon-question-sign:before {
+  content: "\e085";
+}
+.glyphicon-info-sign:before {
+  content: "\e086";
+}
+.glyphicon-screenshot:before {
+  content: "\e087";
+}
+.glyphicon-remove-circle:before {
+  content: "\e088";
+}
+.glyphicon-ok-circle:before {
+  content: "\e089";
+}
+.glyphicon-ban-circle:before {
+  content: "\e090";
+}
+.glyphicon-arrow-left:before {
+  content: "\e091";
+}
+.glyphicon-arrow-right:before {
+  content: "\e092";
+}
+.glyphicon-arrow-up:before {
+  content: "\e093";
+}
+.glyphicon-arrow-down:before {
+  content: "\e094";
+}
+.glyphicon-share-alt:before {
+  content: "\e095";
+}
+.glyphicon-resize-full:before {
+  content: "\e096";
+}
+.glyphicon-resize-small:before {
+  content: "\e097";
+}
+.glyphicon-exclamation-sign:before {
+  content: "\e101";
+}
+.glyphicon-gift:before {
+  content: "\e102";
+}
+.glyphicon-leaf:before {
+  content: "\e103";
+}
+.glyphicon-fire:before {
+  content: "\e104";
+}
+.glyphicon-eye-open:before {
+  content: "\e105";
+}
+.glyphicon-eye-close:before {
+  content: "\e106";
+}
+.glyphicon-warning-sign:before {
+  content: "\e107";
+}
+.glyphicon-plane:before {
+  content: "\e108";
+}
+.glyphicon-calendar:before {
+  content: "\e109";
+}
+.glyphicon-random:before {
+  content: "\e110";
+}
+.glyphicon-comment:before {
+  content: "\e111";
+}
+.glyphicon-magnet:before {
+  content: "\e112";
+}
+.glyphicon-chevron-up:before {
+  content: "\e113";
+}
+.glyphicon-chevron-down:before {
+  content: "\e114";
+}
+.glyphicon-retweet:before {
+  content: "\e115";
+}
+.glyphicon-shopping-cart:before {
+  content: "\e116";
+}
+.glyphicon-folder-close:before {
+  content: "\e117";
+}
+.glyphicon-folder-open:before {
+  content: "\e118";
+}
+.glyphicon-resize-vertical:before {
+  content: "\e119";
+}
+.glyphicon-resize-horizontal:before {
+  content: "\e120";
+}
+.glyphicon-hdd:before {
+  content: "\e121";
+}
+.glyphicon-bullhorn:before {
+  content: "\e122";
+}
+.glyphicon-bell:before {
+  content: "\e123";
+}
+.glyphicon-certificate:before {
+  content: "\e124";
+}
+.glyphicon-thumbs-up:before {
+  content: "\e125";
+}
+.glyphicon-thumbs-down:before {
+  content: "\e126";
+}
+.glyphicon-hand-right:before {
+  content: "\e127";
+}
+.glyphicon-hand-left:before {
+  content: "\e128";
+}
+.glyphicon-hand-up:before {
+  content: "\e129";
+}
+.glyphicon-hand-down:before {
+  content: "\e130";
+}
+.glyphicon-circle-arrow-right:before {
+  content: "\e131";
+}
+.glyphicon-circle-arrow-left:before {
+  content: "\e132";
+}
+.glyphicon-circle-arrow-up:before {
+  content: "\e133";
+}
+.glyphicon-circle-arrow-down:before {
+  content: "\e134";
+}
+.glyphicon-globe:before {
+  content: "\e135";
+}
+.glyphicon-wrench:before {
+  content: "\e136";
+}
+.glyphicon-tasks:before {
+  content: "\e137";
+}
+.glyphicon-filter:before {
+  content: "\e138";
+}
+.glyphicon-briefcase:before {
+  content: "\e139";
+}
+.glyphicon-fullscreen:before {
+  content: "\e140";
+}
+.glyphicon-dashboard:before {
+  content: "\e141";
+}
+.glyphicon-paperclip:before {
+  content: "\e142";
+}
+.glyphicon-heart-empty:before {
+  content: "\e143";
+}
+.glyphicon-link:before {
+  content: "\e144";
+}
+.glyphicon-phone:before {
+  content: "\e145";
+}
+.glyphicon-pushpin:before {
+  content: "\e146";
+}
+.glyphicon-usd:before {
+  content: "\e148";
+}
+.glyphicon-gbp:before {
+  content: "\e149";
+}
+.glyphicon-sort:before {
+  content: "\e150";
+}
+.glyphicon-sort-by-alphabet:before {
+  content: "\e151";
+}
+.glyphicon-sort-by-alphabet-alt:before {
+  content: "\e152";
+}
+.glyphicon-sort-by-order:before {
+  content: "\e153";
+}
+.glyphicon-sort-by-order-alt:before {
+  content: "\e154";
+}
+.glyphicon-sort-by-attributes:before {
+  content: "\e155";
+}
+.glyphicon-sort-by-attributes-alt:before {
+  content: "\e156";
+}
+.glyphicon-unchecked:before {
+  content: "\e157";
+}
+.glyphicon-expand:before {
+  content: "\e158";
+}
+.glyphicon-collapse-down:before {
+  content: "\e159";
+}
+.glyphicon-collapse-up:before {
+  content: "\e160";
+}
+.glyphicon-log-in:before {
+  content: "\e161";
+}
+.glyphicon-flash:before {
+  content: "\e162";
+}
+.glyphicon-log-out:before {
+  content: "\e163";
+}
+.glyphicon-new-window:before {
+  content: "\e164";
+}
+.glyphicon-record:before {
+  content: "\e165";
+}
+.glyphicon-save:before {
+  content: "\e166";
+}
+.glyphicon-open:before {
+  content: "\e167";
+}
+.glyphicon-saved:before {
+  content: "\e168";
+}
+.glyphicon-import:before {
+  content: "\e169";
+}
+.glyphicon-export:before {
+  content: "\e170";
+}
+.glyphicon-send:before {
+  content: "\e171";
+}
+.glyphicon-floppy-disk:before {
+  content: "\e172";
+}
+.glyphicon-floppy-saved:before {
+  content: "\e173";
+}
+.glyphicon-floppy-remove:before {
+  content: "\e174";
+}
+.glyphicon-floppy-save:before {
+  content: "\e175";
+}
+.glyphicon-floppy-open:before {
+  content: "\e176";
+}
+.glyphicon-credit-card:before {
+  content: "\e177";
+}
+.glyphicon-transfer:before {
+  content: "\e178";
+}
+.glyphicon-cutlery:before {
+  content: "\e179";
+}
+.glyphicon-header:before {
+  content: "\e180";
+}
+.glyphicon-compressed:before {
+  content: "\e181";
+}
+.glyphicon-earphone:before {
+  content: "\e182";
+}
+.glyphicon-phone-alt:before {
+  content: "\e183";
+}
+.glyphicon-tower:before {
+  content: "\e184";
+}
+.glyphicon-stats:before {
+  content: "\e185";
+}
+.glyphicon-sd-video:before {
+  content: "\e186";
+}
+.glyphicon-hd-video:before {
+  content: "\e187";
+}
+.glyphicon-subtitles:before {
+  content: "\e188";
+}
+.glyphicon-sound-stereo:before {
+  content: "\e189";
+}
+.glyphicon-sound-dolby:before {
+  content: "\e190";
+}
+.glyphicon-sound-5-1:before {
+  content: "\e191";
+}
+.glyphicon-sound-6-1:before {
+  content: "\e192";
+}
+.glyphicon-sound-7-1:before {
+  content: "\e193";
+}
+.glyphicon-copyright-mark:before {
+  content: "\e194";
+}
+.glyphicon-registration-mark:before {
+  content: "\e195";
+}
+.glyphicon-cloud-download:before {
+  content: "\e197";
+}
+.glyphicon-cloud-upload:before {
+  content: "\e198";
+}
+.glyphicon-tree-conifer:before {
+  content: "\e199";
+}
+.glyphicon-tree-deciduous:before {
+  content: "\e200";
+}
+.caret {
+  display: inline-block;
+  width: 0;
+  height: 0;
+  margin-left: 2px;
+  vertical-align: middle;
+  border-top: 4px solid;
+  border-right: 4px solid transparent;
+  border-left: 4px solid transparent;
+}
+.dropdown {
+  position: relative;
+}
+.dropdown-toggle:focus {
+  outline: 0;
+}
+.dropdown-menu {
+  position: absolute;
+  top: 100%;
+  left: 0;
+  z-index: 1000;
+  display: none;
+  float: left;
+  min-width: 160px;
+  padding: 5px 0;
+  margin: 2px 0 0;
+  font-size: 14px;
+  list-style: none;
+  background-color: #fff;
+  background-clip: padding-box;
+  border: 1px solid #ccc;
+  border: 1px solid rgba(0, 0, 0, .15);
+  border-radius: 4px;
+  -webkit-box-shadow: 0 6px 12px rgba(0, 0, 0, .175);
+          box-shadow: 0 6px 12px rgba(0, 0, 0, .175);
+}
+.dropdown-menu.pull-right {
+  right: 0;
+  left: auto;
+}
+.dropdown-menu .divider {
+  height: 1px;
+  margin: 9px 0;
+  overflow: hidden;
+  background-color: #e5e5e5;
+}
+.dropdown-menu > li > a {
+  display: block;
+  padding: 3px 20px;
+  clear: both;
+  font-weight: normal;
+  line-height: 1.428571429;
+  color: #333;
+  white-space: nowrap;
+}
+.dropdown-menu > li > a:hover,
+.dropdown-menu > li > a:focus {
+  color: #262626;
+  text-decoration: none;
+  background-color: #f5f5f5;
+}
+.dropdown-menu > .active > a,
+.dropdown-menu > .active > a:hover,
+.dropdown-menu > .active > a:focus {
+  color: #fff;
+  text-decoration: none;
+  background-color: #428bca;
+  outline: 0;
+}
+.dropdown-menu > .disabled > a,
+.dropdown-menu > .disabled > a:hover,
+.dropdown-menu > .disabled > a:focus {
+  color: #999;
+}
+.dropdown-menu > .disabled > a:hover,
+.dropdown-menu > .disabled > a:focus {
+  text-decoration: none;
+  cursor: not-allowed;
+  background-color: transparent;
+  background-image: none;
+  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);
+}
+.open > .dropdown-menu {
+  display: block;
+}
+.open > a {
+  outline: 0;
+}
+.dropdown-menu-right {
+  right: 0;
+  left: auto;
+}
+.dropdown-menu-left {
+  right: auto;
+  left: 0;
+}
+.dropdown-header {
+  display: block;
+  padding: 3px 20px;
+  font-size: 12px;
+  line-height: 1.428571429;
+  color: #999;
+}
+.dropdown-backdrop {
+  position: fixed;
+  top: 0;
+  right: 0;
+  bottom: 0;
+  left: 0;
+  z-index: 990;
+}
+.pull-right > .dropdown-menu {
+  right: 0;
+  left: auto;
+}
+.dropup .caret,
+.navbar-fixed-bottom .dropdown .caret {
+  content: "";
+  border-top: 0;
+  border-bottom: 4px solid;
+}
+.dropup .dropdown-menu,
+.navbar-fixed-bottom .dropdown .dropdown-menu {
+  top: auto;
+  bottom: 100%;
+  margin-bottom: 1px;
+}
+@media (min-width: 768px) {
+  .navbar-right .dropdown-menu {
+    right: 0;
+    left: auto;
+  }
+  .navbar-right .dropdown-menu-left {
+    right: auto;
+    left: 0;
+  }
+}
+.btn-group,
+.btn-group-vertical {
+  position: relative;
+  display: inline-block;
+  vertical-align: middle;
+}
+.btn-group > .btn,
+.btn-group-vertical > .btn {
+  position: relative;
+  float: left;
+}
+.btn-group > .btn:hover,
+.btn-group-vertical > .btn:hover,
+.btn-group > .btn:focus,
+.btn-group-vertical > .btn:focus,
+.btn-group > .btn:active,
+.btn-group-vertical > .btn:active,
+.btn-group > .btn.active,
+.btn-group-vertical > .btn.active {
+  z-index: 2;
+}
+.btn-group > .btn:focus,
+.btn-group-vertical > .btn:focus {
+  outline: none;
+}
+.btn-group .btn + .btn,
+.btn-group .btn + .btn-group,
+.btn-group .btn-group + .btn,
+.btn-group .btn-group + .btn-group {
+  margin-left: -1px;
+}
+.btn-toolbar {
+  margin-left: -5px;
+}
+.btn-toolbar .btn-group,
+.btn-toolbar .input-group {
+  float: left;
+}
+.btn-toolbar > .btn,
+.btn-toolbar > .btn-group,
+.btn-toolbar > .input-group {
+  margin-left: 5px;
+}
+.btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) {
+  border-radius: 0;
+}
+.btn-group > .btn:first-child {
+  margin-left: 0;
+}
+.btn-group > .btn:first-child:not(:last-child):not(.dropdown-toggle) {
+  border-top-right-radius: 0;
+  border-bottom-right-radius: 0;
+}
+.btn-group > .btn:last-child:not(:first-child),
+.btn-group > .dropdown-toggle:not(:first-child) {
+  border-top-left-radius: 0;
+  border-bottom-left-radius: 0;
+}
+.btn-group > .btn-group {
+  float: left;
+}
+.btn-group > .btn-group:not(:first-child):not(:last-child) > .btn {
+  border-radius: 0;
+}
+.btn-group > .btn-group:first-child > .btn:last-child,
+.btn-group > .btn-group:first-child > .dropdown-toggle {
+  border-top-right-radius: 0;
+  border-bottom-right-radius: 0;
+}
+.btn-group > .btn-group:last-child > .btn:first-child {
+  border-top-left-radius: 0;
+  border-bottom-left-radius: 0;
+}
+.btn-group .dropdown-toggle:active,
+.btn-group.open .dropdown-toggle {
+  outline: 0;
+}
+.btn-group-xs > .btn {
+  padding: 1px 5px;
+  font-size: 12px;
+  line-height: 1.5;
+  border-radius: 3px;
+}
+.btn-group-sm > .btn {
+  padding: 5px 10px;
+  font-size: 12px;
+  line-height: 1.5;
+  border-radius: 3px;
+}
+.btn-group-lg > .btn {
+  padding: 10px 16px;
+  font-size: 18px;
+  line-height: 1.33;
+  border-radius: 6px;
+}
+.btn-group > .btn + .dropdown-toggle {
+  padding-right: 8px;
+  padding-left: 8px;
+}
+.btn-group > .btn-lg + .dropdown-toggle {
+  padding-right: 12px;
+  padding-left: 12px;
+}
+.btn-group.open .dropdown-toggle {
+  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);
+          box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);
+}
+.btn-group.open .dropdown-toggle.btn-link {
+  -webkit-box-shadow: none;
+          box-shadow: none;
+}
+.btn .caret {
+  margin-left: 0;
+}
+.btn-lg .caret {
+  border-width: 5px 5px 0;
+  border-bottom-width: 0;
+}
+.dropup .btn-lg .caret {
+  border-width: 0 5px 5px;
+}
+.btn-group-vertical > .btn,
+.btn-group-vertical > .btn-group,
+.btn-group-vertical > .btn-group > .btn {
+  display: block;
+  float: none;
+  width: 100%;
+  max-width: 100%;
+}
+.btn-group-vertical > .btn-group > .btn {
+  float: none;
+}
+.btn-group-vertical > .btn + .btn,
+.btn-group-vertical > .btn + .btn-group,
+.btn-group-vertical > .btn-group + .btn,
+.btn-group-vertical > .btn-group + .btn-group {
+  margin-top: -1px;
+  margin-left: 0;
+}
+.btn-group-vertical > .btn:not(:first-child):not(:last-child) {
+  border-radius: 0;
+}
+.btn-group-vertical > .btn:first-child:not(:last-child) {
+  border-top-right-radius: 4px;
+  border-bottom-right-radius: 0;
+  border-bottom-left-radius: 0;
+}
+.btn-group-vertical > .btn:last-child:not(:first-child) {
+  border-top-left-radius: 0;
+  border-top-right-radius: 0;
+  border-bottom-left-radius: 4px;
+}
+.btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn {
+  border-radius: 0;
+}
+.btn-group-vertical > .btn-group:first-child:not(:last-child) > .btn:last-child,
+.btn-group-vertical > .btn-group:first-child:not(:last-child) > .dropdown-toggle {
+  border-bottom-right-radius: 0;
+  border-bottom-left-radius: 0;
+}
+.btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child {
+  border-top-left-radius: 0;
+  border-top-right-radius: 0;
+}
+.btn-group-justified {
+  display: table;
+  width: 100%;
+  table-layout: fixed;
+  border-collapse: separate;
+}
+.btn-group-justified > .btn,
+.btn-group-justified > .btn-group {
+  display: table-cell;
+  float: none;
+  width: 1%;
+}
+.btn-group-justified > .btn-group .btn {
+  width: 100%;
+}
+[data-toggle="buttons"] > .btn > input[type="radio"],
+[data-toggle="buttons"] > .btn > input[type="checkbox"] {
+  display: none;
+}
+.input-group {
+  position: relative;
+  display: table;
+  border-collapse: separate;
+}
+.input-group[class*="col-"] {
+  float: none;
+  padding-right: 0;
+  padding-left: 0;
+}
+.input-group .form-control {
+  float: left;
+  width: 100%;
+  margin-bottom: 0;
+}
+.input-group-lg > .form-control,
+.input-group-lg > .input-group-addon,
+.input-group-lg > .input-group-btn > .btn {
+  height: 46px;
+  padding: 10px 16px;
+  font-size: 18px;
+  line-height: 1.33;
+  border-radius: 6px;
+}
+select.input-group-lg > .form-control,
+select.input-group-lg > .input-group-addon,
+select.input-group-lg > .input-group-btn > .btn {
+  height: 46px;
+  line-height: 46px;
+}
+textarea.input-group-lg > .form-control,
+textarea.input-group-lg > .input-group-addon,
+textarea.input-group-lg > .input-group-btn > .btn,
+select[multiple].input-group-lg > .form-control,
+select[multiple].input-group-lg > .input-group-addon,
+select[multiple].input-group-lg > .input-group-btn > .btn {
+  height: auto;
+}
+.input-group-sm > .form-control,
+.input-group-sm > .input-group-addon,
+.input-group-sm > .input-group-btn > .btn {
+  height: 30px;
+  padding: 5px 10px;
+  font-size: 12px;
+  line-height: 1.5;
+  border-radius: 3px;
+}
+select.input-group-sm > .form-control,
+select.input-group-sm > .input-group-addon,
+select.input-group-sm > .input-group-btn > .btn {
+  height: 30px;
+  line-height: 30px;
+}
+textarea.input-group-sm > .form-control,
+textarea.input-group-sm > .input-group-addon,
+textarea.input-group-sm > .input-group-btn > .btn,
+select[multiple].input-group-sm > .form-control,
+select[multiple].input-group-sm > .input-group-addon,
+select[multiple].input-group-sm > .input-group-btn > .btn {
+  height: auto;
+}
+.input-group-addon,
+.input-group-btn,
+.input-group .form-control {
+  display: table-cell;
+}
+.input-group-addon:not(:first-child):not(:last-child),
+.input-group-btn:not(:first-child):not(:last-child),
+.input-group .form-control:not(:first-child):not(:last-child) {
+  border-radius: 0;
+}
+.input-group-addon,
+.input-group-btn {
+  width: 1%;
+  white-space: nowrap;
+  vertical-align: middle;
+}
+.input-group-addon {
+  padding: 6px 12px;
+  font-size: 14px;
+  font-weight: normal;
+  line-height: 1;
+  color: #555;
+  text-align: center;
+  background-color: #eee;
+  border: 1px solid #ccc;
+  border-radius: 4px;
+}
+.input-group-addon.input-sm {
+  padding: 5px 10px;
+  font-size: 12px;
+  border-radius: 3px;
+}
+.input-group-addon.input-lg {
+  padding: 10px 16px;
+  font-size: 18px;
+  border-radius: 6px;
+}
+.input-group-addon input[type="radio"],
+.input-group-addon input[type="checkbox"] {
+  margin-top: 0;
+}
+.input-group .form-control:first-child,
+.input-group-addon:first-child,
+.input-group-btn:first-child > .btn,
+.input-group-btn:first-child > .btn-group > .btn,
+.input-group-btn:first-child > .dropdown-toggle,
+.input-group-btn:last-child > .btn:not(:last-child):not(.dropdown-toggle),
+.input-group-btn:last-child > .btn-group:not(:last-child) > .btn {
+  border-top-right-radius: 0;
+  border-bottom-right-radius: 0;
+}
+.input-group-addon:first-child {
+  border-right: 0;
+}
+.input-group .form-control:last-child,
+.input-group-addon:last-child,
+.input-group-btn:last-child > .btn,
+.input-group-btn:last-child > .btn-group > .btn,
+.input-group-btn:last-child > .dropdown-toggle,
+.input-group-btn:first-child > .btn:not(:first-child),
+.input-group-btn:first-child > .btn-group:not(:first-child) > .btn {
+  border-top-left-radius: 0;
+  border-bottom-left-radius: 0;
+}
+.input-group-addon:last-child {
+  border-left: 0;
+}
+.input-group-btn {
+  position: relative;
+  font-size: 0;
+  white-space: nowrap;
+}
+.input-group-btn > .btn {
+  position: relative;
+}
+.input-group-btn > .btn + .btn {
+  margin-left: -1px;
+}
+.input-group-btn > .btn:hover,
+.input-group-btn > .btn:focus,
+.input-group-btn > .btn:active {
+  z-index: 2;
+}
+.input-group-btn:first-child > .btn,
+.input-group-btn:first-child > .btn-group {
+  margin-right: -1px;
+}
+.input-group-btn:last-child > .btn,
+.input-group-btn:last-child > .btn-group {
+  margin-left: -1px;
+}
+.nav {
+  padding-left: 0;
+  margin-bottom: 0;
+  list-style: none;
+}
+.nav > li {
+  position: relative;
+  display: block;
+}
+.nav > li > a {
+  position: relative;
+  display: block;
+  padding: 10px 15px;
+}
+.nav > li > a:hover,
+.nav > li > a:focus {
+  text-decoration: none;
+  background-color: #eee;
+}
+.nav > li.disabled > a {
+  color: #999;
+}
+.nav > li.disabled > a:hover,
+.nav > li.disabled > a:focus {
+  color: #999;
+  text-decoration: none;
+  cursor: not-allowed;
+  background-color: transparent;
+}
+.nav .open > a,
+.nav .open > a:hover,
+.nav .open > a:focus {
+  background-color: #eee;
+  border-color: #428bca;
+}
+.nav .nav-divider {
+  height: 1px;
+  margin: 9px 0;
+  overflow: hidden;
+  background-color: #e5e5e5;
+}
+.nav > li > a > img {
+  max-width: none;
+}
+.nav-tabs {
+  border-bottom: 1px solid #ddd;
+}
+.nav-tabs > li {
+  float: left;
+  margin-bottom: -1px;
+}
+.nav-tabs > li > a {
+  margin-right: 2px;
+  line-height: 1.428571429;
+  border: 1px solid transparent;
+  border-radius: 4px 4px 0 0;
+}
+.nav-tabs > li > a:hover {
+  border-color: #eee #eee #ddd;
+}
+.nav-tabs > li.active > a,
+.nav-tabs > li.active > a:hover,
+.nav-tabs > li.active > a:focus {
+  color: #555;
+  cursor: default;
+  background-color: #fff;
+  border: 1px solid #ddd;
+  border-bottom-color: transparent;
+}
+.nav-tabs.nav-justified {
+  width: 100%;
+  border-bottom: 0;
+}
+.nav-tabs.nav-justified > li {
+  float: none;
+}
+.nav-tabs.nav-justified > li > a {
+  margin-bottom: 5px;
+  text-align: center;
+}
+.nav-tabs.nav-justified > .dropdown .dropdown-menu {
+  top: auto;
+  left: auto;
+}
+@media (min-width: 768px) {
+  .nav-tabs.nav-justified > li {
+    display: table-cell;
+    width: 1%;
+  }
+  .nav-tabs.nav-justified > li > a {
+    margin-bottom: 0;
+  }
+}
+.nav-tabs.nav-justified > li > a {
+  margin-right: 0;
+  border-radius: 4px;
+}
+.nav-tabs.nav-justified > .active > a,
+.nav-tabs.nav-justified > .active > a:hover,
+.nav-tabs.nav-justified > .active > a:focus {
+  border: 1px solid #ddd;
+}
+@media (min-width: 768px) {
+  .nav-tabs.nav-justified > li > a {
+    border-bottom: 1px solid #ddd;
+    border-radius: 4px 4px 0 0;
+  }
+  .nav-tabs.nav-justified > .active > a,
+  .nav-tabs.nav-justified > .active > a:hover,
+  .nav-tabs.nav-justified > .active > a:focus {
+    border-bottom-color: #fff;
+  }
+}
+.nav-pills > li {
+  float: left;
+}
+.nav-pills > li > a {
+  border-radius: 4px;
+}
+.nav-pills > li + li {
+  margin-left: 2px;
+}
+.nav-pills > li.active > a,
+.nav-pills > li.active > a:hover,
+.nav-pills > li.active > a:focus {
+  color: #fff;
+  background-color: #428bca;
+}
+.nav-stacked > li {
+  float: none;
+}
+.nav-stacked > li + li {
+  margin-top: 2px;
+  margin-left: 0;
+}
+.nav-justified {
+  width: 100%;
+}
+.nav-justified > li {
+  float: none;
+}
+.nav-justified > li > a {
+  margin-bottom: 5px;
+  text-align: center;
+}
+.nav-justified > .dropdown .dropdown-menu {
+  top: auto;
+  left: auto;
+}
+@media (min-width: 768px) {
+  .nav-justified > li {
+    display: table-cell;
+    width: 1%;
+  }
+  .nav-justified > li > a {
+    margin-bottom: 0;
+  }
+}
+.nav-tabs-justified {
+  border-bottom: 0;
+}
+.nav-tabs-justified > li > a {
+  margin-right: 0;
+  border-radius: 4px;
+}
+.nav-tabs-justified > .active > a,
+.nav-tabs-justified > .active > a:hover,
+.nav-tabs-justified > .active > a:focus {
+  border: 1px solid #ddd;
+}
+@media (min-width: 768px) {
+  .nav-tabs-justified > li > a {
+    border-bottom: 1px solid #ddd;
+    border-radius: 4px 4px 0 0;
+  }
+  .nav-tabs-justified > .active > a,
+  .nav-tabs-justified > .active > a:hover,
+  .nav-tabs-justified > .active > a:focus {
+    border-bottom-color: #fff;
+  }
+}
+.tab-content > .tab-pane {
+  display: none;
+}
+.tab-content > .active {
+  display: block;
+}
+.nav-tabs .dropdown-menu {
+  margin-top: -1px;
+  border-top-left-radius: 0;
+  border-top-right-radius: 0;
+}
+.navbar {
+  position: relative;
+  min-height: 50px;
+  margin-bottom: 20px;
+  border: 1px solid transparent;
+}
+@media (min-width: 768px) {
+  .navbar {
+    border-radius: 4px;
+  }
+}
+@media (min-width: 768px) {
+  .navbar-header {
+    float: left;
+  }
+}
+.navbar-collapse {
+  max-height: 340px;
+  padding-right: 15px;
+  padding-left: 15px;
+  overflow-x: visible;
+  -webkit-overflow-scrolling: touch;
+  border-top: 1px solid transparent;
+  box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1);
+}
+.navbar-collapse.in {
+  overflow-y: auto;
+}
+@media (min-width: 768px) {
+  .navbar-collapse {
+    width: auto;
+    border-top: 0;
+    box-shadow: none;
+  }
+  .navbar-collapse.collapse {
+    display: block !important;
+    height: auto !important;
+    padding-bottom: 0;
+    overflow: visible !important;
+  }
+  .navbar-collapse.in {
+    overflow-y: visible;
+  }
+  .navbar-fixed-top .navbar-collapse,
+  .navbar-static-top .navbar-collapse,
+  .navbar-fixed-bottom .navbar-collapse {
+    padding-right: 0;
+    padding-left: 0;
+  }
+}
+.container > .navbar-header,
+.container-fluid > .navbar-header,
+.container > .navbar-collapse,
+.container-fluid > .navbar-collapse {
+  margin-right: -15px;
+  margin-left: -15px;
+}
+@media (min-width: 768px) {
+  .container > .navbar-header,
+  .container-fluid > .navbar-header,
+  .container > .navbar-collapse,
+  .container-fluid > .navbar-collapse {
+    margin-right: 0;
+    margin-left: 0;
+  }
+}
+.navbar-static-top {
+  z-index: 1000;
+  border-width: 0 0 1px;
+}
+@media (min-width: 768px) {
+  .navbar-static-top {
+    border-radius: 0;
+  }
+}
+.navbar-fixed-top,
+.navbar-fixed-bottom {
+  position: fixed;
+  right: 0;
+  left: 0;
+  z-index: 1030;
+}
+@media (min-width: 768px) {
+  .navbar-fixed-top,
+  .navbar-fixed-bottom {
+    border-radius: 0;
+  }
+}
+.navbar-fixed-top {
+  top: 0;
+  border-width: 0 0 1px;
+}
+.navbar-fixed-bottom {
+  bottom: 0;
+  margin-bottom: 0;
+  border-width: 1px 0 0;
+}
+.navbar-brand {
+  float: left;
+  height: 20px;
+  padding: 15px 15px;
+  font-size: 18px;
+  line-height: 20px;
+}
+.navbar-brand:hover,
+.navbar-brand:focus {
+  text-decoration: none;
+}
+@media (min-width: 768px) {
+  .navbar > .container .navbar-brand,
+  .navbar > .container-fluid .navbar-brand {
+    margin-left: -15px;
+  }
+}
+.navbar-toggle {
+  position: relative;
+  float: right;
+  padding: 9px 10px;
+  margin-top: 8px;
+  margin-right: 15px;
+  margin-bottom: 8px;
+  background-color: transparent;
+  background-image: none;
+  border: 1px solid transparent;
+  border-radius: 4px;
+}
+.navbar-toggle:focus {
+  outline: none;
+}
+.navbar-toggle .icon-bar {
+  display: block;
+  width: 22px;
+  height: 2px;
+  border-radius: 1px;
+}
+.navbar-toggle .icon-bar + .icon-bar {
+  margin-top: 4px;
+}
+@media (min-width: 768px) {
+  .navbar-toggle {
+    display: none;
+  }
+}
+.navbar-nav {
+  margin: 7.5px -15px;
+}
+.navbar-nav > li > a {
+  padding-top: 10px;
+  padding-bottom: 10px;
+  line-height: 20px;
+}
+@media (max-width: 767px) {
+  .navbar-nav .open .dropdown-menu {
+    position: static;
+    float: none;
+    width: auto;
+    margin-top: 0;
+    background-color: transparent;
+    border: 0;
+    box-shadow: none;
+  }
+  .navbar-nav .open .dropdown-menu > li > a,
+  .navbar-nav .open .dropdown-menu .dropdown-header {
+    padding: 5px 15px 5px 25px;
+  }
+  .navbar-nav .open .dropdown-menu > li > a {
+    line-height: 20px;
+  }
+  .navbar-nav .open .dropdown-menu > li > a:hover,
+  .navbar-nav .open .dropdown-menu > li > a:focus {
+    background-image: none;
+  }
+}
+@media (min-width: 768px) {
+  .navbar-nav {
+    float: left;
+    margin: 0;
+  }
+  .navbar-nav > li {
+    float: left;
+  }
+  .navbar-nav > li > a {
+    padding-top: 15px;
+    padding-bottom: 15px;
+  }
+  .navbar-nav.navbar-right:last-child {
+    margin-right: -15px;
+  }
+}
+@media (min-width: 768px) {
+  .navbar-left {
+    float: left !important;
+  }
+  .navbar-right {
+    float: right !important;
+  }
+}
+.navbar-form {
+  padding: 10px 15px;
+  margin-top: 8px;
+  margin-right: -15px;
+  margin-bottom: 8px;
+  margin-left: -15px;
+  border-top: 1px solid transparent;
+  border-bottom: 1px solid transparent;
+  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1), 0 1px 0 rgba(255, 255, 255, .1);
+          box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1), 0 1px 0 rgba(255, 255, 255, .1);
+}
+@media (min-width: 768px) {
+  .navbar-form .form-group {
+    display: inline-block;
+    margin-bottom: 0;
+    vertical-align: middle;
+  }
+  .navbar-form .form-control {
+    display: inline-block;
+    width: auto;
+    vertical-align: middle;
+  }
+  .navbar-form .control-label {
+    margin-bottom: 0;
+    vertical-align: middle;
+  }
+  .navbar-form .radio,
+  .navbar-form .checkbox {
+    display: inline-block;
+    padding-left: 0;
+    margin-top: 0;
+    margin-bottom: 0;
+    vertical-align: middle;
+  }
+  .navbar-form .radio input[type="radio"],
+  .navbar-form .checkbox input[type="checkbox"] {
+    float: none;
+    margin-left: 0;
+  }
+  .navbar-form .has-feedback .form-control-feedback {
+    top: 0;
+  }
+}
+@media (max-width: 767px) {
+  .navbar-form .form-group {
+    margin-bottom: 5px;
+  }
+}
+@media (min-width: 768px) {
+  .navbar-form {
+    width: auto;
+    padding-top: 0;
+    padding-bottom: 0;
+    margin-right: 0;
+    margin-left: 0;
+    border: 0;
+    -webkit-box-shadow: none;
+            box-shadow: none;
+  }
+  .navbar-form.navbar-right:last-child {
+    margin-right: -15px;
+  }
+}
+.navbar-nav > li > .dropdown-menu {
+  margin-top: 0;
+  border-top-left-radius: 0;
+  border-top-right-radius: 0;
+}
+.navbar-fixed-bottom .navbar-nav > li > .dropdown-menu {
+  border-bottom-right-radius: 0;
+  border-bottom-left-radius: 0;
+}
+.navbar-btn {
+  margin-top: 8px;
+  margin-bottom: 8px;
+}
+.navbar-btn.btn-sm {
+  margin-top: 10px;
+  margin-bottom: 10px;
+}
+.navbar-btn.btn-xs {
+  margin-top: 14px;
+  margin-bottom: 14px;
+}
+.navbar-text {
+  margin-top: 15px;
+  margin-bottom: 15px;
+}
+@media (min-width: 768px) {
+  .navbar-text {
+    float: left;
+    margin-right: 15px;
+    margin-left: 15px;
+  }
+  .navbar-text.navbar-right:last-child {
+    margin-right: 0;
+  }
+}
+.navbar-default {
+  background-color: #f8f8f8;
+  border-color: #e7e7e7;
+}
+.navbar-default .navbar-brand {
+  color: #777;
+}
+.navbar-default .navbar-brand:hover,
+.navbar-default .navbar-brand:focus {
+  color: #5e5e5e;
+  background-color: transparent;
+}
+.navbar-default .navbar-text {
+  color: #777;
+}
+.navbar-default .navbar-nav > li > a {
+  color: #777;
+}
+.navbar-default .navbar-nav > li > a:hover,
+.navbar-default .navbar-nav > li > a:focus {
+  color: #333;
+  background-color: transparent;
+}
+.navbar-default .navbar-nav > .active > a,
+.navbar-default .navbar-nav > .active > a:hover,
+.navbar-default .navbar-nav > .active > a:focus {
+  color: #555;
+  background-color: #e7e7e7;
+}
+.navbar-default .navbar-nav > .disabled > a,
+.navbar-default .navbar-nav > .disabled > a:hover,
+.navbar-default .navbar-nav > .disabled > a:focus {
+  color: #ccc;
+  background-color: transparent;
+}
+.navbar-default .navbar-toggle {
+  border-color: #ddd;
+}
+.navbar-default .navbar-toggle:hover,
+.navbar-default .navbar-toggle:focus {
+  background-color: #ddd;
+}
+.navbar-default .navbar-toggle .icon-bar {
+  background-color: #888;
+}
+.navbar-default .navbar-collapse,
+.navbar-default .navbar-form {
+  border-color: #e7e7e7;
+}
+.navbar-default .navbar-nav > .open > a,
+.navbar-default .navbar-nav > .open > a:hover,
+.navbar-default .navbar-nav > .open > a:focus {
+  color: #555;
+  background-color: #e7e7e7;
+}
+@media (max-width: 767px) {
+  .navbar-default .navbar-nav .open .dropdown-menu > li > a {
+    color: #777;
+  }
+  .navbar-default .navbar-nav .open .dropdown-menu > li > a:hover,
+  .navbar-default .navbar-nav .open .dropdown-menu > li > a:focus {
+    color: #333;
+    background-color: transparent;
+  }
+  .navbar-default .navbar-nav .open .dropdown-menu > .active > a,
+  .navbar-default .navbar-nav .open .dropdown-menu > .active > a:hover,
+  .navbar-default .navbar-nav .open .dropdown-menu > .active > a:focus {
+    color: #555;
+    background-color: #e7e7e7;
+  }
+  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a,
+  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:hover,
+  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:focus {
+    color: #ccc;
+    background-color: transparent;
+  }
+}
+.navbar-default .navbar-link {
+  color: #777;
+}
+.navbar-default .navbar-link:hover {
+  color: #333;
+}
+.navbar-inverse {
+  background-color: #222;
+  border-color: #080808;
+}
+.navbar-inverse .navbar-brand {
+  color: #999;
+}
+.navbar-inverse .navbar-brand:hover,
+.navbar-inverse .navbar-brand:focus {
+  color: #fff;
+  background-color: transparent;
+}
+.navbar-inverse .navbar-text {
+  color: #999;
+}
+.navbar-inverse .navbar-nav > li > a {
+  color: #999;
+}
+.navbar-inverse .navbar-nav > li > a:hover,
+.navbar-inverse .navbar-nav > li > a:focus {
+  color: #fff;
+  background-color: transparent;
+}
+.navbar-inverse .navbar-nav > .active > a,
+.navbar-inverse .navbar-nav > .active > a:hover,
+.navbar-inverse .navbar-nav > .active > a:focus {
+  color: #fff;
+  background-color: #080808;
+}
+.navbar-inverse .navbar-nav > .disabled > a,
+.navbar-inverse .navbar-nav > .disabled > a:hover,
+.navbar-inverse .navbar-nav > .disabled > a:focus {
+  color: #444;
+  background-color: transparent;
+}
+.navbar-inverse .navbar-toggle {
+  border-color: #333;
+}
+.navbar-inverse .navbar-toggle:hover,
+.navbar-inverse .navbar-toggle:focus {
+  background-color: #333;
+}
+.navbar-inverse .navbar-toggle .icon-bar {
+  background-color: #fff;
+}
+.navbar-inverse .navbar-collapse,
+.navbar-inverse .navbar-form {
+  border-color: #101010;
+}
+.navbar-inverse .navbar-nav > .open > a,
+.navbar-inverse .navbar-nav > .open > a:hover,
+.navbar-inverse .navbar-nav > .open > a:focus {
+  color: #fff;
+  background-color: #080808;
+}
+@media (max-width: 767px) {
+  .navbar-inverse .navbar-nav .open .dropdown-menu > .dropdown-header {
+    border-color: #080808;
+  }
+  .navbar-inverse .navbar-nav .open .dropdown-menu .divider {
+    background-color: #080808;
+  }
+  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a {
+    color: #999;
+  }
+  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:hover,
+  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:focus {
+    color: #fff;
+    background-color: transparent;
+  }
+  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a,
+  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:hover,
+  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:focus {
+    color: #fff;
+    background-color: #080808;
+  }
+  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a,
+  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:hover,
+  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:focus {
+    color: #444;
+    background-color: transparent;
+  }
+}
+.navbar-inverse .navbar-link {
+  color: #999;
+}
+.navbar-inverse .navbar-link:hover {
+  color: #fff;
+}
+.breadcrumb {
+  padding: 8px 15px;
+  margin-bottom: 20px;
+  list-style: none;
+  background-color: #f5f5f5;
+  border-radius: 4px;
+}
+.breadcrumb > li {
+  display: inline-block;
+}
+.breadcrumb > li + li:before {
+  padding: 0 5px;
+  color: #ccc;
+  content: "/\00a0";
+}
+.breadcrumb > .active {
+  color: #999;
+}
+.pagination {
+  display: inline-block;
+  padding-left: 0;
+  margin: 20px 0;
+  border-radius: 4px;
+}
+.pagination > li {
+  display: inline;
+}
+.pagination > li > a,
+.pagination > li > span {
+  position: relative;
+  float: left;
+  padding: 6px 12px;
+  margin-left: -1px;
+  line-height: 1.428571429;
+  color: #428bca;
+  text-decoration: none;
+  background-color: #fff;
+  border: 1px solid #ddd;
+}
+.pagination > li:first-child > a,
+.pagination > li:first-child > span {
+  margin-left: 0;
+  border-top-left-radius: 4px;
+  border-bottom-left-radius: 4px;
+}
+.pagination > li:last-child > a,
+.pagination > li:last-child > span {
+  border-top-right-radius: 4px;
+  border-bottom-right-radius: 4px;
+}
+.pagination > li > a:hover,
+.pagination > li > span:hover,
+.pagination > li > a:focus,
+.pagination > li > span:focus {
+  color: #2a6496;
+  background-color: #eee;
+  border-color: #ddd;
+}
+.pagination > .active > a,
+.pagination > .active > span,
+.pagination > .active > a:hover,
+.pagination > .active > span:hover,
+.pagination > .active > a:focus,
+.pagination > .active > span:focus {
+  z-index: 2;
+  color: #fff;
+  cursor: default;
+  background-color: #428bca;
+  border-color: #428bca;
+}
+.pagination > .disabled > span,
+.pagination > .disabled > span:hover,
+.pagination > .disabled > span:focus,
+.pagination > .disabled > a,
+.pagination > .disabled > a:hover,
+.pagination > .disabled > a:focus {
+  color: #999;
+  cursor: not-allowed;
+  background-color: #fff;
+  border-color: #ddd;
+}
+.pagination-lg > li > a,
+.pagination-lg > li > span {
+  padding: 10px 16px;
+  font-size: 18px;
+}
+.pagination-lg > li:first-child > a,
+.pagination-lg > li:first-child > span {
+  border-top-left-radius: 6px;
+  border-bottom-left-radius: 6px;
+}
+.pagination-lg > li:last-child > a,
+.pagination-lg > li:last-child > span {
+  border-top-right-radius: 6px;
+  border-bottom-right-radius: 6px;
+}
+.pagination-sm > li > a,
+.pagination-sm > li > span {
+  padding: 5px 10px;
+  font-size: 12px;
+}
+.pagination-sm > li:first-child > a,
+.pagination-sm > li:first-child > span {
+  border-top-left-radius: 3px;
+  border-bottom-left-radius: 3px;
+}
+.pagination-sm > li:last-child > a,
+.pagination-sm > li:last-child > span {
+  border-top-right-radius: 3px;
+  border-bottom-right-radius: 3px;
+}
+.pager {
+  padding-left: 0;
+  margin: 20px 0;
+  text-align: center;
+  list-style: none;
+}
+.pager li {
+  display: inline;
+}
+.pager li > a,
+.pager li > span {
+  display: inline-block;
+  padding: 5px 14px;
+  background-color: #fff;
+  border: 1px solid #ddd;
+  border-radius: 15px;
+}
+.pager li > a:hover,
+.pager li > a:focus {
+  text-decoration: none;
+  background-color: #eee;
+}
+.pager .next > a,
+.pager .next > span {
+  float: right;
+}
+.pager .previous > a,
+.pager .previous > span {
+  float: left;
+}
+.pager .disabled > a,
+.pager .disabled > a:hover,
+.pager .disabled > a:focus,
+.pager .disabled > span {
+  color: #999;
+  cursor: not-allowed;
+  background-color: #fff;
+}
+.label {
+  display: inline;
+  padding: .2em .6em .3em;
+  font-size: 75%;
+  font-weight: bold;
+  line-height: 1;
+  color: #fff;
+  text-align: center;
+  white-space: nowrap;
+  vertical-align: baseline;
+  border-radius: .25em;
+}
+.label[href]:hover,
+.label[href]:focus {
+  color: #fff;
+  text-decoration: none;
+  cursor: pointer;
+}
+.label:empty {
+  display: none;
+}
+.btn .label {
+  position: relative;
+  top: -1px;
+}
+.label-default {
+  background-color: #999;
+}
+.label-default[href]:hover,
+.label-default[href]:focus {
+  background-color: #808080;
+}
+.label-primary {
+  background-color: #428bca;
+}
+.label-primary[href]:hover,
+.label-primary[href]:focus {
+  background-color: #3071a9;
+}
+.label-success {
+  background-color: #5cb85c;
+}
+.label-success[href]:hover,
+.label-success[href]:focus {
+  background-color: #449d44;
+}
+.label-info {
+  background-color: #5bc0de;
+}
+.label-info[href]:hover,
+.label-info[href]:focus {
+  background-color: #31b0d5;
+}
+.label-warning {
+  background-color: #f0ad4e;
+}
+.label-warning[href]:hover,
+.label-warning[href]:focus {
+  background-color: #ec971f;
+}
+.label-danger {
+  background-color: #d9534f;
+}
+.label-danger[href]:hover,
+.label-danger[href]:focus {
+  background-color: #c9302c;
+}
+.badge {
+  display: inline-block;
+  min-width: 10px;
+  padding: 3px 7px;
+  font-size: 12px;
+  font-weight: bold;
+  line-height: 1;
+  color: #fff;
+  text-align: center;
+  white-space: nowrap;
+  vertical-align: baseline;
+  background-color: #999;
+  border-radius: 10px;
+}
+.badge:empty {
+  display: none;
+}
+.btn .badge {
+  position: relative;
+  top: -1px;
+}
+.btn-xs .badge {
+  top: 0;
+  padding: 1px 5px;
+}
+a.badge:hover,
+a.badge:focus {
+  color: #fff;
+  text-decoration: none;
+  cursor: pointer;
+}
+a.list-group-item.active > .badge,
+.nav-pills > .active > a > .badge {
+  color: #428bca;
+  background-color: #fff;
+}
+.nav-pills > li > a > .badge {
+  margin-left: 3px;
+}
+.jumbotron {
+  padding: 30px;
+  margin-bottom: 30px;
+  color: inherit;
+  background-color: #eee;
+}
+.jumbotron h1,
+.jumbotron .h1 {
+  color: inherit;
+}
+.jumbotron p {
+  margin-bottom: 15px;
+  font-size: 21px;
+  font-weight: 200;
+}
+.container .jumbotron {
+  border-radius: 6px;
+}
+.jumbotron .container {
+  max-width: 100%;
+}
+@media screen and (min-width: 768px) {
+  .jumbotron {
+    padding-top: 48px;
+    padding-bottom: 48px;
+  }
+  .container .jumbotron {
+    padding-right: 60px;
+    padding-left: 60px;
+  }
+  .jumbotron h1,
+  .jumbotron .h1 {
+    font-size: 63px;
+  }
+}
+.thumbnail {
+  display: block;
+  padding: 4px;
+  margin-bottom: 20px;
+  line-height: 1.428571429;
+  background-color: #fff;
+  border: 1px solid #ddd;
+  border-radius: 4px;
+  -webkit-transition: all .2s ease-in-out;
+          transition: all .2s ease-in-out;
+}
+.thumbnail > img,
+.thumbnail a > img {
+  display: block;
+  max-width: 100%;
+  height: auto;
+  margin-right: auto;
+  margin-left: auto;
+}
+a.thumbnail:hover,
+a.thumbnail:focus,
+a.thumbnail.active {
+  border-color: #428bca;
+}
+.thumbnail .caption {
+  padding: 9px;
+  color: #333;
+}
+.alert {
+  padding: 15px;
+  margin-bottom: 20px;
+  border: 1px solid transparent;
+  border-radius: 4px;
+}
+.alert h4 {
+  margin-top: 0;
+  color: inherit;
+}
+.alert .alert-link {
+  font-weight: bold;
+}
+.alert > p,
+.alert > ul {
+  margin-bottom: 0;
+}
+.alert > p + p {
+  margin-top: 5px;
+}
+.alert-dismissable {
+  padding-right: 35px;
+}
+.alert-dismissable .close {
+  position: relative;
+  top: -2px;
+  right: -21px;
+  color: inherit;
+}
+.alert-success {
+  color: #3c763d;
+  background-color: #dff0d8;
+  border-color: #d6e9c6;
+}
+.alert-success hr {
+  border-top-color: #c9e2b3;
+}
+.alert-success .alert-link {
+  color: #2b542c;
+}
+.alert-info {
+  color: #31708f;
+  background-color: #edf6fa;
+  border-color: #bce8f1;
+}
+.alert-info hr {
+  border-top-color: #a6e1ec;
+}
+.alert-info .alert-link {
+  color: #245269;
+  font-weight: bold;
+}
+.alert-info a {
+  font-weight: bold;
+}
+.alert-warning {
+  color: #8a6d3b;
+  background-color: #fcf8e3;
+  border-color: #faebcc;
+}
+.alert-warning hr {
+  border-top-color: #f7e1b5;
+}
+.alert-warning .alert-link {
+  color: #66512c;
+}
+.alert-danger {
+  color: #a94442;
+  background-color: #f2dede;
+  border-color: #ebccd1;
+}
+.alert-danger hr {
+  border-top-color: #e4b9c0;
+}
+.alert-danger .alert-link {
+  color: #843534;
+}
+@-webkit-keyframes progress-bar-stripes {
+  from {
+    background-position: 40px 0;
+  }
+  to {
+    background-position: 0 0;
+  }
+}
+@keyframes progress-bar-stripes {
+  from {
+    background-position: 40px 0;
+  }
+  to {
+    background-position: 0 0;
+  }
+}
+.progress {
+  height: 20px;
+  margin-bottom: 20px;
+  overflow: hidden;
+  background-color: #f5f5f5;
+  border-radius: 4px;
+  -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, .1);
+          box-shadow: inset 0 1px 2px rgba(0, 0, 0, .1);
+}
+.progress-bar {
+  float: left;
+  width: 0;
+  height: 100%;
+  font-size: 12px;
+  line-height: 20px;
+  color: #fff;
+  text-align: center;
+  background-color: #428bca;
+  -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .15);
+          box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .15);
+  -webkit-transition: width .6s ease;
+          transition: width .6s ease;
+}
+.progress-striped .progress-bar {
+  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+  background-image:         linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+  background-size: 40px 40px;
+}
+.progress.active .progress-bar {
+  -webkit-animation: progress-bar-stripes 2s linear infinite;
+          animation: progress-bar-stripes 2s linear infinite;
+}
+.progress-bar-success {
+  background-color: #5cb85c;
+}
+.progress-striped .progress-bar-success {
+  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+  background-image:         linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+}
+.progress-bar-info {
+  background-color: #5bc0de;
+}
+.progress-striped .progress-bar-info {
+  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+  background-image:         linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+}
+.progress-bar-warning {
+  background-color: #f0ad4e;
+}
+.progress-striped .progress-bar-warning {
+  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+  background-image:         linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+}
+.progress-bar-danger {
+  background-color: #d9534f;
+}
+.progress-striped .progress-bar-danger {
+  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+  background-image:         linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);
+}
+.media,
+.media-body {
+  overflow: hidden;
+  zoom: 1;
+}
+.media,
+.media .media {
+  margin-top: 15px;
+}
+.media:first-child {
+  margin-top: 0;
+}
+.media-object {
+  display: block;
+}
+.media-heading {
+  margin: 0 0 5px;
+}
+.media > .pull-left {
+  margin-right: 10px;
+}
+.media > .pull-right {
+  margin-left: 10px;
+}
+.media-list {
+  padding-left: 0;
+  list-style: none;
+}
+.list-group {
+  padding-left: 0;
+  margin-bottom: 20px;
+}
+.list-group-item {
+  position: relative;
+  display: block;
+  padding: 10px 15px;
+  margin-bottom: -1px;
+  background-color: #fff;
+  border: 1px solid #ddd;
+}
+.list-group-item:first-child {
+  border-top-left-radius: 4px;
+  border-top-right-radius: 4px;
+}
+.list-group-item:last-child {
+  margin-bottom: 0;
+  border-bottom-right-radius: 4px;
+  border-bottom-left-radius: 4px;
+}
+.list-group-item > .badge {
+  float: right;
+}
+.list-group-item > .badge + .badge {
+  margin-right: 5px;
+}
+a.list-group-item {
+  color: #555;
+}
+a.list-group-item .list-group-item-heading {
+  color: #333;
+}
+a.list-group-item:hover,
+a.list-group-item:focus {
+  text-decoration: none;
+  background-color: #f5f5f5;
+}
+a.list-group-item.active,
+a.list-group-item.active:hover,
+a.list-group-item.active:focus {
+  z-index: 2;
+  color: #fff;
+  background-color: #428bca;
+  border-color: #428bca;
+}
+a.list-group-item.active .list-group-item-heading,
+a.list-group-item.active:hover .list-group-item-heading,
+a.list-group-item.active:focus .list-group-item-heading {
+  color: inherit;
+}
+a.list-group-item.active .list-group-item-text,
+a.list-group-item.active:hover .list-group-item-text,
+a.list-group-item.active:focus .list-group-item-text {
+  color: #e1edf7;
+}
+.list-group-item-success {
+  color: #3c763d;
+  background-color: #dff0d8;
+}
+a.list-group-item-success {
+  color: #3c763d;
+}
+a.list-group-item-success .list-group-item-heading {
+  color: inherit;
+}
+a.list-group-item-success:hover,
+a.list-group-item-success:focus {
+  color: #3c763d;
+  background-color: #d0e9c6;
+}
+a.list-group-item-success.active,
+a.list-group-item-success.active:hover,
+a.list-group-item-success.active:focus {
+  color: #fff;
+  background-color: #3c763d;
+  border-color: #3c763d;
+}
+.list-group-item-info {
+  color: #31708f;
+  background-color: #d9edf7;
+}
+a.list-group-item-info {
+  color: #31708f;
+}
+a.list-group-item-info .list-group-item-heading {
+  color: inherit;
+}
+a.list-group-item-info:hover,
+a.list-group-item-info:focus {
+  color: #31708f;
+  background-color: #c4e3f3;
+}
+a.list-group-item-info.active,
+a.list-group-item-info.active:hover,
+a.list-group-item-info.active:focus {
+  color: #fff;
+  background-color: #31708f;
+  border-color: #31708f;
+}
+.list-group-item-warning {
+  color: #8a6d3b;
+  background-color: #fcf8e3;
+}
+a.list-group-item-warning {
+  color: #8a6d3b;
+}
+a.list-group-item-warning .list-group-item-heading {
+  color: inherit;
+}
+a.list-group-item-warning:hover,
+a.list-group-item-warning:focus {
+  color: #8a6d3b;
+  background-color: #faf2cc;
+}
+a.list-group-item-warning.active,
+a.list-group-item-warning.active:hover,
+a.list-group-item-warning.active:focus {
+  color: #fff;
+  background-color: #8a6d3b;
+  border-color: #8a6d3b;
+}
+.list-group-item-danger {
+  color: #a94442;
+  background-color: #f2dede;
+}
+a.list-group-item-danger {
+  color: #a94442;
+}
+a.list-group-item-danger .list-group-item-heading {
+  color: inherit;
+}
+a.list-group-item-danger:hover,
+a.list-group-item-danger:focus {
+  color: #a94442;
+  background-color: #ebcccc;
+}
+a.list-group-item-danger.active,
+a.list-group-item-danger.active:hover,
+a.list-group-item-danger.active:focus {
+  color: #fff;
+  background-color: #a94442;
+  border-color: #a94442;
+}
+.list-group-item-heading {
+  margin-top: 0;
+  margin-bottom: 5px;
+}
+.list-group-item-text {
+  margin-bottom: 0;
+  line-height: 1.3;
+}
+.panel {
+  margin-bottom: 20px;
+  background-color: #fff;
+  border: 1px solid transparent;
+  border-radius: 4px;
+  -webkit-box-shadow: 0 1px 1px rgba(0, 0, 0, .05);
+          box-shadow: 0 1px 1px rgba(0, 0, 0, .05);
+}
+.panel-body {
+  padding: 15px;
+}
+.panel > .list-group {
+  margin-bottom: 0;
+}
+.panel > .list-group .list-group-item {
+  border-width: 1px 0;
+  border-radius: 0;
+}
+.panel > .list-group .list-group-item:first-child {
+  border-top: 0;
+}
+.panel > .list-group .list-group-item:last-child {
+  border-bottom: 0;
+}
+.panel > .list-group:first-child .list-group-item:first-child {
+  border-top-left-radius: 3px;
+  border-top-right-radius: 3px;
+}
+.panel > .list-group:last-child .list-group-item:last-child {
+  border-bottom-right-radius: 3px;
+  border-bottom-left-radius: 3px;
+}
+.panel-heading + .list-group .list-group-item:first-child {
+  border-top-width: 0;
+}
+.panel > .table,
+.panel > .table-responsive > .table {
+  margin-bottom: 0;
+}
+.panel > .table:first-child > thead:first-child > tr:first-child td:first-child,
+.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:first-child,
+.panel > .table:first-child > tbody:first-child > tr:first-child td:first-child,
+.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:first-child,
+.panel > .table:first-child > thead:first-child > tr:first-child th:first-child,
+.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:first-child,
+.panel > .table:first-child > tbody:first-child > tr:first-child th:first-child,
+.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:first-child {
+  border-top-left-radius: 3px;
+}
+.panel > .table:first-child > thead:first-child > tr:first-child td:last-child,
+.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:last-child,
+.panel > .table:first-child > tbody:first-child > tr:first-child td:last-child,
+.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:last-child,
+.panel > .table:first-child > thead:first-child > tr:first-child th:last-child,
+.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:last-child,
+.panel > .table:first-child > tbody:first-child > tr:first-child th:last-child,
+.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:last-child {
+  border-top-right-radius: 3px;
+}
+.panel > .table:last-child > tbody:last-child > tr:last-child td:first-child,
+.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:first-child,
+.panel > .table:last-child > tfoot:last-child > tr:last-child td:first-child,
+.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:first-child,
+.panel > .table:last-child > tbody:last-child > tr:last-child th:first-child,
+.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:first-child,
+.panel > .table:last-child > tfoot:last-child > tr:last-child th:first-child,
+.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:first-child {
+  border-bottom-left-radius: 3px;
+}
+.panel > .table:last-child > tbody:last-child > tr:last-child td:last-child,
+.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:last-child,
+.panel > .table:last-child > tfoot:last-child > tr:last-child td:last-child,
+.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:last-child,
+.panel > .table:last-child > tbody:last-child > tr:last-child th:last-child,
+.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:last-child,
+.panel > .table:last-child > tfoot:last-child > tr:last-child th:last-child,
+.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:last-child {
+  border-bottom-right-radius: 3px;
+}
+.panel > .panel-body + .table,
+.panel > .panel-body + .table-responsive {
+  border-top: 1px solid #ddd;
+}
+.panel > .table > tbody:first-child > tr:first-child th,
+.panel > .table > tbody:first-child > tr:first-child td {
+  border-top: 0;
+}
+.panel > .table-bordered,
+.panel > .table-responsive > .table-bordered {
+  border: 0;
+}
+.panel > .table-bordered > thead > tr > th:first-child,
+.panel > .table-responsive > .table-bordered > thead > tr > th:first-child,
+.panel > .table-bordered > tbody > tr > th:first-child,
+.panel > .table-responsive > .table-bordered > tbody > tr > th:first-child,
+.panel > .table-bordered > tfoot > tr > th:first-child,
+.panel > .table-responsive > .table-bordered > tfoot > tr > th:first-child,
+.panel > .table-bordered > thead > tr > td:first-child,
+.panel > .table-responsive > .table-bordered > thead > tr > td:first-child,
+.panel > .table-bordered > tbody > tr > td:first-child,
+.panel > .table-responsive > .table-bordered > tbody > tr > td:first-child,
+.panel > .table-bordered > tfoot > tr > td:first-child,
+.panel > .table-responsive > .table-bordered > tfoot > tr > td:first-child {
+  border-left: 0;
+}
+.panel > .table-bordered > thead > tr > th:last-child,
+.panel > .table-responsive > .table-bordered > thead > tr > th:last-child,
+.panel > .table-bordered > tbody > tr > th:last-child,
+.panel > .table-responsive > .table-bordered > tbody > tr > th:last-child,
+.panel > .table-bordered > tfoot > tr > th:last-child,
+.panel > .table-responsive > .table-bordered > tfoot > tr > th:last-child,
+.panel > .table-bordered > thead > tr > td:last-child,
+.panel > .table-responsive > .table-bordered > thead > tr > td:last-child,
+.panel > .table-bordered > tbody > tr > td:last-child,
+.panel > .table-responsive > .table-bordered > tbody > tr > td:last-child,
+.panel > .table-bordered > tfoot > tr > td:last-child,
+.panel > .table-responsive > .table-bordered > tfoot > tr > td:last-child {
+  border-right: 0;
+}
+.panel > .table-bordered > thead > tr:first-child > th,
+.panel > .table-responsive > .table-bordered > thead > tr:first-child > th,
+.panel > .table-bordered > tbody > tr:first-child > th,
+.panel > .table-responsive > .table-bordered > tbody > tr:first-child > th,
+.panel > .table-bordered > tfoot > tr:first-child > th,
+.panel > .table-responsive > .table-bordered > tfoot > tr:first-child > th,
+.panel > .table-bordered > thead > tr:first-child > td,
+.panel > .table-responsive > .table-bordered > thead > tr:first-child > td,
+.panel > .table-bordered > tbody > tr:first-child > td,
+.panel > .table-responsive > .table-bordered > tbody > tr:first-child > td,
+.panel > .table-bordered > tfoot > tr:first-child > td,
+.panel > .table-responsive > .table-bordered > tfoot > tr:first-child > td {
+  border-top: 0;
+}
+.panel > .table-bordered > thead > tr:last-child > th,
+.panel > .table-responsive > .table-bordered > thead > tr:last-child > th,
+.panel > .table-bordered > tbody > tr:last-child > th,
+.panel > .table-responsive > .table-bordered > tbody > tr:last-child > th,
+.panel > .table-bordered > tfoot > tr:last-child > th,
+.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > th,
+.panel > .table-bordered > thead > tr:last-child > td,
+.panel > .table-responsive > .table-bordered > thead > tr:last-child > td,
+.panel > .table-bordered > tbody > tr:last-child > td,
+.panel > .table-responsive > .table-bordered > tbody > tr:last-child > td,
+.panel > .table-bordered > tfoot > tr:last-child > td,
+.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > td {
+  border-bottom: 0;
+}
+.panel > .table-responsive {
+  margin-bottom: 0;
+  border: 0;
+}
+.panel-heading {
+  padding: 10px 15px;
+  border-bottom: 1px solid transparent;
+  border-top-left-radius: 3px;
+  border-top-right-radius: 3px;
+}
+.panel-heading > .dropdown .dropdown-toggle {
+  color: inherit;
+}
+.panel-title {
+  margin-top: 0;
+  margin-bottom: 0;
+  font-size: 16px;
+  color: inherit;
+}
+.panel-title > a {
+  color: inherit;
+}
+.panel-footer {
+  padding: 10px 15px;
+  background-color: #f5f5f5;
+  border-top: 1px solid #ddd;
+  border-bottom-right-radius: 3px;
+  border-bottom-left-radius: 3px;
+}
+.panel-group {
+  margin-bottom: 20px;
+}
+.panel-group .panel {
+  margin-bottom: 0;
+  overflow: hidden;
+  border-radius: 4px;
+}
+.panel-group .panel + .panel {
+  margin-top: 5px;
+}
+.panel-group .panel-heading {
+  border-bottom: 0;
+}
+.panel-group .panel-heading + .panel-collapse .panel-body {
+  border-top: 1px solid #ddd;
+}
+.panel-group .panel-footer {
+  border-top: 0;
+}
+.panel-group .panel-footer + .panel-collapse .panel-body {
+  border-bottom: 1px solid #ddd;
+}
+.panel-default {
+  border-color: #ddd;
+}
+.panel-default > .panel-heading {
+  color: #333;
+  background-color: #f5f5f5;
+  border-color: #ddd;
+}
+.panel-default > .panel-heading + .panel-collapse .panel-body {
+  border-top-color: #ddd;
+}
+.panel-default > .panel-footer + .panel-collapse .panel-body {
+  border-bottom-color: #ddd;
+}
+.panel-primary {
+  border-color: #428bca;
+}
+.panel-primary > .panel-heading {
+  color: #fff;
+  background-color: #428bca;
+  border-color: #428bca;
+}
+.panel-primary > .panel-heading + .panel-collapse .panel-body {
+  border-top-color: #428bca;
+}
+.panel-primary > .panel-footer + .panel-collapse .panel-body {
+  border-bottom-color: #428bca;
+}
+.panel-success {
+  border-color: #d6e9c6;
+}
+.panel-success > .panel-heading {
+  color: #3c763d;
+  background-color: #dff0d8;
+  border-color: #d6e9c6;
+}
+.panel-success > .panel-heading + .panel-collapse .panel-body {
+  border-top-color: #d6e9c6;
+}
+.panel-success > .panel-footer + .panel-collapse .panel-body {
+  border-bottom-color: #d6e9c6;
+}
+.panel-info {
+  border-color: #bce8f1;
+}
+.panel-info > .panel-heading {
+  color: #31708f;
+  background-color: #d9edf7;
+  border-color: #bce8f1;
+}
+.panel-info > .panel-heading + .panel-collapse .panel-body {
+  border-top-color: #bce8f1;
+}
+.panel-info > .panel-footer + .panel-collapse .panel-body {
+  border-bottom-color: #bce8f1;
+}
+.panel-warning {
+  border-color: #faebcc;
+}
+.panel-warning > .panel-heading {
+  color: #8a6d3b;
+  background-color: #fcf8e3;
+  border-color: #faebcc;
+}
+.panel-warning > .panel-heading + .panel-collapse .panel-body {
+  border-top-color: #faebcc;
+}
+.panel-warning > .panel-footer + .panel-collapse .panel-body {
+  border-bottom-color: #faebcc;
+}
+.panel-danger {
+  border-color: #ebccd1;
+}
+.panel-danger > .panel-heading {
+  color: #a94442;
+  background-color: #f2dede;
+  border-color: #ebccd1;
+}
+.panel-danger > .panel-heading + .panel-collapse .panel-body {
+  border-top-color: #ebccd1;
+}
+.panel-danger > .panel-footer + .panel-collapse .panel-body {
+  border-bottom-color: #ebccd1;
+}
+.well {
+  min-height: 20px;
+  padding: 19px;
+  margin-bottom: 20px;
+  background-color: #f5f5f5;
+  border: 1px solid #e3e3e3;
+  border-radius: 4px;
+  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .05);
+          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .05);
+}
+.well blockquote {
+  border-color: #ddd;
+  border-color: rgba(0, 0, 0, .15);
+}
+.well-lg {
+  padding: 24px;
+  border-radius: 6px;
+}
+.well-sm {
+  padding: 9px;
+  border-radius: 3px;
+}
+.close {
+  float: right;
+  font-size: 21px;
+  font-weight: bold;
+  line-height: 1;
+  color: #000;
+  text-shadow: 0 1px 0 #fff;
+  filter: alpha(opacity=20);
+  opacity: .2;
+}
+.close:hover,
+.close:focus {
+  color: #000;
+  text-decoration: none;
+  cursor: pointer;
+  filter: alpha(opacity=50);
+  opacity: .5;
+}
+button.close {
+  -webkit-appearance: none;
+  padding: 0;
+  cursor: pointer;
+  background: transparent;
+  border: 0;
+}
+.modal-open {
+  overflow: hidden;
+}
+.modal {
+  position: fixed;
+  top: 0;
+  right: 0;
+  bottom: 0;
+  left: 0;
+  z-index: 1050;
+  display: none;
+  overflow: auto;
+  overflow-y: scroll;
+  -webkit-overflow-scrolling: touch;
+  outline: 0;
+}
+.modal.fade .modal-dialog {
+  -webkit-transition: -webkit-transform .3s ease-out;
+     -moz-transition:    -moz-transform .3s ease-out;
+       -o-transition:      -o-transform .3s ease-out;
+          transition:         transform .3s ease-out;
+  -webkit-transform: translate(0, -25%);
+      -ms-transform: translate(0, -25%);
+          transform: translate(0, -25%);
+}
+.modal.in .modal-dialog {
+  -webkit-transform: translate(0, 0);
+      -ms-transform: translate(0, 0);
+          transform: translate(0, 0);
+}
+.modal-dialog {
+  position: relative;
+  width: auto;
+  margin: 10px;
+}
+.modal-content {
+  position: relative;
+  background-color: #fff;
+  background-clip: padding-box;
+  border: 1px solid #999;
+  border: 1px solid rgba(0, 0, 0, .2);
+  border-radius: 6px;
+  outline: none;
+  -webkit-box-shadow: 0 3px 9px rgba(0, 0, 0, .5);
+          box-shadow: 0 3px 9px rgba(0, 0, 0, .5);
+}
+.modal-backdrop {
+  position: fixed;
+  top: 0;
+  right: 0;
+  bottom: 0;
+  left: 0;
+  z-index: 1040;
+  background-color: #000;
+}
+.modal-backdrop.fade {
+  filter: alpha(opacity=0);
+  opacity: 0;
+}
+.modal-backdrop.in {
+  filter: alpha(opacity=50);
+  opacity: .5;
+}
+.modal-header {
+  min-height: 16.428571429px;
+  padding: 15px;
+  border-bottom: 1px solid #e5e5e5;
+}
+.modal-header .close {
+  margin-top: -2px;
+}
+.modal-title {
+  margin: 0;
+  line-height: 1.428571429;
+}
+.modal-body {
+  position: relative;
+  padding: 20px;
+}
+.modal-footer {
+  padding: 19px 20px 20px;
+  margin-top: 15px;
+  text-align: right;
+  border-top: 1px solid #e5e5e5;
+}
+.modal-footer .btn + .btn {
+  margin-bottom: 0;
+  margin-left: 5px;
+}
+.modal-footer .btn-group .btn + .btn {
+  margin-left: -1px;
+}
+.modal-footer .btn-block + .btn-block {
+  margin-left: 0;
+}
+@media (min-width: 768px) {
+  .modal-dialog {
+    width: 600px;
+    margin: 30px auto;
+  }
+  .modal-content {
+    -webkit-box-shadow: 0 5px 15px rgba(0, 0, 0, .5);
+            box-shadow: 0 5px 15px rgba(0, 0, 0, .5);
+  }
+  .modal-sm {
+    width: 300px;
+  }
+  .modal-lg {
+    width: 900px;
+  }
+}
+.tooltip {
+  position: absolute;
+  z-index: 1030;
+  display: block;
+  font-size: 12px;
+  line-height: 1.4;
+  visibility: visible;
+  filter: alpha(opacity=0);
+  opacity: 0;
+}
+.tooltip.in {
+  filter: alpha(opacity=90);
+  opacity: .9;
+}
+.tooltip.top {
+  padding: 5px 0;
+  margin-top: -3px;
+}
+.tooltip.right {
+  padding: 0 5px;
+  margin-left: 3px;
+}
+.tooltip.bottom {
+  padding: 5px 0;
+  margin-top: 3px;
+}
+.tooltip.left {
+  padding: 0 5px;
+  margin-left: -3px;
+}
+.tooltip-inner {
+  max-width: 200px;
+  padding: 3px 8px;
+  color: #fff;
+  text-align: center;
+  text-decoration: none;
+  background-color: #000;
+  border-radius: 4px;
+}
+.tooltip-arrow {
+  position: absolute;
+  width: 0;
+  height: 0;
+  border-color: transparent;
+  border-style: solid;
+}
+.tooltip.top .tooltip-arrow {
+  bottom: 0;
+  left: 50%;
+  margin-left: -5px;
+  border-width: 5px 5px 0;
+  border-top-color: #000;
+}
+.tooltip.top-left .tooltip-arrow {
+  bottom: 0;
+  left: 5px;
+  border-width: 5px 5px 0;
+  border-top-color: #000;
+}
+.tooltip.top-right .tooltip-arrow {
+  right: 5px;
+  bottom: 0;
+  border-width: 5px 5px 0;
+  border-top-color: #000;
+}
+.tooltip.right .tooltip-arrow {
+  top: 50%;
+  left: 0;
+  margin-top: -5px;
+  border-width: 5px 5px 5px 0;
+  border-right-color: #000;
+}
+.tooltip.left .tooltip-arrow {
+  top: 50%;
+  right: 0;
+  margin-top: -5px;
+  border-width: 5px 0 5px 5px;
+  border-left-color: #000;
+}
+.tooltip.bottom .tooltip-arrow {
+  top: 0;
+  left: 50%;
+  margin-left: -5px;
+  border-width: 0 5px 5px;
+  border-bottom-color: #000;
+}
+.tooltip.bottom-left .tooltip-arrow {
+  top: 0;
+  left: 5px;
+  border-width: 0 5px 5px;
+  border-bottom-color: #000;
+}
+.tooltip.bottom-right .tooltip-arrow {
+  top: 0;
+  right: 5px;
+  border-width: 0 5px 5px;
+  border-bottom-color: #000;
+}
+.popover {
+  position: absolute;
+  top: 0;
+  left: 0;
+  z-index: 1010;
+  display: none;
+  max-width: 276px;
+  padding: 1px;
+  text-align: left;
+  white-space: normal;
+  background-color: #fff;
+  background-clip: padding-box;
+  border: 1px solid #ccc;
+  border: 1px solid rgba(0, 0, 0, .2);
+  border-radius: 6px;
+  -webkit-box-shadow: 0 5px 10px rgba(0, 0, 0, .2);
+          box-shadow: 0 5px 10px rgba(0, 0, 0, .2);
+}
+.popover.top {
+  margin-top: -10px;
+}
+.popover.right {
+  margin-left: 10px;
+}
+.popover.bottom {
+  margin-top: 10px;
+}
+.popover.left {
+  margin-left: -10px;
+}
+.popover-title {
+  padding: 8px 14px;
+  margin: 0;
+  font-size: 14px;
+  font-weight: normal;
+  line-height: 18px;
+  background-color: #f7f7f7;
+  border-bottom: 1px solid #ebebeb;
+  border-radius: 5px 5px 0 0;
+}
+.popover-content {
+  padding: 9px 14px;
+}
+.popover .arrow,
+.popover .arrow:after {
+  position: absolute;
+  display: block;
+  width: 0;
+  height: 0;
+  border-color: transparent;
+  border-style: solid;
+}
+.popover .arrow {
+  border-width: 11px;
+}
+.popover .arrow:after {
+  content: "";
+  border-width: 10px;
+}
+.popover.top .arrow {
+  bottom: -11px;
+  left: 50%;
+  margin-left: -11px;
+  border-top-color: #999;
+  border-top-color: rgba(0, 0, 0, .25);
+  border-bottom-width: 0;
+}
+.popover.top .arrow:after {
+  bottom: 1px;
+  margin-left: -10px;
+  content: " ";
+  border-top-color: #fff;
+  border-bottom-width: 0;
+}
+.popover.right .arrow {
+  top: 50%;
+  left: -11px;
+  margin-top: -11px;
+  border-right-color: #999;
+  border-right-color: rgba(0, 0, 0, .25);
+  border-left-width: 0;
+}
+.popover.right .arrow:after {
+  bottom: -10px;
+  left: 1px;
+  content: " ";
+  border-right-color: #fff;
+  border-left-width: 0;
+}
+.popover.bottom .arrow {
+  top: -11px;
+  left: 50%;
+  margin-left: -11px;
+  border-top-width: 0;
+  border-bottom-color: #999;
+  border-bottom-color: rgba(0, 0, 0, .25);
+}
+.popover.bottom .arrow:after {
+  top: 1px;
+  margin-left: -10px;
+  content: " ";
+  border-top-width: 0;
+  border-bottom-color: #fff;
+}
+.popover.left .arrow {
+  top: 50%;
+  right: -11px;
+  margin-top: -11px;
+  border-right-width: 0;
+  border-left-color: #999;
+  border-left-color: rgba(0, 0, 0, .25);
+}
+.popover.left .arrow:after {
+  right: 1px;
+  bottom: -10px;
+  content: " ";
+  border-right-width: 0;
+  border-left-color: #fff;
+}
+.carousel {
+  position: relative;
+}
+.carousel-inner {
+  position: relative;
+  width: 100%;
+  overflow: hidden;
+}
+.carousel-inner > .item {
+  position: relative;
+  display: none;
+  -webkit-transition: .6s ease-in-out left;
+          transition: .6s ease-in-out left;
+}
+.carousel-inner > .item > img,
+.carousel-inner > .item > a > img {
+  display: block;
+  max-width: 100%;
+  height: auto;
+  line-height: 1;
+}
+.carousel-inner > .active,
+.carousel-inner > .next,
+.carousel-inner > .prev {
+  display: block;
+}
+.carousel-inner > .active {
+  left: 0;
+}
+.carousel-inner > .next,
+.carousel-inner > .prev {
+  position: absolute;
+  top: 0;
+  width: 100%;
+}
+.carousel-inner > .next {
+  left: 100%;
+}
+.carousel-inner > .prev {
+  left: -100%;
+}
+.carousel-inner > .next.left,
+.carousel-inner > .prev.right {
+  left: 0;
+}
+.carousel-inner > .active.left {
+  left: -100%;
+}
+.carousel-inner > .active.right {
+  left: 100%;
+}
+.carousel-control {
+  position: absolute;
+  top: 0;
+  bottom: 0;
+  left: 0;
+  width: 15%;
+  font-size: 20px;
+  color: #fff;
+  text-align: center;
+  text-shadow: 0 1px 2px rgba(0, 0, 0, .6);
+  filter: alpha(opacity=50);
+  opacity: .5;
+}
+.carousel-control.left {
+  background-image: -webkit-linear-gradient(left, color-stop(rgba(0, 0, 0, .5) 0%), color-stop(rgba(0, 0, 0, .0001) 100%));
+  background-image:         linear-gradient(to right, rgba(0, 0, 0, .5) 0%, rgba(0, 0, 0, .0001) 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1);
+  background-repeat: repeat-x;
+}
+.carousel-control.right {
+  right: 0;
+  left: auto;
+  background-image: -webkit-linear-gradient(left, color-stop(rgba(0, 0, 0, .0001) 0%), color-stop(rgba(0, 0, 0, .5) 100%));
+  background-image:         linear-gradient(to right, rgba(0, 0, 0, .0001) 0%, rgba(0, 0, 0, .5) 100%);
+  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);
+  background-repeat: repeat-x;
+}
+.carousel-control:hover,
+.carousel-control:focus {
+  color: #fff;
+  text-decoration: none;
+  filter: alpha(opacity=90);
+  outline: none;
+  opacity: .9;
+}
+.carousel-control .icon-prev,
+.carousel-control .icon-next,
+.carousel-control .glyphicon-chevron-left,
+.carousel-control .glyphicon-chevron-right {
+  position: absolute;
+  top: 50%;
+  z-index: 5;
+  display: inline-block;
+}
+.carousel-control .icon-prev,
+.carousel-control .glyphicon-chevron-left {
+  left: 50%;
+}
+.carousel-control .icon-next,
+.carousel-control .glyphicon-chevron-right {
+  right: 50%;
+}
+.carousel-control .icon-prev,
+.carousel-control .icon-next {
+  width: 20px;
+  height: 20px;
+  margin-top: -10px;
+  margin-left: -10px;
+  font-family: serif;
+}
+.carousel-control .icon-prev:before {
+  content: '\2039';
+}
+.carousel-control .icon-next:before {
+  content: '\203a';
+}
+.carousel-indicators {
+  position: absolute;
+  bottom: 10px;
+  left: 50%;
+  z-index: 15;
+  width: 60%;
+  padding-left: 0;
+  margin-left: -30%;
+  text-align: center;
+  list-style: none;
+}
+.carousel-indicators li {
+  display: inline-block;
+  width: 10px;
+  height: 10px;
+  margin: 1px;
+  text-indent: -999px;
+  cursor: pointer;
+  background-color: #000 \9;
+  background-color: rgba(0, 0, 0, 0);
+  border: 1px solid #fff;
+  border-radius: 10px;
+}
+.carousel-indicators .active {
+  width: 12px;
+  height: 12px;
+  margin: 0;
+  background-color: #fff;
+}
+.carousel-caption {
+  position: absolute;
+  right: 15%;
+  bottom: 20px;
+  left: 15%;
+  z-index: 10;
+  padding-top: 20px;
+  padding-bottom: 20px;
+  color: #fff;
+  text-align: center;
+  text-shadow: 0 1px 2px rgba(0, 0, 0, .6);
+}
+.carousel-caption .btn {
+  text-shadow: none;
+}
+@media screen and (min-width: 768px) {
+  .carousel-control .glyphicons-chevron-left,
+  .carousel-control .glyphicons-chevron-right,
+  .carousel-control .icon-prev,
+  .carousel-control .icon-next {
+    width: 30px;
+    height: 30px;
+    margin-top: -15px;
+    margin-left: -15px;
+    font-size: 30px;
+  }
+  .carousel-caption {
+    right: 20%;
+    left: 20%;
+    padding-bottom: 30px;
+  }
+  .carousel-indicators {
+    bottom: 20px;
+  }
+}
+.clearfix:before,
+.clearfix:after,
+.container:before,
+.container:after,
+.container-fluid:before,
+.container-fluid:after,
+.row:before,
+.row:after,
+.form-horizontal .form-group:before,
+.form-horizontal .form-group:after,
+.btn-toolbar:before,
+.btn-toolbar:after,
+.btn-group-vertical > .btn-group:before,
+.btn-group-vertical > .btn-group:after,
+.nav:before,
+.nav:after,
+.navbar:before,
+.navbar:after,
+.navbar-header:before,
+.navbar-header:after,
+.navbar-collapse:before,
+.navbar-collapse:after,
+.pager:before,
+.pager:after,
+.panel-body:before,
+.panel-body:after,
+.modal-footer:before,
+.modal-footer:after {
+  display: table;
+  content: " ";
+}
+.clearfix:after,
+.container:after,
+.container-fluid:after,
+.row:after,
+.form-horizontal .form-group:after,
+.btn-toolbar:after,
+.btn-group-vertical > .btn-group:after,
+.nav:after,
+.navbar:after,
+.navbar-header:after,
+.navbar-collapse:after,
+.pager:after,
+.panel-body:after,
+.modal-footer:after {
+  clear: both;
+}
+.center-block {
+  display: block;
+  margin-right: auto;
+  margin-left: auto;
+}
+.pull-right {
+  float: right !important;
+}
+.pull-left {
+  float: left !important;
+}
+.hide {
+  display: none !important;
+}
+.show {
+  display: block !important;
+}
+.invisible {
+  visibility: hidden;
+}
+.text-hide {
+  font: 0/0 a;
+  color: transparent;
+  text-shadow: none;
+  background-color: transparent;
+  border: 0;
+}
+.hidden {
+  display: none !important;
+  visibility: hidden !important;
+}
+.affix {
+  position: fixed;
+}
+@-ms-viewport {
+  width: device-width;
+}
+.visible-xs,
+tr.visible-xs,
+th.visible-xs,
+td.visible-xs {
+  display: none !important;
+}
+@media (max-width: 767px) {
+  .visible-xs {
+    display: block !important;
+  }
+  table.visible-xs {
+    display: table;
+  }
+  tr.visible-xs {
+    display: table-row !important;
+  }
+  th.visible-xs,
+  td.visible-xs {
+    display: table-cell !important;
+  }
+}
+.visible-sm,
+tr.visible-sm,
+th.visible-sm,
+td.visible-sm {
+  display: none !important;
+}
+@media (min-width: 768px) and (max-width: 991px) {
+  .visible-sm {
+    display: block !important;
+  }
+  table.visible-sm {
+    display: table;
+  }
+  tr.visible-sm {
+    display: table-row !important;
+  }
+  th.visible-sm,
+  td.visible-sm {
+    display: table-cell !important;
+  }
+}
+.visible-md,
+tr.visible-md,
+th.visible-md,
+td.visible-md {
+  display: none !important;
+}
+@media (min-width: 992px) and (max-width: 1199px) {
+  .visible-md {
+    display: block !important;
+  }
+  table.visible-md {
+    display: table;
+  }
+  tr.visible-md {
+    display: table-row !important;
+  }
+  th.visible-md,
+  td.visible-md {
+    display: table-cell !important;
+  }
+}
+.visible-lg,
+tr.visible-lg,
+th.visible-lg,
+td.visible-lg {
+  display: none !important;
+}
+@media (min-width: 1200px) {
+  .visible-lg {
+    display: block !important;
+  }
+  table.visible-lg {
+    display: table;
+  }
+  tr.visible-lg {
+    display: table-row !important;
+  }
+  th.visible-lg,
+  td.visible-lg {
+    display: table-cell !important;
+  }
+}
+@media (max-width: 767px) {
+  .hidden-xs,
+  tr.hidden-xs,
+  th.hidden-xs,
+  td.hidden-xs {
+    display: none !important;
+  }
+}
+@media (min-width: 768px) and (max-width: 991px) {
+  .hidden-sm,
+  tr.hidden-sm,
+  th.hidden-sm,
+  td.hidden-sm {
+    display: none !important;
+  }
+}
+@media (min-width: 992px) and (max-width: 1199px) {
+  .hidden-md,
+  tr.hidden-md,
+  th.hidden-md,
+  td.hidden-md {
+    display: none !important;
+  }
+}
+@media (min-width: 1200px) {
+  .hidden-lg,
+  tr.hidden-lg,
+  th.hidden-lg,
+  td.hidden-lg {
+    display: none !important;
+  }
+}
+.visible-print,
+tr.visible-print,
+th.visible-print,
+td.visible-print {
+  display: none !important;
+}
+@media print {
+  .visible-print {
+    display: block !important;
+  }
+  table.visible-print {
+    display: table;
+  }
+  tr.visible-print {
+    display: table-row !important;
+  }
+  th.visible-print,
+  td.visible-print {
+    display: table-cell !important;
+  }
+}
+@media print {
+  .hidden-print,
+  tr.hidden-print,
+  th.hidden-print,
+  td.hidden-print {
+    display: none !important;
+  }
+}
+/*# sourceMappingURL=bootstrap.css.map */
diff --git a/doc/css/bootstrap.css.map b/doc/css/bootstrap.css.map
new file mode 100644 (file)
index 0000000..e1836ba
--- /dev/null
@@ -0,0 +1 @@
+{"version":3,"sources":["less/normalize.less","less/print.less","less/scaffolding.less","less/mixins.less","less/variables.less","less/type.less","less/code.less","less/grid.less","less/tables.less","less/forms.less","less/buttons.less","less/component-animations.less","less/glyphicons.less","less/dropdowns.less","less/button-groups.less","less/input-groups.less","less/navs.less","less/navbar.less","less/utilities.less","less/breadcrumbs.less","less/pagination.less","less/pager.less","less/labels.less","less/badges.less","less/jumbotron.less","less/thumbnails.less","less/alerts.less","less/progress-bars.less","less/media.less","less/list-group.less","less/panels.less","less/wells.less","less/close.less","less/modals.less","less/tooltip.less","less/popovers.less","less/carousel.less","less/responsive-utilities.less"],"names":[],"mappings":";AAQA;EACE,uBAAA;EACA,0BAAA;EACA,8BAAA;;AAOF;EACE,SAAA;;AAUF;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;EACE,cAAA;;AAQF;AACA;AACA;AACA;EACE,qBAAA;EACA,wBAAA;;AAQF,KAAK,IAAI;EACP,aAAA;EACA,SAAA;;AAQF;AACA;EACE,aAAA;;AAUF;EACE,uBAAA;;AAOF,CAAC;AACD,CAAC;EACC,UAAA;;AAUF,IAAI;EACF,yBAAA;;AAOF;AACA;EACE,iBAAA;;AAOF;EACE,kBAAA;;AAQF;EACE,cAAA;EACA,gBAAA;;AAOF;EACE,gBAAA;EACA,WAAA;;AAOF;EACE,cAAA;;AAOF;AACA;EACE,cAAA;EACA,cAAA;EACA,kBAAA;EACA,wBAAA;;AAGF;EACE,WAAA;;AAGF;EACE,eAAA;;AAUF;EACE,SAAA;;AAOF,GAAG,IAAI;EACL,gBAAA;;AAUF;EACE,gBAAA;;AAOF;EACE,4BAAA;EACA,uBAAA;EACA,SAAA;;AAOF;EACE,cAAA;;AAOF;AACA;AACA;AACA;EACE,iCAAA;EACA,cAAA;;AAkBF;AACA;AACA;AACA;AACA;EACE,cAAA;EACA,aAAA;EACA,SAAA;;AAOF;EACE,iBAAA;;AAUF;AACA;EACE,oBAAA;;AAWF;AACA,IAAK,MAAK;AACV,KAAK;AACL,KAAK;EACH,0BAAA;EACA,eAAA;;AAOF,MAAM;AACN,IAAK,MAAK;EACR,eAAA;;AAOF,MAAM;AACN,KAAK;EACH,SAAA;EACA,UAAA;;AAQF;EACE,mBAAA;;AAWF,KAAK;AACL,KAAK;EACH,sBAAA;EACA,UAAA;;AASF,KAAK,eAAe;AACpB,KAAK,eAAe;EAClB,YAAA;;AASF,KAAK;EACH,6BAAA;EACA,4BAAA;EACA,+BAAA;EACA,uBAAA;;AASF,KAAK,eAAe;AACpB,KAAK,eAAe;EAClB,wBAAA;;AAOF;EACE,yBAAA;EACA,aAAA;EACA,8BAAA;;AAQF;EACE,SAAA;EACA,UAAA;;A
AOF;EACE,cAAA;;AAQF;EACE,iBAAA;;AAUF;EACE,yBAAA;EACA,iBAAA;;AAGF;AACA;EACE,UAAA;;AChUF;EA9FE;IACE,4BAAA;IACA,sBAAA;IACA,kCAAA;IACA,2BAAA;;EAGF;EACA,CAAC;IACC,0BAAA;;EAGF,CAAC,MAAM;IACL,SAAS,KAAK,WAAW,GAAzB;;EAGF,IAAI,OAAO;IACT,SAAS,KAAK,YAAY,GAA1B;;EAIF,CAAC,qBAAqB;EACtB,CAAC,WAAW;IACV,SAAS,EAAT;;EAGF;EACA;IACE,sBAAA;IACA,wBAAA;;EAGF;IACE,2BAAA;;EAGF;EACA;IACE,wBAAA;;EAGF;IACE,0BAAA;;EAGF;EACA;EACA;IACE,UAAA;IACA,SAAA;;EAGF;EACA;IACE,uBAAA;;EAKF;IACE,2BAAA;;EAIF;IACE,aAAA;;EAEF,MACE;EADF,MAEE;IACE,iCAAA;;EAGJ,IAEE;EADF,OAAQ,OACN;IACE,iCAAA;;EAGJ;IACE,sBAAA;;EAGF;IACE,oCAAA;;EAEF,eACE;EADF,eAEE;IACE,iCAAA;;;ACtFN;EC0OE,8BAAA;EACG,2BAAA;EACK,sBAAA;;ADzOV,CAAC;AACD,CAAC;ECsOC,8BAAA;EACG,2BAAA;EACK,sBAAA;;ADjOV;EACE,gBAAA;EACA,6CAAA;;AAGF;EACE,aEcwB,8CFdxB;EACA,eAAA;EACA,wBAAA;EACA,cAAA;EACA,yBAAA;;AAIF;AACA;AACA;AACA;EACE,oBAAA;EACA,kBAAA;EACA,oBAAA;;AAMF;EACE,cAAA;EACA,qBAAA;;AAEA,CAAC;AACD,CAAC;EACC,cAAA;EACA,0BAAA;;AAGF,CAAC;ECzBD,oBAAA;EAEA,0CAAA;EACA,oBAAA;;ADiCF;EACE,SAAA;;AAMF;EACE,sBAAA;;AAIF;ECiTE,cAAA;EACA,eAAA;EACA,YAAA;;AD9SF;EACE,kBAAA;;AAMF;EACE,YAAA;EACA,wBAAA;EACA,yBAAA;EACA,yBAAA;EACA,kBAAA;EC+BA,wCAAA;EACQ,gCAAA;EAgQR,qBAAA;EACA,eAAA;EACA,YAAA;;AD1RF;EACE,kBAAA;;AAMF;EACE,gBAAA;EACA,mBAAA;EACA,SAAA;EACA,6BAAA;;AAQF;EACE,kBAAA;EACA,UAAA;EACA,WAAA;EACA,YAAA;EACA,UAAA;EACA,gBAAA;EACA,MAAM,gBAAN;EACA,SAAA;;AG5HF;AAAI;AAAI;AAAI;AAAI;AAAI;AACpB;AAAK;AAAK;AAAK;AAAK;AAAK;EACvB,oBAAA;EACA,gBAAA;EACA,gBAAA;EACA,cAAA;;AALF,EAOE;AAPE,EAOF;AAPM,EAON;AAPU,EAOV;AAPc,EAOd;AAPkB,EAOlB;AANF,GAME;AANG,GAMH;AANQ,GAMR;AANa,GAMb;AANkB,GAMlB;AANuB,GAMvB;AAPF,EAQE;AARE,EAQF;AARM,EAQN;AARU,EAQV;AARc,EAQd;AARkB,EAQlB;AAPF,GAOE;AAPG,GAOH;AAPQ,GAOR;AAPa,GAOb;AAPkB,GAOlB;AAPuB,GAOvB;EACE,mBAAA;EACA,cAAA;EACA,cAAA;;AAIJ;AAAI;AACJ;AAAI;AACJ;AAAI;EACF,gBAAA;EACA,mBAAA;;AAJF,EAME;AANE,GAMF;AALF,EAKE;AALE,GAKF;AAJF,EAIE;AAJE,GAIF;AANF,EAOE;AAPE,GAOF;AANF,EAME;AANE,GAMF;AALF,EAKE;AALE,GAKF;EACE,cAAA;;AAGJ;AAAI;AACJ;AAAI;AACJ;AAAI;EACF,gBAAA;EACA,mBAAA;;AAJF,EAME;AANE,GAMF;AALF,EAKE;AAL
E,GAKF;AAJF,EAIE;AAJE,GAIF;AANF,EAOE;AAPE,GAOF;AANF,EAME;AANE,GAMF;AALF,EAKE;AALE,GAKF;EACE,cAAA;;AAIJ;AAAI;EAAM,eAAA;;AACV;AAAI;EAAM,eAAA;;AACV;AAAI;EAAM,eAAA;;AACV;AAAI;EAAM,eAAA;;AACV;AAAI;EAAM,eAAA;;AACV;AAAI;EAAM,eAAA;;AAMV;EACE,gBAAA;;AAGF;EACE,mBAAA;EACA,eAAA;EACA,gBAAA;EACA,gBAAA;;AAKF,QAHqC;EAGrC;IAFI,eAAA;;;AASJ;AACA;EAAU,cAAA;;AAGV;EAAU,kBAAA;;AAGV;EAAuB,gBAAA;;AACvB;EAAuB,iBAAA;;AACvB;EAAuB,kBAAA;;AACvB;EAAuB,mBAAA;;AAGvB;EACE,cAAA;;AAEF;EFsfE,cAAA;;AACA,CAAC,aAAC;EACA,cAAA;;AErfJ;EFmfE,cAAA;;AACA,CAAC,aAAC;EACA,cAAA;;AElfJ;EFgfE,cAAA;;AACA,CAAC,UAAC;EACA,cAAA;;AE/eJ;EF6eE,cAAA;;AACA,CAAC,aAAC;EACA,cAAA;;AE5eJ;EF0eE,cAAA;;AACA,CAAC,YAAC;EACA,cAAA;;AEreJ;EAGE,WAAA;EFudA,yBAAA;;AACA,CAAC,WAAC;EACA,yBAAA;;AEtdJ;EFodE,yBAAA;;AACA,CAAC,WAAC;EACA,yBAAA;;AEndJ;EFidE,yBAAA;;AACA,CAAC,QAAC;EACA,yBAAA;;AEhdJ;EF8cE,yBAAA;;AACA,CAAC,WAAC;EACA,yBAAA;;AE7cJ;EF2cE,yBAAA;;AACA,CAAC,UAAC;EACA,yBAAA;;AErcJ;EACE,mBAAA;EACA,mBAAA;EACA,gCAAA;;AAQF;AACA;EACE,aAAA;EACA,mBAAA;;AAHF,EAIE;AAHF,EAGE;AAJF,EAKE;AAJF,EAIE;EACE,gBAAA;;AAOJ;EACE,eAAA;EACA,gBAAA;;AAIF;EALE,eAAA;EACA,gBAAA;;AAIF,YAGE;EACE,qBAAA;EACA,iBAAA;EACA,kBAAA;;AAEA,YALF,KAKG;EACC,eAAA;;AAMN;EACE,aAAA;EACA,mBAAA;;AAEF;AACA;EACE,wBAAA;;AAEF;EACE,iBAAA;;AAEF;EACE,cAAA;;AAwBF,QAhB2C;EACzC,cACE;IACE,WAAA;IACA,YAAA;IACA,WAAA;IACA,iBAAA;IF5IJ,gBAAA;IACA,uBAAA;IACA,mBAAA;;EEqIA,cAQE;IACE,kBAAA;;;AAUN,IAAI;AAEJ,IAAI;EACF,YAAA;EACA,iCAAA;;AAEF;EACE,cAAA;EACA,yBAAA;;AAIF;EACE,kBAAA;EACA,gBAAA;EACA,iBAAA;EACA,8BAAA;;AAKE,UAHF,EAGG;AAAD,UAFF,GAEG;AAAD,UADF,GACG;EACC,gBAAA;;AAVN,UAgBE;AAhBF,UAiBE;AAjBF,UAkBE;EACE,cAAA;EACA,cAAA;EACA,wBAAA;EACA,cAAA;;AAEA,UARF,OAQG;AAAD,UAPF,MAOG;AAAD,UANF,OAMG;EACC,SAAS,aAAT;;AAQN;AACA,UAAU;EACR,mBAAA;EACA,eAAA;EACA,+BAAA;EACA,cAAA;EACA,iBAAA;;AAME,mBAHF,OAGG;AAAD,UAXM,WAQR,OAGG;AAAD,mBAFF,MAEG;AAAD,UAXM,WASR,MAEG;AAAD,mBADF,OACG;AAAD,UAXM,WAUR,OACG;EAAU,SAAS,EAAT;;AACX,mBAJF,OAIG;AAAD,UAZM,WAQR,OAIG;AAAD,mBAHF,MAGG;AAAD,UAZM,WASR,MAGG;AAAD,mBAFF,OAEG;AAAD,UAZM,WAUR,OAEG;EACC,SAAS,aAAT;;AAMN,UA
AU;AACV,UAAU;EACR,SAAS,EAAT;;AAIF;EACE,mBAAA;EACA,kBAAA;EACA,wBAAA;;AChSF;AACA;AACA;AACA;EACE,sCFkCiD,wBElCjD;;AAIF;EACE,gBAAA;EACA,cAAA;EACA,cAAA;EACA,yBAAA;EACA,mBAAA;EACA,kBAAA;;AAIF;EACE,gBAAA;EACA,cAAA;EACA,cAAA;EACA,yBAAA;EACA,kBAAA;EACA,8CAAA;;AAIF;EACE,cAAA;EACA,cAAA;EACA,gBAAA;EACA,eAAA;EACA,wBAAA;EACA,qBAAA;EACA,qBAAA;EACA,cAAA;EACA,yBAAA;EACA,yBAAA;EACA,kBAAA;;AAXF,GAcE;EACE,UAAA;EACA,kBAAA;EACA,cAAA;EACA,qBAAA;EACA,6BAAA;EACA,gBAAA;;AAKJ;EACE,iBAAA;EACA,kBAAA;;ACpDF;EJ0nBE,kBAAA;EACA,iBAAA;EACA,kBAAA;EACA,mBAAA;;AIvnBA,QAHmC;EAGnC;IAFE,YAAA;;;AAKF,QAHmC;EAGnC;IAFE,YAAA;;;AAKJ,QAHqC;EAGrC;IAFI,aAAA;;;AAUJ;EJsmBE,kBAAA;EACA,iBAAA;EACA,kBAAA;EACA,mBAAA;;AIhmBF;EJsmBE,kBAAA;EACA,mBAAA;;AAqIE;EACE,kBAAA;EAEA,eAAA;EAEA,kBAAA;EACA,mBAAA;;AAgBF;EACE,WAAA;;AAOJ,KAAK,EAAQ,CAAC;EACZ,WAAA;;AADF,KAAK,EAAQ,CAAC;EACZ,yBAAA;;AADF,KAAK,EAAQ,CAAC;EACZ,yBAAA;;AADF,KAAK,EAAQ,CAAC;EACZ,UAAA;;AADF,KAAK,EAAQ,CAAC;EACZ,yBAAA;;AADF,KAAK,EAAQ,CAAC;EACZ,0BAAA;;AADF,KAAK,EAAQ,CAAC;EACZ,UAAA;;AADF,KAAK,EAAQ,CAAC;EACZ,yBAAA;;AADF,KAAK,EAAQ,CAAC;EACZ,yBAAA;;AADF,KAAK,EAAQ,CAAC;EACZ,UAAA;;AADF,KAAK,EAAQ,CAAC;EACZ,0BAAA;;AADF,KAAK,EAAQ,CAAC;EACZ,yBAAA;;AASF,KAAK,EAAQ,MAAM;EACjB,WAAA;;AADF,KAAK,EAAQ,MAAM;EACjB,yBAAA;;AADF,KAAK,EAAQ,MAAM;EACjB,yBAAA;;AADF,KAAK,EAAQ,MAAM;EACjB,UAAA;;AADF,KAAK,EAAQ,MAAM;EACjB,yBAAA;;AADF,KAAK,EAAQ,MAAM;EACjB,0BAAA;;AADF,KAAK,EAAQ,MAAM;EACjB,UAAA;;AADF,KAAK,EAAQ,MAAM;EACjB,yBAAA;;AADF,KAAK,EAAQ,MAAM;EACjB,yBAAA;;AADF,KAAK,EAAQ,MAAM;EACjB,UAAA;;AADF,KAAK,EAAQ,MAAM;EACjB,0BAAA;;AADF,KAAK,EAAQ,MAAM;EACjB,yBAAA;;AADF,KAAK,EAAQ,MAAM;EACjB,SAAA;;AANF,KAAK,EAAQ,MAAM;EACjB,UAAA;;AADF,KAAK,EAAQ,MAAM;EACjB,wBAAA;;AADF,KAAK,EAAQ,MAAM;EACjB,wBAAA;;AADF,KAAK,EAAQ,MAAM;EACjB,SAAA;;AADF,KAAK,EAAQ,MAAM;EACjB,wBAAA;;AADF,KAAK,EAAQ,MAAM;EACjB,yBAAA;;AADF,KAAK,EAAQ,MAAM;EACjB,SAAA;;AADF,KAAK,EAAQ,MAAM;EACjB,wBAAA;;AADF,KAAK,EAAQ,MAAM;EACjB,wBAAA;;AADF,KAAK,EAAQ,MAAM;EACjB,SAAA;;AADF,KAAK,EAAQ,MAAM;EACjB,yBAAA;;AADF,KAAK,EAAQ,MAAM;EACjB,wBAAA;;AADF,KAAK,EAAQ,MAAM;EACjB,QAAA;;AA
SF,KAAK,EAAQ,QAAQ;EACnB,iBAAA;;AADF,KAAK,EAAQ,QAAQ;EACnB,+BAAA;;AADF,KAAK,EAAQ,QAAQ;EACnB,+BAAA;;AADF,KAAK,EAAQ,QAAQ;EACnB,gBAAA;;AADF,KAAK,EAAQ,QAAQ;EACnB,+BAAA;;AADF,KAAK,EAAQ,QAAQ;EACnB,gCAAA;;AADF,KAAK,EAAQ,QAAQ;EACnB,gBAAA;;AADF,KAAK,EAAQ,QAAQ;EACnB,+BAAA;;AADF,KAAK,EAAQ,QAAQ;EACnB,+BAAA;;AADF,KAAK,EAAQ,QAAQ;EACnB,gBAAA;;AADF,KAAK,EAAQ,QAAQ;EACnB,gCAAA;;AADF,KAAK,EAAQ,QAAQ;EACnB,+BAAA;;AADF,KAAK,EAAQ,QAAQ;EACnB,eAAA;;AIpvBJ,QATmC;EJquB/B;IACE,WAAA;;EAOJ,KAAK,EAAQ,CAAC;IACZ,WAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,yBAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,yBAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,UAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,yBAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,0BAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,UAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,yBAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,yBAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,UAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,0BAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,yBAAA;;EASF,KAAK,EAAQ,MAAM;IACjB,WAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,yBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,yBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,UAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,yBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,0BAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,UAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,yBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,yBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,UAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,0BAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,yBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,SAAA;;EANF,KAAK,EAAQ,MAAM;IACjB,UAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,wBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,wBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,SAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,wBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,yBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,SAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,wBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,wBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,SAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,yBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,wBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,QAAA;;EASF,KAAK,EAAQ,QAAQ;IACnB,iBAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,+BAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,+BAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,gBAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,+BAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,gCAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,gBAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,+BAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,+BAAA;;EADF,KAAK,EAAQ,QA
AQ;IACnB,gBAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,gCAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,+BAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,eAAA;;;AIvuBJ,QATmC;EJwtB/B;IACE,WAAA;;EAOJ,KAAK,EAAQ,CAAC;IACZ,WAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,yBAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,yBAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,UAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,yBAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,0BAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,UAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,yBAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,yBAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,UAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,0BAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,yBAAA;;EASF,KAAK,EAAQ,MAAM;IACjB,WAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,yBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,yBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,UAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,yBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,0BAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,UAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,yBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,yBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,UAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,0BAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,yBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,SAAA;;EANF,KAAK,EAAQ,MAAM;IACjB,UAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,wBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,wBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,SAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,wBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,yBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,SAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,wBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,wBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,SAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,yBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,wBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,QAAA;;EASF,KAAK,EAAQ,QAAQ;IACnB,iBAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,+BAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,+BAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,gBAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,+BAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,gCAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,gBAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,+BAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,+BAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,gBAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,gCAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,+BAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,eAAA;;;AI5tBJ,QAPmC;EJ2sB/B;IACE,WAAA;;EAOJ,KAAK,EAAQ,CAAC;IACZ,WAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,yBAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,yBAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,UAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,yBAAA;;EADF,K
AAK,EAAQ,CAAC;IACZ,0BAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,UAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,yBAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,yBAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,UAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,0BAAA;;EADF,KAAK,EAAQ,CAAC;IACZ,yBAAA;;EASF,KAAK,EAAQ,MAAM;IACjB,WAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,yBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,yBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,UAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,yBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,0BAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,UAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,yBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,yBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,UAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,0BAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,yBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,SAAA;;EANF,KAAK,EAAQ,MAAM;IACjB,UAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,wBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,wBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,SAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,wBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,yBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,SAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,wBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,wBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,SAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,yBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,wBAAA;;EADF,KAAK,EAAQ,MAAM;IACjB,QAAA;;EASF,KAAK,EAAQ,QAAQ;IACnB,iBAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,+BAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,+BAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,gBAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,+BAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,gCAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,gBAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,+BAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,+BAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,gBAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,gCAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,+BAAA;;EADF,KAAK,EAAQ,QAAQ;IACnB,eAAA;;;AK3zBJ;EACE,eAAA;EACA,6BAAA;;AAEF;EACE,gBAAA;;AAMF;EACE,WAAA;EACA,mBAAA;;AAFF,MAIE,QAGE,KACE;AARN,MAKE,QAEE,KACE;AARN,MAME,QACE,KACE;AARN,MAIE,QAGE,KAEE;AATN,MAKE,QAEE,KAEE;AATN,MAME,QACE,KAEE;EACE,YAAA;EACA,wBAAA;EACA,mBAAA;EACA,6BAAA;;AAbR,MAkBE,QAAQ,KAAK;EACX,sBAAA;EACA,gCAAA;;AApBJ,MAuBE,UAAU,QAGR,KAAI,YACF;AA3BN,MAwBE,WAAW,QAET,KAAI,YACF;AA3BN,MAyBE,QAAO,YACL,KAAI,YACF;AA3BN,MAuBE,UAAU,QAGR,KAAI,YAEF;AA5BN,MAwBE,WAAW,QAET,KAAI,YAEF;AA5BN,MAyBE,QAAO,YACL,KAAI,YAEF;EACE,aAAA;;AA7BR,MAkCE,QAAQ;EACN,6
BAAA;;AAnCJ,MAuCE;EACE,yBAAA;;AAOJ,gBACE,QAGE,KACE;AALN,gBAEE,QAEE,KACE;AALN,gBAGE,QACE,KACE;AALN,gBACE,QAGE,KAEE;AANN,gBAEE,QAEE,KAEE;AANN,gBAGE,QACE,KAEE;EACE,YAAA;;AAWR;EACE,yBAAA;;AADF,eAEE,QAGE,KACE;AANN,eAGE,QAEE,KACE;AANN,eAIE,QACE,KACE;AANN,eAEE,QAGE,KAEE;AAPN,eAGE,QAEE,KAEE;AAPN,eAIE,QACE,KAEE;EACE,yBAAA;;AARR,eAYE,QAAQ,KACN;AAbJ,eAYE,QAAQ,KAEN;EACE,wBAAA;;AAUN,cACE,QAAQ,KAAI,UAAU,KACpB;AAFJ,cACE,QAAQ,KAAI,UAAU,KAEpB;EACE,yBAAA;;AAUN,YACE,QAAQ,KAAI,MACV;AAFJ,YACE,QAAQ,KAAI,MAEV;EACE,yBAAA;;AAUN,KAAM,IAAG;EACP,gBAAA;EACA,WAAA;EACA,qBAAA;;AAKE,KAFF,GAEG;AAAD,KADF,GACG;EACC,gBAAA;EACA,WAAA;EACA,mBAAA;;AL4SJ,MAAO,QAAQ,KAGb,KAAI,CAAC;AAFP,MAAO,QAAQ,KAEb,KAAI,CAAC;AADP,MAAO,QAAQ,KACb,KAAI,CAAC;AAHP,MAAO,QAAQ,KAIb,KAAI,CAAC;AAHP,MAAO,QAAQ,KAGb,KAAI,CAAC;AAFP,MAAO,QAAQ,KAEb,KAAI,CAAC;AACL,MALK,QAAQ,KAKZ,CAAC,MAAS;AAAX,MAJK,QAAQ,KAIZ,CAAC,MAAS;AAAX,MAHK,QAAQ,KAGZ,CAAC,MAAS;AACX,MANK,QAAQ,KAMZ,CAAC,MAAS;AAAX,MALK,QAAQ,KAKZ,CAAC,MAAS;AAAX,MAJK,QAAQ,KAIZ,CAAC,MAAS;EACT,yBAAA;;AAMJ,YAAa,QAAQ,KACnB,KAAI,CAAC,MAAQ;AADf,YAAa,QAAQ,KAEnB,KAAI,CAAC,MAAQ;AACb,YAHW,QAAQ,KAGlB,CAAC,MAAQ,MAAO;AACjB,YAJW,QAAQ,KAIlB,CAAC,MAAQ,MAAO;EACf,yBAAA;;AAlBJ,MAAO,QAAQ,KAGb,KAAI,CAAC;AAFP,MAAO,QAAQ,KAEb,KAAI,CAAC;AADP,MAAO,QAAQ,KACb,KAAI,CAAC;AAHP,MAAO,QAAQ,KAIb,KAAI,CAAC;AAHP,MAAO,QAAQ,KAGb,KAAI,CAAC;AAFP,MAAO,QAAQ,KAEb,KAAI,CAAC;AACL,MALK,QAAQ,KAKZ,CAAC,OAAS;AAAX,MAJK,QAAQ,KAIZ,CAAC,OAAS;AAAX,MAHK,QAAQ,KAGZ,CAAC,OAAS;AACX,MANK,QAAQ,KAMZ,CAAC,OAAS;AAAX,MALK,QAAQ,KAKZ,CAAC,OAAS;AAAX,MAJK,QAAQ,KAIZ,CAAC,OAAS;EACT,yBAAA;;AAMJ,YAAa,QAAQ,KACnB,KAAI,CAAC,OAAQ;AADf,YAAa,QAAQ,KAEnB,KAAI,CAAC,OAAQ;AACb,YAHW,QAAQ,KAGlB,CAAC,OAAQ,MAAO;AACjB,YAJW,QAAQ,KAIlB,CAAC,OAAQ,MAAO;EACf,yBAAA;;AAlBJ,MAAO,QAAQ,KAGb,KAAI,CAAC;AAFP,MAAO,QAAQ,KAEb,KAAI,CAAC;AADP,MAAO,QAAQ,KACb,KAAI,CAAC;AAHP,MAAO,QAAQ,KAIb,KAAI,CAAC;AAHP,MAAO,QAAQ,KAGb,KAAI,CAAC;AAFP,MAAO,QAAQ,KAEb,KAAI,CAAC;AACL,MALK,QAAQ,KAKZ,CAAC,IAAS;AAAX,MAJK,QAAQ,KAIZ,CAAC,IAAS;AAAX,MAHK,QAAQ,KAGZ,CAAC,IAAS;AACX,MANK,QAAQ,KAMZ,CAAC,IAAS;AAAX,MALK,QAAQ,KAKZ,CAAC
,IAAS;AAAX,MAJK,QAAQ,KAIZ,CAAC,IAAS;EACT,yBAAA;;AAMJ,YAAa,QAAQ,KACnB,KAAI,CAAC,IAAQ;AADf,YAAa,QAAQ,KAEnB,KAAI,CAAC,IAAQ;AACb,YAHW,QAAQ,KAGlB,CAAC,IAAQ,MAAO;AACjB,YAJW,QAAQ,KAIlB,CAAC,IAAQ,MAAO;EACf,yBAAA;;AAlBJ,MAAO,QAAQ,KAGb,KAAI,CAAC;AAFP,MAAO,QAAQ,KAEb,KAAI,CAAC;AADP,MAAO,QAAQ,KACb,KAAI,CAAC;AAHP,MAAO,QAAQ,KAIb,KAAI,CAAC;AAHP,MAAO,QAAQ,KAGb,KAAI,CAAC;AAFP,MAAO,QAAQ,KAEb,KAAI,CAAC;AACL,MALK,QAAQ,KAKZ,CAAC,OAAS;AAAX,MAJK,QAAQ,KAIZ,CAAC,OAAS;AAAX,MAHK,QAAQ,KAGZ,CAAC,OAAS;AACX,MANK,QAAQ,KAMZ,CAAC,OAAS;AAAX,MALK,QAAQ,KAKZ,CAAC,OAAS;AAAX,MAJK,QAAQ,KAIZ,CAAC,OAAS;EACT,yBAAA;;AAMJ,YAAa,QAAQ,KACnB,KAAI,CAAC,OAAQ;AADf,YAAa,QAAQ,KAEnB,KAAI,CAAC,OAAQ;AACb,YAHW,QAAQ,KAGlB,CAAC,OAAQ,MAAO;AACjB,YAJW,QAAQ,KAIlB,CAAC,OAAQ,MAAO;EACf,yBAAA;;AAlBJ,MAAO,QAAQ,KAGb,KAAI,CAAC;AAFP,MAAO,QAAQ,KAEb,KAAI,CAAC;AADP,MAAO,QAAQ,KACb,KAAI,CAAC;AAHP,MAAO,QAAQ,KAIb,KAAI,CAAC;AAHP,MAAO,QAAQ,KAGb,KAAI,CAAC;AAFP,MAAO,QAAQ,KAEb,KAAI,CAAC;AACL,MALK,QAAQ,KAKZ,CAAC,MAAS;AAAX,MAJK,QAAQ,KAIZ,CAAC,MAAS;AAAX,MAHK,QAAQ,KAGZ,CAAC,MAAS;AACX,MANK,QAAQ,KAMZ,CAAC,MAAS;AAAX,MALK,QAAQ,KAKZ,CAAC,MAAS;AAAX,MAJK,QAAQ,KAIZ,CAAC,MAAS;EACT,yBAAA;;AAMJ,YAAa,QAAQ,KACnB,KAAI,CAAC,MAAQ;AADf,YAAa,QAAQ,KAEnB,KAAI,CAAC,MAAQ;AACb,YAHW,QAAQ,KAGlB,CAAC,MAAQ,MAAO;AACjB,YAJW,QAAQ,KAIlB,CAAC,MAAQ,MAAO;EACf,yBAAA;;AKtON,QA/DmC;EACjC;IACE,WAAA;IACA,mBAAA;IACA,kBAAA;IACA,kBAAA;IACA,4CAAA;IACA,yBAAA;IACA,iCAAA;;EAPF,iBAUE;IACE,gBAAA;;EAXJ,iBAUE,SAIE,QAGE,KACE;EAlBR,iBAUE,SAKE,QAEE,KACE;EAlBR,iBAUE,SAME,QACE,KACE;EAlBR,iBAUE,SAIE,QAGE,KAEE;EAnBR,iBAUE,SAKE,QAEE,KAEE;EAnBR,iBAUE,SAME,QACE,KAEE;IACE,mBAAA;;EApBV,iBA2BE;IACE,SAAA;;EA5BJ,iBA2BE,kBAIE,QAGE,KACE,KAAI;EAnCZ,iBA2BE,kBAKE,QAEE,KACE,KAAI;EAnCZ,iBA2BE,kBAME,QACE,KACE,KAAI;EAnCZ,iBA2BE,kBAIE,QAGE,KAEE,KAAI;EApCZ,iBA2BE,kBAKE,QAEE,KAEE,KAAI;EApCZ,iBA2BE,kBAME,QACE,KAEE,KAAI;IACF,cAAA;;EArCV,iBA2BE,kBAIE,QAGE,KAKE,KAAI;EAvCZ,iBA2BE,kBAKE,QAEE,KAKE,KAAI;EAvCZ,iBA2BE,kBAME,QACE,KAKE,KAAI;EAvCZ,iBA2BE,kBAIE,QAGE,KAME,KAAI;EAxCZ,iBA2BE,kBAKE,QAEE,KAME,KAAI;EAxCZ,iBA2BE,kBAME,QACE,KAME,KAAI;
IACF,eAAA;;EAzCV,iBA2BE,kBAsBE,QAEE,KAAI,WACF;EApDR,iBA2BE,kBAuBE,QACE,KAAI,WACF;EApDR,iBA2BE,kBAsBE,QAEE,KAAI,WAEF;EArDR,iBA2BE,kBAuBE,QACE,KAAI,WAEF;IACE,gBAAA;;;ACxNZ;EACE,UAAA;EACA,SAAA;EACA,SAAA;EAIA,YAAA;;AAGF;EACE,cAAA;EACA,WAAA;EACA,UAAA;EACA,mBAAA;EACA,eAAA;EACA,oBAAA;EACA,cAAA;EACA,SAAA;EACA,gCAAA;;AAGF;EACE,qBAAA;EACA,kBAAA;EACA,iBAAA;;AAWF,KAAK;ENuMH,8BAAA;EACG,2BAAA;EACK,sBAAA;;AMpMV,KAAK;AACL,KAAK;EACH,eAAA;EACA,kBAAA;;EACA,mBAAA;;AAIF,KAAK;EACH,cAAA;;AAIF,KAAK;EACH,cAAA;EACA,WAAA;;AAIF,MAAM;AACN,MAAM;EACJ,YAAA;;AAIF,KAAK,aAAa;AAClB,KAAK,cAAc;AACnB,KAAK,iBAAiB;EN7CpB,oBAAA;EAEA,0CAAA;EACA,oBAAA;;AM+CF;EACE,cAAA;EACA,gBAAA;EACA,eAAA;EACA,wBAAA;EACA,cAAA;;AA0BF;EACE,cAAA;EACA,WAAA;EACA,YAAA;EACA,iBAAA;EACA,eAAA;EACA,wBAAA;EACA,cAAA;EACA,yBAAA;EACA,sBAAA;EACA,yBAAA;EACA,kBAAA;ENFA,wDAAA;EACQ,gDAAA;EAKR,8EAAA;EACQ,sEAAA;;AA+vBR,aAAC;EACC,qBAAA;EACA,UAAA;EAxwBF,sFAAA;EACQ,8EAAA;;AAnER,aAAC;EAA+B,cAAA;;AAChC,aAAC;EAA+B,cAAA;EACA,UAAA;;AAChC,aAAC;EAA+B,cAAA;;AAChC,aAAC;EAA+B,cAAA;;AM8EhC,aAAC;AACD,aAAC;AACD,QAAQ,UAAW;EACjB,mBAAA;EACA,yBAAA;EACA,UAAA;;AAIF,QAAQ;EACN,YAAA;;AAQJ,KAAK;EACH,iBAAA;;AASF;EACE,mBAAA;;AAQF;AACA;EACE,cAAA;EACA,gBAAA;EACA,gBAAA;EACA,mBAAA;EACA,kBAAA;;AANF,MAOE;AANF,SAME;EACE,eAAA;EACA,mBAAA;EACA,eAAA;;AAGJ,MAAO,MAAK;AACZ,aAAc,MAAK;AACnB,SAAU,MAAK;AACf,gBAAiB,MAAK;EACpB,WAAA;EACA,kBAAA;;AAEF,MAAO;AACP,SAAU;EACR,gBAAA;;AAIF;AACA;EACE,qBAAA;EACA,kBAAA;EACA,gBAAA;EACA,sBAAA;EACA,mBAAA;EACA,eAAA;;AAEF,aAAc;AACd,gBAAiB;EACf,aAAA;EACA,iBAAA;;AAYA,KANG,cAMF;AAAD,KALG,iBAKF;AAAD,MAAC;AAAD,aAAC;AAAD,SAAC;AAAD,gBAAC;AACD,QAAQ,UAAW,MAPhB;AAOH,QAAQ,UAAW,MANhB;AAMH,QAAQ,UAAW;AAAnB,QAAQ,UAAW;AAAnB,QAAQ,UAAW;AAAnB,QAAQ,UAAW;EACjB,mBAAA;;AAUJ;ENiqBE,YAAA;EACA,iBAAA;EACA,eAAA;EACA,gBAAA;EACA,kBAAA;;AAEA,MAAM;EACJ,YAAA;EACA,iBAAA;;AAGF,QAAQ;AACR,MAAM,UAAU;EACd,YAAA;;AM1qBJ;EN6pBE,YAAA;EACA,kBAAA;EACA,eAAA;EACA,iBAAA;EACA,kBAAA;;AAEA,MAAM;EACJ,YAAA;EACA,iBAAA;;AAGF,QAAQ;AACR,MAAM,UAAU;EACd,YAAA;;AMjqBJ;EAEE,kBAAA;;AAFF,aAKE;EACE,qBAAA;;AANJ,aAUE;EACE,kBAAA
;EACA,SAAA;EACA,QAAA;EACA,cAAA;EACA,WAAA;EACA,YAAA;EACA,iBAAA;EACA,kBAAA;;AAKJ,YNkkBE;AMlkBF,YNmkBE;AMnkBF,YNokBE;AMpkBF,YNqkBE;AMrkBF,YNskBE;AMtkBF,YNukBE;EACE,cAAA;;AMxkBJ,YN2kBE;EACE,qBAAA;EAnuBF,wDAAA;EACQ,gDAAA;;AAouBN,YAHF,cAGG;EACC,qBAAA;EAtuBJ,yEAAA;EACQ,iEAAA;;AMsJV,YNqlBE;EACE,cAAA;EACA,qBAAA;EACA,yBAAA;;AMxlBJ,YN2lBE;EACE,cAAA;;AMzlBJ,YN+jBE;AM/jBF,YNgkBE;AMhkBF,YNikBE;AMjkBF,YNkkBE;AMlkBF,YNmkBE;AMnkBF,YNokBE;EACE,cAAA;;AMrkBJ,YNwkBE;EACE,qBAAA;EAnuBF,wDAAA;EACQ,gDAAA;;AAouBN,YAHF,cAGG;EACC,qBAAA;EAtuBJ,yEAAA;EACQ,iEAAA;;AMyJV,YNklBE;EACE,cAAA;EACA,qBAAA;EACA,yBAAA;;AMrlBJ,YNwlBE;EACE,cAAA;;AMtlBJ,UN4jBE;AM5jBF,UN6jBE;AM7jBF,UN8jBE;AM9jBF,UN+jBE;AM/jBF,UNgkBE;AMhkBF,UNikBE;EACE,cAAA;;AMlkBJ,UNqkBE;EACE,qBAAA;EAnuBF,wDAAA;EACQ,gDAAA;;AAouBN,UAHF,cAGG;EACC,qBAAA;EAtuBJ,yEAAA;EACQ,iEAAA;;AM4JV,UN+kBE;EACE,cAAA;EACA,qBAAA;EACA,yBAAA;;AMllBJ,UNqlBE;EACE,cAAA;;AM5kBJ;EACE,gBAAA;;AASF;EACE,cAAA;EACA,eAAA;EACA,mBAAA;EACA,cAAA;;AAgEF,QA7CqC;EA6CrC,YA3CI;IACE,qBAAA;IACA,gBAAA;IACA,sBAAA;;EAwCN,YApCI;IACE,qBAAA;IACA,WAAA;IACA,sBAAA;;EAiCN,YA9BI;IACE,gBAAA;IACA,sBAAA;;EA4BN,YAtBI;EAsBJ,YArBI;IACE,qBAAA;IACA,aAAA;IACA,gBAAA;IACA,eAAA;IACA,sBAAA;;EAgBN,YAdI,OAAO,MAAK;EAchB,YAbI,UAAU,MAAK;IACb,WAAA;IACA,cAAA;;EAWN,YAJI,cAAc;IACZ,MAAA;;;AAWN,gBAGE;AAHF,gBAIE;AAJF,gBAKE;AALF,gBAME;AANF,gBAOE;EACE,aAAA;EACA,gBAAA;EACA,gBAAA;;AAVJ,gBAcE;AAdF,gBAeE;EACE,gBAAA;;AAhBJ,gBAoBE;ENiQA,kBAAA;EACA,mBAAA;;AMtRF,gBAwBE;EACE,gBAAA;;AAUF,QANmC;EAMnC,gBALE;IACE,iBAAA;;;AA/BN,gBAuCE,cAAc;EACZ,MAAA;EACA,WAAA;;ACxZJ;EACE,qBAAA;EACA,gBAAA;EACA,mBAAA;EACA,kBAAA;EACA,sBAAA;EACA,eAAA;EACA,sBAAA;EACA,6BAAA;EACA,mBAAA;EP4gBA,iBAAA;EACA,eAAA;EACA,wBAAA;EACA,kBAAA;EApSA,yBAAA;EACG,sBAAA;EACC,qBAAA;EACC,oBAAA;EACG,iBAAA;;AO3OR,IAAC;EPWD,oBAAA;EAEA,0CAAA;EACA,oBAAA;;AOVA,IAAC;AACD,IAAC;EACC,cAAA;EACA,qBAAA;;AAGF,IAAC;AACD,IAAC;EACC,UAAA;EACA,sBAAA;EPwFF,wDAAA;EACQ,gDAAA;;AOrFR,IAAC;AACD,IAAC;AACD,QAAQ,UAAW;EACjB,mBAAA;EACA,oBAAA;EPqPF,aAAA;EAGA,yBAAA;EAxKA,wBAAA;EACQ,gBAAA;;AOvEV;EPicE,cAAA;EACA,yBA
AA;EACA,qBAAA;;AAEA,YAAC;AACD,YAAC;AACD,YAAC;AACD,YAAC;AACD,KAAM,iBAAgB;EACpB,cAAA;EACA,yBAAA;EACI,qBAAA;;AAEN,YAAC;AACD,YAAC;AACD,KAAM,iBAAgB;EACpB,sBAAA;;AAKA,YAHD;AAGC,YAFD;AAEC,QADM,UAAW;AAEjB,YAJD,SAIE;AAAD,YAHD,UAGE;AAAD,QAFM,UAAW,aAEhB;AACD,YALD,SAKE;AAAD,YAJD,UAIE;AAAD,QAHM,UAAW,aAGhB;AACD,YAND,SAME;AAAD,YALD,UAKE;AAAD,QAJM,UAAW,aAIhB;AACD,YAPD,SAOE;AAAD,YAND,UAME;AAAD,QALM,UAAW,aAKhB;EACC,yBAAA;EACI,qBAAA;;AO5dV,YPgeE;EACE,cAAA;EACA,yBAAA;;AO/dJ;EP8bE,cAAA;EACA,yBAAA;EACA,qBAAA;;AAEA,YAAC;AACD,YAAC;AACD,YAAC;AACD,YAAC;AACD,KAAM,iBAAgB;EACpB,cAAA;EACA,yBAAA;EACI,qBAAA;;AAEN,YAAC;AACD,YAAC;AACD,KAAM,iBAAgB;EACpB,sBAAA;;AAKA,YAHD;AAGC,YAFD;AAEC,QADM,UAAW;AAEjB,YAJD,SAIE;AAAD,YAHD,UAGE;AAAD,QAFM,UAAW,aAEhB;AACD,YALD,SAKE;AAAD,YAJD,UAIE;AAAD,QAHM,UAAW,aAGhB;AACD,YAND,SAME;AAAD,YALD,UAKE;AAAD,QAJM,UAAW,aAIhB;AACD,YAPD,SAOE;AAAD,YAND,UAME;AAAD,QALM,UAAW,aAKhB;EACC,yBAAA;EACI,qBAAA;;AOzdV,YP6dE;EACE,cAAA;EACA,yBAAA;;AO3dJ;EP0bE,cAAA;EACA,yBAAA;EACA,qBAAA;;AAEA,YAAC;AACD,YAAC;AACD,YAAC;AACD,YAAC;AACD,KAAM,iBAAgB;EACpB,cAAA;EACA,yBAAA;EACI,qBAAA;;AAEN,YAAC;AACD,YAAC;AACD,KAAM,iBAAgB;EACpB,sBAAA;;AAKA,YAHD;AAGC,YAFD;AAEC,QADM,UAAW;AAEjB,YAJD,SAIE;AAAD,YAHD,UAGE;AAAD,QAFM,UAAW,aAEhB;AACD,YALD,SAKE;AAAD,YAJD,UAIE;AAAD,QAHM,UAAW,aAGhB;AACD,YAND,SAME;AAAD,YALD,UAKE;AAAD,QAJM,UAAW,aAIhB;AACD,YAPD,SAOE;AAAD,YAND,UAME;AAAD,QALM,UAAW,aAKhB;EACC,yBAAA;EACI,qBAAA;;AOrdV,YPydE;EACE,cAAA;EACA,yBAAA;;AOvdJ;EPsbE,cAAA;EACA,yBAAA;EACA,qBAAA;;AAEA,SAAC;AACD,SAAC;AACD,SAAC;AACD,SAAC;AACD,KAAM,iBAAgB;EACpB,cAAA;EACA,yBAAA;EACI,qBAAA;;AAEN,SAAC;AACD,SAAC;AACD,KAAM,iBAAgB;EACpB,sBAAA;;AAKA,SAHD;AAGC,SAFD;AAEC,QADM,UAAW;AAEjB,SAJD,SAIE;AAAD,SAHD,UAGE;AAAD,QAFM,UAAW,UAEhB;AACD,SALD,SAKE;AAAD,SAJD,UAIE;AAAD,QAHM,UAAW,UAGhB;AACD,SAND,SAME;AAAD,SALD,UAKE;AAAD,QAJM,UAAW,UAIhB;AACD,SAPD,SAOE;AAAD,SAND,UAME;AAAD,QALM,UAAW,UAKhB;EACC,yBAAA;EACI,qBAAA;;AOjdV,SPqdE;EACE,cAAA;EACA,yBAAA;;AOndJ;EPkbE,cAAA;EACA,yBAAA;EACA,qBAAA;;AAEA,YAAC;AACD,YAAC;AACD,YAAC;AACD,YAAC;AACD,KAAM,iBAAgB;EACpB,cAAA;EACA,
yBAAA;EACI,qBAAA;;AAEN,YAAC;AACD,YAAC;AACD,KAAM,iBAAgB;EACpB,sBAAA;;AAKA,YAHD;AAGC,YAFD;AAEC,QADM,UAAW;AAEjB,YAJD,SAIE;AAAD,YAHD,UAGE;AAAD,QAFM,UAAW,aAEhB;AACD,YALD,SAKE;AAAD,YAJD,UAIE;AAAD,QAHM,UAAW,aAGhB;AACD,YAND,SAME;AAAD,YALD,UAKE;AAAD,QAJM,UAAW,aAIhB;AACD,YAPD,SAOE;AAAD,YAND,UAME;AAAD,QALM,UAAW,aAKhB;EACC,yBAAA;EACI,qBAAA;;AO7cV,YPidE;EACE,cAAA;EACA,yBAAA;;AO/cJ;EP8aE,cAAA;EACA,yBAAA;EACA,qBAAA;;AAEA,WAAC;AACD,WAAC;AACD,WAAC;AACD,WAAC;AACD,KAAM,iBAAgB;EACpB,cAAA;EACA,yBAAA;EACI,qBAAA;;AAEN,WAAC;AACD,WAAC;AACD,KAAM,iBAAgB;EACpB,sBAAA;;AAKA,WAHD;AAGC,WAFD;AAEC,QADM,UAAW;AAEjB,WAJD,SAIE;AAAD,WAHD,UAGE;AAAD,QAFM,UAAW,YAEhB;AACD,WALD,SAKE;AAAD,WAJD,UAIE;AAAD,QAHM,UAAW,YAGhB;AACD,WAND,SAME;AAAD,WALD,UAKE;AAAD,QAJM,UAAW,YAIhB;AACD,WAPD,SAOE;AAAD,WAND,UAME;AAAD,QALM,UAAW,YAKhB;EACC,yBAAA;EACI,qBAAA;;AOzcV,WP6cE;EACE,cAAA;EACA,yBAAA;;AOtcJ;EACE,cAAA;EACA,mBAAA;EACA,eAAA;EACA,gBAAA;;AAEA;AACA,SAAC;AACD,SAAC;AACD,QAAQ,UAAW;EACjB,6BAAA;EPgCF,wBAAA;EACQ,gBAAA;;AO9BR;AACA,SAAC;AACD,SAAC;AACD,SAAC;EACC,yBAAA;;AAEF,SAAC;AACD,SAAC;EACC,cAAA;EACA,0BAAA;EACA,6BAAA;;AAIA,SAFD,UAEE;AAAD,QADM,UAAW,UAChB;AACD,SAHD,UAGE;AAAD,QAFM,UAAW,UAEhB;EACC,cAAA;EACA,qBAAA;;AASN;EPsaE,kBAAA;EACA,eAAA;EACA,iBAAA;EACA,kBAAA;;AOraF;EPkaE,iBAAA;EACA,eAAA;EACA,gBAAA;EACA,kBAAA;;AOjaF;EP8ZE,gBAAA;EACA,eAAA;EACA,gBAAA;EACA,kBAAA;;AOzZF;EACE,cAAA;EACA,WAAA;EACA,eAAA;EACA,gBAAA;;AAIF,UAAW;EACT,eAAA;;AAOA,KAHG,eAGF;AAAD,KAFG,cAEF;AAAD,KADG,eACF;EACC,WAAA;;AC/IJ;EACE,UAAA;ERsHA,wCAAA;EACQ,gCAAA;;AQrHR,KAAC;EACC,UAAA;;AAIJ;EACE,aAAA;;AACA,SAAC;EACC,cAAA;;AAGJ;EACE,kBAAA;EACA,SAAA;EACA,gBAAA;ERsGA,qCAAA;EACQ,6BAAA;;ASvHV;EACE,aAAa,sBAAb;EACA,qDAAA;EACA,2TAAA;;AAOF;EACE,kBAAA;EACA,QAAA;EACA,qBAAA;EACA,aAAa,sBAAb;EACA,kBAAA;EACA,mBAAA;EACA,cAAA;EACA,mCAAA;EACA,kCAAA;;AAIkC,mBAAC;EAAU,SAAS,KAAT;;AACX,eAAC;EAAU,SAAS,KAAT;;AACX,eAAC;EAAU,SAAS,OAAT;;AACX,gBAAC;EAAU,SAAS,OAAT;;AACX,gBAAC;EAAU,SAAS,OAAT;;AACX,mBAAC;EAAU,SAAS,OAAT;;AACX,iBAAC;EAAU,SAAS,OAAT;;AACX,gBAAC;EAAU,SAAS,OAAT;;AACX,gBAAC;EAAU,SAAS,OAAT;;AACX,
iBAAC;EAAU,SAAS,OAAT;;AACX,gBAAC;EAAU,SAAS,OAAT;;AACX,eAAC;EAAU,SAAS,OAAT;;AACX,qBAAC;EAAU,SAAS,OAAT;;AACX,eAAC;EAAU,SAAS,OAAT;;AACX,eAAC;EAAU,SAAS,OAAT;;AACX,mBAAC;EAAU,SAAS,OAAT;;AACX,aAAC;EAAU,SAAS,OAAT;;AACX,kBAAC;EAAU,SAAS,OAAT;;AACX,aAAC;EAAU,SAAS,OAAT;;AACX,iBAAC;EAAU,SAAS,OAAT;;AACX,kBAAC;EAAU,SAAS,OAAT;;AACX,mBAAC;EAAU,SAAS,OAAT;;AACX,cAAC;EAAU,SAAS,OAAT;;AACX,iBAAC;EAAU,SAAS,OAAT;;AACX,cAAC;EAAU,SAAS,OAAT;;AACX,gBAAC;EAAU,SAAS,OAAT;;AACX,eAAC;EAAU,SAAS,OAAT;;AACX,eAAC;EAAU,SAAS,OAAT;;AACX,eAAC;EAAU,SAAS,OAAT;;AACX,eAAC;EAAU,SAAS,OAAT;;AACX,uBAAC;EAAU,SAAS,OAAT;;AACX,mBAAC;EAAU,SAAS,OAAT;;AACX,iBAAC;EAAU,SAAS,OAAT;;AACX,gBAAC;EAAU,SAAS,OAAT;;AACX,sBAAC;EAAU,SAAS,OAAT;;AACX,iBAAC;EAAU,SAAS,OAAT;;AACX,kBAAC;EAAU,SAAS,OAAT;;AACX,mBAAC;EAAU,SAAS,OAAT;;AACX,eAAC;EAAU,SAAS,OAAT;;AACX,eAAC;EAAU,SAAS,OAAT;;AACX,qBAAC;EAAU,SAAS,OAAT;;AACX,qBAAC;EAAU,SAAS,OAAT;;AACX,sBAAC;EAAU,SAAS,OAAT;;AACX,oBAAC;EAAU,SAAS,OAAT;;AACX,iBAAC;EAAU,SAAS,OAAT;;AACX,kBAAC;EAAU,SAAS,OAAT;;AACX,cAAC;EAAU,SAAS,OAAT;;AACX,eAAC;EAAU,SAAS,OAAT;;AACX,eAAC;EAAU,SAAS,OAAT;;AACX,mBAAC;EAAU,SAAS,OAAT;;AACX,gBAAC;EAAU,SAAS,OAAT;;AACX,iBAAC;EAAU,SAAS,OAAT;;AACX,eAAC;EAAU,SAAS,OAAT;;AACX,eAAC;EAAU,SAAS,OAAT;;AACX,iBAAC;EAAU,SAAS,OAAT;;AACX,sBAAC;EAAU,SAAS,OAAT;;AACX,qBAAC;EAAU,SAAS,OAAT;;AACX,qBAAC;EAAU,SAAS,OAAT;;AACX,uBAAC;EAAU,SAAS,OAAT;;AACX,sBAAC;EAAU,SAAS,OAAT;;AACX,wBAAC;EAAU,SAAS,OAAT;;AACX,eAAC;EAAU,SAAS,OAAT;;AACX,sBAAC;EAAU,SAAS,OAAT;;AACX,uBAAC;EAAU,SAAS,OAAT;;AACX,yBAAC;EAAU,SAAS,OAAT;;AACX,kBAAC;EAAU,SAAS,OAAT;;AACX,qBAAC;EAAU,SAAS,OAAT;;AACX,iBAAC;EAAU,SAAS,OAAT;;AACX,eAAC;EAAU,SAAS,OAAT;;AACX,eAAC;EAAU,SAAS,OAAT;;AACX,gBAAC;EAAU,SAAS,OAAT;;AACX,gBAAC;EAAU,SAAS,OAAT;;AACX,eAAC;EAAU,SAAS,OAAT;;AACX,wBAAC;EAAU,SAAS,OAAT;;AACX,wBAAC;EAAU,SAAS,OAAT;;AACX,mBAAC;EAAU,SAAS,OAAT;;AACX,eAAC;EAAU,SAAS,OAAT;;AACX,gBAAC;EAAU,SAAS,OAAT;;AACX,eAAC;EAAU,SAAS,OAAT;;AACX,kBAAC;EAAU,SAAS,OAAT;;AACX,uBAAC;EAAU,SAAS,OAAT;;AACX,uBAAC;EAAU,SAAS,OAAT;;AACX,gBAAC;EAAU,SAAS,OAAT;;AACX,uBAAC;EAAU,SAAS,OAAT;;AACX
,wBAAC;EAAU,SAAS,OAAT;;AACX,oBAAC;EAAU,SAAS,OAAT;;AACX,qBAAC;EAAU,SAAS,OAAT;;AACX,sBAAC;EAAU,SAAS,OAAT;;AACX,kBAAC;EAAU,SAAS,OAAT;;AACX,wBAAC;EAAU,SAAS,OAAT;;AACX,oBAAC;EAAU,SAAS,OAAT;;AACX,qBAAC;EAAU,SAAS,OAAT;;AACX,wBAAC;EAAU,SAAS,OAAT;;AACX,oBAAC;EAAU,SAAS,OAAT;;AACX,qBAAC;EAAU,SAAS,OAAT;;AACX,qBAAC;EAAU,SAAS,OAAT;;AACX,sBAAC;EAAU,SAAS,OAAT;;AACX,mBAAC;EAAU,SAAS,OAAT;;AACX,qBAAC;EAAU,SAAS,OAAT;;AACX,oBAAC;EAAU,SAAS,OAAT;;AACX,sBAAC;EAAU,SAAS,OAAT;;AACX,uBAAC;EAAU,SAAS,OAAT;;AACX,2BAAC;EAAU,SAAS,OAAT;;AACX,eAAC;EAAU,SAAS,OAAT;;AACX,eAAC;EAAU,SAAS,OAAT;;AACX,eAAC;EAAU,SAAS,OAAT;;AACX,mBAAC;EAAU,SAAS,OAAT;;AACX,oBAAC;EAAU,SAAS,OAAT;;AACX,uBAAC;EAAU,SAAS,OAAT;;AACX,gBAAC;EAAU,SAAS,OAAT;;AACX,mBAAC;EAAU,SAAS,OAAT;;AACX,iBAAC;EAAU,SAAS,OAAT;;AACX,kBAAC;EAAU,SAAS,OAAT;;AACX,iBAAC;EAAU,SAAS,OAAT;;AACX,qBAAC;EAAU,SAAS,OAAT;;AACX,uBAAC;EAAU,SAAS,OAAT;;AACX,kBAAC;EAAU,SAAS,OAAT;;AACX,wBAAC;EAAU,SAAS,OAAT;;AACX,uBAAC;EAAU,SAAS,OAAT;;AACX,sBAAC;EAAU,SAAS,OAAT;;AACX,0BAAC;EAAU,SAAS,OAAT;;AACX,4BAAC;EAAU,SAAS,OAAT;;AACX,cAAC;EAAU,SAAS,OAAT;;AACX,mBAAC;EAAU,SAAS,OAAT;;AACX,eAAC;EAAU,SAAS,OAAT;;AACX,sBAAC;EAAU,SAAS,OAAT;;AACX,oBAAC;EAAU,SAAS,OAAT;;AACX,sBAAC;EAAU,SAAS,OAAT;;AACX,qBAAC;EAAU,SAAS,OAAT;;AACX,oBAAC;EAAU,SAAS,OAAT;;AACX,kBAAC;EAAU,SAAS,OAAT;;AACX,oBAAC;EAAU,SAAS,OAAT;;AACX,6BAAC;EAAU,SAAS,OAAT;;AACX,4BAAC;EAAU,SAAS,OAAT;;AACX,0BAAC;EAAU,SAAS,OAAT;;AACX,4BAAC;EAAU,SAAS,OAAT;;AACX,gBAAC;EAAU,SAAS,OAAT;;AACX,iBAAC;EAAU,SAAS,OAAT;;AACX,gBAAC;EAAU,SAAS,OAAT;;AACX,iBAAC;EAAU,SAAS,OAAT;;AACX,oBAAC;EAAU,SAAS,OAAT;;AACX,qBAAC;EAAU,SAAS,OAAT;;AACX,oBAAC;EAAU,SAAS,OAAT;;AACX,oBAAC;EAAU,SAAS,OAAT;;AACX,sBAAC;EAAU,SAAS,OAAT;;AACX,eAAC;EAAU,SAAS,OAAT;;AACX,gBAAC;EAAU,SAAS,OAAT;;AACX,kBAAC;EAAU,SAAS,OAAT;;AACX,cAAC;EAAU,SAAS,OAAT;;AACX,cAAC;EAAU,SAAS,OAAT;;AACX,eAAC;EAAU,SAAS,OAAT;;AACX,2BAAC;EAAU,SAAS,OAAT;;AACX,+BAAC;EAAU,SAAS,OAAT;;AACX,wBAAC;EAAU,SAAS,OAAT;;AACX,4BAAC;EAAU,SAAS,OAAT;;AACX,6BAAC;EAAU,SAAS,OAAT;;AACX,iCAAC;EAAU,SAAS,OAAT;;AACX,oBAAC;EAAU,SAAS,OAAT;;AACX,iBAAC;EAAU
,SAAS,OAAT;;AACX,wBAAC;EAAU,SAAS,OAAT;;AACX,sBAAC;EAAU,SAAS,OAAT;;AACX,iBAAC;EAAU,SAAS,OAAT;;AACX,gBAAC;EAAU,SAAS,OAAT;;AACX,kBAAC;EAAU,SAAS,OAAT;;AACX,qBAAC;EAAU,SAAS,OAAT;;AACX,iBAAC;EAAU,SAAS,OAAT;;AACX,eAAC;EAAU,SAAS,OAAT;;AACX,eAAC;EAAU,SAAS,OAAT;;AACX,gBAAC;EAAU,SAAS,OAAT;;AACX,iBAAC;EAAU,SAAS,OAAT;;AACX,iBAAC;EAAU,SAAS,OAAT;;AACX,eAAC;EAAU,SAAS,OAAT;;AACX,sBAAC;EAAU,SAAS,OAAT;;AACX,uBAAC;EAAU,SAAS,OAAT;;AACX,wBAAC;EAAU,SAAS,OAAT;;AACX,sBAAC;EAAU,SAAS,OAAT;;AACX,sBAAC;EAAU,SAAS,OAAT;;AACX,sBAAC;EAAU,SAAS,OAAT;;AACX,mBAAC;EAAU,SAAS,OAAT;;AACX,kBAAC;EAAU,SAAS,OAAT;;AACX,iBAAC;EAAU,SAAS,OAAT;;AACX,qBAAC;EAAU,SAAS,OAAT;;AACX,mBAAC;EAAU,SAAS,OAAT;;AACX,oBAAC;EAAU,SAAS,OAAT;;AACX,gBAAC;EAAU,SAAS,OAAT;;AACX,gBAAC;EAAU,SAAS,OAAT;;AACX,mBAAC;EAAU,SAAS,OAAT;;AACX,mBAAC;EAAU,SAAS,OAAT;;AACX,oBAAC;EAAU,SAAS,OAAT;;AACX,uBAAC;EAAU,SAAS,OAAT;;AACX,sBAAC;EAAU,SAAS,OAAT;;AACX,oBAAC;EAAU,SAAS,OAAT;;AACX,oBAAC;EAAU,SAAS,OAAT;;AACX,oBAAC;EAAU,SAAS,OAAT;;AACX,yBAAC;EAAU,SAAS,OAAT;;AACX,4BAAC;EAAU,SAAS,OAAT;;AACX,yBAAC;EAAU,SAAS,OAAT;;AACX,uBAAC;EAAU,SAAS,OAAT;;AACX,uBAAC;EAAU,SAAS,OAAT;;AACX,yBAAC;EAAU,SAAS,OAAT;;AClO/C;EACE,qBAAA;EACA,QAAA;EACA,SAAA;EACA,gBAAA;EACA,sBAAA;EACA,qBAAA;EACA,mCAAA;EACA,kCAAA;;AAIF;EACE,kBAAA;;AAIF,gBAAgB;EACd,UAAA;;AAIF;EACE,kBAAA;EACA,SAAA;EACA,OAAA;EACA,aAAA;EACA,aAAA;EACA,WAAA;EACA,gBAAA;EACA,cAAA;EACA,eAAA;EACA,gBAAA;EACA,eAAA;EACA,yBAAA;EACA,yBAAA;EACA,qCAAA;EACA,kBAAA;EV+EA,mDAAA;EACQ,2CAAA;EU9ER,4BAAA;;AAKA,cAAC;EACC,QAAA;EACA,UAAA;;AAxBJ,cA4BE;EVsVA,WAAA;EACA,aAAA;EACA,gBAAA;EACA,yBAAA;;AUrXF,cAiCE,KAAK;EACH,cAAA;EACA,iBAAA;EACA,WAAA;EACA,mBAAA;EACA,wBAAA;EACA,cAAA;EACA,mBAAA;;AAMF,cADa,KAAK,IACjB;AACD,cAFa,KAAK,IAEjB;EACC,qBAAA;EACA,cAAA;EACA,yBAAA;;AAMF,cADa,UAAU;AAEvB,cAFa,UAAU,IAEtB;AACD,cAHa,UAAU,IAGtB;EACC,cAAA;EACA,qBAAA;EACA,UAAA;EACA,yBAAA;;AASF,cADa,YAAY;AAEzB,cAFa,YAAY,IAExB;AACD,cAHa,YAAY,IAGxB;EACC,cAAA;;AAKF,cADa,YAAY,IACxB;AACD,cAFa,YAAY,IAExB;EACC,qBAAA;EACA,6BAAA;EACA,sBAAA;EVoPF,mEAAA;EUlPE,mBAAA;;AAKJ,KAEE;EACE,cAAA;;AAHJ,KA
OE;EACE,UAAA;;AAQJ;EACE,UAAA;EACA,QAAA;;AAQF;EACE,OAAA;EACA,WAAA;;AAIF;EACE,cAAA;EACA,iBAAA;EACA,eAAA;EACA,wBAAA;EACA,cAAA;;AAIF;EACE,eAAA;EACA,OAAA;EACA,QAAA;EACA,SAAA;EACA,MAAA;EACA,YAAA;;AAIF,WAAY;EACV,QAAA;EACA,UAAA;;AAQF,OAGE;AAFF,oBAAqB,UAEnB;EACE,aAAA;EACA,wBAAA;EACA,SAAS,EAAT;;AANJ,OASE;AARF,oBAAqB,UAQnB;EACE,SAAA;EACA,YAAA;EACA,kBAAA;;AAsBJ,QAb2C;EACzC,aACE;IAnEF,UAAA;IACA,QAAA;;EAiEA,aAME;IA9DF,OAAA;IACA,WAAA;;;AC7IF;AACA;EACE,kBAAA;EACA,qBAAA;EACA,sBAAA;;AAJF,UAKE;AAJF,mBAIE;EACE,kBAAA;EACA,WAAA;;AAEA,UAJF,OAIG;AAAD,mBAJF,OAIG;AACD,UALF,OAKG;AAAD,mBALF,OAKG;AACD,UANF,OAMG;AAAD,mBANF,OAMG;AACD,UAPF,OAOG;AAAD,mBAPF,OAOG;EACC,UAAA;;AAEF,UAVF,OAUG;AAAD,mBAVF,OAUG;EAEC,aAAA;;AAMN,UACE,KAAK;AADP,UAEE,KAAK;AAFP,UAGE,WAAW;AAHb,UAIE,WAAW;EACT,iBAAA;;AAKJ;EACE,iBAAA;;AADF,YAIE;AAJF,YAKE;EACE,WAAA;;AANJ,YAQE;AARF,YASE;AATF,YAUE;EACE,gBAAA;;AAIJ,UAAW,OAAM,IAAI,cAAc,IAAI,aAAa,IAAI;EACtD,gBAAA;;AAIF,UAAW,OAAM;EACf,cAAA;;AACA,UAFS,OAAM,YAEd,IAAI,aAAa,IAAI;EX4CtB,6BAAA;EACG,0BAAA;;AWxCL,UAAW,OAAM,WAAW,IAAI;AAChC,UAAW,mBAAkB,IAAI;EX8C/B,4BAAA;EACG,yBAAA;;AW1CL,UAAW;EACT,WAAA;;AAEF,UAAW,aAAY,IAAI,cAAc,IAAI,aAAc;EACzD,gBAAA;;AAEF,UAAW,aAAY,YACrB,OAAM;AADR,UAAW,aAAY,YAErB;EXyBA,6BAAA;EACG,0BAAA;;AWtBL,UAAW,aAAY,WAAY,OAAM;EX6BvC,4BAAA;EACG,yBAAA;;AWzBL,UAAW,iBAAgB;AAC3B,UAAU,KAAM;EACd,UAAA;;AAQF,aAAc;EX2bZ,gBAAA;EACA,eAAA;EACA,gBAAA;EACA,kBAAA;;AW7bF,aAAc;EX0bZ,iBAAA;EACA,eAAA;EACA,gBAAA;EACA,kBAAA;;AW5bF,aAAc;EXybZ,kBAAA;EACA,eAAA;EACA,iBAAA;EACA,kBAAA;;AWrbF,UAAW,OAAO;EAChB,iBAAA;EACA,kBAAA;;AAEF,UAAW,UAAU;EACnB,kBAAA;EACA,mBAAA;;AAKF,UAAU,KAAM;EXId,wDAAA;EACQ,gDAAA;;AWDR,UAJQ,KAAM,iBAIb;EXAD,wBAAA;EACQ,gBAAA;;AWMV,IAAK;EACH,cAAA;;AAGF,OAAQ;EACN,uBAAA;EACA,sBAAA;;AAGF,OAAQ,QAAQ;EACd,uBAAA;;AAOF,mBACE;AADF,mBAEE;AAFF,mBAGE,aAAa;EACX,cAAA;EACA,WAAA;EACA,WAAA;EACA,eAAA;;AAPJ,mBAWE,aAEE;EACE,WAAA;;AAdN,mBAkBE,OAAO;AAlBT,mBAmBE,OAAO;AAnBT,mBAoBE,aAAa;AApBf,mBAqBE,aAAa;EACX,gBAAA;EACA,cAAA;;AAKF,mBADkB,OACjB,IAAI,cAAc,IAAI;EACrB,gBAAA;;AAEF,mBAJkB,OAIjB,YAAY,IAAI;EACf,4BAAA;EXtEF
,6BAAA;EACC,4BAAA;;AWwED,mBARkB,OAQjB,WAAW,IAAI;EACd,8BAAA;EXlFF,0BAAA;EACC,yBAAA;;AWqFH,mBAAoB,aAAY,IAAI,cAAc,IAAI,aAAc;EAClE,gBAAA;;AAEF,mBAAoB,aAAY,YAAY,IAAI,aAC9C,OAAM;AADR,mBAAoB,aAAY,YAAY,IAAI,aAE9C;EXnFA,6BAAA;EACC,4BAAA;;AWsFH,mBAAoB,aAAY,WAAW,IAAI,cAAe,OAAM;EX/FlE,0BAAA;EACC,yBAAA;;AWuGH;EACE,cAAA;EACA,WAAA;EACA,mBAAA;EACA,yBAAA;;AAJF,oBAKE;AALF,oBAME;EACE,WAAA;EACA,mBAAA;EACA,SAAA;;AATJ,oBAWE,aAAa;EACX,WAAA;;AAMJ,uBAAwB,OAAO,QAAO;AACtC,uBAAwB,OAAO,QAAO;EACpC,aAAA;;AC1NF;EACE,kBAAA;EACA,cAAA;EACA,yBAAA;;AAGA,YAAC;EACC,WAAA;EACA,eAAA;EACA,gBAAA;;AATJ,YAYE;EAIE,WAAA;EAEA,WAAA;EACA,gBAAA;;AASJ,eAAgB;AAChB,eAAgB;AAChB,eAAgB,mBAAmB;EZ02BjC,YAAA;EACA,kBAAA;EACA,eAAA;EACA,iBAAA;EACA,kBAAA;;AAEA,MAAM,eYl3BQ;AZk3Bd,MAAM,eYj3BQ;AZi3Bd,MAAM,eYh3BQ,mBAAmB;EZi3B/B,YAAA;EACA,iBAAA;;AAGF,QAAQ,eYv3BM;AZu3Bd,QAAQ,eYt3BM;AZs3Bd,QAAQ,eYr3BM,mBAAmB;AZs3BjC,MAAM,UAAU,eYx3BF;AZw3Bd,MAAM,UAAU,eYv3BF;AZu3Bd,MAAM,UAAU,eYt3BF,mBAAmB;EZu3B/B,YAAA;;AYt3BJ,eAAgB;AAChB,eAAgB;AAChB,eAAgB,mBAAmB;EZu2BjC,YAAA;EACA,iBAAA;EACA,eAAA;EACA,gBAAA;EACA,kBAAA;;AAEA,MAAM,eY/2BQ;AZ+2Bd,MAAM,eY92BQ;AZ82Bd,MAAM,eY72BQ,mBAAmB;EZ82B/B,YAAA;EACA,iBAAA;;AAGF,QAAQ,eYp3BM;AZo3Bd,QAAQ,eYn3BM;AZm3Bd,QAAQ,eYl3BM,mBAAmB;AZm3BjC,MAAM,UAAU,eYr3BF;AZq3Bd,MAAM,UAAU,eYp3BF;AZo3Bd,MAAM,UAAU,eYn3BF,mBAAmB;EZo3B/B,YAAA;;AY/2BJ;AACA;AACA,YAAa;EACX,mBAAA;;AAEA,kBAAC,IAAI,cAAc,IAAI;AAAvB,gBAAC,IAAI,cAAc,IAAI;AAAvB,YAHW,cAGV,IAAI,cAAc,IAAI;EACrB,gBAAA;;AAIJ;AACA;EACE,SAAA;EACA,mBAAA;EACA,sBAAA;;AAKF;EACE,iBAAA;EACA,eAAA;EACA,mBAAA;EACA,cAAA;EACA,cAAA;EACA,kBAAA;EACA,yBAAA;EACA,yBAAA;EACA,kBAAA;;AAGA,kBAAC;EACC,iBAAA;EACA,eAAA;EACA,kBAAA;;AAEF,kBAAC;EACC,kBAAA;EACA,eAAA;EACA,kBAAA;;AApBJ,kBAwBE,MAAK;AAxBP,kBAyBE,MAAK;EACH,aAAA;;AAKJ,YAAa,cAAa;AAC1B,kBAAkB;AAClB,gBAAgB,YAAa;AAC7B,gBAAgB,YAAa,aAAa;AAC1C,gBAAgB,YAAa;AAC7B,gBAAgB,WAAY,OAAM,IAAI,aAAa,IAAI;AACvD,gBAAgB,WAAY,aAAY,IAAI,aAAc;EZIxD,6BAAA;EACG,0BAAA;;AYFL,kBAAkB;EAChB,eAAA;;AAEF,YAAa,cAAa;AAC1B,kBAAkB;AAClB,gBAAgB,WAAY;AAC5B,gBAAgB,WAAY,aAAa;AACzC,gBAAgB,WAAY;AAC5B,gB
AAgB,YAAa,OAAM,IAAI;AACvC,gBAAgB,YAAa,aAAY,IAAI,cAAe;EZA1D,4BAAA;EACG,yBAAA;;AYEL,kBAAkB;EAChB,cAAA;;AAKF;EACE,kBAAA;EAGA,YAAA;EACA,mBAAA;;AALF,gBASE;EACE,kBAAA;;AAVJ,gBASE,OAEE;EACE,iBAAA;;AAGF,gBANF,OAMG;AACD,gBAPF,OAOG;AACD,gBARF,OAQG;EACC,UAAA;;AAKJ,gBAAC,YACC;AADF,gBAAC,YAEC;EACE,kBAAA;;AAGJ,gBAAC,WACC;AADF,gBAAC,WAEC;EACE,iBAAA;;ACjJN;EACE,gBAAA;EACA,eAAA;EACA,gBAAA;;AAHF,IAME;EACE,kBAAA;EACA,cAAA;;AARJ,IAME,KAIE;EACE,kBAAA;EACA,cAAA;EACA,kBAAA;;AACA,IARJ,KAIE,IAIG;AACD,IATJ,KAIE,IAKG;EACC,qBAAA;EACA,yBAAA;;AAKJ,IAhBF,KAgBG,SAAU;EACT,cAAA;;AAEA,IAnBJ,KAgBG,SAAU,IAGR;AACD,IApBJ,KAgBG,SAAU,IAIR;EACC,cAAA;EACA,qBAAA;EACA,6BAAA;EACA,mBAAA;;AAOJ,IADF,MAAM;AAEJ,IAFF,MAAM,IAEH;AACD,IAHF,MAAM,IAGH;EACC,yBAAA;EACA,qBAAA;;AAzCN,IAkDE;EboVA,WAAA;EACA,aAAA;EACA,gBAAA;EACA,yBAAA;;AazYF,IAyDE,KAAK,IAAI;EACP,eAAA;;AASJ;EACE,gCAAA;;AADF,SAEE;EACE,WAAA;EAEA,mBAAA;;AALJ,SAEE,KAME;EACE,iBAAA;EACA,wBAAA;EACA,6BAAA;EACA,0BAAA;;AACA,SAXJ,KAME,IAKG;EACC,qCAAA;;AAMF,SAlBJ,KAiBG,OAAQ;AAEP,SAnBJ,KAiBG,OAAQ,IAEN;AACD,SApBJ,KAiBG,OAAQ,IAGN;EACC,cAAA;EACA,yBAAA;EACA,yBAAA;EACA,gCAAA;EACA,eAAA;;AAKN,SAAC;EAqDD,WAAA;EA8BA,gBAAA;;AAnFA,SAAC,cAuDD;EACE,WAAA;;AAxDF,SAAC,cAuDD,KAEG;EACC,kBAAA;EACA,kBAAA;;AA3DJ,SAAC,cA+DD,YAAY;EACV,SAAA;EACA,UAAA;;AAYJ,QATqC;EASrC,SA7EG,cAqEC;IACE,mBAAA;IACA,SAAA;;EAMN,SA7EG,cAqEC,KAGE;IACE,gBAAA;;;AAzEN,SAAC,cAqFD,KAAK;EAEH,eAAA;EACA,kBAAA;;AAxFF,SAAC,cA2FD,UAAU;AA3FV,SAAC,cA4FD,UAAU,IAAG;AA5Fb,SAAC,cA6FD,UAAU,IAAG;EACX,yBAAA;;AAcJ,QAXqC;EAWrC,SA5GG,cAkGC,KAAK;IACH,gCAAA;IACA,0BAAA;;EAQN,SA5GG,cAsGC,UAAU;EAMd,SA5GG,cAuGC,UAAU,IAAG;EAKjB,SA5GG,cAwGC,UAAU,IAAG;IACX,4BAAA;;;AAhGN,UACE;EACE,WAAA;;AAFJ,UACE,KAIE;EACE,kBAAA;;AANN,UACE,KAOE;EACE,gBAAA;;AAKA,UAbJ,KAYG,OAAQ;AAEP,UAdJ,KAYG,OAAQ,IAEN;AACD,UAfJ,KAYG,OAAQ,IAGN;EACC,cAAA;EACA,yBAAA;;AAQR,YACE;EACE,WAAA;;AAFJ,YACE,KAEE;EACE,eAAA;EACA,cAAA;;AAYN;EACE,WAAA;;AADF,cAGE;EACE,WAAA;;AAJJ,cAGE,KAEG;EACC,kBAAA;EACA,kBAAA;;AAPN,cAWE,YAAY;EACV,SAAA;EACA,UAAA;;AAYJ,QATqC;EASrC,cARI;IACE,mBAAA;IACA,SAAA;;EAMN,cARI,KAGE;IA
CE,gBAAA;;;AASR;EACE,gBAAA;;AADF,mBAGE,KAAK;EAEH,eAAA;EACA,kBAAA;;AANJ,mBASE,UAAU;AATZ,mBAUE,UAAU,IAAG;AAVf,mBAWE,UAAU,IAAG;EACX,yBAAA;;AAcJ,QAXqC;EAWrC,mBAVI,KAAK;IACH,gCAAA;IACA,0BAAA;;EAQN,mBANI,UAAU;EAMd,mBALI,UAAU,IAAG;EAKjB,mBAJI,UAAU,IAAG;IACX,4BAAA;;;AAUN,YACE;EACE,aAAA;;AAFJ,YAIE;EACE,cAAA;;AASJ,SAAU;EAER,gBAAA;Eb1IA,0BAAA;EACC,yBAAA;;Ac3FH;EACE,kBAAA;EACA,gBAAA;EACA,mBAAA;EACA,6BAAA;;AAQF,QAH6C;EAG7C;IAFI,kBAAA;;;AAgBJ,QAH6C;EAG7C;IAFI,WAAA;;;AAeJ;EACE,iBAAA;EACA,mBAAA;EACA,mBAAA;EACA,kBAAA;EACA,iCAAA;EACA,kDAAA;EAEA,iCAAA;;AAEA,gBAAC;EACC,gBAAA;;AA4BJ,QAzB6C;EAyB7C;IAxBI,WAAA;IACA,aAAA;IACA,gBAAA;;EAEA,gBAAC;IACC,yBAAA;IACA,uBAAA;IACA,iBAAA;IACA,4BAAA;;EAGF,gBAAC;IACC,mBAAA;;EAKF,iBAAkB;EAClB,kBAAmB;EACnB,oBAAqB;IACnB,eAAA;IACA,gBAAA;;;AAUN,UAEE;AADF,gBACE;AAFF,UAGE;AAFF,gBAEE;EACE,mBAAA;EACA,kBAAA;;AAMF,QAJ6C;EAI7C,UATA;EASA,gBATA;EASA,UARA;EAQA,gBARA;IAKI,eAAA;IACA,cAAA;;;AAaN;EACE,aAAA;EACA,qBAAA;;AAKF,QAH6C;EAG7C;IAFI,gBAAA;;;AAKJ;AACA;EACE,eAAA;EACA,QAAA;EACA,OAAA;EACA,aAAA;;AAMF,QAH6C;EAG7C;EAAA;IAFI,gBAAA;;;AAGJ;EACE,MAAA;EACA,qBAAA;;AAEF;EACE,SAAA;EACA,gBAAA;EACA,qBAAA;;AAMF;EACE,WAAA;EACA,kBAAA;EACA,eAAA;EACA,iBAAA;EACA,YAAA;;AAEA,aAAC;AACD,aAAC;EACC,qBAAA;;AASJ,QAN6C;EACzC,OAAQ,aAAa;EACrB,OAAQ,mBAAmB;IACzB,kBAAA;;;AAWN;EACE,kBAAA;EACA,YAAA;EACA,kBAAA;EACA,iBAAA;EdwaA,eAAA;EACA,kBAAA;EcvaA,6BAAA;EACA,sBAAA;EACA,6BAAA;EACA,kBAAA;;AAIA,cAAC;EACC,aAAA;;AAdJ,cAkBE;EACE,cAAA;EACA,WAAA;EACA,WAAA;EACA,kBAAA;;AAtBJ,cAwBE,UAAU;EACR,eAAA;;AAMJ,QAH6C;EAG7C;IAFI,aAAA;;;AAUJ;EACE,mBAAA;;AADF,WAGE,KAAK;EACH,iBAAA;EACA,oBAAA;EACA,iBAAA;;AA2BF,QAxB+C;EAwB/C,WAtBE,MAAM;IACJ,gBAAA;IACA,WAAA;IACA,WAAA;IACA,aAAA;IACA,6BAAA;IACA,SAAA;IACA,gBAAA;;EAeJ,WAtBE,MAAM,eAQJ,KAAK;EAcT,WAtBE,MAAM,eASJ;IACE,0BAAA;;EAYN,WAtBE,MAAM,eAYJ,KAAK;IACH,iBAAA;;EACA,WAdJ,MAAM,eAYJ,KAAK,IAEF;EACD,WAfJ,MAAM,eAYJ,KAAK,IAGF;IACC,sBAAA;;;AAuBV,QAhB6C;EAgB7C;IAfI,WAAA;IACA,SAAA;;EAcJ,WAZI;IACE,WAAA;;EAWN,WAZI,KAEE;IACE,iBAAA;IACA,oBAAA;;EAIJ,WAAC,aAAa;IACZ,mBAAA;;;AAkBN,QAN2C;EACzC;ICnQA,sBAAA
;;EDoQA;ICvQA,uBAAA;;;ADgRF;EACE,kBAAA;EACA,mBAAA;EACA,kBAAA;EACA,iCAAA;EACA,oCAAA;Ed1KA,4FAAA;EACQ,oFAAA;EAmeR,eAAA;EACA,kBAAA;;AMhPF,QA7CqC;EA6CrC,YA3CI;IACE,qBAAA;IACA,gBAAA;IACA,sBAAA;;EAwCN,YApCI;IACE,qBAAA;IACA,WAAA;IACA,sBAAA;;EAiCN,YA9BI;IACE,gBAAA;IACA,sBAAA;;EA4BN,YAtBI;EAsBJ,YArBI;IACE,qBAAA;IACA,aAAA;IACA,gBAAA;IACA,eAAA;IACA,sBAAA;;EAgBN,YAdI,OAAO,MAAK;EAchB,YAbI,UAAU,MAAK;IACb,WAAA;IACA,cAAA;;EAWN,YAJI,cAAc;IACZ,MAAA;;;AQ7DJ,QAHiD;EAGjD,YAJA;IAEI,kBAAA;;;AAsBN,QAd6C;EAc7C;IAbI,WAAA;IACA,SAAA;IACA,cAAA;IACA,eAAA;IACA,cAAA;IACA,iBAAA;IdjMF,wBAAA;IACQ,gBAAA;;EcoMN,YAAC,aAAa;IACZ,mBAAA;;;AASN,WAAY,KAAK;EACf,aAAA;EdtOA,0BAAA;EACC,yBAAA;;AcyOH,oBAAqB,YAAY,KAAK;EdlOpC,6BAAA;EACC,4BAAA;;Ac0OH;EduQE,eAAA;EACA,kBAAA;;AcrQA,WAAC;EdoQD,gBAAA;EACA,mBAAA;;AclQA,WAAC;EdiQD,gBAAA;EACA,mBAAA;;AcxPF;EduPE,gBAAA;EACA,mBAAA;;Ac3OF,QAV6C;EAU7C;IATI,WAAA;IACA,iBAAA;IACA,kBAAA;;EAGA,YAAC,aAAa;IACZ,eAAA;;;AASN;EACE,yBAAA;EACA,qBAAA;;AAFF,eAIE;EACE,cAAA;;AACA,eAFF,cAEG;AACD,eAHF,cAGG;EACC,cAAA;EACA,6BAAA;;AATN,eAaE;EACE,cAAA;;AAdJ,eAiBE,YACE,KAAK;EACH,cAAA;;AAEA,eAJJ,YACE,KAAK,IAGF;AACD,eALJ,YACE,KAAK,IAIF;EACC,cAAA;EACA,6BAAA;;AAIF,eAXJ,YAUE,UAAU;AAER,eAZJ,YAUE,UAAU,IAEP;AACD,eAbJ,YAUE,UAAU,IAGP;EACC,cAAA;EACA,yBAAA;;AAIF,eAnBJ,YAkBE,YAAY;AAEV,eApBJ,YAkBE,YAAY,IAET;AACD,eArBJ,YAkBE,YAAY,IAGT;EACC,cAAA;EACA,6BAAA;;AAxCR,eA6CE;EACE,qBAAA;;AACA,eAFF,eAEG;AACD,eAHF,eAGG;EACC,yBAAA;;AAjDN,eA6CE,eAME;EACE,yBAAA;;AApDN,eAwDE;AAxDF,eAyDE;EACE,qBAAA;;AAOE,eAHJ,YAEE,QAAQ;AAEN,eAJJ,YAEE,QAAQ,IAEL;AACD,eALJ,YAEE,QAAQ,IAGL;EACC,yBAAA;EACA,cAAA;;AAiCN,QA7BiD;EA6BjD,eAxCA,YAaI,MAAM,eACJ,KAAK;IACH,cAAA;;EACA,eAhBR,YAaI,MAAM,eACJ,KAAK,IAEF;EACD,eAjBR,YAaI,MAAM,eACJ,KAAK,IAGF;IACC,cAAA;IACA,6BAAA;;EAIF,eAvBR,YAaI,MAAM,eASJ,UAAU;EAER,eAxBR,YAaI,MAAM,eASJ,UAAU,IAEP;EACD,eAzBR,YAaI,MAAM,eASJ,UAAU,IAGP;IACC,cAAA;IACA,yBAAA;;EAIF,eA/BR,YAaI,MAAM,eAiBJ,YAAY;EAEV,eAhCR,YAaI,MAAM,eAiBJ,YAAY,IAET;EACD,eAjCR,YAaI,MAAM,eAiBJ,YAAY,IAGT;IACC,cAAA;IACA,6BAAA;;;AAjGZ,eA6GE;EACE,cAAA;;AACA,eAFF,aAEG;EACC,cAAA
;;AAQN;EACE,yBAAA;EACA,qBAAA;;AAFF,eAIE;EACE,cAAA;;AACA,eAFF,cAEG;AACD,eAHF,cAGG;EACC,cAAA;EACA,6BAAA;;AATN,eAaE;EACE,cAAA;;AAdJ,eAiBE,YACE,KAAK;EACH,cAAA;;AAEA,eAJJ,YACE,KAAK,IAGF;AACD,eALJ,YACE,KAAK,IAIF;EACC,cAAA;EACA,6BAAA;;AAIF,eAXJ,YAUE,UAAU;AAER,eAZJ,YAUE,UAAU,IAEP;AACD,eAbJ,YAUE,UAAU,IAGP;EACC,cAAA;EACA,yBAAA;;AAIF,eAnBJ,YAkBE,YAAY;AAEV,eApBJ,YAkBE,YAAY,IAET;AACD,eArBJ,YAkBE,YAAY,IAGT;EACC,cAAA;EACA,6BAAA;;AAxCR,eA8CE;EACE,qBAAA;;AACA,eAFF,eAEG;AACD,eAHF,eAGG;EACC,yBAAA;;AAlDN,eA8CE,eAME;EACE,yBAAA;;AArDN,eAyDE;AAzDF,eA0DE;EACE,qBAAA;;AAME,eAFJ,YACE,QAAQ;AAEN,eAHJ,YACE,QAAQ,IAEL;AACD,eAJJ,YACE,QAAQ,IAGL;EACC,yBAAA;EACA,cAAA;;AAuCN,QAnCiD;EAmCjD,eA7CA,YAYI,MAAM,eACJ;IACE,qBAAA;;EA+BR,eA7CA,YAYI,MAAM,eAIJ;IACE,yBAAA;;EA4BR,eA7CA,YAYI,MAAM,eAOJ,KAAK;IACH,cAAA;;EACA,eArBR,YAYI,MAAM,eAOJ,KAAK,IAEF;EACD,eAtBR,YAYI,MAAM,eAOJ,KAAK,IAGF;IACC,cAAA;IACA,6BAAA;;EAIF,eA5BR,YAYI,MAAM,eAeJ,UAAU;EAER,eA7BR,YAYI,MAAM,eAeJ,UAAU,IAEP;EACD,eA9BR,YAYI,MAAM,eAeJ,UAAU,IAGP;IACC,cAAA;IACA,yBAAA;;EAIF,eApCR,YAYI,MAAM,eAuBJ,YAAY;EAEV,eArCR,YAYI,MAAM,eAuBJ,YAAY,IAET;EACD,eAtCR,YAYI,MAAM,eAuBJ,YAAY,IAGT;IACC,cAAA;IACA,6BAAA;;;AAvGZ,eA8GE;EACE,cAAA;;AACA,eAFF,aAEG;EACC,cAAA;;AE9lBN;EACE,iBAAA;EACA,mBAAA;EACA,gBAAA;EACA,yBAAA;EACA,kBAAA;;AALF,WAOE;EACE,qBAAA;;AARJ,WAOE,KAGE,KAAI;EACF,SAAS,QAAT;EACA,cAAA;EACA,cAAA;;AAbN,WAiBE;EACE,cAAA;;ACpBJ;EACE,qBAAA;EACA,eAAA;EACA,cAAA;EACA,kBAAA;;AAJF,WAME;EACE,eAAA;;AAPJ,WAME,KAEE;AARJ,WAME,KAGE;EACE,kBAAA;EACA,WAAA;EACA,iBAAA;EACA,wBAAA;EACA,qBAAA;EACA,cAAA;EACA,yBAAA;EACA,yBAAA;EACA,iBAAA;;AAEF,WAdF,KAcG,YACC;AADF,WAdF,KAcG,YAEC;EACE,cAAA;EjBsFN,8BAAA;EACG,2BAAA;;AiBnFD,WArBF,KAqBG,WACC;AADF,WArBF,KAqBG,WAEC;EjBwEJ,+BAAA;EACG,4BAAA;;AiBjED,WAFF,KAAK,IAEF;AAAD,WADF,KAAK,OACF;AACD,WAHF,KAAK,IAGF;AAAD,WAFF,KAAK,OAEF;EACC,cAAA;EACA,yBAAA;EACA,qBAAA;;AAMF,WAFF,UAAU;AAER,WADF,UAAU;AAER,WAHF,UAAU,IAGP;AAAD,WAFF,UAAU,OAEP;AACD,WAJF,UAAU,IAIP;AAAD,WAHF,UAAU,OAGP;EACC,UAAA;EACA,cAAA;EACA,yBAAA;EACA,qBAAA;EACA,eAAA;;AAtDN,WA0DE,YACE;AA3DJ,WA0DE,YAEE,OAAM;AA5
DV,WA0DE,YAGE,OAAM;AA7DV,WA0DE,YAIE;AA9DJ,WA0DE,YAKE,IAAG;AA/DP,WA0DE,YAME,IAAG;EACD,cAAA;EACA,yBAAA;EACA,qBAAA;EACA,mBAAA;;AASN,cjBsdE,KACE;AiBvdJ,cjBsdE,KAEE;EACE,kBAAA;EACA,eAAA;;AAEF,cANF,KAMG,YACC;AADF,cANF,KAMG,YAEC;EA9bJ,8BAAA;EACG,2BAAA;;AAicD,cAZF,KAYG,WACC;AADF,cAZF,KAYG,WAEC;EA5cJ,+BAAA;EACG,4BAAA;;AiBpBL,cjBidE,KACE;AiBldJ,cjBidE,KAEE;EACE,iBAAA;EACA,eAAA;;AAEF,cANF,KAMG,YACC;AADF,cANF,KAMG,YAEC;EA9bJ,8BAAA;EACG,2BAAA;;AAicD,cAZF,KAYG,WACC;AADF,cAZF,KAYG,WAEC;EA5cJ,+BAAA;EACG,4BAAA;;AkBpGL;EACE,eAAA;EACA,cAAA;EACA,gBAAA;EACA,kBAAA;;AAJF,MAME;EACE,eAAA;;AAPJ,MAME,GAEE;AARJ,MAME,GAGE;EACE,qBAAA;EACA,iBAAA;EACA,yBAAA;EACA,yBAAA;EACA,mBAAA;;AAdN,MAME,GAWE,IAAG;AAjBP,MAME,GAYE,IAAG;EACD,qBAAA;EACA,yBAAA;;AApBN,MAwBE,MACE;AAzBJ,MAwBE,MAEE;EACE,YAAA;;AA3BN,MA+BE,UACE;AAhCJ,MA+BE,UAEE;EACE,WAAA;;AAlCN,MAsCE,UACE;AAvCJ,MAsCE,UAEE,IAAG;AAxCP,MAsCE,UAGE,IAAG;AAzCP,MAsCE,UAIE;EACE,cAAA;EACA,yBAAA;EACA,mBAAA;;AC9CN;EACE,eAAA;EACA,uBAAA;EACA,cAAA;EACA,iBAAA;EACA,cAAA;EACA,cAAA;EACA,kBAAA;EACA,mBAAA;EACA,wBAAA;EACA,oBAAA;;AAIE,MADD,MACE;AACD,MAFD,MAEE;EACC,cAAA;EACA,qBAAA;EACA,eAAA;;AAKJ,MAAC;EACC,aAAA;;AAIF,IAAK;EACH,kBAAA;EACA,SAAA;;AAOJ;EnBqhBE,yBAAA;;AAEE,cADD,MACE;AACD,cAFD,MAEE;EACC,yBAAA;;AmBrhBN;EnBihBE,yBAAA;;AAEE,cADD,MACE;AACD,cAFD,MAEE;EACC,yBAAA;;AmBjhBN;EnB6gBE,yBAAA;;AAEE,cADD,MACE;AACD,cAFD,MAEE;EACC,yBAAA;;AmB7gBN;EnBygBE,yBAAA;;AAEE,WADD,MACE;AACD,WAFD,MAEE;EACC,yBAAA;;AmBzgBN;EnBqgBE,yBAAA;;AAEE,cADD,MACE;AACD,cAFD,MAEE;EACC,yBAAA;;AmBrgBN;EnBigBE,yBAAA;;AAEE,aADD,MACE;AACD,aAFD,MAEE;EACC,yBAAA;;AoB5jBN;EACE,qBAAA;EACA,eAAA;EACA,gBAAA;EACA,eAAA;EACA,iBAAA;EACA,cAAA;EACA,cAAA;EACA,wBAAA;EACA,mBAAA;EACA,kBAAA;EACA,yBAAA;EACA,mBAAA;;AAGA,MAAC;EACC,aAAA;;AAIF,IAAK;EACH,kBAAA;EACA,SAAA;;AAEF,OAAQ;EACN,MAAA;EACA,gBAAA;;AAMF,CADD,MACE;AACD,CAFD,MAEE;EACC,cAAA;EACA,qBAAA;EACA,eAAA;;AAKJ,CAAC,gBAAgB,OAAQ;AACzB,UAAW,UAAU,IAAI;EACvB,cAAA;EACA,yBAAA;;AAEF,UAAW,KAAK,IAAI;EAClB,gBAAA;;AChDF;EACE,aAAA;EACA,mBAAA;EACA,cAAA;EACA,yBAAA;;AAJF,UAME;AANF,UAOE;EACE,cAAA;;
AARJ,UAUE;EACE,mBAAA;EACA,eAAA;EACA,gBAAA;;AAGF,UAAW;EACT,kBAAA;;AAjBJ,UAoBE;EACE,eAAA;;AAiBJ,mBAdgD;EAchD;IAbI,iBAAA;IACA,oBAAA;;EAEA,UAAW;IACT,kBAAA;IACA,mBAAA;;EAQN,UALI;EAKJ,UAJI;IACE,eAAA;;;AClCN;EACE,cAAA;EACA,YAAA;EACA,mBAAA;EACA,wBAAA;EACA,yBAAA;EACA,yBAAA;EACA,kBAAA;EtBmHA,wCAAA;EACQ,gCAAA;;AsB3HV,UAUE;AAVF,UAWE,EAAE;EtBgXF,cAAA;EACA,eAAA;EACA,YAAA;EsBhXE,iBAAA;EACA,kBAAA;;AAIF,CAAC,UAAC;AACF,CAAC,UAAC;AACF,CAAC,UAAC;EACA,qBAAA;;AArBJ,UAyBE;EACE,YAAA;EACA,cAAA;;ACzBJ;EACE,aAAA;EACA,mBAAA;EACA,6BAAA;EACA,kBAAA;;AAJF,MAOE;EACE,aAAA;EAEA,cAAA;;AAVJ,MAaE;EACE,iBAAA;;AAdJ,MAkBE;AAlBF,MAmBE;EACE,gBAAA;;AApBJ,MAsBE,IAAI;EACF,eAAA;;AAQJ;EACC,mBAAA;;AADD,kBAIE;EACE,kBAAA;EACA,SAAA;EACA,YAAA;EACA,cAAA;;AAQJ;EvBqXE,yBAAA;EACA,qBAAA;EACA,cAAA;;AuBvXF,cvByXE;EACE,yBAAA;;AuB1XJ,cvB4XE;EACE,cAAA;;AuB1XJ;EvBkXE,yBAAA;EACA,qBAAA;EACA,cAAA;;AuBpXF,WvBsXE;EACE,yBAAA;;AuBvXJ,WvByXE;EACE,cAAA;;AuBvXJ;EvB+WE,yBAAA;EACA,qBAAA;EACA,cAAA;;AuBjXF,cvBmXE;EACE,yBAAA;;AuBpXJ,cvBsXE;EACE,cAAA;;AuBpXJ;EvB4WE,yBAAA;EACA,qBAAA;EACA,cAAA;;AuB9WF,avBgXE;EACE,yBAAA;;AuBjXJ,avBmXE;EACE,cAAA;;AwB3aJ;EACE;IAAQ,2BAAA;;EACR;IAAQ,wBAAA;;;AAIV;EACE;IAAQ,2BAAA;;EACR;IAAQ,wBAAA;;;AASV;EACE,gBAAA;EACA,YAAA;EACA,mBAAA;EACA,yBAAA;EACA,kBAAA;ExB2FA,sDAAA;EACQ,8CAAA;;AwBvFV;EACE,WAAA;EACA,SAAA;EACA,YAAA;EACA,eAAA;EACA,iBAAA;EACA,cAAA;EACA,kBAAA;EACA,yBAAA;ExB8EA,sDAAA;EACQ,8CAAA;EAKR,mCAAA;EACQ,2BAAA;;AwB/EV,iBAAkB;ExBuSd,kBAAkB,2LAAlB;EACA,kBAAkB,mLAAlB;EwBtSF,0BAAA;;AAIF,SAAS,OAAQ;ExBqJf,0DAAA;EACQ,kDAAA;;AwB7IV;ExBoiBE,yBAAA;;AACA,iBAAkB;EA7QhB,kBAAkB,2LAAlB;EACA,kBAAkB,mLAAlB;;AwBrRJ;ExBgiBE,yBAAA;;AACA,iBAAkB;EA7QhB,kBAAkB,2LAAlB;EACA,kBAAkB,mLAAlB;;AwBjRJ;ExB4hBE,yBAAA;;AACA,iBAAkB;EA7QhB,kBAAkB,2LAAlB;EACA,kBAAkB,mLAAlB;;AwB7QJ;ExBwhBE,yBAAA;;AACA,iBAAkB;EA7QhB,kBAAkB,2LAAlB;EACA,kBAAkB,mLAAlB;;AyBjVJ;AACA;EACE,gBAAA;EACA,OAAA;;AAIF;AACA,MAAO;EACL,gBAAA;;AAEF,MAAM;EACJ,aAAA;;AAIF;EACE,cAAA;;AAIF;EACE,eAAA;;AAOF,MACE;EACE,kBAAA;;AAFJ,MAIE;EACE,iBAAA;;AASJ;EACE,eAAA;EACA,gBAAA;;AC7CF;EAEE,mBAAA;EACA,e
AAA;;AAQF;EACE,kBAAA;EACA,cAAA;EACA,kBAAA;EAEA,mBAAA;EACA,yBAAA;EACA,yBAAA;;AAGA,gBAAC;E1BsED,4BAAA;EACC,2BAAA;;A0BpED,gBAAC;EACC,gBAAA;E1B0EF,+BAAA;EACC,8BAAA;;A0BzFH,gBAmBE;EACE,YAAA;;AApBJ,gBAsBE,SAAS;EACP,iBAAA;;AAUJ,CAAC;EACC,cAAA;;AADF,CAAC,gBAGC;EACE,cAAA;;AAIF,CARD,gBAQE;AACD,CATD,gBASE;EACC,qBAAA;EACA,yBAAA;;AAIF,CAfD,gBAeE;AACD,CAhBD,gBAgBE,OAAO;AACR,CAjBD,gBAiBE,OAAO;EACN,UAAA;EACA,cAAA;EACA,yBAAA;EACA,qBAAA;;AANF,CAfD,gBAeE,OASC;AARF,CAhBD,gBAgBE,OAAO,MAQN;AAPF,CAjBD,gBAiBE,OAAO,MAON;EACE,cAAA;;AAVJ,CAfD,gBAeE,OAYC;AAXF,CAhBD,gBAgBE,OAAO,MAWN;AAVF,CAjBD,gBAiBE,OAAO,MAUN;EACE,cAAA;;A1BsYJ,iBAAiB;EACf,cAAA;EACA,yBAAA;;AAEA,CAAC,iBAJc;EAKb,cAAA;;AADF,CAAC,iBAJc,OAOb;EAA2B,cAAA;;AAE3B,CALD,iBAJc,OASZ;AACD,CAND,iBAJc,OAUZ;EACC,cAAA;EACA,yBAAA;;AAEF,CAVD,iBAJc,OAcZ;AACD,CAXD,iBAJc,OAeZ,OAAO;AACR,CAZD,iBAJc,OAgBZ,OAAO;EACN,WAAA;EACA,yBAAA;EACA,qBAAA;;AAnBN,iBAAiB;EACf,cAAA;EACA,yBAAA;;AAEA,CAAC,iBAJc;EAKb,cAAA;;AADF,CAAC,iBAJc,IAOb;EAA2B,cAAA;;AAE3B,CALD,iBAJc,IASZ;AACD,CAND,iBAJc,IAUZ;EACC,cAAA;EACA,yBAAA;;AAEF,CAVD,iBAJc,IAcZ;AACD,CAXD,iBAJc,IAeZ,OAAO;AACR,CAZD,iBAJc,IAgBZ,OAAO;EACN,WAAA;EACA,yBAAA;EACA,qBAAA;;AAnBN,iBAAiB;EACf,cAAA;EACA,yBAAA;;AAEA,CAAC,iBAJc;EAKb,cAAA;;AADF,CAAC,iBAJc,OAOb;EAA2B,cAAA;;AAE3B,CALD,iBAJc,OASZ;AACD,CAND,iBAJc,OAUZ;EACC,cAAA;EACA,yBAAA;;AAEF,CAVD,iBAJc,OAcZ;AACD,CAXD,iBAJc,OAeZ,OAAO;AACR,CAZD,iBAJc,OAgBZ,OAAO;EACN,WAAA;EACA,yBAAA;EACA,qBAAA;;AAnBN,iBAAiB;EACf,cAAA;EACA,yBAAA;;AAEA,CAAC,iBAJc;EAKb,cAAA;;AADF,CAAC,iBAJc,MAOb;EAA2B,cAAA;;AAE3B,CALD,iBAJc,MASZ;AACD,CAND,iBAJc,MAUZ;EACC,cAAA;EACA,yBAAA;;AAEF,CAVD,iBAJc,MAcZ;AACD,CAXD,iBAJc,MAeZ,OAAO;AACR,CAZD,iBAJc,MAgBZ,OAAO;EACN,WAAA;EACA,yBAAA;EACA,qBAAA;;A0BpYR;EACE,aAAA;EACA,kBAAA;;AAEF;EACE,gBAAA;EACA,gBAAA;;ACtGF;EACE,mBAAA;EACA,yBAAA;EACA,6BAAA;EACA,kBAAA;E3BgHA,iDAAA;EACQ,yCAAA;;A2B5GV;EACE,aAAA;;AAUF,MACE;EACE,gBAAA;;AAFJ,MACE,cAEE;EACE,mBAAA;EACA,gBAAA;;AACA,MALJ,cAEE,iBAGG;EACC,aAAA;;AAEF,MARJ,cAEE,iBAMG;EACC,gBAAA;;AAIJ,MAbF,cAaG,YACC,iBAAgB;E3B2DpB,4BAAA;EACC,2BAAA;;A2B
vDC,MAnBF,cAmBG,WACC,iBAAgB;E3B6DpB,+BAAA;EACC,8BAAA;;A2BvDH,cAAe,cACb,iBAAgB;EACd,mBAAA;;AAUJ,MACE;AADF,MAEE,oBAAoB;EAClB,gBAAA;;AAHJ,MAME,SAAQ,YAEN,QAAO,YAEL,KAAI,YACF,GAAE;AAXV,MAOE,oBAAmB,YAAa,SAAQ,YACtC,QAAO,YAEL,KAAI,YACF,GAAE;AAXV,MAME,SAAQ,YAGN,QAAO,YACL,KAAI,YACF,GAAE;AAXV,MAOE,oBAAmB,YAAa,SAAQ,YAEtC,QAAO,YACL,KAAI,YACF,GAAE;AAXV,MAME,SAAQ,YAEN,QAAO,YAEL,KAAI,YAEF,GAAE;AAZV,MAOE,oBAAmB,YAAa,SAAQ,YACtC,QAAO,YAEL,KAAI,YAEF,GAAE;AAZV,MAME,SAAQ,YAGN,QAAO,YACL,KAAI,YAEF,GAAE;AAZV,MAOE,oBAAmB,YAAa,SAAQ,YAEtC,QAAO,YACL,KAAI,YAEF,GAAE;EACA,2BAAA;;AAbV,MAME,SAAQ,YAEN,QAAO,YAEL,KAAI,YAKF,GAAE;AAfV,MAOE,oBAAmB,YAAa,SAAQ,YACtC,QAAO,YAEL,KAAI,YAKF,GAAE;AAfV,MAME,SAAQ,YAGN,QAAO,YACL,KAAI,YAKF,GAAE;AAfV,MAOE,oBAAmB,YAAa,SAAQ,YAEtC,QAAO,YACL,KAAI,YAKF,GAAE;AAfV,MAME,SAAQ,YAEN,QAAO,YAEL,KAAI,YAMF,GAAE;AAhBV,MAOE,oBAAmB,YAAa,SAAQ,YACtC,QAAO,YAEL,KAAI,YAMF,GAAE;AAhBV,MAME,SAAQ,YAGN,QAAO,YACL,KAAI,YAMF,GAAE;AAhBV,MAOE,oBAAmB,YAAa,SAAQ,YAEtC,QAAO,YACL,KAAI,YAMF,GAAE;EACA,4BAAA;;AAjBV,MAuBE,SAAQ,WAEN,QAAO,WAEL,KAAI,WACF,GAAE;AA5BV,MAwBE,oBAAmB,WAAY,SAAQ,WACrC,QAAO,WAEL,KAAI,WACF,GAAE;AA5BV,MAuBE,SAAQ,WAGN,QAAO,WACL,KAAI,WACF,GAAE;AA5BV,MAwBE,oBAAmB,WAAY,SAAQ,WAErC,QAAO,WACL,KAAI,WACF,GAAE;AA5BV,MAuBE,SAAQ,WAEN,QAAO,WAEL,KAAI,WAEF,GAAE;AA7BV,MAwBE,oBAAmB,WAAY,SAAQ,WACrC,QAAO,WAEL,KAAI,WAEF,GAAE;AA7BV,MAuBE,SAAQ,WAGN,QAAO,WACL,KAAI,WAEF,GAAE;AA7BV,MAwBE,oBAAmB,WAAY,SAAQ,WAErC,QAAO,WACL,KAAI,WAEF,GAAE;EACA,8BAAA;;AA9BV,MAuBE,SAAQ,WAEN,QAAO,WAEL,KAAI,WAKF,GAAE;AAhCV,MAwBE,oBAAmB,WAAY,SAAQ,WACrC,QAAO,WAEL,KAAI,WAKF,GAAE;AAhCV,MAuBE,SAAQ,WAGN,QAAO,WACL,KAAI,WAKF,GAAE;AAhCV,MAwBE,oBAAmB,WAAY,SAAQ,WAErC,QAAO,WACL,KAAI,WAKF,GAAE;AAhCV,MAuBE,SAAQ,WAEN,QAAO,WAEL,KAAI,WAMF,GAAE;AAjCV,MAwBE,oBAAmB,WAAY,SAAQ,WACrC,QAAO,WAEL,KAAI,WAMF,GAAE;AAjCV,MAuBE,SAAQ,WAGN,QAAO,WACL,KAAI,WAMF,GAAE;AAjCV,MAwBE,oBAAmB,WAAY,SAAQ,WAErC,QAAO,WACL,KAAI,WAMF,GAAE;EACA,+BAAA;;AAlCV,MAuCE,cAAc;AAvChB,MAwCE,cAAc;EACZ,6BAAA;;AAzCJ,MA2CE,SAAS,QAAO,YAAa,KAAI,YAAa;AA3ChD,MA4CE,SAAS,QAAO,YAAa,KAAI,YAAa;EAC5C,aAAA;;AA7CJ
,MA+CE;AA/CF,MAgDE,oBAAoB;EAClB,SAAA;;AAjDJ,MA+CE,kBAGE,QAGE,KACE,KAAI;AAtDZ,MAgDE,oBAAoB,kBAElB,QAGE,KACE,KAAI;AAtDZ,MA+CE,kBAIE,QAEE,KACE,KAAI;AAtDZ,MAgDE,oBAAoB,kBAGlB,QAEE,KACE,KAAI;AAtDZ,MA+CE,kBAKE,QACE,KACE,KAAI;AAtDZ,MAgDE,oBAAoB,kBAIlB,QACE,KACE,KAAI;AAtDZ,MA+CE,kBAGE,QAGE,KAEE,KAAI;AAvDZ,MAgDE,oBAAoB,kBAElB,QAGE,KAEE,KAAI;AAvDZ,MA+CE,kBAIE,QAEE,KAEE,KAAI;AAvDZ,MAgDE,oBAAoB,kBAGlB,QAEE,KAEE,KAAI;AAvDZ,MA+CE,kBAKE,QACE,KAEE,KAAI;AAvDZ,MAgDE,oBAAoB,kBAIlB,QACE,KAEE,KAAI;EACF,cAAA;;AAxDV,MA+CE,kBAGE,QAGE,KAKE,KAAI;AA1DZ,MAgDE,oBAAoB,kBAElB,QAGE,KAKE,KAAI;AA1DZ,MA+CE,kBAIE,QAEE,KAKE,KAAI;AA1DZ,MAgDE,oBAAoB,kBAGlB,QAEE,KAKE,KAAI;AA1DZ,MA+CE,kBAKE,QACE,KAKE,KAAI;AA1DZ,MAgDE,oBAAoB,kBAIlB,QACE,KAKE,KAAI;AA1DZ,MA+CE,kBAGE,QAGE,KAME,KAAI;AA3DZ,MAgDE,oBAAoB,kBAElB,QAGE,KAME,KAAI;AA3DZ,MA+CE,kBAIE,QAEE,KAME,KAAI;AA3DZ,MAgDE,oBAAoB,kBAGlB,QAEE,KAME,KAAI;AA3DZ,MA+CE,kBAKE,QACE,KAME,KAAI;AA3DZ,MAgDE,oBAAoB,kBAIlB,QACE,KAME,KAAI;EACF,eAAA;;AAEF,MAfN,kBAGE,QAGE,KASG,YAAa;AAAd,MAdN,oBAAoB,kBAElB,QAGE,KASG,YAAa;AAAd,MAfN,kBAIE,QAEE,KASG,YAAa;AAAd,MAdN,oBAAoB,kBAGlB,QAEE,KASG,YAAa;AAAd,MAfN,kBAKE,QACE,KASG,YAAa;AAAd,MAdN,oBAAoB,kBAIlB,QACE,KASG,YAAa;AACd,MAhBN,kBAGE,QAGE,KAUG,YAAa;AAAd,MAfN,oBAAoB,kBAElB,QAGE,KAUG,YAAa;AAAd,MAhBN,kBAIE,QAEE,KAUG,YAAa;AAAd,MAfN,oBAAoB,kBAGlB,QAEE,KAUG,YAAa;AAAd,MAhBN,kBAKE,QACE,KAUG,YAAa;AAAd,MAfN,oBAAoB,kBAIlB,QACE,KAUG,YAAa;EACZ,aAAA;;AAEF,MAnBN,kBAGE,QAGE,KAaG,WAAY;AAAb,MAlBN,oBAAoB,kBAElB,QAGE,KAaG,WAAY;AAAb,MAnBN,kBAIE,QAEE,KAaG,WAAY;AAAb,MAlBN,oBAAoB,kBAGlB,QAEE,KAaG,WAAY;AAAb,MAnBN,kBAKE,QACE,KAaG,WAAY;AAAb,MAlBN,oBAAoB,kBAIlB,QACE,KAaG,WAAY;AACb,MApBN,kBAGE,QAGE,KAcG,WAAY;AAAb,MAnBN,oBAAoB,kBAElB,QAGE,KAcG,WAAY;AAAb,MApBN,kBAIE,QAEE,KAcG,WAAY;AAAb,MAnBN,oBAAoB,kBAGlB,QAEE,KAcG,WAAY;AAAb,MApBN,kBAKE,QACE,KAcG,WAAY;AAAb,MAnBN,oBAAoB,kBAIlB,QACE,KAcG,WAAY;EACX,gBAAA;;AApEV,MAyEE;EACE,SAAA;EACA,gBAAA;;AAMJ;EACE,kBAAA;EACA,oCAAA;E3BjDA,4BAAA;EACC,2BAAA;;A2B8CH,cAKE,YAAY;EACV,cAAA;;AAKJ;EACE,aAAA;EACA,gBAAA;EACA,eAAA;EACA,cAAA;;AAJF,YAME;EAC
E,cAAA;;AAKJ;EACE,kBAAA;EACA,yBAAA;EACA,6BAAA;E3BjEA,+BAAA;EACC,8BAAA;;A2B0EH;EACE,mBAAA;;AADF,YAIE;EACE,gBAAA;EACA,kBAAA;EACA,gBAAA;;AAPJ,YAIE,OAIE;EACE,eAAA;;AATN,YAaE;EACE,gBAAA;;AAdJ,YAaE,eAEE,kBAAkB;EAChB,6BAAA;;AAhBN,YAmBE;EACE,aAAA;;AApBJ,YAmBE,cAEE,kBAAkB;EAChB,gCAAA;;AAON;E3BmME,qBAAA;;AAEA,cAAE;EACA,cAAA;EACA,yBAAA;EACA,qBAAA;;AAHF,cAAE,iBAKA,kBAAkB;EAChB,yBAAA;;AAGJ,cAAE,gBACA,kBAAkB;EAChB,4BAAA;;A2B7MN;E3BgME,qBAAA;;AAEA,cAAE;EACA,cAAA;EACA,yBAAA;EACA,qBAAA;;AAHF,cAAE,iBAKA,kBAAkB;EAChB,yBAAA;;AAGJ,cAAE,gBACA,kBAAkB;EAChB,4BAAA;;A2B1MN;E3B6LE,qBAAA;;AAEA,cAAE;EACA,cAAA;EACA,yBAAA;EACA,qBAAA;;AAHF,cAAE,iBAKA,kBAAkB;EAChB,yBAAA;;AAGJ,cAAE,gBACA,kBAAkB;EAChB,4BAAA;;A2BvMN;E3B0LE,qBAAA;;AAEA,WAAE;EACA,cAAA;EACA,yBAAA;EACA,qBAAA;;AAHF,WAAE,iBAKA,kBAAkB;EAChB,yBAAA;;AAGJ,WAAE,gBACA,kBAAkB;EAChB,4BAAA;;A2BpMN;E3BuLE,qBAAA;;AAEA,cAAE;EACA,cAAA;EACA,yBAAA;EACA,qBAAA;;AAHF,cAAE,iBAKA,kBAAkB;EAChB,yBAAA;;AAGJ,cAAE,gBACA,kBAAkB;EAChB,4BAAA;;A2BjMN;E3BoLE,qBAAA;;AAEA,aAAE;EACA,cAAA;EACA,yBAAA;EACA,qBAAA;;AAHF,aAAE,iBAKA,kBAAkB;EAChB,yBAAA;;AAGJ,aAAE,gBACA,kBAAkB;EAChB,4BAAA;;A4B9ZN;EACE,gBAAA;EACA,aAAA;EACA,mBAAA;EACA,yBAAA;EACA,yBAAA;EACA,kBAAA;E5B8GA,uDAAA;EACQ,+CAAA;;A4BrHV,KAQE;EACE,kBAAA;EACA,iCAAA;;AAKJ;EACE,aAAA;EACA,kBAAA;;AAEF;EACE,YAAA;EACA,kBAAA;;ACtBF;EACE,YAAA;EACA,eAAA;EACA,iBAAA;EACA,cAAA;EACA,cAAA;EACA,4BAAA;E7BoRA,YAAA;EAGA,yBAAA;;A6BpRA,MAAC;AACD,MAAC;EACC,cAAA;EACA,qBAAA;EACA,eAAA;E7B6QF,YAAA;EAGA,yBAAA;;A6BzQA,MAAM;EACJ,UAAA;EACA,eAAA;EACA,uBAAA;EACA,SAAA;EACA,wBAAA;;ACpBJ;EACE,gBAAA;;AAIF;EACE,aAAA;EACA,cAAA;EACA,kBAAA;EACA,eAAA;EACA,MAAA;EACA,QAAA;EACA,SAAA;EACA,OAAA;EACA,aAAA;EACA,iCAAA;EAIA,UAAA;;AAGA,MAAC,KAAM;E9BkIP,mBAAmB,kBAAnB;EACI,eAAe,kBAAf;EACI,WAAW,kBAAX;EApBR,mDAAA;EACG,6CAAA;EACE,yCAAA;EACG,mCAAA;;A8B/GR,MAAC,GAAI;E9B8HL,mBAAmB,eAAnB;EACI,eAAe,eAAf;EACI,WAAW,eAAX;;A8B5HV;EACE,kBAAA;EACA,WAAA;EACA,YAAA;;AAIF;EACE,kBAAA;EACA,yBAAA;EACA,yBAAA;EACA,oCAAA;EACA,kBAAA;E9BsEA,gDAAA;EACQ,wCAAA;E8BrER,4BAAA;EAEA,aAAA;;AAIF;EACE,eAAA;EACA,MAAA
;EACA,QAAA;EACA,SAAA;EACA,OAAA;EACA,aAAA;EACA,yBAAA;;AAEA,eAAC;E9B0ND,UAAA;EAGA,wBAAA;;A8B5NA,eAAC;E9ByND,YAAA;EAGA,yBAAA;;A8BvNF;EACE,aAAA;EACA,gCAAA;EACA,0BAAA;;AAGF,aAAc;EACZ,gBAAA;;AAIF;EACE,SAAA;EACA,wBAAA;;AAKF;EACE,kBAAA;EACA,aAAA;;AAIF;EACE,gBAAA;EACA,uBAAA;EACA,iBAAA;EACA,6BAAA;;AAJF,aAQE,KAAK;EACH,gBAAA;EACA,gBAAA;;AAVJ,aAaE,WAAW,KAAK;EACd,iBAAA;;AAdJ,aAiBE,WAAW;EACT,cAAA;;AAqBJ,QAhBmC;EAGjC;IACE,YAAA;IACA,iBAAA;;EAEF;I9BPA,iDAAA;IACQ,yCAAA;;E8BWR;IAAY,YAAA;;EACZ;IAAY,YAAA;;;ACjId;EACE,kBAAA;EACA,aAAA;EACA,cAAA;EACA,mBAAA;EACA,eAAA;EACA,gBAAA;E/BmRA,UAAA;EAGA,wBAAA;;A+BnRA,QAAC;E/BgRD,YAAA;EAGA,yBAAA;;A+BlRA,QAAC;EAAU,gBAAA;EAAmB,cAAA;;AAC9B,QAAC;EAAU,gBAAA;EAAmB,cAAA;;AAC9B,QAAC;EAAU,eAAA;EAAmB,cAAA;;AAC9B,QAAC;EAAU,iBAAA;EAAmB,cAAA;;AAIhC;EACE,gBAAA;EACA,gBAAA;EACA,cAAA;EACA,kBAAA;EACA,qBAAA;EACA,yBAAA;EACA,kBAAA;;AAIF;EACE,kBAAA;EACA,QAAA;EACA,SAAA;EACA,yBAAA;EACA,mBAAA;;AAGA,QAAC,IAAK;EACJ,SAAA;EACA,SAAA;EACA,iBAAA;EACA,uBAAA;EACA,yBAAA;;AAEF,QAAC,SAAU;EACT,SAAA;EACA,SAAA;EACA,uBAAA;EACA,yBAAA;;AAEF,QAAC,UAAW;EACV,SAAA;EACA,UAAA;EACA,uBAAA;EACA,yBAAA;;AAEF,QAAC,MAAO;EACN,QAAA;EACA,OAAA;EACA,gBAAA;EACA,2BAAA;EACA,2BAAA;;AAEF,QAAC,KAAM;EACL,QAAA;EACA,QAAA;EACA,gBAAA;EACA,2BAAA;EACA,0BAAA;;AAEF,QAAC,OAAQ;EACP,MAAA;EACA,SAAA;EACA,iBAAA;EACA,uBAAA;EACA,4BAAA;;AAEF,QAAC,YAAa;EACZ,MAAA;EACA,SAAA;EACA,uBAAA;EACA,4BAAA;;AAEF,QAAC,aAAc;EACb,MAAA;EACA,UAAA;EACA,uBAAA;EACA,4BAAA;;ACvFJ;EACE,kBAAA;EACA,MAAA;EACA,OAAA;EACA,aAAA;EACA,aAAA;EACA,gBAAA;EACA,YAAA;EACA,gBAAA;EACA,yBAAA;EACA,4BAAA;EACA,yBAAA;EACA,oCAAA;EACA,kBAAA;EhCwGA,iDAAA;EACQ,yCAAA;EgCrGR,mBAAA;;AAGA,QAAC;EAAW,iBAAA;;AACZ,QAAC;EAAW,iBAAA;;AACZ,QAAC;EAAW,gBAAA;;AACZ,QAAC;EAAW,kBAAA;;AAGd;EACE,SAAA;EACA,iBAAA;EACA,eAAA;EACA,mBAAA;EACA,iBAAA;EACA,yBAAA;EACA,gCAAA;EACA,0BAAA;;AAGF;EACE,iBAAA;;AAQA,QADO;AAEP,QAFO,OAEN;EACC,kBAAA;EACA,cAAA;EACA,QAAA;EACA,SAAA;EACA,yBAAA;EACA,mBAAA;;AAGJ,QAAS;EACP,kBAAA;;AAEF,QAAS,OAAM;EACb,kBAAA;EACA,SAAS,EAAT;;AAIA,QAAC,IAAK;EACJ,SAAA;EACA,kBAAA;EACA,sBAAA;EACA,yBAAA;EACA,
qCAAA;EACA,aAAA;;AACA,QAPD,IAAK,OAOH;EACC,SAAS,GAAT;EACA,WAAA;EACA,kBAAA;EACA,sBAAA;EACA,yBAAA;;AAGJ,QAAC,MAAO;EACN,QAAA;EACA,WAAA;EACA,iBAAA;EACA,oBAAA;EACA,2BAAA;EACA,uCAAA;;AACA,QAPD,MAAO,OAOL;EACC,SAAS,GAAT;EACA,SAAA;EACA,aAAA;EACA,oBAAA;EACA,2BAAA;;AAGJ,QAAC,OAAQ;EACP,SAAA;EACA,kBAAA;EACA,mBAAA;EACA,4BAAA;EACA,wCAAA;EACA,UAAA;;AACA,QAPD,OAAQ,OAON;EACC,SAAS,GAAT;EACA,QAAA;EACA,kBAAA;EACA,mBAAA;EACA,4BAAA;;AAIJ,QAAC,KAAM;EACL,QAAA;EACA,YAAA;EACA,iBAAA;EACA,qBAAA;EACA,0BAAA;EACA,sCAAA;;AACA,QAPD,KAAM,OAOJ;EACC,SAAS,GAAT;EACA,UAAA;EACA,qBAAA;EACA,0BAAA;EACA,aAAA;;AC1HN;EACE,kBAAA;;AAGF;EACE,kBAAA;EACA,gBAAA;EACA,WAAA;;AAHF,eAKE;EACE,aAAA;EACA,kBAAA;EjC+GF,yCAAA;EACQ,iCAAA;;AiCvHV,eAKE,QAME;AAXJ,eAKE,QAOE,IAAI;EjC2WN,cAAA;EACA,eAAA;EACA,YAAA;EiC3WI,cAAA;;AAdN,eAkBE;AAlBF,eAmBE;AAnBF,eAoBE;EAAU,cAAA;;AApBZ,eAsBE;EACE,OAAA;;AAvBJ,eA0BE;AA1BF,eA2BE;EACE,kBAAA;EACA,MAAA;EACA,WAAA;;AA9BJ,eAiCE;EACE,UAAA;;AAlCJ,eAoCE;EACE,WAAA;;AArCJ,eAuCE,QAAO;AAvCT,eAwCE,QAAO;EACL,OAAA;;AAzCJ,eA4CE,UAAS;EACP,WAAA;;AA7CJ,eA+CE,UAAS;EACP,UAAA;;AAQJ;EACE,kBAAA;EACA,MAAA;EACA,OAAA;EACA,SAAA;EACA,UAAA;EjCwNA,YAAA;EAGA,yBAAA;EiCzNA,eAAA;EACA,cAAA;EACA,kBAAA;EACA,yCAAA;;AAKA,iBAAC;EjCgOC,kBAAkB,8BAA8B,mCAAyC,uCAAzF;EACA,kBAAmB,4EAAnB;EACA,2BAAA;EACA,sHAAA;;AiChOF,iBAAC;EACC,UAAA;EACA,QAAA;EjC2NA,kBAAkB,8BAA8B,sCAAyC,oCAAzF;EACA,kBAAmB,4EAAnB;EACA,2BAAA;EACA,sHAAA;;AiCzNF,iBAAC;AACD,iBAAC;EACC,aAAA;EACA,cAAA;EACA,qBAAA;EjCgMF,YAAA;EAGA,yBAAA;;AiChOF,iBAkCE;AAlCF,iBAmCE;AAnCF,iBAoCE;AApCF,iBAqCE;EACE,kBAAA;EACA,QAAA;EACA,UAAA;EACA,qBAAA;;AAzCJ,iBA2CE;AA3CF,iBA4CE;EACE,SAAA;;AA7CJ,iBA+CE;AA/CF,iBAgDE;EACE,UAAA;;AAjDJ,iBAmDE;AAnDF,iBAoDE;EACE,WAAA;EACA,YAAA;EACA,iBAAA;EACA,kBAAA;EACA,kBAAA;;AAIA,iBADF,WACG;EACC,SAAS,OAAT;;AAIF,iBADF,WACG;EACC,SAAS,OAAT;;AAUN;EACE,kBAAA;EACA,YAAA;EACA,SAAA;EACA,WAAA;EACA,UAAA;EACA,iBAAA;EACA,eAAA;EACA,gBAAA;EACA,kBAAA;;AATF,oBAWE;EACE,qBAAA;EACA,WAAA;EACA,YAAA;EACA,WAAA;EACA,mBAAA;EACA,yBAAA;EACA,mBAAA;EACA,eAAA;EAUA,yBAAA;EACA,kCAAA;;AA9BJ,oBAgCE;EACE,SAAA;EACA,WA
AA;EACA,YAAA;EACA,yBAAA;;AAOJ;EACE,kBAAA;EACA,SAAA;EACA,UAAA;EACA,YAAA;EACA,WAAA;EACA,iBAAA;EACA,oBAAA;EACA,cAAA;EACA,kBAAA;EACA,yCAAA;;AACA,iBAAE;EACA,iBAAA;;AAkCJ,mBA5B8C;EAG5C,iBACE;EADF,iBAEE;EAFF,iBAGE;EAHF,iBAIE;IACE,WAAA;IACA,YAAA;IACA,iBAAA;IACA,kBAAA;IACA,eAAA;;EAKJ;IACE,SAAA;IACA,UAAA;IACA,oBAAA;;EAIF;IACE,YAAA;;;AjClNF,SAAC;AACD,SAAC;AIXH,UJUG;AIVH,UJWG;AISH,gBJVG;AIUH,gBJTG;AIkBH,IJnBG;AImBH,IJlBG;AMmWH,gBAoBE,YNxXC;AMoWH,gBAoBE,YNvXC;AWkBH,YXnBG;AWmBH,YXlBG;AW8HH,mBAWE,aX1IC;AW+HH,mBAWE,aXzIC;AaZH,IbWG;AaXH,IbYG;AcVH,OdSG;AcTH,OdUG;AcUH,cdXG;AcWH,cdVG;Ac6BH,gBd9BG;Ac8BH,gBd7BG;AkBfH,MlBcG;AkBdH,MlBeG;A2BLH,W3BIG;A2BJH,W3BKG;A8B+EH,a9BhFG;A8BgFH,a9B/EG;EACC,SAAS,GAAT;EACA,cAAA;;AAEF,SAAC;AIfH,UJeG;AIKH,gBJLG;AIcH,IJdG;AM+VH,gBAoBE,YNnXC;AWcH,YXdG;AW0HH,mBAWE,aXrIC;AahBH,IbgBG;AcdH,OdcG;AcMH,cdNG;AcyBH,gBdzBG;AkBnBH,MlBmBG;A2BTH,W3BSG;A8B2EH,a9B3EG;EACC,WAAA;;AedJ;Ef6BE,cAAA;EACA,iBAAA;EACA,kBAAA;;Ae5BF;EACE,uBAAA;;AAEF;EACE,sBAAA;;AAQF;EACE,wBAAA;;AAEF;EACE,yBAAA;;AAEF;EACE,kBAAA;;AAEF;Ef+CE,WAAA;EACA,kBAAA;EACA,iBAAA;EACA,6BAAA;EACA,SAAA;;Ae1CF;EACE,wBAAA;EACA,6BAAA;;AAOF;EACE,eAAA;;AmBnCF;EACE,mBAAA;;AlCmmBE;AACF,EAAE;AACF,EAAE;AACF,EAAE;EAAI,wBAAA;;AkC3lBR,QAHqC;EAGrC;IlCglBE,yBAAA;;EACA,KAAK;IAAK,cAAA;;EACV,EAAE;IAAQ,kBAAA;;EACV,EAAE;EACF,EAAE;IAAQ,mBAAA;;;AAIR;AACF,EAAE;AACF,EAAE;AACF,EAAE;EAAI,wBAAA;;AkCplBR,QAHqC,uBAAgC;EAGrE;IlCykBE,yBAAA;;EACA,KAAK;IAAK,cAAA;;EACV,EAAE;IAAQ,kBAAA;;EACV,EAAE;EACF,EAAE;IAAQ,mBAAA;;;AAIR;AACF,EAAE;AACF,EAAE;AACF,EAAE;EAAI,wBAAA;;AkC7kBR,QAHqC,uBAAgC;EAGrE;IlCkkBE,yBAAA;;EACA,KAAK;IAAK,cAAA;;EACV,EAAE;IAAQ,kBAAA;;EACV,EAAE;EACF,EAAE;IAAQ,mBAAA;;;AAIR;AACF,EAAE;AACF,EAAE;AACF,EAAE;EAAI,wBAAA;;AkCtkBR,QAHqC;EAGrC;IlC2jBE,yBAAA;;EACA,KAAK;IAAK,cAAA;;EACV,EAAE;IAAQ,kBAAA;;EACV,EAAE;EACF,EAAE;IAAQ,mBAAA;;;AkCzjBZ,QAHqC;ElCgkBjC;EACF,EAAE;EACF,EAAE;EACF,EAAE;IAAI,wBAAA;;;AkC3jBR,QAHqC,uBAAgC;ElC2jBjE;EACF,EAAE;EACF,EAAE;EACF,EAAE;IAAI,wBAAA;;;AkCtjBR,QAHqC,uBAAgC;ElCsjBjE;EACF,EAAE;EACF,EAAE;EACF,EAAE;IAAI,wBAAA;;;AkCjjBR
,QAHqC;ElCijBjC;EACF,EAAE;EACF,EAAE;EACF,EAAE;IAAI,wBAAA;;;AAHJ;AACF,EAAE;AACF,EAAE;AACF,EAAE;EAAI,wBAAA;;AkCpiBR;EAAA;IlCyhBE,yBAAA;;EACA,KAAK;IAAK,cAAA;;EACV,EAAE;IAAQ,kBAAA;;EACV,EAAE;EACF,EAAE;IAAQ,mBAAA;;;AkCvhBZ;ElC2hBI;EACF,EAAE;EACF,EAAE;EACF,EAAE;IAAI,wBAAA","sourcesContent":["/*! normalize.css v3.0.0 | MIT License | git.io/normalize */\n\n//\n// 1. Set default font family to sans-serif.\n// 2. Prevent iOS text size adjust after orientation change, without disabling\n//    user zoom.\n//\n\nhtml {\n  font-family: sans-serif; // 1\n  -ms-text-size-adjust: 100%; // 2\n  -webkit-text-size-adjust: 100%; // 2\n}\n\n//\n// Remove default margin.\n//\n\nbody {\n  margin: 0;\n}\n\n// HTML5 display definitions\n// ==========================================================================\n\n//\n// Correct `block` display not defined in IE 8/9.\n//\n\narticle,\naside,\ndetails,\nfigcaption,\nfigure,\nfooter,\nheader,\nhgroup,\nmain,\nnav,\nsection,\nsummary {\n  display: block;\n}\n\n//\n// 1. Correct `inline-block` display not defined in IE 8/9.\n// 2. 
Normalize vertical alignment of `progress` in Chrome, Firefox, and Opera.\n//\n\naudio,\ncanvas,\nprogress,\nvideo {\n  display: inline-block; // 1\n  vertical-align: baseline; // 2\n}\n\n//\n// Prevent modern browsers from displaying `audio` without controls.\n// Remove excess height in iOS 5 devices.\n//\n\naudio:not([controls]) {\n  display: none;\n  height: 0;\n}\n\n//\n// Address `[hidden]` styling not present in IE 8/9.\n// Hide the `template` element in IE, Safari, and Firefox < 22.\n//\n\n[hidden],\ntemplate {\n  display: none;\n}\n\n// Links\n// ==========================================================================\n\n//\n// Remove the gray background color from active links in IE 10.\n//\n\na {\n  background: transparent;\n}\n\n//\n// Improve readability when focused and also mouse hovered in all browsers.\n//\n\na:active,\na:hover {\n  outline: 0;\n}\n\n// Text-level semantics\n// ==========================================================================\n\n//\n// Address styling not present in IE 8/9, Safari 5, and Chrome.\n//\n\nabbr[title] {\n  border-bottom: 1px dotted;\n}\n\n//\n// Address style set to `bolder` in Firefox 4+, Safari 5, and Chrome.\n//\n\nb,\nstrong {\n  font-weight: bold;\n}\n\n//\n// Address styling not present in Safari 5 and Chrome.\n//\n\ndfn {\n  font-style: italic;\n}\n\n//\n// Address variable `h1` font-size and margin within `section` and `article`\n// contexts in Firefox 4+, Safari 5, and Chrome.\n//\n\nh1 {\n  font-size: 2em;\n  margin: 0.67em 0;\n}\n\n//\n// Address styling not present in IE 8/9.\n//\n\nmark {\n  background: #ff0;\n  color: #000;\n}\n\n//\n// Address inconsistent and variable font size in all browsers.\n//\n\nsmall {\n  font-size: 80%;\n}\n\n//\n// Prevent `sub` and `sup` affecting `line-height` in all browsers.\n//\n\nsub,\nsup {\n  font-size: 75%;\n  line-height: 0;\n  position: relative;\n  vertical-align: baseline;\n}\n\nsup {\n  top: -0.5em;\n}\n\nsub {\n  bottom: -0.25em;\n}\n\n// Embedded 
content\n// ==========================================================================\n\n//\n// Remove border when inside `a` element in IE 8/9.\n//\n\nimg {\n  border: 0;\n}\n\n//\n// Correct overflow displayed oddly in IE 9.\n//\n\nsvg:not(:root) {\n  overflow: hidden;\n}\n\n// Grouping content\n// ==========================================================================\n\n//\n// Address margin not present in IE 8/9 and Safari 5.\n//\n\nfigure {\n  margin: 1em 40px;\n}\n\n//\n// Address differences between Firefox and other browsers.\n//\n\nhr {\n  -moz-box-sizing: content-box;\n  box-sizing: content-box;\n  height: 0;\n}\n\n//\n// Contain overflow in all browsers.\n//\n\npre {\n  overflow: auto;\n}\n\n//\n// Address odd `em`-unit font size rendering in all browsers.\n//\n\ncode,\nkbd,\npre,\nsamp {\n  font-family: monospace, monospace;\n  font-size: 1em;\n}\n\n// Forms\n// ==========================================================================\n\n//\n// Known limitation: by default, Chrome and Safari on OS X allow very limited\n// styling of `select`, unless a `border` property is set.\n//\n\n//\n// 1. Correct color not being inherited.\n//    Known issue: affects color of disabled elements.\n// 2. Correct font properties not being inherited.\n// 3. Address margins set differently in Firefox 4+, Safari 5, and Chrome.\n//\n\nbutton,\ninput,\noptgroup,\nselect,\ntextarea {\n  color: inherit; // 1\n  font: inherit; // 2\n  margin: 0; // 3\n}\n\n//\n// Address `overflow` set to `hidden` in IE 8/9/10.\n//\n\nbutton {\n  overflow: visible;\n}\n\n//\n// Address inconsistent `text-transform` inheritance for `button` and `select`.\n// All other form control elements do not inherit `text-transform` values.\n// Correct `button` style inheritance in Firefox, IE 8+, and Opera\n// Correct `select` style inheritance in Firefox.\n//\n\nbutton,\nselect {\n  text-transform: none;\n}\n\n//\n// 1. 
Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio`\n//    and `video` controls.\n// 2. Correct inability to style clickable `input` types in iOS.\n// 3. Improve usability and consistency of cursor style between image-type\n//    `input` and others.\n//\n\nbutton,\nhtml input[type=\"button\"], // 1\ninput[type=\"reset\"],\ninput[type=\"submit\"] {\n  -webkit-appearance: button; // 2\n  cursor: pointer; // 3\n}\n\n//\n// Re-set default cursor for disabled elements.\n//\n\nbutton[disabled],\nhtml input[disabled] {\n  cursor: default;\n}\n\n//\n// Remove inner padding and border in Firefox 4+.\n//\n\nbutton::-moz-focus-inner,\ninput::-moz-focus-inner {\n  border: 0;\n  padding: 0;\n}\n\n//\n// Address Firefox 4+ setting `line-height` on `input` using `!important` in\n// the UA stylesheet.\n//\n\ninput {\n  line-height: normal;\n}\n\n//\n// It's recommended that you don't attempt to style these elements.\n// Firefox's implementation doesn't respect box-sizing, padding, or width.\n//\n// 1. Address box sizing set to `content-box` in IE 8/9/10.\n// 2. Remove excess padding in IE 8/9/10.\n//\n\ninput[type=\"checkbox\"],\ninput[type=\"radio\"] {\n  box-sizing: border-box; // 1\n  padding: 0; // 2\n}\n\n//\n// Fix the cursor style for Chrome's increment/decrement buttons. For certain\n// `font-size` values of the `input`, it causes the cursor style of the\n// decrement button to change from `default` to `text`.\n//\n\ninput[type=\"number\"]::-webkit-inner-spin-button,\ninput[type=\"number\"]::-webkit-outer-spin-button {\n  height: auto;\n}\n\n//\n// 1. Address `appearance` set to `searchfield` in Safari 5 and Chrome.\n// 2. 
Address `box-sizing` set to `border-box` in Safari 5 and Chrome\n//    (include `-moz` to future-proof).\n//\n\ninput[type=\"search\"] {\n  -webkit-appearance: textfield; // 1\n  -moz-box-sizing: content-box;\n  -webkit-box-sizing: content-box; // 2\n  box-sizing: content-box;\n}\n\n//\n// Remove inner padding and search cancel button in Safari and Chrome on OS X.\n// Safari (but not Chrome) clips the cancel button when the search input has\n// padding (and `textfield` appearance).\n//\n\ninput[type=\"search\"]::-webkit-search-cancel-button,\ninput[type=\"search\"]::-webkit-search-decoration {\n  -webkit-appearance: none;\n}\n\n//\n// Define consistent border, margin, and padding.\n//\n\nfieldset {\n  border: 1px solid #c0c0c0;\n  margin: 0 2px;\n  padding: 0.35em 0.625em 0.75em;\n}\n\n//\n// 1. Correct `color` not being inherited in IE 8/9.\n// 2. Remove padding so people aren't caught out if they zero out fieldsets.\n//\n\nlegend {\n  border: 0; // 1\n  padding: 0; // 2\n}\n\n//\n// Remove default vertical scrollbar in IE 8/9.\n//\n\ntextarea {\n  overflow: auto;\n}\n\n//\n// Don't inherit the `font-weight` (applied by a rule above).\n// NOTE: the default cannot safely be changed in Chrome and Safari on OS X.\n//\n\noptgroup {\n  font-weight: bold;\n}\n\n// Tables\n// ==========================================================================\n\n//\n// Remove most spacing between table cells.\n//\n\ntable {\n  border-collapse: collapse;\n  border-spacing: 0;\n}\n\ntd,\nth {\n  padding: 0;\n}","//\n// Basic print styles\n// --------------------------------------------------\n// Source: https://github.com/h5bp/html5-boilerplate/blob/master/css/main.css\n\n@media print {\n\n  * {\n    text-shadow: none !important;\n    color: #000 !important; // Black prints faster: h5bp.com/s\n    background: transparent !important;\n    box-shadow: none !important;\n  }\n\n  a,\n  a:visited {\n    text-decoration: underline;\n  }\n\n  a[href]:after {\n    content: \" (\" attr(href) 
\")\";\n  }\n\n  abbr[title]:after {\n    content: \" (\" attr(title) \")\";\n  }\n\n  // Don't show links for images, or javascript/internal links\n  a[href^=\"javascript:\"]:after,\n  a[href^=\"#\"]:after {\n    content: \"\";\n  }\n\n  pre,\n  blockquote {\n    border: 1px solid #999;\n    page-break-inside: avoid;\n  }\n\n  thead {\n    display: table-header-group; // h5bp.com/t\n  }\n\n  tr,\n  img {\n    page-break-inside: avoid;\n  }\n\n  img {\n    max-width: 100% !important;\n  }\n\n  p,\n  h2,\n  h3 {\n    orphans: 3;\n    widows: 3;\n  }\n\n  h2,\n  h3 {\n    page-break-after: avoid;\n  }\n\n  // Chrome (OSX) fix for https://github.com/twbs/bootstrap/issues/11245\n  // Once fixed, we can just straight up remove this.\n  select {\n    background: #fff !important;\n  }\n\n  // Bootstrap components\n  .navbar {\n    display: none;\n  }\n  .table {\n    td,\n    th {\n      background-color: #fff !important;\n    }\n  }\n  .btn,\n  .dropup > .btn {\n    > .caret {\n      border-top-color: #000 !important;\n    }\n  }\n  .label {\n    border: 1px solid #000;\n  }\n\n  .table {\n    border-collapse: collapse !important;\n  }\n  .table-bordered {\n    th,\n    td {\n      border: 1px solid #ddd !important;\n    }\n  }\n\n}\n","//\n// Scaffolding\n// --------------------------------------------------\n\n\n// Reset the box-sizing\n//\n// Heads up! 
This reset may cause conflicts with some third-party widgets.\n// For recommendations on resolving such conflicts, see\n// http://getbootstrap.com/getting-started/#third-box-sizing\n* {\n  .box-sizing(border-box);\n}\n*:before,\n*:after {\n  .box-sizing(border-box);\n}\n\n\n// Body reset\n\nhtml {\n  font-size: 62.5%;\n  -webkit-tap-highlight-color: rgba(0,0,0,0);\n}\n\nbody {\n  font-family: @font-family-base;\n  font-size: @font-size-base;\n  line-height: @line-height-base;\n  color: @text-color;\n  background-color: @body-bg;\n}\n\n// Reset fonts for relevant elements\ninput,\nbutton,\nselect,\ntextarea {\n  font-family: inherit;\n  font-size: inherit;\n  line-height: inherit;\n}\n\n\n// Links\n\na {\n  color: @link-color;\n  text-decoration: none;\n\n  &:hover,\n  &:focus {\n    color: @link-hover-color;\n    text-decoration: underline;\n  }\n\n  &:focus {\n    .tab-focus();\n  }\n}\n\n\n// Figures\n//\n// We reset this here because previously Normalize had no `figure` margins. This\n// ensures we don't break anyone's use of the element.\n\nfigure {\n  margin: 0;\n}\n\n\n// Images\n\nimg {\n  vertical-align: middle;\n}\n\n// Responsive images (ensure images don't scale beyond their parents)\n.img-responsive {\n  .img-responsive();\n}\n\n// Rounded corners\n.img-rounded {\n  border-radius: @border-radius-large;\n}\n\n// Image thumbnails\n//\n// Heads up! 
This is mixin-ed into thumbnails.less for `.thumbnail`.\n.img-thumbnail {\n  padding: @thumbnail-padding;\n  line-height: @line-height-base;\n  background-color: @thumbnail-bg;\n  border: 1px solid @thumbnail-border;\n  border-radius: @thumbnail-border-radius;\n  .transition(all .2s ease-in-out);\n\n  // Keep them at most 100% wide\n  .img-responsive(inline-block);\n}\n\n// Perfect circle\n.img-circle {\n  border-radius: 50%; // set radius in percents\n}\n\n\n// Horizontal rules\n\nhr {\n  margin-top:    @line-height-computed;\n  margin-bottom: @line-height-computed;\n  border: 0;\n  border-top: 1px solid @hr-border;\n}\n\n\n// Only display content to screen readers\n//\n// See: http://a11yproject.com/posts/how-to-hide-content/\n\n.sr-only {\n  position: absolute;\n  width: 1px;\n  height: 1px;\n  margin: -1px;\n  padding: 0;\n  overflow: hidden;\n  clip: rect(0,0,0,0);\n  border: 0;\n}\n","//\n// Mixins\n// --------------------------------------------------\n\n\n// Utilities\n// -------------------------\n\n// Clearfix\n// Source: http://nicolasgallagher.com/micro-clearfix-hack/\n//\n// For modern browsers\n// 1. The space content is one way to avoid an Opera bug when the\n//    contenteditable attribute is included anywhere else in the document.\n//    Otherwise it causes space to appear at the top and bottom of elements\n//    that are clearfixed.\n// 2. 
The use of `table` rather than `block` is only necessary if using\n//    `:before` to contain the top-margins of child elements.\n.clearfix() {\n  &:before,\n  &:after {\n    content: \" \"; // 1\n    display: table; // 2\n  }\n  &:after {\n    clear: both;\n  }\n}\n\n// WebKit-style focus\n.tab-focus() {\n  // Default\n  outline: thin dotted;\n  // WebKit\n  outline: 5px auto -webkit-focus-ring-color;\n  outline-offset: -2px;\n}\n\n// Center-align a block level element\n.center-block() {\n  display: block;\n  margin-left: auto;\n  margin-right: auto;\n}\n\n// Sizing shortcuts\n.size(@width; @height) {\n  width: @width;\n  height: @height;\n}\n.square(@size) {\n  .size(@size; @size);\n}\n\n// Placeholder text\n.placeholder(@color: @input-color-placeholder) {\n  &:-moz-placeholder            { color: @color; } // Firefox 4-18\n  &::-moz-placeholder           { color: @color;   // Firefox 19+\n                                  opacity: 1; } // See https://github.com/twbs/bootstrap/pull/11526\n  &:-ms-input-placeholder       { color: @color; } // Internet Explorer 10+\n  &::-webkit-input-placeholder  { color: @color; } // Safari and Chrome\n}\n\n// Text overflow\n// Requires inline-block or block for proper styling\n.text-overflow() {\n  overflow: hidden;\n  text-overflow: ellipsis;\n  white-space: nowrap;\n}\n\n// CSS image replacement\n//\n// Heads up! v3 launched with with only `.hide-text()`, but per our pattern for\n// mixins being reused as classes with the same name, this doesn't hold up. As\n// of v3.0.1 we have added `.text-hide()` and deprecated `.hide-text()`. 
Note\n// that we cannot chain the mixins together in Less, so they are repeated.\n//\n// Source: https://github.com/h5bp/html5-boilerplate/commit/aa0396eae757\n\n// Deprecated as of v3.0.1 (will be removed in v4)\n.hide-text() {\n  font: ~\"0/0\" a;\n  color: transparent;\n  text-shadow: none;\n  background-color: transparent;\n  border: 0;\n}\n// New mixin to use as of v3.0.1\n.text-hide() {\n  .hide-text();\n}\n\n\n\n// CSS3 PROPERTIES\n// --------------------------------------------------\n\n// Single side border-radius\n.border-top-radius(@radius) {\n  border-top-right-radius: @radius;\n   border-top-left-radius: @radius;\n}\n.border-right-radius(@radius) {\n  border-bottom-right-radius: @radius;\n     border-top-right-radius: @radius;\n}\n.border-bottom-radius(@radius) {\n  border-bottom-right-radius: @radius;\n   border-bottom-left-radius: @radius;\n}\n.border-left-radius(@radius) {\n  border-bottom-left-radius: @radius;\n     border-top-left-radius: @radius;\n}\n\n// Drop shadows\n//\n// Note: Deprecated `.box-shadow()` as of v3.1.0 since all of Bootstrap's\n//   supported browsers that have box shadow capabilities now support the\n//   standard `box-shadow` property.\n.box-shadow(@shadow) {\n  -webkit-box-shadow: @shadow; // iOS <4.3 & Android <4.1\n          box-shadow: @shadow;\n}\n\n// Transitions\n.transition(@transition) {\n  -webkit-transition: @transition;\n          transition: @transition;\n}\n.transition-property(@transition-property) {\n  -webkit-transition-property: @transition-property;\n          transition-property: @transition-property;\n}\n.transition-delay(@transition-delay) {\n  -webkit-transition-delay: @transition-delay;\n          transition-delay: @transition-delay;\n}\n.transition-duration(@transition-duration) {\n  -webkit-transition-duration: @transition-duration;\n          transition-duration: @transition-duration;\n}\n.transition-transform(@transition) {\n  -webkit-transition: -webkit-transform @transition;\n     
-moz-transition: -moz-transform @transition;\n       -o-transition: -o-transform @transition;\n          transition: transform @transition;\n}\n\n// Transformations\n.rotate(@degrees) {\n  -webkit-transform: rotate(@degrees);\n      -ms-transform: rotate(@degrees); // IE9 only\n          transform: rotate(@degrees);\n}\n.scale(@ratio; @ratio-y...) {\n  -webkit-transform: scale(@ratio, @ratio-y);\n      -ms-transform: scale(@ratio, @ratio-y); // IE9 only\n          transform: scale(@ratio, @ratio-y);\n}\n.translate(@x; @y) {\n  -webkit-transform: translate(@x, @y);\n      -ms-transform: translate(@x, @y); // IE9 only\n          transform: translate(@x, @y);\n}\n.skew(@x; @y) {\n  -webkit-transform: skew(@x, @y);\n      -ms-transform: skewX(@x) skewY(@y); // See https://github.com/twbs/bootstrap/issues/4885; IE9+\n          transform: skew(@x, @y);\n}\n.translate3d(@x; @y; @z) {\n  -webkit-transform: translate3d(@x, @y, @z);\n          transform: translate3d(@x, @y, @z);\n}\n\n.rotateX(@degrees) {\n  -webkit-transform: rotateX(@degrees);\n      -ms-transform: rotateX(@degrees); // IE9 only\n          transform: rotateX(@degrees);\n}\n.rotateY(@degrees) {\n  -webkit-transform: rotateY(@degrees);\n      -ms-transform: rotateY(@degrees); // IE9 only\n          transform: rotateY(@degrees);\n}\n.perspective(@perspective) {\n  -webkit-perspective: @perspective;\n     -moz-perspective: @perspective;\n          perspective: @perspective;\n}\n.perspective-origin(@perspective) {\n  -webkit-perspective-origin: @perspective;\n     -moz-perspective-origin: @perspective;\n          perspective-origin: @perspective;\n}\n.transform-origin(@origin) {\n  -webkit-transform-origin: @origin;\n     -moz-transform-origin: @origin;\n      -ms-transform-origin: @origin; // IE9 only\n          transform-origin: @origin;\n}\n\n// Animations\n.animation(@animation) {\n  -webkit-animation: @animation;\n          animation: @animation;\n}\n.animation-name(@name) {\n  -webkit-animation-name: 
@name;\n          animation-name: @name;\n}\n.animation-duration(@duration) {\n  -webkit-animation-duration: @duration;\n          animation-duration: @duration;\n}\n.animation-timing-function(@timing-function) {\n  -webkit-animation-timing-function: @timing-function;\n          animation-timing-function: @timing-function;\n}\n.animation-delay(@delay) {\n  -webkit-animation-delay: @delay;\n          animation-delay: @delay;\n}\n.animation-iteration-count(@iteration-count) {\n  -webkit-animation-iteration-count: @iteration-count;\n          animation-iteration-count: @iteration-count;\n}\n.animation-direction(@direction) {\n  -webkit-animation-direction: @direction;\n          animation-direction: @direction;\n}\n\n// Backface visibility\n// Prevent browsers from flickering when using CSS 3D transforms.\n// Default value is `visible`, but can be changed to `hidden`\n.backface-visibility(@visibility){\n  -webkit-backface-visibility: @visibility;\n     -moz-backface-visibility: @visibility;\n          backface-visibility: @visibility;\n}\n\n// Box sizing\n.box-sizing(@boxmodel) {\n  -webkit-box-sizing: @boxmodel;\n     -moz-box-sizing: @boxmodel;\n          box-sizing: @boxmodel;\n}\n\n// User select\n// For selecting text on the page\n.user-select(@select) {\n  -webkit-user-select: @select;\n     -moz-user-select: @select;\n      -ms-user-select: @select; // IE10+\n       -o-user-select: @select;\n          user-select: @select;\n}\n\n// Resize anything\n.resizable(@direction) {\n  resize: @direction; // Options: horizontal, vertical, both\n  overflow: auto; // Safari fix\n}\n\n// CSS3 Content Columns\n.content-columns(@column-count; @column-gap: @grid-gutter-width) {\n  -webkit-column-count: @column-count;\n     -moz-column-count: @column-count;\n          column-count: @column-count;\n  -webkit-column-gap: @column-gap;\n     -moz-column-gap: @column-gap;\n          column-gap: @column-gap;\n}\n\n// Optional hyphenation\n.hyphens(@mode: auto) {\n  word-wrap: 
break-word;\n  -webkit-hyphens: @mode;\n     -moz-hyphens: @mode;\n      -ms-hyphens: @mode; // IE10+\n       -o-hyphens: @mode;\n          hyphens: @mode;\n}\n\n// Opacity\n.opacity(@opacity) {\n  opacity: @opacity;\n  // IE8 filter\n  @opacity-ie: (@opacity * 100);\n  filter: ~\"alpha(opacity=@{opacity-ie})\";\n}\n\n\n\n// GRADIENTS\n// --------------------------------------------------\n\n#gradient {\n\n  // Horizontal gradient, from left to right\n  //\n  // Creates two color stops, start and end, by specifying a color and position for each color stop.\n  // Color stops are not available in IE9 and below.\n  .horizontal(@start-color: #555; @end-color: #333; @start-percent: 0%; @end-percent: 100%) {\n    background-image: -webkit-linear-gradient(left, color-stop(@start-color @start-percent), color-stop(@end-color @end-percent)); // Safari 5.1-6, Chrome 10+\n    background-image:  linear-gradient(to right, @start-color @start-percent, @end-color @end-percent); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n    background-repeat: repeat-x;\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=1)\",argb(@start-color),argb(@end-color))); // IE9 and down\n  }\n\n  // Vertical gradient, from top to bottom\n  //\n  // Creates two color stops, start and end, by specifying a color and position for each color stop.\n  // Color stops are not available in IE9 and below.\n  .vertical(@start-color: #555; @end-color: #333; @start-percent: 0%; @end-percent: 100%) {\n    background-image: -webkit-linear-gradient(top, @start-color @start-percent, @end-color @end-percent);  // Safari 5.1-6, Chrome 10+\n    background-image: linear-gradient(to bottom, @start-color @start-percent, @end-color @end-percent); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n    background-repeat: repeat-x;\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', 
endColorstr='%d', GradientType=0)\",argb(@start-color),argb(@end-color))); // IE9 and down\n  }\n\n  .directional(@start-color: #555; @end-color: #333; @deg: 45deg) {\n    background-repeat: repeat-x;\n    background-image: -webkit-linear-gradient(@deg, @start-color, @end-color); // Safari 5.1-6, Chrome 10+\n    background-image: linear-gradient(@deg, @start-color, @end-color); // Standard, IE10, Firefox 16+, Opera 12.10+, Safari 7+, Chrome 26+\n  }\n  .horizontal-three-colors(@start-color: #00b3ee; @mid-color: #7a43b6; @color-stop: 50%; @end-color: #c3325f) {\n    background-image: -webkit-linear-gradient(left, @start-color, @mid-color @color-stop, @end-color);\n    background-image: linear-gradient(to right, @start-color, @mid-color @color-stop, @end-color);\n    background-repeat: no-repeat;\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=1)\",argb(@start-color),argb(@end-color))); // IE9 and down, gets no color-stop at all for proper fallback\n  }\n  .vertical-three-colors(@start-color: #00b3ee; @mid-color: #7a43b6; @color-stop: 50%; @end-color: #c3325f) {\n    background-image: -webkit-linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n    background-image: linear-gradient(@start-color, @mid-color @color-stop, @end-color);\n    background-repeat: no-repeat;\n    filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(startColorstr='%d', endColorstr='%d', GradientType=0)\",argb(@start-color),argb(@end-color))); // IE9 and down, gets no color-stop at all for proper fallback\n  }\n  .radial(@inner-color: #555; @outer-color: #333) {\n    background-image: -webkit-radial-gradient(circle, @inner-color, @outer-color);\n    background-image: radial-gradient(circle, @inner-color, @outer-color);\n    background-repeat: no-repeat;\n  }\n  .striped(@color: rgba(255,255,255,.15); @angle: 45deg) {\n    background-image: -webkit-linear-gradient(@angle, @color 25%, transparent 25%, transparent 
50%, @color 50%, @color 75%, transparent 75%, transparent);\n    background-image: linear-gradient(@angle, @color 25%, transparent 25%, transparent 50%, @color 50%, @color 75%, transparent 75%, transparent);\n  }\n}\n\n// Reset filters for IE\n//\n// When you need to remove a gradient background, do not forget to use this to reset\n// the IE filter for IE9 and below.\n.reset-filter() {\n  filter: e(%(\"progid:DXImageTransform.Microsoft.gradient(enabled = false)\"));\n}\n\n\n\n// Retina images\n//\n// Short retina mixin for setting background-image and -size\n\n.img-retina(@file-1x; @file-2x; @width-1x; @height-1x) {\n  background-image: url(\"@{file-1x}\");\n\n  @media\n  only screen and (-webkit-min-device-pixel-ratio: 2),\n  only screen and (   min--moz-device-pixel-ratio: 2),\n  only screen and (     -o-min-device-pixel-ratio: 2/1),\n  only screen and (        min-device-pixel-ratio: 2),\n  only screen and (                min-resolution: 192dpi),\n  only screen and (                min-resolution: 2dppx) {\n    background-image: url(\"@{file-2x}\");\n    background-size: @width-1x @height-1x;\n  }\n}\n\n\n// Responsive image\n//\n// Keep images from scaling beyond the width of their parents.\n\n.img-responsive(@display: block) {\n  display: @display;\n  max-width: 100%; // Part 1: Set a maximum relative to the parent\n  height: auto; // Part 2: Scale the height according to the width, otherwise you get stretching\n}\n\n\n// COMPONENT MIXINS\n// --------------------------------------------------\n\n// Horizontal dividers\n// -------------------------\n// Dividers (basically an hr) within dropdowns and nav lists\n.nav-divider(@color: #e5e5e5) {\n  height: 1px;\n  margin: ((@line-height-computed / 2) - 1) 0;\n  overflow: hidden;\n  background-color: @color;\n}\n\n// Panels\n// -------------------------\n.panel-variant(@border; @heading-text-color; @heading-bg-color; @heading-border) {\n  border-color: @border;\n\n  & > .panel-heading {\n    color: 
@heading-text-color;\n    background-color: @heading-bg-color;\n    border-color: @heading-border;\n\n    + .panel-collapse .panel-body {\n      border-top-color: @border;\n    }\n  }\n  & > .panel-footer {\n    + .panel-collapse .panel-body {\n      border-bottom-color: @border;\n    }\n  }\n}\n\n// Alerts\n// -------------------------\n.alert-variant(@background; @border; @text-color) {\n  background-color: @background;\n  border-color: @border;\n  color: @text-color;\n\n  hr {\n    border-top-color: darken(@border, 5%);\n  }\n  .alert-link {\n    color: darken(@text-color, 10%);\n  }\n}\n\n// Tables\n// -------------------------\n.table-row-variant(@state; @background) {\n  // Exact selectors below required to override `.table-striped` and prevent\n  // inheritance to nested tables.\n  .table > thead > tr,\n  .table > tbody > tr,\n  .table > tfoot > tr {\n    > td.@{state},\n    > th.@{state},\n    &.@{state} > td,\n    &.@{state} > th {\n      background-color: @background;\n    }\n  }\n\n  // Hover states for `.table-hover`\n  // Note: this is not available for cells or rows within `thead` or `tfoot`.\n  .table-hover > tbody > tr {\n    > td.@{state}:hover,\n    > th.@{state}:hover,\n    &.@{state}:hover > td,\n    &.@{state}:hover > th {\n      background-color: darken(@background, 5%);\n    }\n  }\n}\n\n// List Groups\n// -------------------------\n.list-group-item-variant(@state; @background; @color) {\n  .list-group-item-@{state} {\n    color: @color;\n    background-color: @background;\n\n    a& {\n      color: @color;\n\n      .list-group-item-heading { color: inherit; }\n\n      &:hover,\n      &:focus {\n        color: @color;\n        background-color: darken(@background, 5%);\n      }\n      &.active,\n      &.active:hover,\n      &.active:focus {\n        color: #fff;\n        background-color: @color;\n        border-color: @color;\n      }\n    }\n  }\n}\n\n// Button variants\n// -------------------------\n// Easily pump out default styles, as 
well as :hover, :focus, :active,\n// and disabled options for all buttons\n.button-variant(@color; @background; @border) {\n  color: @color;\n  background-color: @background;\n  border-color: @border;\n\n  &:hover,\n  &:focus,\n  &:active,\n  &.active,\n  .open .dropdown-toggle& {\n    color: @color;\n    background-color: darken(@background, 8%);\n        border-color: darken(@border, 12%);\n  }\n  &:active,\n  &.active,\n  .open .dropdown-toggle& {\n    background-image: none;\n  }\n  &.disabled,\n  &[disabled],\n  fieldset[disabled] & {\n    &,\n    &:hover,\n    &:focus,\n    &:active,\n    &.active {\n      background-color: @background;\n          border-color: @border;\n    }\n  }\n\n  .badge {\n    color: @background;\n    background-color: @color;\n  }\n}\n\n// Button sizes\n// -------------------------\n.button-size(@padding-vertical; @padding-horizontal; @font-size; @line-height; @border-radius) {\n  padding: @padding-vertical @padding-horizontal;\n  font-size: @font-size;\n  line-height: @line-height;\n  border-radius: @border-radius;\n}\n\n// Pagination\n// -------------------------\n.pagination-size(@padding-vertical; @padding-horizontal; @font-size; @border-radius) {\n  > li {\n    > a,\n    > span {\n      padding: @padding-vertical @padding-horizontal;\n      font-size: @font-size;\n    }\n    &:first-child {\n      > a,\n      > span {\n        .border-left-radius(@border-radius);\n      }\n    }\n    &:last-child {\n      > a,\n      > span {\n        .border-right-radius(@border-radius);\n      }\n    }\n  }\n}\n\n// Labels\n// -------------------------\n.label-variant(@color) {\n  background-color: @color;\n  &[href] {\n    &:hover,\n    &:focus {\n      background-color: darken(@color, 10%);\n    }\n  }\n}\n\n// Contextual backgrounds\n// -------------------------\n.bg-variant(@color) {\n  background-color: @color;\n  a&:hover {\n    background-color: darken(@color, 10%);\n  }\n}\n\n// Typography\n// 
-------------------------\n.text-emphasis-variant(@color) {\n  color: @color;\n  a&:hover {\n    color: darken(@color, 10%);\n  }\n}\n\n// Navbar vertical align\n// -------------------------\n// Vertically center elements in the navbar.\n// Example: an element has a height of 30px, so write out `.navbar-vertical-align(30px);` to calculate the appropriate top margin.\n.navbar-vertical-align(@element-height) {\n  margin-top: ((@navbar-height - @element-height) / 2);\n  margin-bottom: ((@navbar-height - @element-height) / 2);\n}\n\n// Progress bars\n// -------------------------\n.progress-bar-variant(@color) {\n  background-color: @color;\n  .progress-striped & {\n    #gradient > .striped();\n  }\n}\n\n// Responsive utilities\n// -------------------------\n// More easily include all the states for responsive-utilities.less.\n.responsive-visibility() {\n  display: block !important;\n  table&  { display: table; }\n  tr&     { display: table-row !important; }\n  th&,\n  td&     { display: table-cell !important; }\n}\n\n.responsive-invisibility() {\n    &,\n  tr&,\n  th&,\n  td& { display: none !important; }\n}\n\n\n// Grid System\n// -----------\n\n// Centered container element\n.container-fixed() {\n  margin-right: auto;\n  margin-left: auto;\n  padding-left:  (@grid-gutter-width / 2);\n  padding-right: (@grid-gutter-width / 2);\n  &:extend(.clearfix all);\n}\n\n// Creates a wrapper for a series of columns\n.make-row(@gutter: @grid-gutter-width) {\n  margin-left:  (@gutter / -2);\n  margin-right: (@gutter / -2);\n  &:extend(.clearfix all);\n}\n\n// Generate the extra small columns\n.make-xs-column(@columns; @gutter: @grid-gutter-width) {\n  position: relative;\n  float: left;\n  width: percentage((@columns / @grid-columns));\n  min-height: 1px;\n  padding-left:  (@gutter / 2);\n  padding-right: (@gutter / 2);\n}\n.make-xs-column-offset(@columns) {\n  @media (min-width: @screen-xs-min) {\n    margin-left: percentage((@columns / @grid-columns));\n  
}\n}\n.make-xs-column-push(@columns) {\n  @media (min-width: @screen-xs-min) {\n    left: percentage((@columns / @grid-columns));\n  }\n}\n.make-xs-column-pull(@columns) {\n  @media (min-width: @screen-xs-min) {\n    right: percentage((@columns / @grid-columns));\n  }\n}\n\n\n// Generate the small columns\n.make-sm-column(@columns; @gutter: @grid-gutter-width) {\n  position: relative;\n  min-height: 1px;\n  padding-left:  (@gutter / 2);\n  padding-right: (@gutter / 2);\n\n  @media (min-width: @screen-sm-min) {\n    float: left;\n    width: percentage((@columns / @grid-columns));\n  }\n}\n.make-sm-column-offset(@columns) {\n  @media (min-width: @screen-sm-min) {\n    margin-left: percentage((@columns / @grid-columns));\n  }\n}\n.make-sm-column-push(@columns) {\n  @media (min-width: @screen-sm-min) {\n    left: percentage((@columns / @grid-columns));\n  }\n}\n.make-sm-column-pull(@columns) {\n  @media (min-width: @screen-sm-min) {\n    right: percentage((@columns / @grid-columns));\n  }\n}\n\n\n// Generate the medium columns\n.make-md-column(@columns; @gutter: @grid-gutter-width) {\n  position: relative;\n  min-height: 1px;\n  padding-left:  (@gutter / 2);\n  padding-right: (@gutter / 2);\n\n  @media (min-width: @screen-md-min) {\n    float: left;\n    width: percentage((@columns / @grid-columns));\n  }\n}\n.make-md-column-offset(@columns) {\n  @media (min-width: @screen-md-min) {\n    margin-left: percentage((@columns / @grid-columns));\n  }\n}\n.make-md-column-push(@columns) {\n  @media (min-width: @screen-md-min) {\n    left: percentage((@columns / @grid-columns));\n  }\n}\n.make-md-column-pull(@columns) {\n  @media (min-width: @screen-md-min) {\n    right: percentage((@columns / @grid-columns));\n  }\n}\n\n\n// Generate the large columns\n.make-lg-column(@columns; @gutter: @grid-gutter-width) {\n  position: relative;\n  min-height: 1px;\n  padding-left:  (@gutter / 2);\n  padding-right: (@gutter / 2);\n\n  @media (min-width: @screen-lg-min) {\n    float: left;\n  
  width: percentage((@columns / @grid-columns));\n  }\n}\n.make-lg-column-offset(@columns) {\n  @media (min-width: @screen-lg-min) {\n    margin-left: percentage((@columns / @grid-columns));\n  }\n}\n.make-lg-column-push(@columns) {\n  @media (min-width: @screen-lg-min) {\n    left: percentage((@columns / @grid-columns));\n  }\n}\n.make-lg-column-pull(@columns) {\n  @media (min-width: @screen-lg-min) {\n    right: percentage((@columns / @grid-columns));\n  }\n}\n\n\n// Framework grid generation\n//\n// Used only by Bootstrap to generate the correct number of grid classes given\n// any value of `@grid-columns`.\n\n.make-grid-columns() {\n  // Common styles for all sizes of grid columns, widths 1-12\n  .col(@index) when (@index = 1) { // initial\n    @item: ~\".col-xs-@{index}, .col-sm-@{index}, .col-md-@{index}, .col-lg-@{index}\";\n    .col((@index + 1), @item);\n  }\n  .col(@index, @list) when (@index =< @grid-columns) { // general; \"=<\" isn't a typo\n    @item: ~\".col-xs-@{index}, .col-sm-@{index}, .col-md-@{index}, .col-lg-@{index}\";\n    .col((@index + 1), ~\"@{list}, @{item}\");\n  }\n  .col(@index, @list) when (@index > @grid-columns) { // terminal\n    @{list} {\n      position: relative;\n      // Prevent columns from collapsing when empty\n      min-height: 1px;\n      // Inner gutter via padding\n      padding-left:  (@grid-gutter-width / 2);\n      padding-right: (@grid-gutter-width / 2);\n    }\n  }\n  .col(1); // kickstart it\n}\n\n.make-grid-columns-float(@class) {\n  .col(@index) when (@index = 1) { // initial\n    @item: ~\".col-@{class}-@{index}\";\n    .col((@index + 1), @item);\n  }\n  .col(@index, @list) when (@index =< @grid-columns) { // general\n    @item: ~\".col-@{class}-@{index}\";\n    .col((@index + 1), ~\"@{list}, @{item}\");\n  }\n  .col(@index, @list) when (@index > @grid-columns) { // terminal\n    @{list} {\n      float: left;\n    }\n  }\n  .col(1); // kickstart it\n}\n\n.calc-grid(@index, @class, @type) when (@type = width) 
and (@index > 0) {\n  .col-@{class}-@{index} {\n    width: percentage((@index / @grid-columns));\n  }\n}\n.calc-grid(@index, @class, @type) when (@type = push) {\n  .col-@{class}-push-@{index} {\n    left: percentage((@index / @grid-columns));\n  }\n}\n.calc-grid(@index, @class, @type) when (@type = pull) {\n  .col-@{class}-pull-@{index} {\n    right: percentage((@index / @grid-columns));\n  }\n}\n.calc-grid(@index, @class, @type) when (@type = offset) {\n  .col-@{class}-offset-@{index} {\n    margin-left: percentage((@index / @grid-columns));\n  }\n}\n\n// Basic looping in LESS\n.make-grid(@index, @class, @type) when (@index >= 0) {\n  .calc-grid(@index, @class, @type);\n  // next iteration\n  .make-grid((@index - 1), @class, @type);\n}\n\n\n// Form validation states\n//\n// Used in forms.less to generate the form validation CSS for warnings, errors,\n// and successes.\n\n.form-control-validation(@text-color: #555; @border-color: #ccc; @background-color: #f5f5f5) {\n  // Color the label and help text\n  .help-block,\n  .control-label,\n  .radio,\n  .checkbox,\n  .radio-inline,\n  .checkbox-inline  {\n    color: @text-color;\n  }\n  // Set the border and box shadow on specific inputs to match\n  .form-control {\n    border-color: @border-color;\n    .box-shadow(inset 0 1px 1px rgba(0,0,0,.075)); // Redeclare so transitions work\n    &:focus {\n      border-color: darken(@border-color, 10%);\n      @shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 6px lighten(@border-color, 20%);\n      .box-shadow(@shadow);\n    }\n  }\n  // Set validation states also for addons\n  .input-group-addon {\n    color: @text-color;\n    border-color: @border-color;\n    background-color: @background-color;\n  }\n  // Optional feedback icon\n  .form-control-feedback {\n    color: @text-color;\n  }\n}\n\n// Form control focus state\n//\n// Generate a customized focus state and for any input with the specified color,\n// which defaults to the `@input-focus-border` variable.\n//\n// We highly 
encourage you to not customize the default value, but instead use\n// this to tweak colors on an as-needed basis. This aesthetic change is based on\n// WebKit's default styles, but applicable to a wider range of browsers. Its\n// usability and accessibility should be taken into account with any change.\n//\n// Example usage: change the default blue border and shadow to white for better\n// contrast against a dark gray background.\n\n.form-control-focus(@color: @input-border-focus) {\n  @color-rgba: rgba(red(@color), green(@color), blue(@color), .6);\n  &:focus {\n    border-color: @color;\n    outline: 0;\n    .box-shadow(~\"inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px @{color-rgba}\");\n  }\n}\n\n// Form control sizing\n//\n// Relative text size, padding, and border-radii changes for form controls. For\n// horizontal sizing, wrap controls in the predefined grid classes. `<select>`\n// element gets special love because it's special, and that's a fact!\n\n.input-size(@input-height; @padding-vertical; @padding-horizontal; @font-size; @line-height; @border-radius) {\n  height: @input-height;\n  padding: @padding-vertical @padding-horizontal;\n  font-size: @font-size;\n  line-height: @line-height;\n  border-radius: @border-radius;\n\n  select& {\n    height: @input-height;\n    line-height: @input-height;\n  }\n\n  textarea&,\n  select[multiple]& {\n    height: auto;\n  }\n}\n","//\n// Variables\n// --------------------------------------------------\n\n\n//== Colors\n//\n//## Gray and brand colors for use across Bootstrap.\n\n@gray-darker:            lighten(#000, 13.5%); // #222\n@gray-dark:              lighten(#000, 20%);   // #333\n@gray:                   lighten(#000, 33.5%); // #555\n@gray-light:             lighten(#000, 60%);   // #999\n@gray-lighter:           lighten(#000, 93.5%); // #eee\n\n@brand-primary:         #428bca;\n@brand-success:         #5cb85c;\n@brand-info:            #5bc0de;\n@brand-warning:         #f0ad4e;\n@brand-danger:          
#d9534f;\n\n\n//== Scaffolding\n//\n// ## Settings for some of the most global styles.\n\n//** Background color for `<body>`.\n@body-bg:               #fff;\n//** Global text color on `<body>`.\n@text-color:            @gray-dark;\n\n//** Global textual link color.\n@link-color:            @brand-primary;\n//** Link hover color set via `darken()` function.\n@link-hover-color:      darken(@link-color, 15%);\n\n\n//== Typography\n//\n//## Font, line-height, and color for body text, headings, and more.\n\n@font-family-sans-serif:  \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n@font-family-serif:       Georgia, \"Times New Roman\", Times, serif;\n//** Default monospace fonts for `<code>`, `<kbd>`, and `<pre>`.\n@font-family-monospace:   Menlo, Monaco, Consolas, \"Courier New\", monospace;\n@font-family-base:        @font-family-sans-serif;\n\n@font-size-base:          14px;\n@font-size-large:         ceil((@font-size-base * 1.25)); // ~18px\n@font-size-small:         ceil((@font-size-base * 0.85)); // ~12px\n\n@font-size-h1:            floor((@font-size-base * 2.6)); // ~36px\n@font-size-h2:            floor((@font-size-base * 2.15)); // ~30px\n@font-size-h3:            ceil((@font-size-base * 1.7)); // ~24px\n@font-size-h4:            ceil((@font-size-base * 1.25)); // ~18px\n@font-size-h5:            @font-size-base;\n@font-size-h6:            ceil((@font-size-base * 0.85)); // ~12px\n\n//** Unit-less `line-height` for use in components like buttons.\n@line-height-base:        1.428571429; // 20/14\n//** Computed \"line-height\" (`font-size` * `line-height`) for use with `margin`, `padding`, etc.\n@line-height-computed:    floor((@font-size-base * @line-height-base)); // ~20px\n\n//** By default, this inherits from the `<body>`.\n@headings-font-family:    inherit;\n@headings-font-weight:    500;\n@headings-line-height:    1.1;\n@headings-color:          inherit;\n\n\n//-- Iconography\n//\n//## Specify custom locations of the include Glyphicons icon font. 
Useful for those including Bootstrap via Bower.\n\n@icon-font-path:          \"../fonts/\";\n@icon-font-name:          \"glyphicons-halflings-regular\";\n@icon-font-svg-id:\t\t\t\t\"glyphicons_halflingsregular\";\n\n//== Components\n//\n//## Define common padding and border radius sizes and more. Values based on 14px text and 1.428 line-height (~20px to start).\n\n@padding-base-vertical:     6px;\n@padding-base-horizontal:   12px;\n\n@padding-large-vertical:    10px;\n@padding-large-horizontal:  16px;\n\n@padding-small-vertical:    5px;\n@padding-small-horizontal:  10px;\n\n@padding-xs-vertical:       1px;\n@padding-xs-horizontal:     5px;\n\n@line-height-large:         1.33;\n@line-height-small:         1.5;\n\n@border-radius-base:        4px;\n@border-radius-large:       6px;\n@border-radius-small:       3px;\n\n//** Global color for active items (e.g., navs or dropdowns).\n@component-active-color:    #fff;\n//** Global background color for active items (e.g., navs or dropdowns).\n@component-active-bg:       @brand-primary;\n\n//** Width of the `border` for generating carets that indicator dropdowns.\n@caret-width-base:          4px;\n//** Carets increase slightly in size for larger components.\n@caret-width-large:         5px;\n\n\n//== Tables\n//\n//## Customizes the `.table` component with basic values, each used across all table variations.\n\n//** Padding for `<th>`s and `<td>`s.\n@table-cell-padding:            8px;\n//** Padding for cells in `.table-condensed`.\n@table-condensed-cell-padding:  5px;\n\n//** Default background color used for all tables.\n@table-bg:                      transparent;\n//** Background color used for `.table-striped`.\n@table-bg-accent:               #f9f9f9;\n//** Background color used for `.table-hover`.\n@table-bg-hover:                #f5f5f5;\n@table-bg-active:               @table-bg-hover;\n\n//** Border color for table and cell borders.\n@table-border-color:            #ddd;\n\n\n//== Buttons\n//\n//## For each of 
Bootstrap's buttons, define text, background and border color.\n\n@btn-font-weight:                normal;\n\n@btn-default-color:              #333;\n@btn-default-bg:                 #fff;\n@btn-default-border:             #ccc;\n\n@btn-primary-color:              #fff;\n@btn-primary-bg:                 @brand-primary;\n@btn-primary-border:             darken(@btn-primary-bg, 5%);\n\n@btn-success-color:              #fff;\n@btn-success-bg:                 @brand-success;\n@btn-success-border:             darken(@btn-success-bg, 5%);\n\n@btn-info-color:                 #fff;\n@btn-info-bg:                    @brand-info;\n@btn-info-border:                darken(@btn-info-bg, 5%);\n\n@btn-warning-color:              #fff;\n@btn-warning-bg:                 @brand-warning;\n@btn-warning-border:             darken(@btn-warning-bg, 5%);\n\n@btn-danger-color:               #fff;\n@btn-danger-bg:                  @brand-danger;\n@btn-danger-border:              darken(@btn-danger-bg, 5%);\n\n@btn-link-disabled-color:        @gray-light;\n\n\n//== Forms\n//\n//##\n\n//** `<input>` background color\n@input-bg:                       #fff;\n//** `<input disabled>` background color\n@input-bg-disabled:              @gray-lighter;\n\n//** Text color for `<input>`s\n@input-color:                    @gray;\n//** `<input>` border color\n@input-border:                   #ccc;\n//** `<input>` border radius\n@input-border-radius:            @border-radius-base;\n//** Border color for inputs on focus\n@input-border-focus:             #66afe9;\n\n//** Placeholder text color\n@input-color-placeholder:        @gray-light;\n\n//** Default `.form-control` height\n@input-height-base:              (@line-height-computed + (@padding-base-vertical * 2) + 2);\n//** Large `.form-control` height\n@input-height-large:             (ceil(@font-size-large * @line-height-large) + (@padding-large-vertical * 2) + 2);\n//** Small `.form-control` height\n@input-height-small:             
(floor(@font-size-small * @line-height-small) + (@padding-small-vertical * 2) + 2);\n\n@legend-color:                   @gray-dark;\n@legend-border-color:            #e5e5e5;\n\n//** Background color for textual input addons\n@input-group-addon-bg:           @gray-lighter;\n//** Border color for textual input addons\n@input-group-addon-border-color: @input-border;\n\n\n//== Dropdowns\n//\n//## Dropdown menu container and contents.\n\n//** Background for the dropdown menu.\n@dropdown-bg:                    #fff;\n//** Dropdown menu `border-color`.\n@dropdown-border:                rgba(0,0,0,.15);\n//** Dropdown menu `border-color` **for IE8**.\n@dropdown-fallback-border:       #ccc;\n//** Divider color for between dropdown items.\n@dropdown-divider-bg:            #e5e5e5;\n\n//** Dropdown link text color.\n@dropdown-link-color:            @gray-dark;\n//** Hover color for dropdown links.\n@dropdown-link-hover-color:      darken(@gray-dark, 5%);\n//** Hover background for dropdown links.\n@dropdown-link-hover-bg:         #f5f5f5;\n\n//** Active dropdown menu item text color.\n@dropdown-link-active-color:     @component-active-color;\n//** Active dropdown menu item background color.\n@dropdown-link-active-bg:        @component-active-bg;\n\n//** Disabled dropdown menu item background color.\n@dropdown-link-disabled-color:   @gray-light;\n\n//** Text color for headers within dropdown menus.\n@dropdown-header-color:          @gray-light;\n\n// Note: Deprecated @dropdown-caret-color as of v3.1.0\n@dropdown-caret-color:           #000;\n\n\n//-- Z-index master list\n//\n// Warning: Avoid customizing these values. 
They're used for a bird's eye view\n// of components dependent on the z-axis and are designed to all work together.\n//\n// Note: These variables are not generated into the Customizer.\n\n@zindex-navbar:            1000;\n@zindex-dropdown:          1000;\n@zindex-popover:           1010;\n@zindex-tooltip:           1030;\n@zindex-navbar-fixed:      1030;\n@zindex-modal-background:  1040;\n@zindex-modal:             1050;\n\n\n//== Media queries breakpoints\n//\n//## Define the breakpoints at which your layout will change, adapting to different screen sizes.\n\n// Extra small screen / phone\n// Note: Deprecated @screen-xs and @screen-phone as of v3.0.1\n@screen-xs:                  480px;\n@screen-xs-min:              @screen-xs;\n@screen-phone:               @screen-xs-min;\n\n// Small screen / tablet\n// Note: Deprecated @screen-sm and @screen-tablet as of v3.0.1\n@screen-sm:                  768px;\n@screen-sm-min:              @screen-sm;\n@screen-tablet:              @screen-sm-min;\n\n// Medium screen / desktop\n// Note: Deprecated @screen-md and @screen-desktop as of v3.0.1\n@screen-md:                  992px;\n@screen-md-min:              @screen-md;\n@screen-desktop:             @screen-md-min;\n\n// Large screen / wide desktop\n// Note: Deprecated @screen-lg and @screen-lg-desktop as of v3.0.1\n@screen-lg:                  1200px;\n@screen-lg-min:              @screen-lg;\n@screen-lg-desktop:          @screen-lg-min;\n\n// So media queries don't overlap when required, provide a maximum\n@screen-xs-max:              (@screen-sm-min - 1);\n@screen-sm-max:              (@screen-md-min - 1);\n@screen-md-max:              (@screen-lg-min - 1);\n\n\n//== Grid system\n//\n//## Define your custom responsive grid.\n\n//** Number of columns in the grid.\n@grid-columns:              12;\n//** Padding between columns. 
Gets divided in half for the left and right.\n@grid-gutter-width:         30px;\n// Navbar collapse\n//** Point at which the navbar becomes uncollapsed.\n@grid-float-breakpoint:     @screen-sm-min;\n//** Point at which the navbar begins collapsing.\n@grid-float-breakpoint-max: (@grid-float-breakpoint - 1);\n\n\n//== Navbar\n//\n//##\n\n// Basics of a navbar\n@navbar-height:                    50px;\n@navbar-margin-bottom:             @line-height-computed;\n@navbar-border-radius:             @border-radius-base;\n@navbar-padding-horizontal:        floor((@grid-gutter-width / 2));\n@navbar-padding-vertical:          ((@navbar-height - @line-height-computed) / 2);\n@navbar-collapse-max-height:       340px;\n\n@navbar-default-color:             #777;\n@navbar-default-bg:                #f8f8f8;\n@navbar-default-border:            darken(@navbar-default-bg, 6.5%);\n\n// Navbar links\n@navbar-default-link-color:                #777;\n@navbar-default-link-hover-color:          #333;\n@navbar-default-link-hover-bg:             transparent;\n@navbar-default-link-active-color:         #555;\n@navbar-default-link-active-bg:            darken(@navbar-default-bg, 6.5%);\n@navbar-default-link-disabled-color:       #ccc;\n@navbar-default-link-disabled-bg:          transparent;\n\n// Navbar brand label\n@navbar-default-brand-color:               @navbar-default-link-color;\n@navbar-default-brand-hover-color:         darken(@navbar-default-brand-color, 10%);\n@navbar-default-brand-hover-bg:            transparent;\n\n// Navbar toggle\n@navbar-default-toggle-hover-bg:           #ddd;\n@navbar-default-toggle-icon-bar-bg:        #888;\n@navbar-default-toggle-border-color:       #ddd;\n\n\n// Inverted navbar\n// Reset inverted navbar basics\n@navbar-inverse-color:                      @gray-light;\n@navbar-inverse-bg:                         #222;\n@navbar-inverse-border:                     darken(@navbar-inverse-bg, 10%);\n\n// Inverted navbar links\n@navbar-inverse-link-color:      
           @gray-light;\n@navbar-inverse-link-hover-color:           #fff;\n@navbar-inverse-link-hover-bg:              transparent;\n@navbar-inverse-link-active-color:          @navbar-inverse-link-hover-color;\n@navbar-inverse-link-active-bg:             darken(@navbar-inverse-bg, 10%);\n@navbar-inverse-link-disabled-color:        #444;\n@navbar-inverse-link-disabled-bg:           transparent;\n\n// Inverted navbar brand label\n@navbar-inverse-brand-color:                @navbar-inverse-link-color;\n@navbar-inverse-brand-hover-color:          #fff;\n@navbar-inverse-brand-hover-bg:             transparent;\n\n// Inverted navbar toggle\n@navbar-inverse-toggle-hover-bg:            #333;\n@navbar-inverse-toggle-icon-bar-bg:         #fff;\n@navbar-inverse-toggle-border-color:        #333;\n\n\n//== Navs\n//\n//##\n\n//=== Shared nav styles\n@nav-link-padding:                          10px 15px;\n@nav-link-hover-bg:                         @gray-lighter;\n\n@nav-disabled-link-color:                   @gray-light;\n@nav-disabled-link-hover-color:             @gray-light;\n\n@nav-open-link-hover-color:                 #fff;\n\n//== Tabs\n@nav-tabs-border-color:                     #ddd;\n\n@nav-tabs-link-hover-border-color:          @gray-lighter;\n\n@nav-tabs-active-link-hover-bg:             @body-bg;\n@nav-tabs-active-link-hover-color:          @gray;\n@nav-tabs-active-link-hover-border-color:   #ddd;\n\n@nav-tabs-justified-link-border-color:            #ddd;\n@nav-tabs-justified-active-link-border-color:     @body-bg;\n\n//== Pills\n@nav-pills-border-radius:                   @border-radius-base;\n@nav-pills-active-link-hover-bg:            @component-active-bg;\n@nav-pills-active-link-hover-color:         @component-active-color;\n\n\n//== Pagination\n//\n//##\n\n@pagination-color:                     @link-color;\n@pagination-bg:                        #fff;\n@pagination-border:                    #ddd;\n\n@pagination-hover-color:               
@link-hover-color;\n@pagination-hover-bg:                  @gray-lighter;\n@pagination-hover-border:              #ddd;\n\n@pagination-active-color:              #fff;\n@pagination-active-bg:                 @brand-primary;\n@pagination-active-border:             @brand-primary;\n\n@pagination-disabled-color:            @gray-light;\n@pagination-disabled-bg:               #fff;\n@pagination-disabled-border:           #ddd;\n\n\n//== Pager\n//\n//##\n\n@pager-bg:                             @pagination-bg;\n@pager-border:                         @pagination-border;\n@pager-border-radius:                  15px;\n\n@pager-hover-bg:                       @pagination-hover-bg;\n\n@pager-active-bg:                      @pagination-active-bg;\n@pager-active-color:                   @pagination-active-color;\n\n@pager-disabled-color:                 @pagination-disabled-color;\n\n\n//== Jumbotron\n//\n//##\n\n@jumbotron-padding:              30px;\n@jumbotron-color:                inherit;\n@jumbotron-bg:                   @gray-lighter;\n@jumbotron-heading-color:        inherit;\n@jumbotron-font-size:            ceil((@font-size-base * 1.5));\n\n\n//== Form states and alerts\n//\n//## Define colors for form feedback states and, by default, alerts.\n\n@state-success-text:             #3c763d;\n@state-success-bg:               #dff0d8;\n@state-success-border:           darken(spin(@state-success-bg, -10), 5%);\n\n@state-info-text:                #31708f;\n@state-info-bg:                  #d9edf7;\n@state-info-border:              darken(spin(@state-info-bg, -10), 7%);\n\n@state-warning-text:             #8a6d3b;\n@state-warning-bg:               #fcf8e3;\n@state-warning-border:           darken(spin(@state-warning-bg, -10), 5%);\n\n@state-danger-text:              #a94442;\n@state-danger-bg:                #f2dede;\n@state-danger-border:            darken(spin(@state-danger-bg, -10), 5%);\n\n\n//== Tooltips\n//\n//##\n\n//** Tooltip max width\n@tooltip-max-width:           
200px;\n//** Tooltip text color\n@tooltip-color:               #fff;\n//** Tooltip background color\n@tooltip-bg:                  #000;\n@tooltip-opacity:             .9;\n\n//** Tooltip arrow width\n@tooltip-arrow-width:         5px;\n//** Tooltip arrow color\n@tooltip-arrow-color:         @tooltip-bg;\n\n\n//== Popovers\n//\n//##\n\n//** Popover body background color\n@popover-bg:                          #fff;\n//** Popover maximum width\n@popover-max-width:                   276px;\n//** Popover border color\n@popover-border-color:                rgba(0,0,0,.2);\n//** Popover fallback border color\n@popover-fallback-border-color:       #ccc;\n\n//** Popover title background color\n@popover-title-bg:                    darken(@popover-bg, 3%);\n\n//** Popover arrow width\n@popover-arrow-width:                 10px;\n//** Popover arrow color\n@popover-arrow-color:                 #fff;\n\n//** Popover outer arrow width\n@popover-arrow-outer-width:           (@popover-arrow-width + 1);\n//** Popover outer arrow color\n@popover-arrow-outer-color:           rgba(0,0,0,.25);\n//** Popover outer arrow fallback color\n@popover-arrow-outer-fallback-color:  #999;\n\n\n//== Labels\n//\n//##\n\n//** Default label background color\n@label-default-bg:            @gray-light;\n//** Primary label background color\n@label-primary-bg:            @brand-primary;\n//** Success label background color\n@label-success-bg:            @brand-success;\n//** Info label background color\n@label-info-bg:               @brand-info;\n//** Warning label background color\n@label-warning-bg:            @brand-warning;\n//** Danger label background color\n@label-danger-bg:             @brand-danger;\n\n//** Default label text color\n@label-color:                 #fff;\n//** Default text color of a linked label\n@label-link-hover-color:      #fff;\n\n\n//== Modals\n//\n//##\n\n//** Padding applied to the modal body\n@modal-inner-padding:         20px;\n\n//** Padding applied to the modal 
title\n@modal-title-padding:         15px;\n//** Modal title line-height\n@modal-title-line-height:     @line-height-base;\n\n//** Background color of modal content area\n@modal-content-bg:                             #fff;\n//** Modal content border color\n@modal-content-border-color:                   rgba(0,0,0,.2);\n//** Modal content border color **for IE8**\n@modal-content-fallback-border-color:          #999;\n\n//** Modal backdrop background color\n@modal-backdrop-bg:           #000;\n//** Modal backdrop opacity\n@modal-backdrop-opacity:      .5;\n//** Modal header border color\n@modal-header-border-color:   #e5e5e5;\n//** Modal footer border color\n@modal-footer-border-color:   @modal-header-border-color;\n\n@modal-lg:                    900px;\n@modal-md:                    600px;\n@modal-sm:                    300px;\n\n\n//== Alerts\n//\n//## Define alert colors, border radius, and padding.\n\n@alert-padding:               15px;\n@alert-border-radius:         @border-radius-base;\n@alert-link-font-weight:      bold;\n\n@alert-success-bg:            @state-success-bg;\n@alert-success-text:          @state-success-text;\n@alert-success-border:        @state-success-border;\n\n@alert-info-bg:               @state-info-bg;\n@alert-info-text:             @state-info-text;\n@alert-info-border:           @state-info-border;\n\n@alert-warning-bg:            @state-warning-bg;\n@alert-warning-text:          @state-warning-text;\n@alert-warning-border:        @state-warning-border;\n\n@alert-danger-bg:             @state-danger-bg;\n@alert-danger-text:           @state-danger-text;\n@alert-danger-border:         @state-danger-border;\n\n\n//== Progress bars\n//\n//##\n\n//** Background color of the whole progress component\n@progress-bg:                 #f5f5f5;\n//** Progress bar text color\n@progress-bar-color:          #fff;\n\n//** Default progress bar color\n@progress-bar-bg:             @brand-primary;\n//** Success progress bar 
color\n@progress-bar-success-bg:     @brand-success;\n//** Warning progress bar color\n@progress-bar-warning-bg:     @brand-warning;\n//** Danger progress bar color\n@progress-bar-danger-bg:      @brand-danger;\n//** Info progress bar color\n@progress-bar-info-bg:        @brand-info;\n\n\n//== List group\n//\n//##\n\n//** Background color on `.list-group-item`\n@list-group-bg:                 #fff;\n//** `.list-group-item` border color\n@list-group-border:             #ddd;\n//** List group border radius\n@list-group-border-radius:      @border-radius-base;\n\n//** Background color of single list elements on hover\n@list-group-hover-bg:           #f5f5f5;\n//** Text color of active list elements\n@list-group-active-color:       @component-active-color;\n//** Background color of active list elements\n@list-group-active-bg:          @component-active-bg;\n//** Border color of active list elements\n@list-group-active-border:      @list-group-active-bg;\n@list-group-active-text-color:  lighten(@list-group-active-bg, 40%);\n\n@list-group-link-color:         #555;\n@list-group-link-heading-color: #333;\n\n\n//== Panels\n//\n//##\n\n@panel-bg:                    #fff;\n@panel-body-padding:          15px;\n@panel-border-radius:         @border-radius-base;\n\n//** Border color for elements within panels\n@panel-inner-border:          #ddd;\n@panel-footer-bg:             #f5f5f5;\n\n@panel-default-text:          @gray-dark;\n@panel-default-border:        #ddd;\n@panel-default-heading-bg:    #f5f5f5;\n\n@panel-primary-text:          #fff;\n@panel-primary-border:        @brand-primary;\n@panel-primary-heading-bg:    @brand-primary;\n\n@panel-success-text:          @state-success-text;\n@panel-success-border:        @state-success-border;\n@panel-success-heading-bg:    @state-success-bg;\n\n@panel-info-text:             @state-info-text;\n@panel-info-border:           @state-info-border;\n@panel-info-heading-bg:       @state-info-bg;\n\n@panel-warning-text:          
@state-warning-text;\n@panel-warning-border:        @state-warning-border;\n@panel-warning-heading-bg:    @state-warning-bg;\n\n@panel-danger-text:           @state-danger-text;\n@panel-danger-border:         @state-danger-border;\n@panel-danger-heading-bg:     @state-danger-bg;\n\n\n//== Thumbnails\n//\n//##\n\n//** Padding around the thumbnail image\n@thumbnail-padding:           4px;\n//** Thumbnail background color\n@thumbnail-bg:                @body-bg;\n//** Thumbnail border color\n@thumbnail-border:            #ddd;\n//** Thumbnail border radius\n@thumbnail-border-radius:     @border-radius-base;\n\n//** Custom text color for thumbnail captions\n@thumbnail-caption-color:     @text-color;\n//** Padding around the thumbnail caption\n@thumbnail-caption-padding:   9px;\n\n\n//== Wells\n//\n//##\n\n@well-bg:                     #f5f5f5;\n@well-border:                 darken(@well-bg, 7%);\n\n\n//== Badges\n//\n//##\n\n@badge-color:                 #fff;\n//** Linked badge text color on hover\n@badge-link-hover-color:      #fff;\n@badge-bg:                    @gray-light;\n\n//** Badge text color in active nav link\n@badge-active-color:          @link-color;\n//** Badge background color in active nav link\n@badge-active-bg:             #fff;\n\n@badge-font-weight:           bold;\n@badge-line-height:           1;\n@badge-border-radius:         10px;\n\n\n//== Breadcrumbs\n//\n//##\n\n@breadcrumb-padding-vertical:   8px;\n@breadcrumb-padding-horizontal: 15px;\n//** Breadcrumb background color\n@breadcrumb-bg:                 #f5f5f5;\n//** Breadcrumb text color\n@breadcrumb-color:              #ccc;\n//** Text color of current page in the breadcrumb\n@breadcrumb-active-color:       @gray-light;\n//** Textual separator for between breadcrumb elements\n@breadcrumb-separator:          \"/\";\n\n\n//== Carousel\n//\n//##\n\n@carousel-text-shadow:                        0 1px 2px rgba(0,0,0,.6);\n\n@carousel-control-color:                      
#fff;\n@carousel-control-width:                      15%;\n@carousel-control-opacity:                    .5;\n@carousel-control-font-size:                  20px;\n\n@carousel-indicator-active-bg:                #fff;\n@carousel-indicator-border-color:             #fff;\n\n@carousel-caption-color:                      #fff;\n\n\n//== Close\n//\n//##\n\n@close-font-weight:           bold;\n@close-color:                 #000;\n@close-text-shadow:           0 1px 0 #fff;\n\n\n//== Code\n//\n//##\n\n@code-color:                  #c7254e;\n@code-bg:                     #f9f2f4;\n\n@kbd-color:                   #fff;\n@kbd-bg:                      #333;\n\n@pre-bg:                      #f5f5f5;\n@pre-color:                   @gray-dark;\n@pre-border-color:            #ccc;\n@pre-scrollable-max-height:   340px;\n\n\n//== Type\n//\n//##\n\n//** Text muted color\n@text-muted:                  @gray-light;\n//** Abbreviations and acronyms border color\n@abbr-border-color:           @gray-light;\n//** Headings small color\n@headings-small-color:        @gray-light;\n//** Blockquote small color\n@blockquote-small-color:      @gray-light;\n//** Blockquote border color\n@blockquote-border-color:     @gray-lighter;\n//** Page header border color\n@page-header-border-color:    @gray-lighter;\n\n\n//== Miscellaneous\n//\n//##\n\n//** Horizontal line color.\n@hr-border:                   @gray-lighter;\n\n//** Horizontal offset for forms and lists.\n@component-offset-horizontal: 180px;\n\n\n//== Container sizes\n//\n//## Define the maximum width of `.container` for different screen sizes.\n\n// Small screen / tablet\n@container-tablet:             ((720px + @grid-gutter-width));\n//** For `@screen-sm-min` and up.\n@container-sm:                 @container-tablet;\n\n// Medium screen / desktop\n@container-desktop:            ((940px + @grid-gutter-width));\n//** For `@screen-md-min` and up.\n@container-md:                 @container-desktop;\n\n// Large screen / wide 
desktop\n@container-large-desktop:      ((1140px + @grid-gutter-width));\n//** For `@screen-lg-min` and up.\n@container-lg:                 @container-large-desktop;\n","//\n// Typography\n// --------------------------------------------------\n\n\n// Headings\n// -------------------------\n\nh1, h2, h3, h4, h5, h6,\n.h1, .h2, .h3, .h4, .h5, .h6 {\n  font-family: @headings-font-family;\n  font-weight: @headings-font-weight;\n  line-height: @headings-line-height;\n  color: @headings-color;\n\n  small,\n  .small {\n    font-weight: normal;\n    line-height: 1;\n    color: @headings-small-color;\n  }\n}\n\nh1, .h1,\nh2, .h2,\nh3, .h3 {\n  margin-top: @line-height-computed;\n  margin-bottom: (@line-height-computed / 2);\n\n  small,\n  .small {\n    font-size: 65%;\n  }\n}\nh4, .h4,\nh5, .h5,\nh6, .h6 {\n  margin-top: (@line-height-computed / 2);\n  margin-bottom: (@line-height-computed / 2);\n\n  small,\n  .small {\n    font-size: 75%;\n  }\n}\n\nh1, .h1 { font-size: @font-size-h1; }\nh2, .h2 { font-size: @font-size-h2; }\nh3, .h3 { font-size: @font-size-h3; }\nh4, .h4 { font-size: @font-size-h4; }\nh5, .h5 { font-size: @font-size-h5; }\nh6, .h6 { font-size: @font-size-h6; }\n\n\n// Body text\n// -------------------------\n\np {\n  margin: 0 0 (@line-height-computed / 2);\n}\n\n.lead {\n  margin-bottom: @line-height-computed;\n  font-size: floor((@font-size-base * 1.15));\n  font-weight: 200;\n  line-height: 1.4;\n\n  @media (min-width: @screen-sm-min) {\n    font-size: (@font-size-base * 1.5);\n  }\n}\n\n\n// Emphasis & misc\n// -------------------------\n\n// Ex: 14px base font * 85% = about 12px\nsmall,\n.small  { font-size: 85%; }\n\n// Undo browser default styling\ncite    { font-style: normal; }\n\n// Alignment\n.text-left           { text-align: left; }\n.text-right          { text-align: right; }\n.text-center         { text-align: center; }\n.text-justify        { text-align: justify; }\n\n// Contextual colors\n.text-muted {\n  color: 
@text-muted;\n}\n.text-primary {\n  .text-emphasis-variant(@brand-primary);\n}\n.text-success {\n  .text-emphasis-variant(@state-success-text);\n}\n.text-info {\n  .text-emphasis-variant(@state-info-text);\n}\n.text-warning {\n  .text-emphasis-variant(@state-warning-text);\n}\n.text-danger {\n  .text-emphasis-variant(@state-danger-text);\n}\n\n// Contextual backgrounds\n// For now we'll leave these alongside the text classes until v4 when we can\n// safely shift things around (per SemVer rules).\n.bg-primary {\n  // Given the contrast here, this is the only class to have its color inverted\n  // automatically.\n  color: #fff;\n  .bg-variant(@brand-primary);\n}\n.bg-success {\n  .bg-variant(@state-success-bg);\n}\n.bg-info {\n  .bg-variant(@state-info-bg);\n}\n.bg-warning {\n  .bg-variant(@state-warning-bg);\n}\n.bg-danger {\n  .bg-variant(@state-danger-bg);\n}\n\n\n// Page header\n// -------------------------\n\n.page-header {\n  padding-bottom: ((@line-height-computed / 2) - 1);\n  margin: (@line-height-computed * 2) 0 @line-height-computed;\n  border-bottom: 1px solid @page-header-border-color;\n}\n\n\n// Lists\n// --------------------------------------------------\n\n// Unordered and Ordered lists\nul,\nol {\n  margin-top: 0;\n  margin-bottom: (@line-height-computed / 2);\n  ul,\n  ol {\n    margin-bottom: 0;\n  }\n}\n\n// List options\n\n// Unstyled keeps list items block level, just removes default browser padding and list-style\n.list-unstyled {\n  padding-left: 0;\n  list-style: none;\n}\n\n// Inline turns list items into inline-block\n.list-inline {\n  .list-unstyled();\n\n  > li {\n    display: inline-block;\n    padding-left: 5px;\n    padding-right: 5px;\n\n    &:first-child {\n      padding-left: 0;\n    }\n  }\n}\n\n// Description Lists\ndl {\n  margin-top: 0; // Remove browser default\n  margin-bottom: @line-height-computed;\n}\ndt,\ndd {\n  line-height: @line-height-base;\n}\ndt {\n  font-weight: bold;\n}\ndd {\n  margin-left: 0; // Undo browser 
default\n}\n\n// Horizontal description lists\n//\n// Defaults to being stacked without any of the below styles applied, until the\n// grid breakpoint is reached (default of ~768px).\n\n@media (min-width: @grid-float-breakpoint) {\n  .dl-horizontal {\n    dt {\n      float: left;\n      width: (@component-offset-horizontal - 20);\n      clear: left;\n      text-align: right;\n      .text-overflow();\n    }\n    dd {\n      margin-left: @component-offset-horizontal;\n      &:extend(.clearfix all); // Clear the floated `dt` if an empty `dd` is present\n    }\n  }\n}\n\n// MISC\n// ----\n\n// Abbreviations and acronyms\nabbr[title],\n// Add data-* attribute to help out our tooltip plugin, per https://github.com/twbs/bootstrap/issues/5257\nabbr[data-original-title] {\n  cursor: help;\n  border-bottom: 1px dotted @abbr-border-color;\n}\n.initialism {\n  font-size: 90%;\n  text-transform: uppercase;\n}\n\n// Blockquotes\nblockquote {\n  padding: (@line-height-computed / 2) @line-height-computed;\n  margin: 0 0 @line-height-computed;\n  font-size: (@font-size-base * 1.25);\n  border-left: 5px solid @blockquote-border-color;\n\n  p,\n  ul,\n  ol {\n    &:last-child {\n      margin-bottom: 0;\n    }\n  }\n\n  // Note: Deprecated small and .small as of v3.1.0\n  // Context: https://github.com/twbs/bootstrap/issues/11660\n  footer,\n  small,\n  .small {\n    display: block;\n    font-size: 80%; // back to default font-size\n    line-height: @line-height-base;\n    color: @blockquote-small-color;\n\n    &:before {\n      content: '\\2014 \\00A0'; // em dash, nbsp\n    }\n  }\n}\n\n// Opposite alignment of blockquote\n//\n// Heads up: `blockquote.pull-right` has been deprecated as of v3.1.0.\n.blockquote-reverse,\nblockquote.pull-right {\n  padding-right: 15px;\n  padding-left: 0;\n  border-right: 5px solid @blockquote-border-color;\n  border-left: 0;\n  text-align: right;\n\n  // Account for citation\n  footer,\n  small,\n  .small {\n    &:before { content: ''; }\n    &:after 
{\n      content: '\\00A0 \\2014'; // nbsp, em dash\n    }\n  }\n}\n\n// Quotes\nblockquote:before,\nblockquote:after {\n  content: \"\";\n}\n\n// Addresses\naddress {\n  margin-bottom: @line-height-computed;\n  font-style: normal;\n  line-height: @line-height-base;\n}\n","//\n// Code (inline and block)\n// --------------------------------------------------\n\n\n// Inline and block code styles\ncode,\nkbd,\npre,\nsamp {\n  font-family: @font-family-monospace;\n}\n\n// Inline code\ncode {\n  padding: 2px 4px;\n  font-size: 90%;\n  color: @code-color;\n  background-color: @code-bg;\n  white-space: nowrap;\n  border-radius: @border-radius-base;\n}\n\n// User input typically entered via keyboard\nkbd {\n  padding: 2px 4px;\n  font-size: 90%;\n  color: @kbd-color;\n  background-color: @kbd-bg;\n  border-radius: @border-radius-small;\n  box-shadow: inset 0 -1px 0 rgba(0,0,0,.25);\n}\n\n// Blocks of code\npre {\n  display: block;\n  padding: ((@line-height-computed - 1) / 2);\n  margin: 0 0 (@line-height-computed / 2);\n  font-size: (@font-size-base - 1); // 14px to 13px\n  line-height: @line-height-base;\n  word-break: break-all;\n  word-wrap: break-word;\n  color: @pre-color;\n  background-color: @pre-bg;\n  border: 1px solid @pre-border-color;\n  border-radius: @border-radius-base;\n\n  // Account for some code outputs that place code tags in pre tags\n  code {\n    padding: 0;\n    font-size: inherit;\n    color: inherit;\n    white-space: pre-wrap;\n    background-color: transparent;\n    border-radius: 0;\n  }\n}\n\n// Enable scrollable blocks of code\n.pre-scrollable {\n  max-height: @pre-scrollable-max-height;\n  overflow-y: scroll;\n}\n","//\n// Grid system\n// --------------------------------------------------\n\n\n// Container widths\n//\n// Set the container width, and override it for fixed navbars in media queries.\n\n.container {\n  .container-fixed();\n\n  @media (min-width: @screen-sm-min) {\n    width: @container-sm;\n  }\n  @media (min-width: 
@screen-md-min) {\n    width: @container-md;\n  }\n  @media (min-width: @screen-lg-min) {\n    width: @container-lg;\n  }\n}\n\n\n// Fluid container\n//\n// Utilizes the mixin meant for fixed width containers, but without any defined\n// width for fluid, full width layouts.\n\n.container-fluid {\n  .container-fixed();\n}\n\n\n// Row\n//\n// Rows contain and clear the floats of your columns.\n\n.row {\n  .make-row();\n}\n\n\n// Columns\n//\n// Common styles for small and large grid columns\n\n.make-grid-columns();\n\n\n// Extra small grid\n//\n// Columns, offsets, pushes, and pulls for extra small devices like\n// smartphones.\n\n.make-grid-columns-float(xs);\n.make-grid(@grid-columns, xs, width);\n.make-grid(@grid-columns, xs, pull);\n.make-grid(@grid-columns, xs, push);\n.make-grid(@grid-columns, xs, offset);\n\n\n// Small grid\n//\n// Columns, offsets, pushes, and pulls for the small device range, from phones\n// to tablets.\n\n@media (min-width: @screen-sm-min) {\n  .make-grid-columns-float(sm);\n  .make-grid(@grid-columns, sm, width);\n  .make-grid(@grid-columns, sm, pull);\n  .make-grid(@grid-columns, sm, push);\n  .make-grid(@grid-columns, sm, offset);\n}\n\n\n// Medium grid\n//\n// Columns, offsets, pushes, and pulls for the desktop device range.\n\n@media (min-width: @screen-md-min) {\n  .make-grid-columns-float(md);\n  .make-grid(@grid-columns, md, width);\n  .make-grid(@grid-columns, md, pull);\n  .make-grid(@grid-columns, md, push);\n  .make-grid(@grid-columns, md, offset);\n}\n\n\n// Large grid\n//\n// Columns, offsets, pushes, and pulls for the large desktop device range.\n\n@media (min-width: @screen-lg-min) {\n  .make-grid-columns-float(lg);\n  .make-grid(@grid-columns, lg, width);\n  .make-grid(@grid-columns, lg, pull);\n  .make-grid(@grid-columns, lg, push);\n  .make-grid(@grid-columns, lg, offset);\n}\n","//\n// Tables\n// --------------------------------------------------\n\n\ntable {\n  max-width: 100%;\n  background-color: @table-bg;\n}\nth {\n 
 text-align: left;\n}\n\n\n// Baseline styles\n\n.table {\n  width: 100%;\n  margin-bottom: @line-height-computed;\n  // Cells\n  > thead,\n  > tbody,\n  > tfoot {\n    > tr {\n      > th,\n      > td {\n        padding: @table-cell-padding;\n        line-height: @line-height-base;\n        vertical-align: top;\n        border-top: 1px solid @table-border-color;\n      }\n    }\n  }\n  // Bottom align for column headings\n  > thead > tr > th {\n    vertical-align: bottom;\n    border-bottom: 2px solid @table-border-color;\n  }\n  // Remove top border from thead by default\n  > caption + thead,\n  > colgroup + thead,\n  > thead:first-child {\n    > tr:first-child {\n      > th,\n      > td {\n        border-top: 0;\n      }\n    }\n  }\n  // Account for multiple tbody instances\n  > tbody + tbody {\n    border-top: 2px solid @table-border-color;\n  }\n\n  // Nesting\n  .table {\n    background-color: @body-bg;\n  }\n}\n\n\n// Condensed table w/ half padding\n\n.table-condensed {\n  > thead,\n  > tbody,\n  > tfoot {\n    > tr {\n      > th,\n      > td {\n        padding: @table-condensed-cell-padding;\n      }\n    }\n  }\n}\n\n\n// Bordered version\n//\n// Add borders all around the table and between all the columns.\n\n.table-bordered {\n  border: 1px solid @table-border-color;\n  > thead,\n  > tbody,\n  > tfoot {\n    > tr {\n      > th,\n      > td {\n        border: 1px solid @table-border-color;\n      }\n    }\n  }\n  > thead > tr {\n    > th,\n    > td {\n      border-bottom-width: 2px;\n    }\n  }\n}\n\n\n// Zebra-striping\n//\n// Default zebra-stripe styles (alternating gray and transparent backgrounds)\n\n.table-striped {\n  > tbody > tr:nth-child(odd) {\n    > td,\n    > th {\n      background-color: @table-bg-accent;\n    }\n  }\n}\n\n\n// Hover effect\n//\n// Placed here since it has to come after the potential zebra striping\n\n.table-hover {\n  > tbody > tr:hover {\n    > td,\n    > th {\n      background-color: @table-bg-hover;\n    }\n  
}\n}\n\n\n// Table cell sizing\n//\n// Reset default table behavior\n\ntable col[class*=\"col-\"] {\n  position: static; // Prevent border hiding in Firefox and IE9/10 (see https://github.com/twbs/bootstrap/issues/11623)\n  float: none;\n  display: table-column;\n}\ntable {\n  td,\n  th {\n    &[class*=\"col-\"] {\n      position: static; // Prevent border hiding in Firefox and IE9/10 (see https://github.com/twbs/bootstrap/issues/11623)\n      float: none;\n      display: table-cell;\n    }\n  }\n}\n\n\n// Table backgrounds\n//\n// Exact selectors below required to override `.table-striped` and prevent\n// inheritance to nested tables.\n\n// Generate the contextual variants\n.table-row-variant(active; @table-bg-active);\n.table-row-variant(success; @state-success-bg);\n.table-row-variant(info; @state-info-bg);\n.table-row-variant(warning; @state-warning-bg);\n.table-row-variant(danger; @state-danger-bg);\n\n\n// Responsive tables\n//\n// Wrap your tables in `.table-responsive` and we'll make them mobile friendly\n// by enabling horizontal scrolling. Only applies <768px. 
Everything above that\n// will display normally.\n\n@media (max-width: @screen-xs-max) {\n  .table-responsive {\n    width: 100%;\n    margin-bottom: (@line-height-computed * 0.75);\n    overflow-y: hidden;\n    overflow-x: scroll;\n    -ms-overflow-style: -ms-autohiding-scrollbar;\n    border: 1px solid @table-border-color;\n    -webkit-overflow-scrolling: touch;\n\n    // Tighten up spacing\n    > .table {\n      margin-bottom: 0;\n\n      // Ensure the content doesn't wrap\n      > thead,\n      > tbody,\n      > tfoot {\n        > tr {\n          > th,\n          > td {\n            white-space: nowrap;\n          }\n        }\n      }\n    }\n\n    // Special overrides for the bordered tables\n    > .table-bordered {\n      border: 0;\n\n      // Nuke the appropriate borders so that the parent can handle them\n      > thead,\n      > tbody,\n      > tfoot {\n        > tr {\n          > th:first-child,\n          > td:first-child {\n            border-left: 0;\n          }\n          > th:last-child,\n          > td:last-child {\n            border-right: 0;\n          }\n        }\n      }\n\n      // Only nuke the last row's bottom-border in `tbody` and `tfoot` since\n      // chances are there will be only one `tr` in a `thead` and that would\n      // remove the border altogether.\n      > tbody,\n      > tfoot {\n        > tr:last-child {\n          > th,\n          > td {\n            border-bottom: 0;\n          }\n        }\n      }\n\n    }\n  }\n}\n","//\n// Forms\n// --------------------------------------------------\n\n\n// Normalize non-controls\n//\n// Restyle and baseline non-control form elements.\n\nfieldset {\n  padding: 0;\n  margin: 0;\n  border: 0;\n  // Chrome and Firefox set a `min-width: -webkit-min-content;` on fieldsets,\n  // so we reset that to ensure it behaves more like a standard block element.\n  // See https://github.com/twbs/bootstrap/issues/12359.\n  min-width: 0;\n}\n\nlegend {\n  display: block;\n  width: 100%;\n  padding: 
0;\n  margin-bottom: @line-height-computed;\n  font-size: (@font-size-base * 1.5);\n  line-height: inherit;\n  color: @legend-color;\n  border: 0;\n  border-bottom: 1px solid @legend-border-color;\n}\n\nlabel {\n  display: inline-block;\n  margin-bottom: 5px;\n  font-weight: bold;\n}\n\n\n// Normalize form controls\n//\n// While most of our form styles require extra classes, some basic normalization\n// is required to ensure optimum display with or without those classes to better\n// address browser inconsistencies.\n\n// Override content-box in Normalize (* isn't specific enough)\ninput[type=\"search\"] {\n  .box-sizing(border-box);\n}\n\n// Position radios and checkboxes better\ninput[type=\"radio\"],\ninput[type=\"checkbox\"] {\n  margin: 4px 0 0;\n  margin-top: 1px \\9; /* IE8-9 */\n  line-height: normal;\n}\n\n// Set the height of file controls to match text inputs\ninput[type=\"file\"] {\n  display: block;\n}\n\n// Make range inputs behave like textual form controls\ninput[type=\"range\"] {\n  display: block;\n  width: 100%;\n}\n\n// Make multiple select elements height not fixed\nselect[multiple],\nselect[size] {\n  height: auto;\n}\n\n// Focus for file, radio, and checkbox\ninput[type=\"file\"]:focus,\ninput[type=\"radio\"]:focus,\ninput[type=\"checkbox\"]:focus {\n  .tab-focus();\n}\n\n// Adjust output element\noutput {\n  display: block;\n  padding-top: (@padding-base-vertical + 1);\n  font-size: @font-size-base;\n  line-height: @line-height-base;\n  color: @input-color;\n}\n\n\n// Common form controls\n//\n// Shared size and type resets for form controls. 
Apply `.form-control` to any\n// of the following form controls:\n//\n// select\n// textarea\n// input[type=\"text\"]\n// input[type=\"password\"]\n// input[type=\"datetime\"]\n// input[type=\"datetime-local\"]\n// input[type=\"date\"]\n// input[type=\"month\"]\n// input[type=\"time\"]\n// input[type=\"week\"]\n// input[type=\"number\"]\n// input[type=\"email\"]\n// input[type=\"url\"]\n// input[type=\"search\"]\n// input[type=\"tel\"]\n// input[type=\"color\"]\n\n.form-control {\n  display: block;\n  width: 100%;\n  height: @input-height-base; // Make inputs at least the height of their button counterpart (base line-height + padding + border)\n  padding: @padding-base-vertical @padding-base-horizontal;\n  font-size: @font-size-base;\n  line-height: @line-height-base;\n  color: @input-color;\n  background-color: @input-bg;\n  background-image: none; // Reset unusual Firefox-on-Android default style; see https://github.com/necolas/normalize.css/issues/214\n  border: 1px solid @input-border;\n  border-radius: @input-border-radius;\n  .box-shadow(inset 0 1px 1px rgba(0,0,0,.075));\n  .transition(~\"border-color ease-in-out .15s, box-shadow ease-in-out .15s\");\n\n  // Customize the `:focus` state to imitate native WebKit styles.\n  .form-control-focus();\n\n  // Placeholder\n  .placeholder();\n\n  // Disabled and read-only inputs\n  // Note: HTML5 says that controls under a fieldset > legend:first-child won't\n  // be disabled if the fieldset is disabled. 
Due to implementation difficulty,\n  // we don't honor that edge case; we style them as disabled anyway.\n  &[disabled],\n  &[readonly],\n  fieldset[disabled] & {\n    cursor: not-allowed;\n    background-color: @input-bg-disabled;\n    opacity: 1; // iOS fix for unreadable disabled content\n  }\n\n  // Reset height for `textarea`s\n  textarea& {\n    height: auto;\n  }\n}\n\n// Special styles for iOS date input\n//\n// In Mobile Safari, date inputs require a pixel line-height that matches the\n// given height of the input.\ninput[type=\"date\"] {\n  line-height: @input-height-base;\n}\n\n\n// Form groups\n//\n// Designed to help with the organization and spacing of vertical forms. For\n// horizontal forms, use the predefined grid classes.\n\n.form-group {\n  margin-bottom: 15px;\n}\n\n\n// Checkboxes and radios\n//\n// Indent the labels to position radios/checkboxes as hanging controls.\n\n.radio,\n.checkbox {\n  display: block;\n  min-height: @line-height-computed; // clear the floating input if there is no label text\n  margin-top: 10px;\n  margin-bottom: 10px;\n  padding-left: 20px;\n  label {\n    display: inline;\n    font-weight: normal;\n    cursor: pointer;\n  }\n}\n.radio input[type=\"radio\"],\n.radio-inline input[type=\"radio\"],\n.checkbox input[type=\"checkbox\"],\n.checkbox-inline input[type=\"checkbox\"] {\n  float: left;\n  margin-left: -20px;\n}\n.radio + .radio,\n.checkbox + .checkbox {\n  margin-top: -5px; // Move up sibling radios or checkboxes for tighter spacing\n}\n\n// Radios and checkboxes on same line\n.radio-inline,\n.checkbox-inline {\n  display: inline-block;\n  padding-left: 20px;\n  margin-bottom: 0;\n  vertical-align: middle;\n  font-weight: normal;\n  cursor: pointer;\n}\n.radio-inline + .radio-inline,\n.checkbox-inline + .checkbox-inline {\n  margin-top: 0;\n  margin-left: 10px; // space out consecutive inline controls\n}\n\n// Apply same disabled cursor tweak as for inputs\n//\n// Note: Neither radios nor checkboxes can be 
readonly.\ninput[type=\"radio\"],\ninput[type=\"checkbox\"],\n.radio,\n.radio-inline,\n.checkbox,\n.checkbox-inline {\n  &[disabled],\n  fieldset[disabled] & {\n    cursor: not-allowed;\n  }\n}\n\n\n// Form control sizing\n//\n// Build on `.form-control` with modifier classes to decrease or increase the\n// height and font-size of form controls.\n\n.input-sm {\n  .input-size(@input-height-small; @padding-small-vertical; @padding-small-horizontal; @font-size-small; @line-height-small; @border-radius-small);\n}\n\n.input-lg {\n  .input-size(@input-height-large; @padding-large-vertical; @padding-large-horizontal; @font-size-large; @line-height-large; @border-radius-large);\n}\n\n\n// Form control feedback states\n//\n// Apply contextual and semantic states to individual form controls.\n\n.has-feedback {\n  // Enable absolute positioning\n  position: relative;\n\n  // Ensure icons don't overlap text\n  .form-control {\n    padding-right: (@input-height-base * 1.25);\n  }\n\n  // Feedback icon (requires .glyphicon classes)\n  .form-control-feedback {\n    position: absolute;\n    top: (@line-height-computed + 5); // Height of the `label` and its margin\n    right: 0;\n    display: block;\n    width: @input-height-base;\n    height: @input-height-base;\n    line-height: @input-height-base;\n    text-align: center;\n  }\n}\n\n// Feedback states\n.has-success {\n  .form-control-validation(@state-success-text; @state-success-text; @state-success-bg);\n}\n.has-warning {\n  .form-control-validation(@state-warning-text; @state-warning-text; @state-warning-bg);\n}\n.has-error {\n  .form-control-validation(@state-danger-text; @state-danger-text; @state-danger-bg);\n}\n\n\n// Static form control text\n//\n// Apply class to a `p` element to make any string of text align with labels in\n// a horizontal form layout.\n\n.form-control-static {\n  margin-bottom: 0; // Remove default margin from `p`\n}\n\n\n// Help text\n//\n// Apply to any element you wish to create light text for 
placement immediately\n// below a form control. Use for general help, formatting, or instructional text.\n\n.help-block {\n  display: block; // account for any element using help-block\n  margin-top: 5px;\n  margin-bottom: 10px;\n  color: lighten(@text-color, 25%); // lighten the text some for contrast\n}\n\n\n\n// Inline forms\n//\n// Make forms appear inline(-block) by adding the `.form-inline` class. Inline\n// forms begin stacked on extra small (mobile) devices and then go inline when\n// viewports reach <768px.\n//\n// Requires wrapping inputs and labels with `.form-group` for proper display of\n// default HTML form controls and our custom form controls (e.g., input groups).\n//\n// Heads up! This is mixin-ed into `.navbar-form` in navbars.less.\n\n.form-inline {\n\n  // Kick in the inline\n  @media (min-width: @screen-sm-min) {\n    // Inline-block all the things for \"inline\"\n    .form-group {\n      display: inline-block;\n      margin-bottom: 0;\n      vertical-align: middle;\n    }\n\n    // In navbar-form, allow folks to *not* use `.form-group`\n    .form-control {\n      display: inline-block;\n      width: auto; // Prevent labels from stacking above inputs in `.form-group`\n      vertical-align: middle;\n    }\n\n    .control-label {\n      margin-bottom: 0;\n      vertical-align: middle;\n    }\n\n    // Remove default margin on radios/checkboxes that were used for stacking, and\n    // then undo the floating of radios and checkboxes to match (which also avoids\n    // a bug in WebKit: https://github.com/twbs/bootstrap/issues/1969).\n    .radio,\n    .checkbox {\n      display: inline-block;\n      margin-top: 0;\n      margin-bottom: 0;\n      padding-left: 0;\n      vertical-align: middle;\n    }\n    .radio input[type=\"radio\"],\n    .checkbox input[type=\"checkbox\"] {\n      float: none;\n      margin-left: 0;\n    }\n\n    // Validation states\n    //\n    // Reposition the icon because it's now within a grid column and columns have\n    // 
`position: relative;` on them. Also accounts for the grid gutter padding.\n    .has-feedback .form-control-feedback {\n      top: 0;\n    }\n  }\n}\n\n\n// Horizontal forms\n//\n// Horizontal forms are built on grid classes and allow you to create forms with\n// labels on the left and inputs on the right.\n\n.form-horizontal {\n\n  // Consistent vertical alignment of labels, radios, and checkboxes\n  .control-label,\n  .radio,\n  .checkbox,\n  .radio-inline,\n  .checkbox-inline {\n    margin-top: 0;\n    margin-bottom: 0;\n    padding-top: (@padding-base-vertical + 1); // Default padding plus a border\n  }\n  // Account for padding we're adding to ensure the alignment and of help text\n  // and other content below items\n  .radio,\n  .checkbox {\n    min-height: (@line-height-computed + (@padding-base-vertical + 1));\n  }\n\n  // Make form groups behave like rows\n  .form-group {\n    .make-row();\n  }\n\n  .form-control-static {\n    padding-top: (@padding-base-vertical + 1);\n  }\n\n  // Only right align form labels here when the columns stop stacking\n  @media (min-width: @screen-sm-min) {\n    .control-label {\n      text-align: right;\n    }\n  }\n\n  // Validation states\n  //\n  // Reposition the icon because it's now within a grid column and columns have\n  // `position: relative;` on them. 
Also accounts for the grid gutter padding.\n  .has-feedback .form-control-feedback {\n    top: 0;\n    right: (@grid-gutter-width / 2);\n  }\n}\n","//\n// Buttons\n// --------------------------------------------------\n\n\n// Base styles\n// --------------------------------------------------\n\n.btn {\n  display: inline-block;\n  margin-bottom: 0; // For input.btn\n  font-weight: @btn-font-weight;\n  text-align: center;\n  vertical-align: middle;\n  cursor: pointer;\n  background-image: none; // Reset unusual Firefox-on-Android default style; see https://github.com/necolas/normalize.css/issues/214\n  border: 1px solid transparent;\n  white-space: nowrap;\n  .button-size(@padding-base-vertical; @padding-base-horizontal; @font-size-base; @line-height-base; @border-radius-base);\n  .user-select(none);\n\n  &:focus {\n    .tab-focus();\n  }\n\n  &:hover,\n  &:focus {\n    color: @btn-default-color;\n    text-decoration: none;\n  }\n\n  &:active,\n  &.active {\n    outline: 0;\n    background-image: none;\n    .box-shadow(inset 0 3px 5px rgba(0,0,0,.125));\n  }\n\n  &.disabled,\n  &[disabled],\n  fieldset[disabled] & {\n    cursor: not-allowed;\n    pointer-events: none; // Future-proof disabling of clicks\n    .opacity(.65);\n    .box-shadow(none);\n  }\n}\n\n\n// Alternate buttons\n// --------------------------------------------------\n\n.btn-default {\n  .button-variant(@btn-default-color; @btn-default-bg; @btn-default-border);\n}\n.btn-primary {\n  .button-variant(@btn-primary-color; @btn-primary-bg; @btn-primary-border);\n}\n// Success appears as green\n.btn-success {\n  .button-variant(@btn-success-color; @btn-success-bg; @btn-success-border);\n}\n// Info appears as blue-green\n.btn-info {\n  .button-variant(@btn-info-color; @btn-info-bg; @btn-info-border);\n}\n// Warning appears as orange\n.btn-warning {\n  .button-variant(@btn-warning-color; @btn-warning-bg; @btn-warning-border);\n}\n// Danger and error appear as red\n.btn-danger {\n  
.button-variant(@btn-danger-color; @btn-danger-bg; @btn-danger-border);\n}\n\n\n// Link buttons\n// -------------------------\n\n// Make a button look and behave like a link\n.btn-link {\n  color: @link-color;\n  font-weight: normal;\n  cursor: pointer;\n  border-radius: 0;\n\n  &,\n  &:active,\n  &[disabled],\n  fieldset[disabled] & {\n    background-color: transparent;\n    .box-shadow(none);\n  }\n  &,\n  &:hover,\n  &:focus,\n  &:active {\n    border-color: transparent;\n  }\n  &:hover,\n  &:focus {\n    color: @link-hover-color;\n    text-decoration: underline;\n    background-color: transparent;\n  }\n  &[disabled],\n  fieldset[disabled] & {\n    &:hover,\n    &:focus {\n      color: @btn-link-disabled-color;\n      text-decoration: none;\n    }\n  }\n}\n\n\n// Button Sizes\n// --------------------------------------------------\n\n.btn-lg {\n  // line-height: ensure even-numbered height of button next to large input\n  .button-size(@padding-large-vertical; @padding-large-horizontal; @font-size-large; @line-height-large; @border-radius-large);\n}\n.btn-sm {\n  // line-height: ensure proper height of button next to small input\n  .button-size(@padding-small-vertical; @padding-small-horizontal; @font-size-small; @line-height-small; @border-radius-small);\n}\n.btn-xs {\n  .button-size(@padding-xs-vertical; @padding-xs-horizontal; @font-size-small; @line-height-small; @border-radius-small);\n}\n\n\n// Block button\n// --------------------------------------------------\n\n.btn-block {\n  display: block;\n  width: 100%;\n  padding-left: 0;\n  padding-right: 0;\n}\n\n// Vertically space out multiple block buttons\n.btn-block + .btn-block {\n  margin-top: 5px;\n}\n\n// Specificity overrides\ninput[type=\"submit\"],\ninput[type=\"reset\"],\ninput[type=\"button\"] {\n  &.btn-block {\n    width: 100%;\n  }\n}\n","//\n// Component animations\n// --------------------------------------------------\n\n// Heads up!\n//\n// We don't use the `.opacity()` mixin here since it 
causes a bug with text\n// fields in IE7-8. Source: https://github.com/twitter/bootstrap/pull/3552.\n\n.fade {\n  opacity: 0;\n  .transition(opacity .15s linear);\n  &.in {\n    opacity: 1;\n  }\n}\n\n.collapse {\n  display: none;\n  &.in {\n    display: block;\n  }\n}\n.collapsing {\n  position: relative;\n  height: 0;\n  overflow: hidden;\n  .transition(height .35s ease);\n}\n","//\n// Glyphicons for Bootstrap\n//\n// Since icons are fonts, they can be placed anywhere text is placed and are\n// thus automatically sized to match the surrounding child. To use, create an\n// inline element with the appropriate classes, like so:\n//\n// <a href=\"#\"><span class=\"glyphicon glyphicon-star\"></span> Star</a>\n\n// Import the fonts\n@font-face {\n  font-family: 'Glyphicons Halflings';\n  src: ~\"url('@{icon-font-path}@{icon-font-name}.eot')\";\n  src: ~\"url('@{icon-font-path}@{icon-font-name}.eot?#iefix') format('embedded-opentype')\",\n       ~\"url('@{icon-font-path}@{icon-font-name}.woff') format('woff')\",\n       ~\"url('@{icon-font-path}@{icon-font-name}.ttf') format('truetype')\",\n       ~\"url('@{icon-font-path}@{icon-font-name}.svg#@{icon-font-svg-id}') format('svg')\";\n}\n\n// Catchall baseclass\n.glyphicon {\n  position: relative;\n  top: 1px;\n  display: inline-block;\n  font-family: 'Glyphicons Halflings';\n  font-style: normal;\n  font-weight: normal;\n  line-height: 1;\n  -webkit-font-smoothing: antialiased;\n  -moz-osx-font-smoothing: grayscale;\n}\n\n// Individual icons\n.glyphicon-asterisk               { &:before { content: \"\\2a\"; } }\n.glyphicon-plus                   { &:before { content: \"\\2b\"; } }\n.glyphicon-euro                   { &:before { content: \"\\20ac\"; } }\n.glyphicon-minus                  { &:before { content: \"\\2212\"; } }\n.glyphicon-cloud                  { &:before { content: \"\\2601\"; } }\n.glyphicon-envelope               { &:before { content: \"\\2709\"; } }\n.glyphicon-pencil                 { &:before { 
content: \"\\270f\"; } }\n.glyphicon-glass                  { &:before { content: \"\\e001\"; } }\n.glyphicon-music                  { &:before { content: \"\\e002\"; } }\n.glyphicon-search                 { &:before { content: \"\\e003\"; } }\n.glyphicon-heart                  { &:before { content: \"\\e005\"; } }\n.glyphicon-star                   { &:before { content: \"\\e006\"; } }\n.glyphicon-star-empty             { &:before { content: \"\\e007\"; } }\n.glyphicon-user                   { &:before { content: \"\\e008\"; } }\n.glyphicon-film                   { &:before { content: \"\\e009\"; } }\n.glyphicon-th-large               { &:before { content: \"\\e010\"; } }\n.glyphicon-th                     { &:before { content: \"\\e011\"; } }\n.glyphicon-th-list                { &:before { content: \"\\e012\"; } }\n.glyphicon-ok                     { &:before { content: \"\\e013\"; } }\n.glyphicon-remove                 { &:before { content: \"\\e014\"; } }\n.glyphicon-zoom-in                { &:before { content: \"\\e015\"; } }\n.glyphicon-zoom-out               { &:before { content: \"\\e016\"; } }\n.glyphicon-off                    { &:before { content: \"\\e017\"; } }\n.glyphicon-signal                 { &:before { content: \"\\e018\"; } }\n.glyphicon-cog                    { &:before { content: \"\\e019\"; } }\n.glyphicon-trash                  { &:before { content: \"\\e020\"; } }\n.glyphicon-home                   { &:before { content: \"\\e021\"; } }\n.glyphicon-file                   { &:before { content: \"\\e022\"; } }\n.glyphicon-time                   { &:before { content: \"\\e023\"; } }\n.glyphicon-road                   { &:before { content: \"\\e024\"; } }\n.glyphicon-download-alt           { &:before { content: \"\\e025\"; } }\n.glyphicon-download               { &:before { content: \"\\e026\"; } }\n.glyphicon-upload                 { &:before { content: \"\\e027\"; } }\n.glyphicon-inbox                  { &:before { content: \"\\e028\"; } 
}\n.glyphicon-play-circle            { &:before { content: \"\\e029\"; } }\n.glyphicon-repeat                 { &:before { content: \"\\e030\"; } }\n.glyphicon-refresh                { &:before { content: \"\\e031\"; } }\n.glyphicon-list-alt               { &:before { content: \"\\e032\"; } }\n.glyphicon-lock                   { &:before { content: \"\\e033\"; } }\n.glyphicon-flag                   { &:before { content: \"\\e034\"; } }\n.glyphicon-headphones             { &:before { content: \"\\e035\"; } }\n.glyphicon-volume-off             { &:before { content: \"\\e036\"; } }\n.glyphicon-volume-down            { &:before { content: \"\\e037\"; } }\n.glyphicon-volume-up              { &:before { content: \"\\e038\"; } }\n.glyphicon-qrcode                 { &:before { content: \"\\e039\"; } }\n.glyphicon-barcode                { &:before { content: \"\\e040\"; } }\n.glyphicon-tag                    { &:before { content: \"\\e041\"; } }\n.glyphicon-tags                   { &:before { content: \"\\e042\"; } }\n.glyphicon-book                   { &:before { content: \"\\e043\"; } }\n.glyphicon-bookmark               { &:before { content: \"\\e044\"; } }\n.glyphicon-print                  { &:before { content: \"\\e045\"; } }\n.glyphicon-camera                 { &:before { content: \"\\e046\"; } }\n.glyphicon-font                   { &:before { content: \"\\e047\"; } }\n.glyphicon-bold                   { &:before { content: \"\\e048\"; } }\n.glyphicon-italic                 { &:before { content: \"\\e049\"; } }\n.glyphicon-text-height            { &:before { content: \"\\e050\"; } }\n.glyphicon-text-width             { &:before { content: \"\\e051\"; } }\n.glyphicon-align-left             { &:before { content: \"\\e052\"; } }\n.glyphicon-align-center           { &:before { content: \"\\e053\"; } }\n.glyphicon-align-right            { &:before { content: \"\\e054\"; } }\n.glyphicon-align-justify          { &:before { content: \"\\e055\"; } }\n.glyphicon-list           
        { &:before { content: \"\\e056\"; } }\n.glyphicon-indent-left            { &:before { content: \"\\e057\"; } }\n.glyphicon-indent-right           { &:before { content: \"\\e058\"; } }\n.glyphicon-facetime-video         { &:before { content: \"\\e059\"; } }\n.glyphicon-picture                { &:before { content: \"\\e060\"; } }\n.glyphicon-map-marker             { &:before { content: \"\\e062\"; } }\n.glyphicon-adjust                 { &:before { content: \"\\e063\"; } }\n.glyphicon-tint                   { &:before { content: \"\\e064\"; } }\n.glyphicon-edit                   { &:before { content: \"\\e065\"; } }\n.glyphicon-share                  { &:before { content: \"\\e066\"; } }\n.glyphicon-check                  { &:before { content: \"\\e067\"; } }\n.glyphicon-move                   { &:before { content: \"\\e068\"; } }\n.glyphicon-step-backward          { &:before { content: \"\\e069\"; } }\n.glyphicon-fast-backward          { &:before { content: \"\\e070\"; } }\n.glyphicon-backward               { &:before { content: \"\\e071\"; } }\n.glyphicon-play                   { &:before { content: \"\\e072\"; } }\n.glyphicon-pause                  { &:before { content: \"\\e073\"; } }\n.glyphicon-stop                   { &:before { content: \"\\e074\"; } }\n.glyphicon-forward                { &:before { content: \"\\e075\"; } }\n.glyphicon-fast-forward           { &:before { content: \"\\e076\"; } }\n.glyphicon-step-forward           { &:before { content: \"\\e077\"; } }\n.glyphicon-eject                  { &:before { content: \"\\e078\"; } }\n.glyphicon-chevron-left           { &:before { content: \"\\e079\"; } }\n.glyphicon-chevron-right          { &:before { content: \"\\e080\"; } }\n.glyphicon-plus-sign              { &:before { content: \"\\e081\"; } }\n.glyphicon-minus-sign             { &:before { content: \"\\e082\"; } }\n.glyphicon-remove-sign            { &:before { content: \"\\e083\"; } }\n.glyphicon-ok-sign                { &:before { 
content: \"\\e084\"; } }\n.glyphicon-question-sign          { &:before { content: \"\\e085\"; } }\n.glyphicon-info-sign              { &:before { content: \"\\e086\"; } }\n.glyphicon-screenshot             { &:before { content: \"\\e087\"; } }\n.glyphicon-remove-circle          { &:before { content: \"\\e088\"; } }\n.glyphicon-ok-circle              { &:before { content: \"\\e089\"; } }\n.glyphicon-ban-circle             { &:before { content: \"\\e090\"; } }\n.glyphicon-arrow-left             { &:before { content: \"\\e091\"; } }\n.glyphicon-arrow-right            { &:before { content: \"\\e092\"; } }\n.glyphicon-arrow-up               { &:before { content: \"\\e093\"; } }\n.glyphicon-arrow-down             { &:before { content: \"\\e094\"; } }\n.glyphicon-share-alt              { &:before { content: \"\\e095\"; } }\n.glyphicon-resize-full            { &:before { content: \"\\e096\"; } }\n.glyphicon-resize-small           { &:before { content: \"\\e097\"; } }\n.glyphicon-exclamation-sign       { &:before { content: \"\\e101\"; } }\n.glyphicon-gift                   { &:before { content: \"\\e102\"; } }\n.glyphicon-leaf                   { &:before { content: \"\\e103\"; } }\n.glyphicon-fire                   { &:before { content: \"\\e104\"; } }\n.glyphicon-eye-open               { &:before { content: \"\\e105\"; } }\n.glyphicon-eye-close              { &:before { content: \"\\e106\"; } }\n.glyphicon-warning-sign           { &:before { content: \"\\e107\"; } }\n.glyphicon-plane                  { &:before { content: \"\\e108\"; } }\n.glyphicon-calendar               { &:before { content: \"\\e109\"; } }\n.glyphicon-random                 { &:before { content: \"\\e110\"; } }\n.glyphicon-comment                { &:before { content: \"\\e111\"; } }\n.glyphicon-magnet                 { &:before { content: \"\\e112\"; } }\n.glyphicon-chevron-up             { &:before { content: \"\\e113\"; } }\n.glyphicon-chevron-down           { &:before { content: \"\\e114\"; } 
}\n.glyphicon-retweet                { &:before { content: \"\\e115\"; } }\n.glyphicon-shopping-cart          { &:before { content: \"\\e116\"; } }\n.glyphicon-folder-close           { &:before { content: \"\\e117\"; } }\n.glyphicon-folder-open            { &:before { content: \"\\e118\"; } }\n.glyphicon-resize-vertical        { &:before { content: \"\\e119\"; } }\n.glyphicon-resize-horizontal      { &:before { content: \"\\e120\"; } }\n.glyphicon-hdd                    { &:before { content: \"\\e121\"; } }\n.glyphicon-bullhorn               { &:before { content: \"\\e122\"; } }\n.glyphicon-bell                   { &:before { content: \"\\e123\"; } }\n.glyphicon-certificate            { &:before { content: \"\\e124\"; } }\n.glyphicon-thumbs-up              { &:before { content: \"\\e125\"; } }\n.glyphicon-thumbs-down            { &:before { content: \"\\e126\"; } }\n.glyphicon-hand-right             { &:before { content: \"\\e127\"; } }\n.glyphicon-hand-left              { &:before { content: \"\\e128\"; } }\n.glyphicon-hand-up                { &:before { content: \"\\e129\"; } }\n.glyphicon-hand-down              { &:before { content: \"\\e130\"; } }\n.glyphicon-circle-arrow-right     { &:before { content: \"\\e131\"; } }\n.glyphicon-circle-arrow-left      { &:before { content: \"\\e132\"; } }\n.glyphicon-circle-arrow-up        { &:before { content: \"\\e133\"; } }\n.glyphicon-circle-arrow-down      { &:before { content: \"\\e134\"; } }\n.glyphicon-globe                  { &:before { content: \"\\e135\"; } }\n.glyphicon-wrench                 { &:before { content: \"\\e136\"; } }\n.glyphicon-tasks                  { &:before { content: \"\\e137\"; } }\n.glyphicon-filter                 { &:before { content: \"\\e138\"; } }\n.glyphicon-briefcase              { &:before { content: \"\\e139\"; } }\n.glyphicon-fullscreen             { &:before { content: \"\\e140\"; } }\n.glyphicon-dashboard              { &:before { content: \"\\e141\"; } }\n.glyphicon-paperclip      
        { &:before { content: \"\\e142\"; } }\n.glyphicon-heart-empty            { &:before { content: \"\\e143\"; } }\n.glyphicon-link                   { &:before { content: \"\\e144\"; } }\n.glyphicon-phone                  { &:before { content: \"\\e145\"; } }\n.glyphicon-pushpin                { &:before { content: \"\\e146\"; } }\n.glyphicon-usd                    { &:before { content: \"\\e148\"; } }\n.glyphicon-gbp                    { &:before { content: \"\\e149\"; } }\n.glyphicon-sort                   { &:before { content: \"\\e150\"; } }\n.glyphicon-sort-by-alphabet       { &:before { content: \"\\e151\"; } }\n.glyphicon-sort-by-alphabet-alt   { &:before { content: \"\\e152\"; } }\n.glyphicon-sort-by-order          { &:before { content: \"\\e153\"; } }\n.glyphicon-sort-by-order-alt      { &:before { content: \"\\e154\"; } }\n.glyphicon-sort-by-attributes     { &:before { content: \"\\e155\"; } }\n.glyphicon-sort-by-attributes-alt { &:before { content: \"\\e156\"; } }\n.glyphicon-unchecked              { &:before { content: \"\\e157\"; } }\n.glyphicon-expand                 { &:before { content: \"\\e158\"; } }\n.glyphicon-collapse-down          { &:before { content: \"\\e159\"; } }\n.glyphicon-collapse-up            { &:before { content: \"\\e160\"; } }\n.glyphicon-log-in                 { &:before { content: \"\\e161\"; } }\n.glyphicon-flash                  { &:before { content: \"\\e162\"; } }\n.glyphicon-log-out                { &:before { content: \"\\e163\"; } }\n.glyphicon-new-window             { &:before { content: \"\\e164\"; } }\n.glyphicon-record                 { &:before { content: \"\\e165\"; } }\n.glyphicon-save                   { &:before { content: \"\\e166\"; } }\n.glyphicon-open                   { &:before { content: \"\\e167\"; } }\n.glyphicon-saved                  { &:before { content: \"\\e168\"; } }\n.glyphicon-import                 { &:before { content: \"\\e169\"; } }\n.glyphicon-export                 { &:before { 
content: \"\\e170\"; } }\n.glyphicon-send                   { &:before { content: \"\\e171\"; } }\n.glyphicon-floppy-disk            { &:before { content: \"\\e172\"; } }\n.glyphicon-floppy-saved           { &:before { content: \"\\e173\"; } }\n.glyphicon-floppy-remove          { &:before { content: \"\\e174\"; } }\n.glyphicon-floppy-save            { &:before { content: \"\\e175\"; } }\n.glyphicon-floppy-open            { &:before { content: \"\\e176\"; } }\n.glyphicon-credit-card            { &:before { content: \"\\e177\"; } }\n.glyphicon-transfer               { &:before { content: \"\\e178\"; } }\n.glyphicon-cutlery                { &:before { content: \"\\e179\"; } }\n.glyphicon-header                 { &:before { content: \"\\e180\"; } }\n.glyphicon-compressed             { &:before { content: \"\\e181\"; } }\n.glyphicon-earphone               { &:before { content: \"\\e182\"; } }\n.glyphicon-phone-alt              { &:before { content: \"\\e183\"; } }\n.glyphicon-tower                  { &:before { content: \"\\e184\"; } }\n.glyphicon-stats                  { &:before { content: \"\\e185\"; } }\n.glyphicon-sd-video               { &:before { content: \"\\e186\"; } }\n.glyphicon-hd-video               { &:before { content: \"\\e187\"; } }\n.glyphicon-subtitles              { &:before { content: \"\\e188\"; } }\n.glyphicon-sound-stereo           { &:before { content: \"\\e189\"; } }\n.glyphicon-sound-dolby            { &:before { content: \"\\e190\"; } }\n.glyphicon-sound-5-1              { &:before { content: \"\\e191\"; } }\n.glyphicon-sound-6-1              { &:before { content: \"\\e192\"; } }\n.glyphicon-sound-7-1              { &:before { content: \"\\e193\"; } }\n.glyphicon-copyright-mark         { &:before { content: \"\\e194\"; } }\n.glyphicon-registration-mark      { &:before { content: \"\\e195\"; } }\n.glyphicon-cloud-download         { &:before { content: \"\\e197\"; } }\n.glyphicon-cloud-upload           { &:before { content: \"\\e198\"; } 
}\n.glyphicon-tree-conifer           { &:before { content: \"\\e199\"; } }\n.glyphicon-tree-deciduous         { &:before { content: \"\\e200\"; } }\n","//\n// Dropdown menus\n// --------------------------------------------------\n\n\n// Dropdown arrow/caret\n.caret {\n  display: inline-block;\n  width: 0;\n  height: 0;\n  margin-left: 2px;\n  vertical-align: middle;\n  border-top:   @caret-width-base solid;\n  border-right: @caret-width-base solid transparent;\n  border-left:  @caret-width-base solid transparent;\n}\n\n// The dropdown wrapper (div)\n.dropdown {\n  position: relative;\n}\n\n// Prevent the focus on the dropdown toggle when closing dropdowns\n.dropdown-toggle:focus {\n  outline: 0;\n}\n\n// The dropdown menu (ul)\n.dropdown-menu {\n  position: absolute;\n  top: 100%;\n  left: 0;\n  z-index: @zindex-dropdown;\n  display: none; // none by default, but block on \"open\" of the menu\n  float: left;\n  min-width: 160px;\n  padding: 5px 0;\n  margin: 2px 0 0; // override default ul\n  list-style: none;\n  font-size: @font-size-base;\n  background-color: @dropdown-bg;\n  border: 1px solid @dropdown-fallback-border; // IE8 fallback\n  border: 1px solid @dropdown-border;\n  border-radius: @border-radius-base;\n  .box-shadow(0 6px 12px rgba(0,0,0,.175));\n  background-clip: padding-box;\n\n  // Aligns the dropdown menu to right\n  //\n  // Deprecated as of 3.1.0 in favor of `.dropdown-menu-[dir]`\n  &.pull-right {\n    right: 0;\n    left: auto;\n  }\n\n  // Dividers (basically an hr) within the dropdown\n  .divider {\n    .nav-divider(@dropdown-divider-bg);\n  }\n\n  // Links within the dropdown menu\n  > li > a {\n    display: block;\n    padding: 3px 20px;\n    clear: both;\n    font-weight: normal;\n    line-height: @line-height-base;\n    color: @dropdown-link-color;\n    white-space: nowrap; // prevent links from randomly breaking onto new lines\n  }\n}\n\n// Hover/Focus state\n.dropdown-menu > li > a {\n  &:hover,\n  &:focus {\n    text-decoration: 
none;\n    color: @dropdown-link-hover-color;\n    background-color: @dropdown-link-hover-bg;\n  }\n}\n\n// Active state\n.dropdown-menu > .active > a {\n  &,\n  &:hover,\n  &:focus {\n    color: @dropdown-link-active-color;\n    text-decoration: none;\n    outline: 0;\n    background-color: @dropdown-link-active-bg;\n  }\n}\n\n// Disabled state\n//\n// Gray out text and ensure the hover/focus state remains gray\n\n.dropdown-menu > .disabled > a {\n  &,\n  &:hover,\n  &:focus {\n    color: @dropdown-link-disabled-color;\n  }\n}\n// Nuke hover/focus effects\n.dropdown-menu > .disabled > a {\n  &:hover,\n  &:focus {\n    text-decoration: none;\n    background-color: transparent;\n    background-image: none; // Remove CSS gradient\n    .reset-filter();\n    cursor: not-allowed;\n  }\n}\n\n// Open state for the dropdown\n.open {\n  // Show the menu\n  > .dropdown-menu {\n    display: block;\n  }\n\n  // Remove the outline when :focus is triggered\n  > a {\n    outline: 0;\n  }\n}\n\n// Menu positioning\n//\n// Add extra class to `.dropdown-menu` to flip the alignment of the dropdown\n// menu with the parent.\n.dropdown-menu-right {\n  left: auto; // Reset the default from `.dropdown-menu`\n  right: 0;\n}\n// With v3, we enabled auto-flipping if you have a dropdown within a right\n// aligned nav component. 
To enable the undoing of that, we provide an override\n// to restore the default dropdown menu alignment.\n//\n// This is only for left-aligning a dropdown menu within a `.navbar-right` or\n// `.pull-right` nav component.\n.dropdown-menu-left {\n  left: 0;\n  right: auto;\n}\n\n// Dropdown section headers\n.dropdown-header {\n  display: block;\n  padding: 3px 20px;\n  font-size: @font-size-small;\n  line-height: @line-height-base;\n  color: @dropdown-header-color;\n}\n\n// Backdrop to catch body clicks on mobile, etc.\n.dropdown-backdrop {\n  position: fixed;\n  left: 0;\n  right: 0;\n  bottom: 0;\n  top: 0;\n  z-index: (@zindex-dropdown - 10);\n}\n\n// Right aligned dropdowns\n.pull-right > .dropdown-menu {\n  right: 0;\n  left: auto;\n}\n\n// Allow for dropdowns to go bottom up (aka, dropup-menu)\n//\n// Just add .dropup after the standard .dropdown class and you're set, bro.\n// TODO: abstract this so that the navbar fixed styles are not placed here?\n\n.dropup,\n.navbar-fixed-bottom .dropdown {\n  // Reverse the caret\n  .caret {\n    border-top: 0;\n    border-bottom: @caret-width-base solid;\n    content: \"\";\n  }\n  // Different positioning for bottom up menu\n  .dropdown-menu {\n    top: auto;\n    bottom: 100%;\n    margin-bottom: 1px;\n  }\n}\n\n\n// Component alignment\n//\n// Reiterate per navbar.less and the modified component alignment there.\n\n@media (min-width: @grid-float-breakpoint) {\n  .navbar-right {\n    .dropdown-menu {\n      .dropdown-menu-right();\n    }\n    // Necessary for overrides of the default right aligned menu.\n    // Will remove come v4 in all likelihood.\n    .dropdown-menu-left {\n      .dropdown-menu-left();\n    }\n  }\n}\n\n","//\n// Button groups\n// --------------------------------------------------\n\n// Make the div behave like a button\n.btn-group,\n.btn-group-vertical {\n  position: relative;\n  display: inline-block;\n  vertical-align: middle; // match .btn alignment given font-size hack above\n  > .btn {\n    
position: relative;\n    float: left;\n    // Bring the \"active\" button to the front\n    &:hover,\n    &:focus,\n    &:active,\n    &.active {\n      z-index: 2;\n    }\n    &:focus {\n      // Remove focus outline when dropdown JS adds it after closing the menu\n      outline: none;\n    }\n  }\n}\n\n// Prevent double borders when buttons are next to each other\n.btn-group {\n  .btn + .btn,\n  .btn + .btn-group,\n  .btn-group + .btn,\n  .btn-group + .btn-group {\n    margin-left: -1px;\n  }\n}\n\n// Optional: Group multiple button groups together for a toolbar\n.btn-toolbar {\n  margin-left: -5px; // Offset the first child's margin\n  &:extend(.clearfix all);\n\n  .btn-group,\n  .input-group {\n    float: left;\n  }\n  > .btn,\n  > .btn-group,\n  > .input-group {\n    margin-left: 5px;\n  }\n}\n\n.btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) {\n  border-radius: 0;\n}\n\n// Set corners individual because sometimes a single button can be in a .btn-group and we need :first-child and :last-child to both match\n.btn-group > .btn:first-child {\n  margin-left: 0;\n  &:not(:last-child):not(.dropdown-toggle) {\n    .border-right-radius(0);\n  }\n}\n// Need .dropdown-toggle since :last-child doesn't apply given a .dropdown-menu immediately after it\n.btn-group > .btn:last-child:not(:first-child),\n.btn-group > .dropdown-toggle:not(:first-child) {\n  .border-left-radius(0);\n}\n\n// Custom edits for including btn-groups within btn-groups (useful for including dropdown buttons within a btn-group)\n.btn-group > .btn-group {\n  float: left;\n}\n.btn-group > .btn-group:not(:first-child):not(:last-child) > .btn {\n  border-radius: 0;\n}\n.btn-group > .btn-group:first-child {\n  > .btn:last-child,\n  > .dropdown-toggle {\n    .border-right-radius(0);\n  }\n}\n.btn-group > .btn-group:last-child > .btn:first-child {\n  .border-left-radius(0);\n}\n\n// On active and open, don't show outline\n.btn-group .dropdown-toggle:active,\n.btn-group.open 
.dropdown-toggle {\n  outline: 0;\n}\n\n\n// Sizing\n//\n// Remix the default button sizing classes into new ones for easier manipulation.\n\n.btn-group-xs > .btn { .btn-xs(); }\n.btn-group-sm > .btn { .btn-sm(); }\n.btn-group-lg > .btn { .btn-lg(); }\n\n\n// Split button dropdowns\n// ----------------------\n\n// Give the line between buttons some depth\n.btn-group > .btn + .dropdown-toggle {\n  padding-left: 8px;\n  padding-right: 8px;\n}\n.btn-group > .btn-lg + .dropdown-toggle {\n  padding-left: 12px;\n  padding-right: 12px;\n}\n\n// The clickable button for toggling the menu\n// Remove the gradient and set the same inset shadow as the :active state\n.btn-group.open .dropdown-toggle {\n  .box-shadow(inset 0 3px 5px rgba(0,0,0,.125));\n\n  // Show no shadow for `.btn-link` since it has no other button styles.\n  &.btn-link {\n    .box-shadow(none);\n  }\n}\n\n\n// Reposition the caret\n.btn .caret {\n  margin-left: 0;\n}\n// Carets in other button sizes\n.btn-lg .caret {\n  border-width: @caret-width-large @caret-width-large 0;\n  border-bottom-width: 0;\n}\n// Upside down carets for .dropup\n.dropup .btn-lg .caret {\n  border-width: 0 @caret-width-large @caret-width-large;\n}\n\n\n// Vertical button groups\n// ----------------------\n\n.btn-group-vertical {\n  > .btn,\n  > .btn-group,\n  > .btn-group > .btn {\n    display: block;\n    float: none;\n    width: 100%;\n    max-width: 100%;\n  }\n\n  // Clear floats so dropdown menus can be properly placed\n  > .btn-group {\n    &:extend(.clearfix all);\n    > .btn {\n      float: none;\n    }\n  }\n\n  > .btn + .btn,\n  > .btn + .btn-group,\n  > .btn-group + .btn,\n  > .btn-group + .btn-group {\n    margin-top: -1px;\n    margin-left: 0;\n  }\n}\n\n.btn-group-vertical > .btn {\n  &:not(:first-child):not(:last-child) {\n    border-radius: 0;\n  }\n  &:first-child:not(:last-child) {\n    border-top-right-radius: @border-radius-base;\n    .border-bottom-radius(0);\n  }\n  &:last-child:not(:first-child) {\n    
border-bottom-left-radius: @border-radius-base;\n    .border-top-radius(0);\n  }\n}\n.btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn {\n  border-radius: 0;\n}\n.btn-group-vertical > .btn-group:first-child:not(:last-child) {\n  > .btn:last-child,\n  > .dropdown-toggle {\n    .border-bottom-radius(0);\n  }\n}\n.btn-group-vertical > .btn-group:last-child:not(:first-child) > .btn:first-child {\n  .border-top-radius(0);\n}\n\n\n\n// Justified button groups\n// ----------------------\n\n.btn-group-justified {\n  display: table;\n  width: 100%;\n  table-layout: fixed;\n  border-collapse: separate;\n  > .btn,\n  > .btn-group {\n    float: none;\n    display: table-cell;\n    width: 1%;\n  }\n  > .btn-group .btn {\n    width: 100%;\n  }\n}\n\n\n// Checkbox and radio options\n[data-toggle=\"buttons\"] > .btn > input[type=\"radio\"],\n[data-toggle=\"buttons\"] > .btn > input[type=\"checkbox\"] {\n  display: none;\n}\n","//\n// Input groups\n// --------------------------------------------------\n\n// Base styles\n// -------------------------\n.input-group {\n  position: relative; // For dropdowns\n  display: table;\n  border-collapse: separate; // prevent input groups from inheriting border styles from table cells when placed within a table\n\n  // Undo padding and float of grid classes\n  &[class*=\"col-\"] {\n    float: none;\n    padding-left: 0;\n    padding-right: 0;\n  }\n\n  .form-control {\n    // IE9 fubars the placeholder attribute in text inputs and the arrows on\n    // select elements in input groups. To fix it, we float the input. 
Details:\n    // https://github.com/twbs/bootstrap/issues/11561#issuecomment-28936855\n    float: left;\n\n    width: 100%;\n    margin-bottom: 0;\n  }\n}\n\n// Sizing options\n//\n// Remix the default form control sizing classes into new ones for easier\n// manipulation.\n\n.input-group-lg > .form-control,\n.input-group-lg > .input-group-addon,\n.input-group-lg > .input-group-btn > .btn { .input-lg(); }\n.input-group-sm > .form-control,\n.input-group-sm > .input-group-addon,\n.input-group-sm > .input-group-btn > .btn { .input-sm(); }\n\n\n// Display as table-cell\n// -------------------------\n.input-group-addon,\n.input-group-btn,\n.input-group .form-control {\n  display: table-cell;\n\n  &:not(:first-child):not(:last-child) {\n    border-radius: 0;\n  }\n}\n// Addon and addon wrapper for buttons\n.input-group-addon,\n.input-group-btn {\n  width: 1%;\n  white-space: nowrap;\n  vertical-align: middle; // Match the inputs\n}\n\n// Text input groups\n// -------------------------\n.input-group-addon {\n  padding: @padding-base-vertical @padding-base-horizontal;\n  font-size: @font-size-base;\n  font-weight: normal;\n  line-height: 1;\n  color: @input-color;\n  text-align: center;\n  background-color: @input-group-addon-bg;\n  border: 1px solid @input-group-addon-border-color;\n  border-radius: @border-radius-base;\n\n  // Sizing\n  &.input-sm {\n    padding: @padding-small-vertical @padding-small-horizontal;\n    font-size: @font-size-small;\n    border-radius: @border-radius-small;\n  }\n  &.input-lg {\n    padding: @padding-large-vertical @padding-large-horizontal;\n    font-size: @font-size-large;\n    border-radius: @border-radius-large;\n  }\n\n  // Nuke default margins from checkboxes and radios to vertically center within.\n  input[type=\"radio\"],\n  input[type=\"checkbox\"] {\n    margin-top: 0;\n  }\n}\n\n// Reset rounded corners\n.input-group .form-control:first-child,\n.input-group-addon:first-child,\n.input-group-btn:first-child > 
.btn,\n.input-group-btn:first-child > .btn-group > .btn,\n.input-group-btn:first-child > .dropdown-toggle,\n.input-group-btn:last-child > .btn:not(:last-child):not(.dropdown-toggle),\n.input-group-btn:last-child > .btn-group:not(:last-child) > .btn {\n  .border-right-radius(0);\n}\n.input-group-addon:first-child {\n  border-right: 0;\n}\n.input-group .form-control:last-child,\n.input-group-addon:last-child,\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group > .btn,\n.input-group-btn:last-child > .dropdown-toggle,\n.input-group-btn:first-child > .btn:not(:first-child),\n.input-group-btn:first-child > .btn-group:not(:first-child) > .btn {\n  .border-left-radius(0);\n}\n.input-group-addon:last-child {\n  border-left: 0;\n}\n\n// Button input groups\n// -------------------------\n.input-group-btn {\n  position: relative;\n  // Jankily prevent input button groups from wrapping with `white-space` and\n  // `font-size` in combination with `inline-block` on buttons.\n  font-size: 0;\n  white-space: nowrap;\n\n  // Negative margin for spacing, position for bringing hovered/focused/actived\n  // element above the siblings.\n  > .btn {\n    position: relative;\n    + .btn {\n      margin-left: -1px;\n    }\n    // Bring the \"active\" button to the front\n    &:hover,\n    &:focus,\n    &:active {\n      z-index: 2;\n    }\n  }\n\n  // Negative margin to only have a 1px border between the two\n  &:first-child {\n    > .btn,\n    > .btn-group {\n      margin-right: -1px;\n    }\n  }\n  &:last-child {\n    > .btn,\n    > .btn-group {\n      margin-left: -1px;\n    }\n  }\n}\n","//\n// Navs\n// --------------------------------------------------\n\n\n// Base class\n// --------------------------------------------------\n\n.nav {\n  margin-bottom: 0;\n  padding-left: 0; // Override default ul/ol\n  list-style: none;\n  &:extend(.clearfix all);\n\n  > li {\n    position: relative;\n    display: block;\n\n    > a {\n      position: relative;\n      
display: block;\n      padding: @nav-link-padding;\n      &:hover,\n      &:focus {\n        text-decoration: none;\n        background-color: @nav-link-hover-bg;\n      }\n    }\n\n    // Disabled state sets text to gray and nukes hover/tab effects\n    &.disabled > a {\n      color: @nav-disabled-link-color;\n\n      &:hover,\n      &:focus {\n        color: @nav-disabled-link-hover-color;\n        text-decoration: none;\n        background-color: transparent;\n        cursor: not-allowed;\n      }\n    }\n  }\n\n  // Open dropdowns\n  .open > a {\n    &,\n    &:hover,\n    &:focus {\n      background-color: @nav-link-hover-bg;\n      border-color: @link-color;\n    }\n  }\n\n  // Nav dividers (deprecated with v3.0.1)\n  //\n  // This should have been removed in v3 with the dropping of `.nav-list`, but\n  // we missed it. We don't currently support this anywhere, but in the interest\n  // of maintaining backward compatibility in case you use it, it's deprecated.\n  .nav-divider {\n    .nav-divider();\n  }\n\n  // Prevent IE8 from misplacing imgs\n  //\n  // See https://github.com/h5bp/html5-boilerplate/issues/984#issuecomment-3985989\n  > li > a > img {\n    max-width: none;\n  }\n}\n\n\n// Tabs\n// -------------------------\n\n// Give the tabs something to sit on\n.nav-tabs {\n  border-bottom: 1px solid @nav-tabs-border-color;\n  > li {\n    float: left;\n    // Make the list-items overlay the bottom border\n    margin-bottom: -1px;\n\n    // Actual tabs (as links)\n    > a {\n      margin-right: 2px;\n      line-height: @line-height-base;\n      border: 1px solid transparent;\n      border-radius: @border-radius-base @border-radius-base 0 0;\n      &:hover {\n        border-color: @nav-tabs-link-hover-border-color @nav-tabs-link-hover-border-color @nav-tabs-border-color;\n      }\n    }\n\n    // Active state, and its :hover to override normal :hover\n    &.active > a {\n      &,\n      &:hover,\n      &:focus {\n        color: 
@nav-tabs-active-link-hover-color;\n        background-color: @nav-tabs-active-link-hover-bg;\n        border: 1px solid @nav-tabs-active-link-hover-border-color;\n        border-bottom-color: transparent;\n        cursor: default;\n      }\n    }\n  }\n  // pulling this in mainly for less shorthand\n  &.nav-justified {\n    .nav-justified();\n    .nav-tabs-justified();\n  }\n}\n\n\n// Pills\n// -------------------------\n.nav-pills {\n  > li {\n    float: left;\n\n    // Links rendered as pills\n    > a {\n      border-radius: @nav-pills-border-radius;\n    }\n    + li {\n      margin-left: 2px;\n    }\n\n    // Active state\n    &.active > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @nav-pills-active-link-hover-color;\n        background-color: @nav-pills-active-link-hover-bg;\n      }\n    }\n  }\n}\n\n\n// Stacked pills\n.nav-stacked {\n  > li {\n    float: none;\n    + li {\n      margin-top: 2px;\n      margin-left: 0; // no need for this gap between nav items\n    }\n  }\n}\n\n\n// Nav variations\n// --------------------------------------------------\n\n// Justified nav links\n// -------------------------\n\n.nav-justified {\n  width: 100%;\n\n  > li {\n    float: none;\n     > a {\n      text-align: center;\n      margin-bottom: 5px;\n    }\n  }\n\n  > .dropdown .dropdown-menu {\n    top: auto;\n    left: auto;\n  }\n\n  @media (min-width: @screen-sm-min) {\n    > li {\n      display: table-cell;\n      width: 1%;\n      > a {\n        margin-bottom: 0;\n      }\n    }\n  }\n}\n\n// Move borders to anchors instead of bottom of list\n//\n// Mixin for adding on top the shared `.nav-justified` styles for our tabs\n.nav-tabs-justified {\n  border-bottom: 0;\n\n  > li > a {\n    // Override margin from .nav-tabs\n    margin-right: 0;\n    border-radius: @border-radius-base;\n  }\n\n  > .active > a,\n  > .active > a:hover,\n  > .active > a:focus {\n    border: 1px solid @nav-tabs-justified-link-border-color;\n  }\n\n  @media (min-width: 
@screen-sm-min) {\n    > li > a {\n      border-bottom: 1px solid @nav-tabs-justified-link-border-color;\n      border-radius: @border-radius-base @border-radius-base 0 0;\n    }\n    > .active > a,\n    > .active > a:hover,\n    > .active > a:focus {\n      border-bottom-color: @nav-tabs-justified-active-link-border-color;\n    }\n  }\n}\n\n\n// Tabbable tabs\n// -------------------------\n\n// Hide tabbable panes to start, show them when `.active`\n.tab-content {\n  > .tab-pane {\n    display: none;\n  }\n  > .active {\n    display: block;\n  }\n}\n\n\n// Dropdowns\n// -------------------------\n\n// Specific dropdowns\n.nav-tabs .dropdown-menu {\n  // make dropdown border overlap tab border\n  margin-top: -1px;\n  // Remove the top rounded corners here since there is a hard edge above the menu\n  .border-top-radius(0);\n}\n","//\n// Navbars\n// --------------------------------------------------\n\n\n// Wrapper and base class\n//\n// Provide a static navbar from which we expand to create full-width, fixed, and\n// other navbar variations.\n\n.navbar {\n  position: relative;\n  min-height: @navbar-height; // Ensure a navbar always shows (e.g., without a .navbar-brand in collapsed mode)\n  margin-bottom: @navbar-margin-bottom;\n  border: 1px solid transparent;\n\n  // Prevent floats from breaking the navbar\n  &:extend(.clearfix all);\n\n  @media (min-width: @grid-float-breakpoint) {\n    border-radius: @navbar-border-radius;\n  }\n}\n\n\n// Navbar heading\n//\n// Groups `.navbar-brand` and `.navbar-toggle` into a single component for easy\n// styling of responsive aspects.\n\n.navbar-header {\n  &:extend(.clearfix all);\n\n  @media (min-width: @grid-float-breakpoint) {\n    float: left;\n  }\n}\n\n\n// Navbar collapse (body)\n//\n// Group your navbar content into this for easy collapsing and expanding across\n// various device sizes. 
By default, this content is collapsed when <768px, but\n// will expand past that for a horizontal display.\n//\n// To start (on mobile devices) the navbar links, forms, and buttons are stacked\n// vertically and include a `max-height` to overflow in case you have too much\n// content for the user's viewport.\n\n.navbar-collapse {\n  max-height: @navbar-collapse-max-height;\n  overflow-x: visible;\n  padding-right: @navbar-padding-horizontal;\n  padding-left:  @navbar-padding-horizontal;\n  border-top: 1px solid transparent;\n  box-shadow: inset 0 1px 0 rgba(255,255,255,.1);\n  &:extend(.clearfix all);\n  -webkit-overflow-scrolling: touch;\n\n  &.in {\n    overflow-y: auto;\n  }\n\n  @media (min-width: @grid-float-breakpoint) {\n    width: auto;\n    border-top: 0;\n    box-shadow: none;\n\n    &.collapse {\n      display: block !important;\n      height: auto !important;\n      padding-bottom: 0; // Override default setting\n      overflow: visible !important;\n    }\n\n    &.in {\n      overflow-y: visible;\n    }\n\n    // Undo the collapse side padding for navbars with containers to ensure\n    // alignment of right-aligned contents.\n    .navbar-fixed-top &,\n    .navbar-static-top &,\n    .navbar-fixed-bottom & {\n      padding-left: 0;\n      padding-right: 0;\n    }\n  }\n}\n\n\n// Both navbar header and collapse\n//\n// When a container is present, change the behavior of the header and collapse.\n\n.container,\n.container-fluid {\n  > .navbar-header,\n  > .navbar-collapse {\n    margin-right: -@navbar-padding-horizontal;\n    margin-left:  -@navbar-padding-horizontal;\n\n    @media (min-width: @grid-float-breakpoint) {\n      margin-right: 0;\n      margin-left:  0;\n    }\n  }\n}\n\n\n//\n// Navbar alignment options\n//\n// Display the navbar across the entirety of the page or fixed it to the top or\n// bottom of the page.\n\n// Static top (unfixed, but 100% wide) navbar\n.navbar-static-top {\n  z-index: @zindex-navbar;\n  border-width: 0 0 1px;\n\n  
@media (min-width: @grid-float-breakpoint) {\n    border-radius: 0;\n  }\n}\n\n// Fix the top/bottom navbars when screen real estate supports it\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n  position: fixed;\n  right: 0;\n  left: 0;\n  z-index: @zindex-navbar-fixed;\n\n  // Undo the rounded corners\n  @media (min-width: @grid-float-breakpoint) {\n    border-radius: 0;\n  }\n}\n.navbar-fixed-top {\n  top: 0;\n  border-width: 0 0 1px;\n}\n.navbar-fixed-bottom {\n  bottom: 0;\n  margin-bottom: 0; // override .navbar defaults\n  border-width: 1px 0 0;\n}\n\n\n// Brand/project name\n\n.navbar-brand {\n  float: left;\n  padding: @navbar-padding-vertical @navbar-padding-horizontal;\n  font-size: @font-size-large;\n  line-height: @line-height-computed;\n  height: @line-height-computed;\n\n  &:hover,\n  &:focus {\n    text-decoration: none;\n  }\n\n  @media (min-width: @grid-float-breakpoint) {\n    .navbar > .container &,\n    .navbar > .container-fluid & {\n      margin-left: -@navbar-padding-horizontal;\n    }\n  }\n}\n\n\n// Navbar toggle\n//\n// Custom button for toggling the `.navbar-collapse`, powered by the collapse\n// JavaScript plugin.\n\n.navbar-toggle {\n  position: relative;\n  float: right;\n  margin-right: @navbar-padding-horizontal;\n  padding: 9px 10px;\n  .navbar-vertical-align(34px);\n  background-color: transparent;\n  background-image: none; // Reset unusual Firefox-on-Android default style; see https://github.com/necolas/normalize.css/issues/214\n  border: 1px solid transparent;\n  border-radius: @border-radius-base;\n\n  // We remove the `outline` here, but later compensate by attaching `:hover`\n  // styles to `:focus`.\n  &:focus {\n    outline: none;\n  }\n\n  // Bars\n  .icon-bar {\n    display: block;\n    width: 22px;\n    height: 2px;\n    border-radius: 1px;\n  }\n  .icon-bar + .icon-bar {\n    margin-top: 4px;\n  }\n\n  @media (min-width: @grid-float-breakpoint) {\n    display: none;\n  }\n}\n\n\n// Navbar nav links\n//\n// Builds on top of 
the `.nav` components with its own modifier class to make\n// the nav the full height of the horizontal nav (above 768px).\n\n.navbar-nav {\n  margin: (@navbar-padding-vertical / 2) -@navbar-padding-horizontal;\n\n  > li > a {\n    padding-top:    10px;\n    padding-bottom: 10px;\n    line-height: @line-height-computed;\n  }\n\n  @media (max-width: @grid-float-breakpoint-max) {\n    // Dropdowns get custom display when collapsed\n    .open .dropdown-menu {\n      position: static;\n      float: none;\n      width: auto;\n      margin-top: 0;\n      background-color: transparent;\n      border: 0;\n      box-shadow: none;\n      > li > a,\n      .dropdown-header {\n        padding: 5px 15px 5px 25px;\n      }\n      > li > a {\n        line-height: @line-height-computed;\n        &:hover,\n        &:focus {\n          background-image: none;\n        }\n      }\n    }\n  }\n\n  // Uncollapse the nav\n  @media (min-width: @grid-float-breakpoint) {\n    float: left;\n    margin: 0;\n\n    > li {\n      float: left;\n      > a {\n        padding-top:    @navbar-padding-vertical;\n        padding-bottom: @navbar-padding-vertical;\n      }\n    }\n\n    &.navbar-right:last-child {\n      margin-right: -@navbar-padding-horizontal;\n    }\n  }\n}\n\n\n// Component alignment\n//\n// Repurpose the pull utilities as their own navbar utilities to avoid specificity\n// issues with parents and chaining. 
Only do this when the navbar is uncollapsed\n// though so that navbar contents properly stack and align in mobile.\n\n@media (min-width: @grid-float-breakpoint) {\n  .navbar-left  { .pull-left(); }\n  .navbar-right { .pull-right(); }\n}\n\n\n// Navbar form\n//\n// Extension of the `.form-inline` with some extra flavor for optimum display in\n// our navbars.\n\n.navbar-form {\n  margin-left: -@navbar-padding-horizontal;\n  margin-right: -@navbar-padding-horizontal;\n  padding: 10px @navbar-padding-horizontal;\n  border-top: 1px solid transparent;\n  border-bottom: 1px solid transparent;\n  @shadow: inset 0 1px 0 rgba(255,255,255,.1), 0 1px 0 rgba(255,255,255,.1);\n  .box-shadow(@shadow);\n\n  // Mixin behavior for optimum display\n  .form-inline();\n\n  .form-group {\n    @media (max-width: @grid-float-breakpoint-max) {\n      margin-bottom: 5px;\n    }\n  }\n\n  // Vertically center in expanded, horizontal navbar\n  .navbar-vertical-align(@input-height-base);\n\n  // Undo 100% width for pull classes\n  @media (min-width: @grid-float-breakpoint) {\n    width: auto;\n    border: 0;\n    margin-left: 0;\n    margin-right: 0;\n    padding-top: 0;\n    padding-bottom: 0;\n    .box-shadow(none);\n\n    // Outdent the form if last child to line up with content down the page\n    &.navbar-right:last-child {\n      margin-right: -@navbar-padding-horizontal;\n    }\n  }\n}\n\n\n// Dropdown menus\n\n// Menu position and menu carets\n.navbar-nav > li > .dropdown-menu {\n  margin-top: 0;\n  .border-top-radius(0);\n}\n// Menu position and menu caret support for dropups via extra dropup class\n.navbar-fixed-bottom .navbar-nav > li > .dropdown-menu {\n  .border-bottom-radius(0);\n}\n\n\n// Buttons in navbars\n//\n// Vertically center a button within a navbar (when *not* in a form).\n\n.navbar-btn {\n  .navbar-vertical-align(@input-height-base);\n\n  &.btn-sm {\n    .navbar-vertical-align(@input-height-small);\n  }\n  &.btn-xs {\n    .navbar-vertical-align(22);\n  }\n}\n\n\n// Text 
in navbars\n//\n// Add a class to make any element properly align itself vertically within the navbars.\n\n.navbar-text {\n  .navbar-vertical-align(@line-height-computed);\n\n  @media (min-width: @grid-float-breakpoint) {\n    float: left;\n    margin-left: @navbar-padding-horizontal;\n    margin-right: @navbar-padding-horizontal;\n\n    // Outdent the form if last child to line up with content down the page\n    &.navbar-right:last-child {\n      margin-right: 0;\n    }\n  }\n}\n\n// Alternate navbars\n// --------------------------------------------------\n\n// Default navbar\n.navbar-default {\n  background-color: @navbar-default-bg;\n  border-color: @navbar-default-border;\n\n  .navbar-brand {\n    color: @navbar-default-brand-color;\n    &:hover,\n    &:focus {\n      color: @navbar-default-brand-hover-color;\n      background-color: @navbar-default-brand-hover-bg;\n    }\n  }\n\n  .navbar-text {\n    color: @navbar-default-color;\n  }\n\n  .navbar-nav {\n    > li > a {\n      color: @navbar-default-link-color;\n\n      &:hover,\n      &:focus {\n        color: @navbar-default-link-hover-color;\n        background-color: @navbar-default-link-hover-bg;\n      }\n    }\n    > .active > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @navbar-default-link-active-color;\n        background-color: @navbar-default-link-active-bg;\n      }\n    }\n    > .disabled > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @navbar-default-link-disabled-color;\n        background-color: @navbar-default-link-disabled-bg;\n      }\n    }\n  }\n\n  .navbar-toggle {\n    border-color: @navbar-default-toggle-border-color;\n    &:hover,\n    &:focus {\n      background-color: @navbar-default-toggle-hover-bg;\n    }\n    .icon-bar {\n      background-color: @navbar-default-toggle-icon-bar-bg;\n    }\n  }\n\n  .navbar-collapse,\n  .navbar-form {\n    border-color: @navbar-default-border;\n  }\n\n  // Dropdown menu items\n  .navbar-nav {\n    // Remove 
background color from open dropdown\n    > .open > a {\n      &,\n      &:hover,\n      &:focus {\n        background-color: @navbar-default-link-active-bg;\n        color: @navbar-default-link-active-color;\n      }\n    }\n\n    @media (max-width: @grid-float-breakpoint-max) {\n      // Dropdowns get custom display when collapsed\n      .open .dropdown-menu {\n        > li > a {\n          color: @navbar-default-link-color;\n          &:hover,\n          &:focus {\n            color: @navbar-default-link-hover-color;\n            background-color: @navbar-default-link-hover-bg;\n          }\n        }\n        > .active > a {\n          &,\n          &:hover,\n          &:focus {\n            color: @navbar-default-link-active-color;\n            background-color: @navbar-default-link-active-bg;\n          }\n        }\n        > .disabled > a {\n          &,\n          &:hover,\n          &:focus {\n            color: @navbar-default-link-disabled-color;\n            background-color: @navbar-default-link-disabled-bg;\n          }\n        }\n      }\n    }\n  }\n\n\n  // Links in navbars\n  //\n  // Add a class to ensure links outside the navbar nav are colored correctly.\n\n  .navbar-link {\n    color: @navbar-default-link-color;\n    &:hover {\n      color: @navbar-default-link-hover-color;\n    }\n  }\n\n}\n\n// Inverse navbar\n\n.navbar-inverse {\n  background-color: @navbar-inverse-bg;\n  border-color: @navbar-inverse-border;\n\n  .navbar-brand {\n    color: @navbar-inverse-brand-color;\n    &:hover,\n    &:focus {\n      color: @navbar-inverse-brand-hover-color;\n      background-color: @navbar-inverse-brand-hover-bg;\n    }\n  }\n\n  .navbar-text {\n    color: @navbar-inverse-color;\n  }\n\n  .navbar-nav {\n    > li > a {\n      color: @navbar-inverse-link-color;\n\n      &:hover,\n      &:focus {\n        color: @navbar-inverse-link-hover-color;\n        background-color: @navbar-inverse-link-hover-bg;\n      }\n    }\n    > .active > a {\n      &,\n    
  &:hover,\n      &:focus {\n        color: @navbar-inverse-link-active-color;\n        background-color: @navbar-inverse-link-active-bg;\n      }\n    }\n    > .disabled > a {\n      &,\n      &:hover,\n      &:focus {\n        color: @navbar-inverse-link-disabled-color;\n        background-color: @navbar-inverse-link-disabled-bg;\n      }\n    }\n  }\n\n  // Darken the responsive nav toggle\n  .navbar-toggle {\n    border-color: @navbar-inverse-toggle-border-color;\n    &:hover,\n    &:focus {\n      background-color: @navbar-inverse-toggle-hover-bg;\n    }\n    .icon-bar {\n      background-color: @navbar-inverse-toggle-icon-bar-bg;\n    }\n  }\n\n  .navbar-collapse,\n  .navbar-form {\n    border-color: darken(@navbar-inverse-bg, 7%);\n  }\n\n  // Dropdowns\n  .navbar-nav {\n    > .open > a {\n      &,\n      &:hover,\n      &:focus {\n        background-color: @navbar-inverse-link-active-bg;\n        color: @navbar-inverse-link-active-color;\n      }\n    }\n\n    @media (max-width: @grid-float-breakpoint-max) {\n      // Dropdowns get custom display\n      .open .dropdown-menu {\n        > .dropdown-header {\n          border-color: @navbar-inverse-border;\n        }\n        .divider {\n          background-color: @navbar-inverse-border;\n        }\n        > li > a {\n          color: @navbar-inverse-link-color;\n          &:hover,\n          &:focus {\n            color: @navbar-inverse-link-hover-color;\n            background-color: @navbar-inverse-link-hover-bg;\n          }\n        }\n        > .active > a {\n          &,\n          &:hover,\n          &:focus {\n            color: @navbar-inverse-link-active-color;\n            background-color: @navbar-inverse-link-active-bg;\n          }\n        }\n        > .disabled > a {\n          &,\n          &:hover,\n          &:focus {\n            color: @navbar-inverse-link-disabled-color;\n            background-color: @navbar-inverse-link-disabled-bg;\n          }\n        }\n      }\n    }\n  }\n\n  
.navbar-link {\n    color: @navbar-inverse-link-color;\n    &:hover {\n      color: @navbar-inverse-link-hover-color;\n    }\n  }\n\n}\n","//\n// Utility classes\n// --------------------------------------------------\n\n\n// Floats\n// -------------------------\n\n.clearfix {\n  .clearfix();\n}\n.center-block {\n  .center-block();\n}\n.pull-right {\n  float: right !important;\n}\n.pull-left {\n  float: left !important;\n}\n\n\n// Toggling content\n// -------------------------\n\n// Note: Deprecated .hide in favor of .hidden or .sr-only (as appropriate) in v3.0.1\n.hide {\n  display: none !important;\n}\n.show {\n  display: block !important;\n}\n.invisible {\n  visibility: hidden;\n}\n.text-hide {\n  .text-hide();\n}\n\n\n// Hide from screenreaders and browsers\n//\n// Credit: HTML5 Boilerplate\n\n.hidden {\n  display: none !important;\n  visibility: hidden !important;\n}\n\n\n// For Affix plugin\n// -------------------------\n\n.affix {\n  position: fixed;\n}\n","//\n// Breadcrumbs\n// --------------------------------------------------\n\n\n.breadcrumb {\n  padding: @breadcrumb-padding-vertical @breadcrumb-padding-horizontal;\n  margin-bottom: @line-height-computed;\n  list-style: none;\n  background-color: @breadcrumb-bg;\n  border-radius: @border-radius-base;\n\n  > li {\n    display: inline-block;\n\n    + li:before {\n      content: \"@{breadcrumb-separator}\\00a0\"; // Unicode space added since inline-block means non-collapsing white-space\n      padding: 0 5px;\n      color: @breadcrumb-color;\n    }\n  }\n\n  > .active {\n    color: @breadcrumb-active-color;\n  }\n}\n","//\n// Pagination (multiple pages)\n// --------------------------------------------------\n.pagination {\n  display: inline-block;\n  padding-left: 0;\n  margin: @line-height-computed 0;\n  border-radius: @border-radius-base;\n\n  > li {\n    display: inline; // Remove list-style and block-level defaults\n    > a,\n    > span {\n      position: relative;\n      float: left; // Collapse 
white-space\n      padding: @padding-base-vertical @padding-base-horizontal;\n      line-height: @line-height-base;\n      text-decoration: none;\n      color: @pagination-color;\n      background-color: @pagination-bg;\n      border: 1px solid @pagination-border;\n      margin-left: -1px;\n    }\n    &:first-child {\n      > a,\n      > span {\n        margin-left: 0;\n        .border-left-radius(@border-radius-base);\n      }\n    }\n    &:last-child {\n      > a,\n      > span {\n        .border-right-radius(@border-radius-base);\n      }\n    }\n  }\n\n  > li > a,\n  > li > span {\n    &:hover,\n    &:focus {\n      color: @pagination-hover-color;\n      background-color: @pagination-hover-bg;\n      border-color: @pagination-hover-border;\n    }\n  }\n\n  > .active > a,\n  > .active > span {\n    &,\n    &:hover,\n    &:focus {\n      z-index: 2;\n      color: @pagination-active-color;\n      background-color: @pagination-active-bg;\n      border-color: @pagination-active-border;\n      cursor: default;\n    }\n  }\n\n  > .disabled {\n    > span,\n    > span:hover,\n    > span:focus,\n    > a,\n    > a:hover,\n    > a:focus {\n      color: @pagination-disabled-color;\n      background-color: @pagination-disabled-bg;\n      border-color: @pagination-disabled-border;\n      cursor: not-allowed;\n    }\n  }\n}\n\n// Sizing\n// --------------------------------------------------\n\n// Large\n.pagination-lg {\n  .pagination-size(@padding-large-vertical; @padding-large-horizontal; @font-size-large; @border-radius-large);\n}\n\n// Small\n.pagination-sm {\n  .pagination-size(@padding-small-vertical; @padding-small-horizontal; @font-size-small; @border-radius-small);\n}\n","//\n// Pager pagination\n// --------------------------------------------------\n\n\n.pager {\n  padding-left: 0;\n  margin: @line-height-computed 0;\n  list-style: none;\n  text-align: center;\n  &:extend(.clearfix all);\n  li {\n    display: inline;\n    > a,\n    > span {\n      display: 
inline-block;\n      padding: 5px 14px;\n      background-color: @pager-bg;\n      border: 1px solid @pager-border;\n      border-radius: @pager-border-radius;\n    }\n\n    > a:hover,\n    > a:focus {\n      text-decoration: none;\n      background-color: @pager-hover-bg;\n    }\n  }\n\n  .next {\n    > a,\n    > span {\n      float: right;\n    }\n  }\n\n  .previous {\n    > a,\n    > span {\n      float: left;\n    }\n  }\n\n  .disabled {\n    > a,\n    > a:hover,\n    > a:focus,\n    > span {\n      color: @pager-disabled-color;\n      background-color: @pager-bg;\n      cursor: not-allowed;\n    }\n  }\n\n}\n","//\n// Labels\n// --------------------------------------------------\n\n.label {\n  display: inline;\n  padding: .2em .6em .3em;\n  font-size: 75%;\n  font-weight: bold;\n  line-height: 1;\n  color: @label-color;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: baseline;\n  border-radius: .25em;\n\n  // Add hover effects, but only for links\n  &[href] {\n    &:hover,\n    &:focus {\n      color: @label-link-hover-color;\n      text-decoration: none;\n      cursor: pointer;\n    }\n  }\n\n  // Empty labels collapse automatically (not available in IE8)\n  &:empty {\n    display: none;\n  }\n\n  // Quick fix for labels in buttons\n  .btn & {\n    position: relative;\n    top: -1px;\n  }\n}\n\n// Colors\n// Contextual variations (linked labels get darker on :hover)\n\n.label-default {\n  .label-variant(@label-default-bg);\n}\n\n.label-primary {\n  .label-variant(@label-primary-bg);\n}\n\n.label-success {\n  .label-variant(@label-success-bg);\n}\n\n.label-info {\n  .label-variant(@label-info-bg);\n}\n\n.label-warning {\n  .label-variant(@label-warning-bg);\n}\n\n.label-danger {\n  .label-variant(@label-danger-bg);\n}\n","//\n// Badges\n// --------------------------------------------------\n\n\n// Base classes\n.badge {\n  display: inline-block;\n  min-width: 10px;\n  padding: 3px 7px;\n  font-size: @font-size-small;\n  font-weight: 
@badge-font-weight;\n  color: @badge-color;\n  line-height: @badge-line-height;\n  vertical-align: baseline;\n  white-space: nowrap;\n  text-align: center;\n  background-color: @badge-bg;\n  border-radius: @badge-border-radius;\n\n  // Empty badges collapse automatically (not available in IE8)\n  &:empty {\n    display: none;\n  }\n\n  // Quick fix for badges in buttons\n  .btn & {\n    position: relative;\n    top: -1px;\n  }\n  .btn-xs & {\n    top: 0;\n    padding: 1px 5px;\n  }\n}\n\n// Hover state, but only for links\na.badge {\n  &:hover,\n  &:focus {\n    color: @badge-link-hover-color;\n    text-decoration: none;\n    cursor: pointer;\n  }\n}\n\n// Account for counters in navs\na.list-group-item.active > .badge,\n.nav-pills > .active > a > .badge {\n  color: @badge-active-color;\n  background-color: @badge-active-bg;\n}\n.nav-pills > li > a > .badge {\n  margin-left: 3px;\n}\n","//\n// Jumbotron\n// --------------------------------------------------\n\n\n.jumbotron {\n  padding: @jumbotron-padding;\n  margin-bottom: @jumbotron-padding;\n  color: @jumbotron-color;\n  background-color: @jumbotron-bg;\n\n  h1,\n  .h1 {\n    color: @jumbotron-heading-color;\n  }\n  p {\n    margin-bottom: (@jumbotron-padding / 2);\n    font-size: @jumbotron-font-size;\n    font-weight: 200;\n  }\n\n  .container & {\n    border-radius: @border-radius-large; // Only round corners at higher resolutions if contained in a container\n  }\n\n  .container {\n    max-width: 100%;\n  }\n\n  @media screen and (min-width: @screen-sm-min) {\n    padding-top:    (@jumbotron-padding * 1.6);\n    padding-bottom: (@jumbotron-padding * 1.6);\n\n    .container & {\n      padding-left:  (@jumbotron-padding * 2);\n      padding-right: (@jumbotron-padding * 2);\n    }\n\n    h1,\n    .h1 {\n      font-size: (@font-size-base * 4.5);\n    }\n  }\n}\n","//\n// Thumbnails\n// --------------------------------------------------\n\n\n// Mixin and adjust the regular image class\n.thumbnail {\n  display: 
block;\n  padding: @thumbnail-padding;\n  margin-bottom: @line-height-computed;\n  line-height: @line-height-base;\n  background-color: @thumbnail-bg;\n  border: 1px solid @thumbnail-border;\n  border-radius: @thumbnail-border-radius;\n  .transition(all .2s ease-in-out);\n\n  > img,\n  a > img {\n    .img-responsive();\n    margin-left: auto;\n    margin-right: auto;\n  }\n\n  // Add a hover state for linked versions only\n  a&:hover,\n  a&:focus,\n  a&.active {\n    border-color: @link-color;\n  }\n\n  // Image captions\n  .caption {\n    padding: @thumbnail-caption-padding;\n    color: @thumbnail-caption-color;\n  }\n}\n","//\n// Alerts\n// --------------------------------------------------\n\n\n// Base styles\n// -------------------------\n\n.alert {\n  padding: @alert-padding;\n  margin-bottom: @line-height-computed;\n  border: 1px solid transparent;\n  border-radius: @alert-border-radius;\n\n  // Headings for larger alerts\n  h4 {\n    margin-top: 0;\n    // Specified for the h4 to prevent conflicts of changing @headings-color\n    color: inherit;\n  }\n  // Provide class for links that match alerts\n  .alert-link {\n    font-weight: @alert-link-font-weight;\n  }\n\n  // Improve alignment and spacing of inner content\n  > p,\n  > ul {\n    margin-bottom: 0;\n  }\n  > p + p {\n    margin-top: 5px;\n  }\n}\n\n// Dismissable alerts\n//\n// Expand the right padding and account for the close button's positioning.\n\n.alert-dismissable {\n padding-right: (@alert-padding + 20);\n\n  // Adjust close link position\n  .close {\n    position: relative;\n    top: -2px;\n    right: -21px;\n    color: inherit;\n  }\n}\n\n// Alternate styles\n//\n// Generate contextual modifier classes for colorizing the alert.\n\n.alert-success {\n  .alert-variant(@alert-success-bg; @alert-success-border; @alert-success-text);\n}\n.alert-info {\n  .alert-variant(@alert-info-bg; @alert-info-border; @alert-info-text);\n}\n.alert-warning {\n  .alert-variant(@alert-warning-bg; 
@alert-warning-border; @alert-warning-text);\n}\n.alert-danger {\n  .alert-variant(@alert-danger-bg; @alert-danger-border; @alert-danger-text);\n}\n","//\n// Progress bars\n// --------------------------------------------------\n\n\n// Bar animations\n// -------------------------\n\n// WebKit\n@-webkit-keyframes progress-bar-stripes {\n  from  { background-position: 40px 0; }\n  to    { background-position: 0 0; }\n}\n\n// Spec and IE10+\n@keyframes progress-bar-stripes {\n  from  { background-position: 40px 0; }\n  to    { background-position: 0 0; }\n}\n\n\n\n// Bar itself\n// -------------------------\n\n// Outer container\n.progress {\n  overflow: hidden;\n  height: @line-height-computed;\n  margin-bottom: @line-height-computed;\n  background-color: @progress-bg;\n  border-radius: @border-radius-base;\n  .box-shadow(inset 0 1px 2px rgba(0,0,0,.1));\n}\n\n// Bar of progress\n.progress-bar {\n  float: left;\n  width: 0%;\n  height: 100%;\n  font-size: @font-size-small;\n  line-height: @line-height-computed;\n  color: @progress-bar-color;\n  text-align: center;\n  background-color: @progress-bar-bg;\n  .box-shadow(inset 0 -1px 0 rgba(0,0,0,.15));\n  .transition(width .6s ease);\n}\n\n// Striped bars\n.progress-striped .progress-bar {\n  #gradient > .striped();\n  background-size: 40px 40px;\n}\n\n// Call animation for the active one\n.progress.active .progress-bar {\n  .animation(progress-bar-stripes 2s linear infinite);\n}\n\n\n\n// Variations\n// -------------------------\n\n.progress-bar-success {\n  .progress-bar-variant(@progress-bar-success-bg);\n}\n\n.progress-bar-info {\n  .progress-bar-variant(@progress-bar-info-bg);\n}\n\n.progress-bar-warning {\n  .progress-bar-variant(@progress-bar-warning-bg);\n}\n\n.progress-bar-danger {\n  .progress-bar-variant(@progress-bar-danger-bg);\n}\n","// Media objects\n// Source: http://stubbornella.org/content/?p=497\n// --------------------------------------------------\n\n\n// Common styles\n// 
-------------------------\n\n// Clear the floats\n.media,\n.media-body {\n  overflow: hidden;\n  zoom: 1;\n}\n\n// Proper spacing between instances of .media\n.media,\n.media .media {\n  margin-top: 15px;\n}\n.media:first-child {\n  margin-top: 0;\n}\n\n// For images and videos, set to block\n.media-object {\n  display: block;\n}\n\n// Reset margins on headings for tighter default spacing\n.media-heading {\n  margin: 0 0 5px;\n}\n\n\n// Media image alignment\n// -------------------------\n\n.media {\n  > .pull-left {\n    margin-right: 10px;\n  }\n  > .pull-right {\n    margin-left: 10px;\n  }\n}\n\n\n// Media list variation\n// -------------------------\n\n// Undo default ul/ol styles\n.media-list {\n  padding-left: 0;\n  list-style: none;\n}\n","//\n// List groups\n// --------------------------------------------------\n\n\n// Base class\n//\n// Easily usable on <ul>, <ol>, or <div>.\n\n.list-group {\n  // No need to set list-style: none; since .list-group-item is block level\n  margin-bottom: 20px;\n  padding-left: 0; // reset padding because ul and ol\n}\n\n\n// Individual list items\n//\n// Use on `li`s or `div`s within the `.list-group` parent.\n\n.list-group-item {\n  position: relative;\n  display: block;\n  padding: 10px 15px;\n  // Place the border on the list items and negative margin up for better styling\n  margin-bottom: -1px;\n  background-color: @list-group-bg;\n  border: 1px solid @list-group-border;\n\n  // Round the first and last items\n  &:first-child {\n    .border-top-radius(@list-group-border-radius);\n  }\n  &:last-child {\n    margin-bottom: 0;\n    .border-bottom-radius(@list-group-border-radius);\n  }\n\n  // Align badges within list items\n  > .badge {\n    float: right;\n  }\n  > .badge + .badge {\n    margin-right: 5px;\n  }\n}\n\n\n// Linked list items\n//\n// Use anchor elements instead of `li`s or `div`s to create linked list items.\n// Includes an extra `.active` modifier class for showing selected items.\n\na.list-group-item {\n  
color: @list-group-link-color;\n\n  .list-group-item-heading {\n    color: @list-group-link-heading-color;\n  }\n\n  // Hover state\n  &:hover,\n  &:focus {\n    text-decoration: none;\n    background-color: @list-group-hover-bg;\n  }\n\n  // Active class on item itself, not parent\n  &.active,\n  &.active:hover,\n  &.active:focus {\n    z-index: 2; // Place active items above their siblings for proper border styling\n    color: @list-group-active-color;\n    background-color: @list-group-active-bg;\n    border-color: @list-group-active-border;\n\n    // Force color to inherit for custom content\n    .list-group-item-heading {\n      color: inherit;\n    }\n    .list-group-item-text {\n      color: @list-group-active-text-color;\n    }\n  }\n}\n\n\n// Contextual variants\n//\n// Add modifier classes to change text and background color on individual items.\n// Organizationally, this must come after the `:hover` states.\n\n.list-group-item-variant(success; @state-success-bg; @state-success-text);\n.list-group-item-variant(info; @state-info-bg; @state-info-text);\n.list-group-item-variant(warning; @state-warning-bg; @state-warning-text);\n.list-group-item-variant(danger; @state-danger-bg; @state-danger-text);\n\n\n// Custom content options\n//\n// Extra classes for creating well-formatted content within `.list-group-item`s.\n\n.list-group-item-heading {\n  margin-top: 0;\n  margin-bottom: 5px;\n}\n.list-group-item-text {\n  margin-bottom: 0;\n  line-height: 1.3;\n}\n","//\n// Panels\n// --------------------------------------------------\n\n\n// Base class\n.panel {\n  margin-bottom: @line-height-computed;\n  background-color: @panel-bg;\n  border: 1px solid transparent;\n  border-radius: @panel-border-radius;\n  .box-shadow(0 1px 1px rgba(0,0,0,.05));\n}\n\n// Panel contents\n.panel-body {\n  padding: @panel-body-padding;\n  &:extend(.clearfix all);\n}\n\n\n// List groups in panels\n//\n// By default, space out list group content from panel headings to account for\n// 
any kind of custom content between the two.\n\n.panel {\n  > .list-group {\n    margin-bottom: 0;\n    .list-group-item {\n      border-width: 1px 0;\n      border-radius: 0;\n      &:first-child {\n        border-top: 0;\n      }\n      &:last-child {\n        border-bottom: 0;\n      }\n    }\n    // Add border top radius for first one\n    &:first-child {\n      .list-group-item:first-child {\n        .border-top-radius((@panel-border-radius - 1));\n      }\n    }\n    // Add border bottom radius for last one\n    &:last-child {\n      .list-group-item:last-child {\n        .border-bottom-radius((@panel-border-radius - 1));\n      }\n    }\n  }\n}\n// Collapse space between when there's no additional content.\n.panel-heading + .list-group {\n  .list-group-item:first-child {\n    border-top-width: 0;\n  }\n}\n\n\n// Tables in panels\n//\n// Place a non-bordered `.table` within a panel (not within a `.panel-body`) and\n// watch it go full width.\n\n.panel {\n  > .table,\n  > .table-responsive > .table {\n    margin-bottom: 0;\n  }\n  // Add border top radius for first one\n  > .table:first-child,\n  > .table-responsive:first-child > .table:first-child {\n    > thead:first-child,\n    > tbody:first-child {\n      > tr:first-child {\n        td:first-child,\n        th:first-child {\n          border-top-left-radius: (@panel-border-radius - 1);\n        }\n        td:last-child,\n        th:last-child {\n          border-top-right-radius: (@panel-border-radius - 1);\n        }\n      }\n    }\n  }\n  // Add border bottom radius for last one\n  > .table:last-child,\n  > .table-responsive:last-child > .table:last-child {\n    > tbody:last-child,\n    > tfoot:last-child {\n      > tr:last-child {\n        td:first-child,\n        th:first-child {\n          border-bottom-left-radius: (@panel-border-radius - 1);\n        }\n        td:last-child,\n        th:last-child {\n          border-bottom-right-radius: (@panel-border-radius - 1);\n        }\n      }\n    }\n  }\n 
 > .panel-body + .table,\n  > .panel-body + .table-responsive {\n    border-top: 1px solid @table-border-color;\n  }\n  > .table > tbody:first-child > tr:first-child th,\n  > .table > tbody:first-child > tr:first-child td {\n    border-top: 0;\n  }\n  > .table-bordered,\n  > .table-responsive > .table-bordered {\n    border: 0;\n    > thead,\n    > tbody,\n    > tfoot {\n      > tr {\n        > th:first-child,\n        > td:first-child {\n          border-left: 0;\n        }\n        > th:last-child,\n        > td:last-child {\n          border-right: 0;\n        }\n        &:first-child > th,\n        &:first-child > td {\n          border-top: 0;\n        }\n        &:last-child > th,\n        &:last-child > td {\n          border-bottom: 0;\n        }\n      }\n    }\n  }\n  > .table-responsive {\n    border: 0;\n    margin-bottom: 0;\n  }\n}\n\n\n// Optional heading\n.panel-heading {\n  padding: 10px 15px;\n  border-bottom: 1px solid transparent;\n  .border-top-radius((@panel-border-radius - 1));\n\n  > .dropdown .dropdown-toggle {\n    color: inherit;\n  }\n}\n\n// Within heading, strip any `h*` tag of its default margins for spacing.\n.panel-title {\n  margin-top: 0;\n  margin-bottom: 0;\n  font-size: ceil((@font-size-base * 1.125));\n  color: inherit;\n\n  > a {\n    color: inherit;\n  }\n}\n\n// Optional footer (stays gray in every modifier class)\n.panel-footer {\n  padding: 10px 15px;\n  background-color: @panel-footer-bg;\n  border-top: 1px solid @panel-inner-border;\n  .border-bottom-radius((@panel-border-radius - 1));\n}\n\n\n// Collapsable panels (aka, accordion)\n//\n// Wrap a series of panels in `.panel-group` to turn them into an accordion with\n// the help of our collapse JavaScript plugin.\n\n.panel-group {\n  margin-bottom: @line-height-computed;\n\n  // Tighten up margin so it's only between panels\n  .panel {\n    margin-bottom: 0;\n    border-radius: @panel-border-radius;\n    overflow: hidden; // crop contents when collapsed\n    + .panel 
{\n      margin-top: 5px;\n    }\n  }\n\n  .panel-heading {\n    border-bottom: 0;\n    + .panel-collapse .panel-body {\n      border-top: 1px solid @panel-inner-border;\n    }\n  }\n  .panel-footer {\n    border-top: 0;\n    + .panel-collapse .panel-body {\n      border-bottom: 1px solid @panel-inner-border;\n    }\n  }\n}\n\n\n// Contextual variations\n.panel-default {\n  .panel-variant(@panel-default-border; @panel-default-text; @panel-default-heading-bg; @panel-default-border);\n}\n.panel-primary {\n  .panel-variant(@panel-primary-border; @panel-primary-text; @panel-primary-heading-bg; @panel-primary-border);\n}\n.panel-success {\n  .panel-variant(@panel-success-border; @panel-success-text; @panel-success-heading-bg; @panel-success-border);\n}\n.panel-info {\n  .panel-variant(@panel-info-border; @panel-info-text; @panel-info-heading-bg; @panel-info-border);\n}\n.panel-warning {\n  .panel-variant(@panel-warning-border; @panel-warning-text; @panel-warning-heading-bg; @panel-warning-border);\n}\n.panel-danger {\n  .panel-variant(@panel-danger-border; @panel-danger-text; @panel-danger-heading-bg; @panel-danger-border);\n}\n","//\n// Wells\n// --------------------------------------------------\n\n\n// Base class\n.well {\n  min-height: 20px;\n  padding: 19px;\n  margin-bottom: 20px;\n  background-color: @well-bg;\n  border: 1px solid @well-border;\n  border-radius: @border-radius-base;\n  .box-shadow(inset 0 1px 1px rgba(0,0,0,.05));\n  blockquote {\n    border-color: #ddd;\n    border-color: rgba(0,0,0,.15);\n  }\n}\n\n// Sizes\n.well-lg {\n  padding: 24px;\n  border-radius: @border-radius-large;\n}\n.well-sm {\n  padding: 9px;\n  border-radius: @border-radius-small;\n}\n","//\n// Close icons\n// --------------------------------------------------\n\n\n.close {\n  float: right;\n  font-size: (@font-size-base * 1.5);\n  font-weight: @close-font-weight;\n  line-height: 1;\n  color: @close-color;\n  text-shadow: @close-text-shadow;\n  .opacity(.2);\n\n  &:hover,\n  
&:focus {\n    color: @close-color;\n    text-decoration: none;\n    cursor: pointer;\n    .opacity(.5);\n  }\n\n  // Additional properties for button version\n  // iOS requires the button element instead of an anchor tag.\n  // If you want the anchor version, it requires `href=\"#\"`.\n  button& {\n    padding: 0;\n    cursor: pointer;\n    background: transparent;\n    border: 0;\n    -webkit-appearance: none;\n  }\n}\n","//\n// Modals\n// --------------------------------------------------\n\n// .modal-open      - body class for killing the scroll\n// .modal           - container to scroll within\n// .modal-dialog    - positioning shell for the actual modal\n// .modal-content   - actual modal w/ bg and corners and shit\n\n// Kill the scroll on the body\n.modal-open {\n  overflow: hidden;\n}\n\n// Container that the modal scrolls within\n.modal {\n  display: none;\n  overflow: auto;\n  overflow-y: scroll;\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: @zindex-modal;\n  -webkit-overflow-scrolling: touch;\n\n  // Prevent Chrome on Windows from adding a focus outline. 
For details, see\n  // https://github.com/twbs/bootstrap/pull/10951.\n  outline: 0;\n\n  // When fading in the modal, animate it to slide down\n  &.fade .modal-dialog {\n    .translate(0, -25%);\n    .transition-transform(~\"0.3s ease-out\");\n  }\n  &.in .modal-dialog { .translate(0, 0)}\n}\n\n// Shell div to position the modal with bottom padding\n.modal-dialog {\n  position: relative;\n  width: auto;\n  margin: 10px;\n}\n\n// Actual modal\n.modal-content {\n  position: relative;\n  background-color: @modal-content-bg;\n  border: 1px solid @modal-content-fallback-border-color; //old browsers fallback (ie8 etc)\n  border: 1px solid @modal-content-border-color;\n  border-radius: @border-radius-large;\n  .box-shadow(0 3px 9px rgba(0,0,0,.5));\n  background-clip: padding-box;\n  // Remove focus outline from opened modal\n  outline: none;\n}\n\n// Modal background\n.modal-backdrop {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: @zindex-modal-background;\n  background-color: @modal-backdrop-bg;\n  // Fade for backdrop\n  &.fade { .opacity(0); }\n  &.in { .opacity(@modal-backdrop-opacity); }\n}\n\n// Modal header\n// Top section of the modal w/ title and dismiss\n.modal-header {\n  padding: @modal-title-padding;\n  border-bottom: 1px solid @modal-header-border-color;\n  min-height: (@modal-title-padding + @modal-title-line-height);\n}\n// Close icon\n.modal-header .close {\n  margin-top: -2px;\n}\n\n// Title text within header\n.modal-title {\n  margin: 0;\n  line-height: @modal-title-line-height;\n}\n\n// Modal body\n// Where all modal content resides (sibling of .modal-header and .modal-footer)\n.modal-body {\n  position: relative;\n  padding: @modal-inner-padding;\n}\n\n// Footer (for actions)\n.modal-footer {\n  margin-top: 15px;\n  padding: (@modal-inner-padding - 1) @modal-inner-padding @modal-inner-padding;\n  text-align: right; // right align buttons\n  border-top: 1px solid @modal-footer-border-color;\n  &:extend(.clearfix 
all); // clear it in case folks use .pull-* classes on buttons\n\n  // Properly space out buttons\n  .btn + .btn {\n    margin-left: 5px;\n    margin-bottom: 0; // account for input[type=\"submit\"] which gets the bottom margin like all other inputs\n  }\n  // but override that for button groups\n  .btn-group .btn + .btn {\n    margin-left: -1px;\n  }\n  // and override it for block buttons as well\n  .btn-block + .btn-block {\n    margin-left: 0;\n  }\n}\n\n// Scale up the modal\n@media (min-width: @screen-sm-min) {\n\n  // Automatically set modal's width for larger viewports\n  .modal-dialog {\n    width: @modal-md;\n    margin: 30px auto;\n  }\n  .modal-content {\n    .box-shadow(0 5px 15px rgba(0,0,0,.5));\n  }\n\n  // Modal sizes\n  .modal-sm { width: @modal-sm; }\n  .modal-lg { width: @modal-lg; }\n\n}\n","//\n// Tooltips\n// --------------------------------------------------\n\n\n// Base class\n.tooltip {\n  position: absolute;\n  z-index: @zindex-tooltip;\n  display: block;\n  visibility: visible;\n  font-size: @font-size-small;\n  line-height: 1.4;\n  .opacity(0);\n\n  &.in     { .opacity(@tooltip-opacity); }\n  &.top    { margin-top:  -3px; padding: @tooltip-arrow-width 0; }\n  &.right  { margin-left:  3px; padding: 0 @tooltip-arrow-width; }\n  &.bottom { margin-top:   3px; padding: @tooltip-arrow-width 0; }\n  &.left   { margin-left: -3px; padding: 0 @tooltip-arrow-width; }\n}\n\n// Wrapper for the tooltip content\n.tooltip-inner {\n  max-width: @tooltip-max-width;\n  padding: 3px 8px;\n  color: @tooltip-color;\n  text-align: center;\n  text-decoration: none;\n  background-color: @tooltip-bg;\n  border-radius: @border-radius-base;\n}\n\n// Arrows\n.tooltip-arrow {\n  position: absolute;\n  width: 0;\n  height: 0;\n  border-color: transparent;\n  border-style: solid;\n}\n.tooltip {\n  &.top .tooltip-arrow {\n    bottom: 0;\n    left: 50%;\n    margin-left: -@tooltip-arrow-width;\n    border-width: @tooltip-arrow-width @tooltip-arrow-width 0;\n    
border-top-color: @tooltip-arrow-color;\n  }\n  &.top-left .tooltip-arrow {\n    bottom: 0;\n    left: @tooltip-arrow-width;\n    border-width: @tooltip-arrow-width @tooltip-arrow-width 0;\n    border-top-color: @tooltip-arrow-color;\n  }\n  &.top-right .tooltip-arrow {\n    bottom: 0;\n    right: @tooltip-arrow-width;\n    border-width: @tooltip-arrow-width @tooltip-arrow-width 0;\n    border-top-color: @tooltip-arrow-color;\n  }\n  &.right .tooltip-arrow {\n    top: 50%;\n    left: 0;\n    margin-top: -@tooltip-arrow-width;\n    border-width: @tooltip-arrow-width @tooltip-arrow-width @tooltip-arrow-width 0;\n    border-right-color: @tooltip-arrow-color;\n  }\n  &.left .tooltip-arrow {\n    top: 50%;\n    right: 0;\n    margin-top: -@tooltip-arrow-width;\n    border-width: @tooltip-arrow-width 0 @tooltip-arrow-width @tooltip-arrow-width;\n    border-left-color: @tooltip-arrow-color;\n  }\n  &.bottom .tooltip-arrow {\n    top: 0;\n    left: 50%;\n    margin-left: -@tooltip-arrow-width;\n    border-width: 0 @tooltip-arrow-width @tooltip-arrow-width;\n    border-bottom-color: @tooltip-arrow-color;\n  }\n  &.bottom-left .tooltip-arrow {\n    top: 0;\n    left: @tooltip-arrow-width;\n    border-width: 0 @tooltip-arrow-width @tooltip-arrow-width;\n    border-bottom-color: @tooltip-arrow-color;\n  }\n  &.bottom-right .tooltip-arrow {\n    top: 0;\n    right: @tooltip-arrow-width;\n    border-width: 0 @tooltip-arrow-width @tooltip-arrow-width;\n    border-bottom-color: @tooltip-arrow-color;\n  }\n}\n","//\n// Popovers\n// --------------------------------------------------\n\n\n.popover {\n  position: absolute;\n  top: 0;\n  left: 0;\n  z-index: @zindex-popover;\n  display: none;\n  max-width: @popover-max-width;\n  padding: 1px;\n  text-align: left; // Reset given new insertion method\n  background-color: @popover-bg;\n  background-clip: padding-box;\n  border: 1px solid @popover-fallback-border-color;\n  border: 1px solid @popover-border-color;\n  border-radius: 
@border-radius-large;\n  .box-shadow(0 5px 10px rgba(0,0,0,.2));\n\n  // Overrides for proper insertion\n  white-space: normal;\n\n  // Offset the popover to account for the popover arrow\n  &.top     { margin-top: -10px; }\n  &.right   { margin-left: 10px; }\n  &.bottom  { margin-top: 10px; }\n  &.left    { margin-left: -10px; }\n}\n\n.popover-title {\n  margin: 0; // reset heading margin\n  padding: 8px 14px;\n  font-size: @font-size-base;\n  font-weight: normal;\n  line-height: 18px;\n  background-color: @popover-title-bg;\n  border-bottom: 1px solid darken(@popover-title-bg, 5%);\n  border-radius: 5px 5px 0 0;\n}\n\n.popover-content {\n  padding: 9px 14px;\n}\n\n// Arrows\n//\n// .arrow is outer, .arrow:after is inner\n\n.popover .arrow {\n  &,\n  &:after {\n    position: absolute;\n    display: block;\n    width: 0;\n    height: 0;\n    border-color: transparent;\n    border-style: solid;\n  }\n}\n.popover .arrow {\n  border-width: @popover-arrow-outer-width;\n}\n.popover .arrow:after {\n  border-width: @popover-arrow-width;\n  content: \"\";\n}\n\n.popover {\n  &.top .arrow {\n    left: 50%;\n    margin-left: -@popover-arrow-outer-width;\n    border-bottom-width: 0;\n    border-top-color: @popover-arrow-outer-fallback-color; // IE8 fallback\n    border-top-color: @popover-arrow-outer-color;\n    bottom: -@popover-arrow-outer-width;\n    &:after {\n      content: \" \";\n      bottom: 1px;\n      margin-left: -@popover-arrow-width;\n      border-bottom-width: 0;\n      border-top-color: @popover-arrow-color;\n    }\n  }\n  &.right .arrow {\n    top: 50%;\n    left: -@popover-arrow-outer-width;\n    margin-top: -@popover-arrow-outer-width;\n    border-left-width: 0;\n    border-right-color: @popover-arrow-outer-fallback-color; // IE8 fallback\n    border-right-color: @popover-arrow-outer-color;\n    &:after {\n      content: \" \";\n      left: 1px;\n      bottom: -@popover-arrow-width;\n      border-left-width: 0;\n      border-right-color: 
@popover-arrow-color;\n    }\n  }\n  &.bottom .arrow {\n    left: 50%;\n    margin-left: -@popover-arrow-outer-width;\n    border-top-width: 0;\n    border-bottom-color: @popover-arrow-outer-fallback-color; // IE8 fallback\n    border-bottom-color: @popover-arrow-outer-color;\n    top: -@popover-arrow-outer-width;\n    &:after {\n      content: \" \";\n      top: 1px;\n      margin-left: -@popover-arrow-width;\n      border-top-width: 0;\n      border-bottom-color: @popover-arrow-color;\n    }\n  }\n\n  &.left .arrow {\n    top: 50%;\n    right: -@popover-arrow-outer-width;\n    margin-top: -@popover-arrow-outer-width;\n    border-right-width: 0;\n    border-left-color: @popover-arrow-outer-fallback-color; // IE8 fallback\n    border-left-color: @popover-arrow-outer-color;\n    &:after {\n      content: \" \";\n      right: 1px;\n      border-right-width: 0;\n      border-left-color: @popover-arrow-color;\n      bottom: -@popover-arrow-width;\n    }\n  }\n\n}\n","//\n// Carousel\n// --------------------------------------------------\n\n\n// Wrapper for the slide container and indicators\n.carousel {\n  position: relative;\n}\n\n.carousel-inner {\n  position: relative;\n  overflow: hidden;\n  width: 100%;\n\n  > .item {\n    display: none;\n    position: relative;\n    .transition(.6s ease-in-out left);\n\n    // Account for jankitude on images\n    > img,\n    > a > img {\n      .img-responsive();\n      line-height: 1;\n    }\n  }\n\n  > .active,\n  > .next,\n  > .prev { display: block; }\n\n  > .active {\n    left: 0;\n  }\n\n  > .next,\n  > .prev {\n    position: absolute;\n    top: 0;\n    width: 100%;\n  }\n\n  > .next {\n    left: 100%;\n  }\n  > .prev {\n    left: -100%;\n  }\n  > .next.left,\n  > .prev.right {\n    left: 0;\n  }\n\n  > .active.left {\n    left: -100%;\n  }\n  > .active.right {\n    left: 100%;\n  }\n\n}\n\n// Left/right controls for nav\n// ---------------------------\n\n.carousel-control {\n  position: absolute;\n  top: 0;\n  left: 0;\n  
bottom: 0;\n  width: @carousel-control-width;\n  .opacity(@carousel-control-opacity);\n  font-size: @carousel-control-font-size;\n  color: @carousel-control-color;\n  text-align: center;\n  text-shadow: @carousel-text-shadow;\n  // We can't have this transition here because WebKit cancels the carousel\n  // animation if you trip this while in the middle of another animation.\n\n  // Set gradients for backgrounds\n  &.left {\n    #gradient > .horizontal(@start-color: rgba(0,0,0,.5); @end-color: rgba(0,0,0,.0001));\n  }\n  &.right {\n    left: auto;\n    right: 0;\n    #gradient > .horizontal(@start-color: rgba(0,0,0,.0001); @end-color: rgba(0,0,0,.5));\n  }\n\n  // Hover/focus state\n  &:hover,\n  &:focus {\n    outline: none;\n    color: @carousel-control-color;\n    text-decoration: none;\n    .opacity(.9);\n  }\n\n  // Toggles\n  .icon-prev,\n  .icon-next,\n  .glyphicon-chevron-left,\n  .glyphicon-chevron-right {\n    position: absolute;\n    top: 50%;\n    z-index: 5;\n    display: inline-block;\n  }\n  .icon-prev,\n  .glyphicon-chevron-left {\n    left: 50%;\n  }\n  .icon-next,\n  .glyphicon-chevron-right {\n    right: 50%;\n  }\n  .icon-prev,\n  .icon-next {\n    width:  20px;\n    height: 20px;\n    margin-top: -10px;\n    margin-left: -10px;\n    font-family: serif;\n  }\n\n  .icon-prev {\n    &:before {\n      content: '\\2039';// SINGLE LEFT-POINTING ANGLE QUOTATION MARK (U+2039)\n    }\n  }\n  .icon-next {\n    &:before {\n      content: '\\203a';// SINGLE RIGHT-POINTING ANGLE QUOTATION MARK (U+203A)\n    }\n  }\n}\n\n// Optional indicator pips\n//\n// Add an unordered list with the following class and add a list item for each\n// slide your carousel holds.\n\n.carousel-indicators {\n  position: absolute;\n  bottom: 10px;\n  left: 50%;\n  z-index: 15;\n  width: 60%;\n  margin-left: -30%;\n  padding-left: 0;\n  list-style: none;\n  text-align: center;\n\n  li {\n    display: inline-block;\n    width:  10px;\n    height: 10px;\n    margin: 1px;\n    
text-indent: -999px;\n    border: 1px solid @carousel-indicator-border-color;\n    border-radius: 10px;\n    cursor: pointer;\n\n    // IE8-9 hack for event handling\n    //\n    // Internet Explorer 8-9 does not support clicks on elements without a set\n    // `background-color`. We cannot use `filter` since that's not viewed as a\n    // background color by the browser. Thus, a hack is needed.\n    //\n    // For IE8, we set solid black as it doesn't support `rgba()`. For IE9, we\n    // set alpha transparency for the best results possible.\n    background-color: #000 \\9; // IE8\n    background-color: rgba(0,0,0,0); // IE9\n  }\n  .active {\n    margin: 0;\n    width:  12px;\n    height: 12px;\n    background-color: @carousel-indicator-active-bg;\n  }\n}\n\n// Optional captions\n// -----------------------------\n// Hidden by default for smaller viewports\n.carousel-caption {\n  position: absolute;\n  left: 15%;\n  right: 15%;\n  bottom: 20px;\n  z-index: 10;\n  padding-top: 20px;\n  padding-bottom: 20px;\n  color: @carousel-caption-color;\n  text-align: center;\n  text-shadow: @carousel-text-shadow;\n  & .btn {\n    text-shadow: none; // No shadow for button elements in carousel-caption\n  }\n}\n\n\n// Scale up controls for tablets and up\n@media screen and (min-width: @screen-sm-min) {\n\n  // Scale up the controls a smidge\n  .carousel-control {\n    .glyphicons-chevron-left,\n    .glyphicons-chevron-right,\n    .icon-prev,\n    .icon-next {\n      width: 30px;\n      height: 30px;\n      margin-top: -15px;\n      margin-left: -15px;\n      font-size: 30px;\n    }\n  }\n\n  // Show and left align the captions\n  .carousel-caption {\n    left: 20%;\n    right: 20%;\n    padding-bottom: 30px;\n  }\n\n  // Move up the indicators\n  .carousel-indicators {\n    bottom: 20px;\n  }\n}\n","//\n// Responsive: Utility classes\n// --------------------------------------------------\n\n\n// IE10 in Windows (Phone) 8\n//\n// Support for responsive views via media queries is 
kind of borked in IE10, for\n// Surface/desktop in split view and for Windows Phone 8. This particular fix\n// must be accompanied by a snippet of JavaScript to sniff the user agent and\n// apply some conditional CSS to *only* the Surface/desktop Windows 8. Look at\n// our Getting Started page for more information on this bug.\n//\n// For more information, see the following:\n//\n// Issue: https://github.com/twbs/bootstrap/issues/10497\n// Docs: http://getbootstrap.com/getting-started/#browsers\n// Source: http://timkadlec.com/2012/10/ie10-snap-mode-and-responsive-design/\n\n@-ms-viewport {\n  width: device-width;\n}\n\n\n// Visibility utilities\n.visible-xs {\n  .responsive-invisibility();\n\n  @media (max-width: @screen-xs-max) {\n    .responsive-visibility();\n  }\n}\n.visible-sm {\n  .responsive-invisibility();\n\n  @media (min-width: @screen-sm-min) and (max-width: @screen-sm-max) {\n    .responsive-visibility();\n  }\n}\n.visible-md {\n  .responsive-invisibility();\n\n  @media (min-width: @screen-md-min) and (max-width: @screen-md-max) {\n    .responsive-visibility();\n  }\n}\n.visible-lg {\n  .responsive-invisibility();\n\n  @media (min-width: @screen-lg-min) {\n    .responsive-visibility();\n  }\n}\n\n.hidden-xs {\n  @media (max-width: @screen-xs-max) {\n    .responsive-invisibility();\n  }\n}\n.hidden-sm {\n  @media (min-width: @screen-sm-min) and (max-width: @screen-sm-max) {\n    .responsive-invisibility();\n  }\n}\n.hidden-md {\n  @media (min-width: @screen-md-min) and (max-width: @screen-md-max) {\n    .responsive-invisibility();\n  }\n}\n.hidden-lg {\n  @media (min-width: @screen-lg-min) {\n    .responsive-invisibility();\n  }\n}\n\n\n// Print utilities\n//\n// Media queries are placed on the inside to be mixin-friendly.\n\n.visible-print {\n  .responsive-invisibility();\n\n  @media print {\n    .responsive-visibility();\n  }\n}\n\n.hidden-print {\n  @media print {\n    .responsive-invisibility();\n  }\n}\n"]}
\ No newline at end of file
diff --git a/doc/css/bootstrap.min.css b/doc/css/bootstrap.min.css
new file mode 100644 (file)
index 0000000..381834e
--- /dev/null
@@ -0,0 +1,7 @@
+/*!
+ * Bootstrap v3.1.0 (http://getbootstrap.com)
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ */
+
+/*! normalize.css v3.0.0 | MIT License | git.io/normalize */html{font-family:sans-serif;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%}body{margin:0}article,aside,details,figcaption,figure,footer,header,hgroup,main,nav,section,summary{display:block}audio,canvas,progress,video{display:inline-block;vertical-align:baseline}audio:not([controls]){display:none;height:0}[hidden],template{display:none}a{background:0 0}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}dfn{font-style:italic}h1{font-size:2em;margin:.67em 0}mark{background:#ff0;color:#000}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}img{border:0}svg:not(:root){overflow:hidden}figure{margin:1em 40px}hr{-moz-box-sizing:content-box;box-sizing:content-box;height:0}pre{overflow:auto}code,kbd,pre,samp{font-family:monospace,monospace;font-size:1em}button,input,optgroup,select,textarea{color:inherit;font:inherit;margin:0}button{overflow:visible}button,select{text-transform:none}button,html input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer}button[disabled],html input[disabled]{cursor:default}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}input{line-height:normal}input[type=checkbox],input[type=radio]{box-sizing:border-box;padding:0}input[type=number]::-webkit-inner-spin-button,input[type=number]::-webkit-outer-spin-button{height:auto}input[type=search]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}fieldset{border:1px solid silver;margin:0 2px;padding:.35em .625em .75em}legend{border:0;padding:0}textarea{overflow:auto}optgroup{font-weight:700}table{border-collapse:collapse;border-spacing:0}td,th{padding:0}@media 
print{*{text-shadow:none!important;color:#000!important;background:transparent!important;box-shadow:none!important}a,a:visited{text-decoration:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100%!important}p,h2,h3{orphans:3;widows:3}h2,h3{page-break-after:avoid}select{background:#fff!important}.navbar{display:none}.table td,.table th{background-color:#fff!important}.btn>.caret,.dropup>.btn>.caret{border-top-color:#000!important}.label{border:1px solid #000}.table{border-collapse:collapse!important}.table-bordered th,.table-bordered td{border:1px solid #ddd!important}}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}:before,:after{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:62.5%;-webkit-tap-highlight-color:rgba(0,0,0,0)}body{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;line-height:1.428571429;color:#333;background-color:#fff}input,button,select,textarea{font-family:inherit;font-size:inherit;line-height:inherit}a{color:#428bca;text-decoration:none}a:hover,a:focus{color:#2a6496;text-decoration:underline}a:focus{outline:thin dotted;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}figure{margin:0}img{vertical-align:middle}.img-responsive{display:block;max-width:100%;height:auto}.img-rounded{border-radius:6px}.img-thumbnail{padding:4px;line-height:1.428571429;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;transition:all .2s ease-in-out;display:inline-block;max-width:100%;height:auto}.img-circle{border-radius:50%}hr{margin-top:20px;margin-bottom:20px;border:0;border-top:1px solid 
#eee}.sr-only{position:absolute;width:1px;height:1px;margin:-1px;padding:0;overflow:hidden;clip:rect(0,0,0,0);border:0}h1,h2,h3,h4,h5,h6,.h1,.h2,.h3,.h4,.h5,.h6{font-family:inherit;font-weight:500;line-height:1.1;color:inherit}h1 small,h2 small,h3 small,h4 small,h5 small,h6 small,.h1 small,.h2 small,.h3 small,.h4 small,.h5 small,.h6 small,h1 .small,h2 .small,h3 .small,h4 .small,h5 .small,h6 .small,.h1 .small,.h2 .small,.h3 .small,.h4 .small,.h5 .small,.h6 .small{font-weight:400;line-height:1;color:#999}h1,.h1,h2,.h2,h3,.h3{margin-top:20px;margin-bottom:10px}h1 small,.h1 small,h2 small,.h2 small,h3 small,.h3 small,h1 .small,.h1 .small,h2 .small,.h2 .small,h3 .small,.h3 .small{font-size:65%}h4,.h4,h5,.h5,h6,.h6{margin-top:10px;margin-bottom:10px}h4 small,.h4 small,h5 small,.h5 small,h6 small,.h6 small,h4 .small,.h4 .small,h5 .small,.h5 .small,h6 .small,.h6 .small{font-size:75%}h1,.h1{font-size:36px}h2,.h2{font-size:30px}h3,.h3{font-size:24px}h4,.h4{font-size:18px}h5,.h5{font-size:14px}h6,.h6{font-size:12px}p{margin:0 0 10px}.lead{margin-bottom:20px;font-size:16px;font-weight:200;line-height:1.4}@media 
(min-width:768px){.lead{font-size:21px}}small,.small{font-size:85%}cite{font-style:normal}.text-left{text-align:left}.text-right{text-align:right}.text-center{text-align:center}.text-justify{text-align:justify}.text-muted{color:#999}.text-primary{color:#428bca}a.text-primary:hover{color:#3071a9}.text-success{color:#3c763d}a.text-success:hover{color:#2b542c}.text-info{color:#31708f}a.text-info:hover{color:#245269}.text-warning{color:#8a6d3b}a.text-warning:hover{color:#66512c}.text-danger{color:#a94442}a.text-danger:hover{color:#843534}.bg-primary{color:#fff;background-color:#428bca}a.bg-primary:hover{background-color:#3071a9}.bg-success{background-color:#dff0d8}a.bg-success:hover{background-color:#c1e2b3}.bg-info{background-color:#d9edf7}a.bg-info:hover{background-color:#afd9ee}.bg-warning{background-color:#fcf8e3}a.bg-warning:hover{background-color:#f7ecb5}.bg-danger{background-color:#f2dede}a.bg-danger:hover{background-color:#e4b9b9}.page-header{padding-bottom:9px;margin:40px 0 20px;border-bottom:1px solid #eee}ul,ol{margin-top:0;margin-bottom:10px}ul ul,ol ul,ul ol,ol ol{margin-bottom:0}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;list-style:none}.list-inline>li{display:inline-block;padding-left:5px;padding-right:5px}.list-inline>li:first-child{padding-left:0}dl{margin-top:0;margin-bottom:20px}dt,dd{line-height:1.428571429}dt{font-weight:700}dd{margin-left:0}@media (min-width:768px){.dl-horizontal dt{float:left;width:160px;clear:left;text-align:right;overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.dl-horizontal dd{margin-left:180px}}abbr[title],abbr[data-original-title]{cursor:help;border-bottom:1px dotted #999}.initialism{font-size:90%;text-transform:uppercase}blockquote{padding:10px 20px;margin:0 0 20px;font-size:17.5px;border-left:5px solid #eee}blockquote p:last-child,blockquote ul:last-child,blockquote ol:last-child{margin-bottom:0}blockquote footer,blockquote small,blockquote 
.small{display:block;font-size:80%;line-height:1.428571429;color:#999}blockquote footer:before,blockquote small:before,blockquote .small:before{content:'\2014 \00A0'}.blockquote-reverse,blockquote.pull-right{padding-right:15px;padding-left:0;border-right:5px solid #eee;border-left:0;text-align:right}.blockquote-reverse footer:before,blockquote.pull-right footer:before,.blockquote-reverse small:before,blockquote.pull-right small:before,.blockquote-reverse .small:before,blockquote.pull-right .small:before{content:''}.blockquote-reverse footer:after,blockquote.pull-right footer:after,.blockquote-reverse small:after,blockquote.pull-right small:after,.blockquote-reverse .small:after,blockquote.pull-right .small:after{content:'\00A0 \2014'}blockquote:before,blockquote:after{content:""}address{margin-bottom:20px;font-style:normal;line-height:1.428571429}code,kbd,pre,samp{font-family:Menlo,Monaco,Consolas,"Courier New",monospace}code{padding:2px 4px;font-size:90%;color:#c7254e;background-color:#f9f2f4;white-space:nowrap;border-radius:4px}kbd{padding:2px 4px;font-size:90%;color:#fff;background-color:#333;border-radius:3px;box-shadow:inset 0 -1px 0 rgba(0,0,0,.25)}pre{display:block;padding:9.5px;margin:0 0 10px;font-size:13px;line-height:1.428571429;word-break:break-all;word-wrap:break-word;color:#333;background-color:#f5f5f5;border:1px solid #ccc;border-radius:4px}pre code{padding:0;font-size:inherit;color:inherit;white-space:pre-wrap;background-color:transparent;border-radius:0}.pre-scrollable{max-height:340px;overflow-y:scroll}.container{margin-right:auto;margin-left:auto;padding-left:15px;padding-right:15px}@media (min-width:768px){.container{width:750px}}@media (min-width:992px){.container{width:970px}}@media 
(min-width:1200px){.container{width:1170px}}.container-fluid{margin-right:auto;margin-left:auto;padding-left:15px;padding-right:15px}.row{margin-left:-15px;margin-right:-15px}.col-xs-1,.col-sm-1,.col-md-1,.col-lg-1,.col-xs-2,.col-sm-2,.col-md-2,.col-lg-2,.col-xs-3,.col-sm-3,.col-md-3,.col-lg-3,.col-xs-4,.col-sm-4,.col-md-4,.col-lg-4,.col-xs-5,.col-sm-5,.col-md-5,.col-lg-5,.col-xs-6,.col-sm-6,.col-md-6,.col-lg-6,.col-xs-7,.col-sm-7,.col-md-7,.col-lg-7,.col-xs-8,.col-sm-8,.col-md-8,.col-lg-8,.col-xs-9,.col-sm-9,.col-md-9,.col-lg-9,.col-xs-10,.col-sm-10,.col-md-10,.col-lg-10,.col-xs-11,.col-sm-11,.col-md-11,.col-lg-11,.col-xs-12,.col-sm-12,.col-md-12,.col-lg-12{position:relative;min-height:1px;padding-left:15px;padding-right:15px}.col-xs-1,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9,.col-xs-10,.col-xs-11,.col-xs-12{float:left}.col-xs-12{width:100%}.col-xs-11{width:91.66666666666666%}.col-xs-10{width:83.33333333333334%}.col-xs-9{width:75%}.col-xs-8{width:66.66666666666666%}.col-xs-7{width:58.333333333333336%}.col-xs-6{width:50%}.col-xs-5{width:41.66666666666667%}.col-xs-4{width:33.33333333333333%}.col-xs-3{width:25%}.col-xs-2{width:16.666666666666664%}.col-xs-1{width:8.333333333333332%}.col-xs-pull-12{right:100%}.col-xs-pull-11{right:91.66666666666666%}.col-xs-pull-10{right:83.33333333333334%}.col-xs-pull-9{right:75%}.col-xs-pull-8{right:66.66666666666666%}.col-xs-pull-7{right:58.333333333333336%}.col-xs-pull-6{right:50%}.col-xs-pull-5{right:41.66666666666667%}.col-xs-pull-4{right:33.33333333333333%}.col-xs-pull-3{right:25%}.col-xs-pull-2{right:16.666666666666664%}.col-xs-pull-1{right:8.333333333333332%}.col-xs-pull-0{right:0}.col-xs-push-12{left:100%}.col-xs-push-11{left:91.66666666666666%}.col-xs-push-10{left:83.33333333333334%}.col-xs-push-9{left:75%}.col-xs-push-8{left:66.66666666666666%}.col-xs-push-7{left:58.333333333333336%}.col-xs-push-6{left:50%}.col-xs-push-5{left:41.66666666666667%}.col-xs-push-4{left:33.33333333333333%}.co
l-xs-push-3{left:25%}.col-xs-push-2{left:16.666666666666664%}.col-xs-push-1{left:8.333333333333332%}.col-xs-push-0{left:0}.col-xs-offset-12{margin-left:100%}.col-xs-offset-11{margin-left:91.66666666666666%}.col-xs-offset-10{margin-left:83.33333333333334%}.col-xs-offset-9{margin-left:75%}.col-xs-offset-8{margin-left:66.66666666666666%}.col-xs-offset-7{margin-left:58.333333333333336%}.col-xs-offset-6{margin-left:50%}.col-xs-offset-5{margin-left:41.66666666666667%}.col-xs-offset-4{margin-left:33.33333333333333%}.col-xs-offset-3{margin-left:25%}.col-xs-offset-2{margin-left:16.666666666666664%}.col-xs-offset-1{margin-left:8.333333333333332%}.col-xs-offset-0{margin-left:0}@media (min-width:768px){.col-sm-1,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-10,.col-sm-11,.col-sm-12{float:left}.col-sm-12{width:100%}.col-sm-11{width:91.66666666666666%}.col-sm-10{width:83.33333333333334%}.col-sm-9{width:75%}.col-sm-8{width:66.66666666666666%}.col-sm-7{width:58.333333333333336%}.col-sm-6{width:50%}.col-sm-5{width:41.66666666666667%}.col-sm-4{width:33.33333333333333%}.col-sm-3{width:25%}.col-sm-2{width:16.666666666666664%}.col-sm-1{width:8.333333333333332%}.col-sm-pull-12{right:100%}.col-sm-pull-11{right:91.66666666666666%}.col-sm-pull-10{right:83.33333333333334%}.col-sm-pull-9{right:75%}.col-sm-pull-8{right:66.66666666666666%}.col-sm-pull-7{right:58.333333333333336%}.col-sm-pull-6{right:50%}.col-sm-pull-5{right:41.66666666666667%}.col-sm-pull-4{right:33.33333333333333%}.col-sm-pull-3{right:25%}.col-sm-pull-2{right:16.666666666666664%}.col-sm-pull-1{right:8.333333333333332%}.col-sm-pull-0{right:0}.col-sm-push-12{left:100%}.col-sm-push-11{left:91.66666666666666%}.col-sm-push-10{left:83.33333333333334%}.col-sm-push-9{left:75%}.col-sm-push-8{left:66.66666666666666%}.col-sm-push-7{left:58.333333333333336%}.col-sm-push-6{left:50%}.col-sm-push-5{left:41.66666666666667%}.col-sm-push-4{left:33.33333333333333%}.col-sm-push-3{left:25%}.col-sm-push-2{l
eft:16.666666666666664%}.col-sm-push-1{left:8.333333333333332%}.col-sm-push-0{left:0}.col-sm-offset-12{margin-left:100%}.col-sm-offset-11{margin-left:91.66666666666666%}.col-sm-offset-10{margin-left:83.33333333333334%}.col-sm-offset-9{margin-left:75%}.col-sm-offset-8{margin-left:66.66666666666666%}.col-sm-offset-7{margin-left:58.333333333333336%}.col-sm-offset-6{margin-left:50%}.col-sm-offset-5{margin-left:41.66666666666667%}.col-sm-offset-4{margin-left:33.33333333333333%}.col-sm-offset-3{margin-left:25%}.col-sm-offset-2{margin-left:16.666666666666664%}.col-sm-offset-1{margin-left:8.333333333333332%}.col-sm-offset-0{margin-left:0}}@media (min-width:992px){.col-md-1,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-10,.col-md-11,.col-md-12{float:left}.col-md-12{width:100%}.col-md-11{width:91.66666666666666%}.col-md-10{width:83.33333333333334%}.col-md-9{width:75%}.col-md-8{width:66.66666666666666%}.col-md-7{width:58.333333333333336%}.col-md-6{width:50%}.col-md-5{width:41.66666666666667%}.col-md-4{width:33.33333333333333%}.col-md-3{width:25%}.col-md-2{width:16.666666666666664%}.col-md-1{width:8.333333333333332%}.col-md-pull-12{right:100%}.col-md-pull-11{right:91.66666666666666%}.col-md-pull-10{right:83.33333333333334%}.col-md-pull-9{right:75%}.col-md-pull-8{right:66.66666666666666%}.col-md-pull-7{right:58.333333333333336%}.col-md-pull-6{right:50%}.col-md-pull-5{right:41.66666666666667%}.col-md-pull-4{right:33.33333333333333%}.col-md-pull-3{right:25%}.col-md-pull-2{right:16.666666666666664%}.col-md-pull-1{right:8.333333333333332%}.col-md-pull-0{right:0}.col-md-push-12{left:100%}.col-md-push-11{left:91.66666666666666%}.col-md-push-10{left:83.33333333333334%}.col-md-push-9{left:75%}.col-md-push-8{left:66.66666666666666%}.col-md-push-7{left:58.333333333333336%}.col-md-push-6{left:50%}.col-md-push-5{left:41.66666666666667%}.col-md-push-4{left:33.33333333333333%}.col-md-push-3{left:25%}.col-md-push-2{left:16.666666666666664%}.col-md-push
-1{left:8.333333333333332%}.col-md-push-0{left:0}.col-md-offset-12{margin-left:100%}.col-md-offset-11{margin-left:91.66666666666666%}.col-md-offset-10{margin-left:83.33333333333334%}.col-md-offset-9{margin-left:75%}.col-md-offset-8{margin-left:66.66666666666666%}.col-md-offset-7{margin-left:58.333333333333336%}.col-md-offset-6{margin-left:50%}.col-md-offset-5{margin-left:41.66666666666667%}.col-md-offset-4{margin-left:33.33333333333333%}.col-md-offset-3{margin-left:25%}.col-md-offset-2{margin-left:16.666666666666664%}.col-md-offset-1{margin-left:8.333333333333332%}.col-md-offset-0{margin-left:0}}@media (min-width:1200px){.col-lg-1,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-10,.col-lg-11,.col-lg-12{float:left}.col-lg-12{width:100%}.col-lg-11{width:91.66666666666666%}.col-lg-10{width:83.33333333333334%}.col-lg-9{width:75%}.col-lg-8{width:66.66666666666666%}.col-lg-7{width:58.333333333333336%}.col-lg-6{width:50%}.col-lg-5{width:41.66666666666667%}.col-lg-4{width:33.33333333333333%}.col-lg-3{width:25%}.col-lg-2{width:16.666666666666664%}.col-lg-1{width:8.333333333333332%}.col-lg-pull-12{right:100%}.col-lg-pull-11{right:91.66666666666666%}.col-lg-pull-10{right:83.33333333333334%}.col-lg-pull-9{right:75%}.col-lg-pull-8{right:66.66666666666666%}.col-lg-pull-7{right:58.333333333333336%}.col-lg-pull-6{right:50%}.col-lg-pull-5{right:41.66666666666667%}.col-lg-pull-4{right:33.33333333333333%}.col-lg-pull-3{right:25%}.col-lg-pull-2{right:16.666666666666664%}.col-lg-pull-1{right:8.333333333333332%}.col-lg-pull-0{right:0}.col-lg-push-12{left:100%}.col-lg-push-11{left:91.66666666666666%}.col-lg-push-10{left:83.33333333333334%}.col-lg-push-9{left:75%}.col-lg-push-8{left:66.66666666666666%}.col-lg-push-7{left:58.333333333333336%}.col-lg-push-6{left:50%}.col-lg-push-5{left:41.66666666666667%}.col-lg-push-4{left:33.33333333333333%}.col-lg-push-3{left:25%}.col-lg-push-2{left:16.666666666666664%}.col-lg-push-1{left:8.333333333333332%}.col-lg-
push-0{left:0}.col-lg-offset-12{margin-left:100%}.col-lg-offset-11{margin-left:91.66666666666666%}.col-lg-offset-10{margin-left:83.33333333333334%}.col-lg-offset-9{margin-left:75%}.col-lg-offset-8{margin-left:66.66666666666666%}.col-lg-offset-7{margin-left:58.333333333333336%}.col-lg-offset-6{margin-left:50%}.col-lg-offset-5{margin-left:41.66666666666667%}.col-lg-offset-4{margin-left:33.33333333333333%}.col-lg-offset-3{margin-left:25%}.col-lg-offset-2{margin-left:16.666666666666664%}.col-lg-offset-1{margin-left:8.333333333333332%}.col-lg-offset-0{margin-left:0}}table{max-width:100%;background-color:transparent}th{text-align:left}.table{width:100%;margin-bottom:20px}.table>thead>tr>th,.table>tbody>tr>th,.table>tfoot>tr>th,.table>thead>tr>td,.table>tbody>tr>td,.table>tfoot>tr>td{padding:8px;line-height:1.428571429;vertical-align:top;border-top:1px solid #ddd}.table>thead>tr>th{vertical-align:bottom;border-bottom:2px solid #ddd}.table>caption+thead>tr:first-child>th,.table>colgroup+thead>tr:first-child>th,.table>thead:first-child>tr:first-child>th,.table>caption+thead>tr:first-child>td,.table>colgroup+thead>tr:first-child>td,.table>thead:first-child>tr:first-child>td{border-top:0}.table>tbody+tbody{border-top:2px solid #ddd}.table .table{background-color:#fff}.table-condensed>thead>tr>th,.table-condensed>tbody>tr>th,.table-condensed>tfoot>tr>th,.table-condensed>thead>tr>td,.table-condensed>tbody>tr>td,.table-condensed>tfoot>tr>td{padding:5px}.table-bordered{border:1px solid #ddd}.table-bordered>thead>tr>th,.table-bordered>tbody>tr>th,.table-bordered>tfoot>tr>th,.table-bordered>thead>tr>td,.table-bordered>tbody>tr>td,.table-bordered>tfoot>tr>td{border:1px solid #ddd}.table-bordered>thead>tr>th,.table-bordered>thead>tr>td{border-bottom-width:2px}.table-striped>tbody>tr:nth-child(odd)>td,.table-striped>tbody>tr:nth-child(odd)>th{background-color:#f9f9f9}.table-hover>tbody>tr:hover>td,.table-hover>tbody>tr:hover>th{background-color:#f5f5f5}table 
col[class*=col-]{position:static;float:none;display:table-column}table td[class*=col-],table th[class*=col-]{position:static;float:none;display:table-cell}.table>thead>tr>td.active,.table>tbody>tr>td.active,.table>tfoot>tr>td.active,.table>thead>tr>th.active,.table>tbody>tr>th.active,.table>tfoot>tr>th.active,.table>thead>tr.active>td,.table>tbody>tr.active>td,.table>tfoot>tr.active>td,.table>thead>tr.active>th,.table>tbody>tr.active>th,.table>tfoot>tr.active>th{background-color:#f5f5f5}.table-hover>tbody>tr>td.active:hover,.table-hover>tbody>tr>th.active:hover,.table-hover>tbody>tr.active:hover>td,.table-hover>tbody>tr.active:hover>th{background-color:#e8e8e8}.table>thead>tr>td.success,.table>tbody>tr>td.success,.table>tfoot>tr>td.success,.table>thead>tr>th.success,.table>tbody>tr>th.success,.table>tfoot>tr>th.success,.table>thead>tr.success>td,.table>tbody>tr.success>td,.table>tfoot>tr.success>td,.table>thead>tr.success>th,.table>tbody>tr.success>th,.table>tfoot>tr.success>th{background-color:#dff0d8}.table-hover>tbody>tr>td.success:hover,.table-hover>tbody>tr>th.success:hover,.table-hover>tbody>tr.success:hover>td,.table-hover>tbody>tr.success:hover>th{background-color:#d0e9c6}.table>thead>tr>td.info,.table>tbody>tr>td.info,.table>tfoot>tr>td.info,.table>thead>tr>th.info,.table>tbody>tr>th.info,.table>tfoot>tr>th.info,.table>thead>tr.info>td,.table>tbody>tr.info>td,.table>tfoot>tr.info>td,.table>thead>tr.info>th,.table>tbody>tr.info>th,.table>tfoot>tr.info>th{background-color:#d9edf7}.table-hover>tbody>tr>td.info:hover,.table-hover>tbody>tr>th.info:hover,.table-hover>tbody>tr.info:hover>td,.table-hover>tbody>tr.info:hover>th{background-color:#c4e3f3}.table>thead>tr>td.warning,.table>tbody>tr>td.warning,.table>tfoot>tr>td.warning,.table>thead>tr>th.warning,.table>tbody>tr>th.warning,.table>tfoot>tr>th.warning,.table>thead>tr.warning>td,.table>tbody>tr.warning>td,.table>tfoot>tr.warning>td,.table>thead>tr.warning>th,.table>tbody>tr.warning>th,.table>tfoot>tr.warnin
g>th{background-color:#fcf8e3}.table-hover>tbody>tr>td.warning:hover,.table-hover>tbody>tr>th.warning:hover,.table-hover>tbody>tr.warning:hover>td,.table-hover>tbody>tr.warning:hover>th{background-color:#faf2cc}.table>thead>tr>td.danger,.table>tbody>tr>td.danger,.table>tfoot>tr>td.danger,.table>thead>tr>th.danger,.table>tbody>tr>th.danger,.table>tfoot>tr>th.danger,.table>thead>tr.danger>td,.table>tbody>tr.danger>td,.table>tfoot>tr.danger>td,.table>thead>tr.danger>th,.table>tbody>tr.danger>th,.table>tfoot>tr.danger>th{background-color:#f2dede}.table-hover>tbody>tr>td.danger:hover,.table-hover>tbody>tr>th.danger:hover,.table-hover>tbody>tr.danger:hover>td,.table-hover>tbody>tr.danger:hover>th{background-color:#ebcccc}@media (max-width:767px){.table-responsive{width:100%;margin-bottom:15px;overflow-y:hidden;overflow-x:scroll;-ms-overflow-style:-ms-autohiding-scrollbar;border:1px solid #ddd;-webkit-overflow-scrolling:touch}.table-responsive>.table{margin-bottom:0}.table-responsive>.table>thead>tr>th,.table-responsive>.table>tbody>tr>th,.table-responsive>.table>tfoot>tr>th,.table-responsive>.table>thead>tr>td,.table-responsive>.table>tbody>tr>td,.table-responsive>.table>tfoot>tr>td{white-space:nowrap}.table-responsive>.table-bordered{border:0}.table-responsive>.table-bordered>thead>tr>th:first-child,.table-responsive>.table-bordered>tbody>tr>th:first-child,.table-responsive>.table-bordered>tfoot>tr>th:first-child,.table-responsive>.table-bordered>thead>tr>td:first-child,.table-responsive>.table-bordered>tbody>tr>td:first-child,.table-responsive>.table-bordered>tfoot>tr>td:first-child{border-left:0}.table-responsive>.table-bordered>thead>tr>th:last-child,.table-responsive>.table-bordered>tbody>tr>th:last-child,.table-responsive>.table-bordered>tfoot>tr>th:last-child,.table-responsive>.table-bordered>thead>tr>td:last-child,.table-responsive>.table-bordered>tbody>tr>td:last-child,.table-responsive>.table-bordered>tfoot>tr>td:last-child{border-right:0}.table-responsive>.tabl
e-bordered>tbody>tr:last-child>th,.table-responsive>.table-bordered>tfoot>tr:last-child>th,.table-responsive>.table-bordered>tbody>tr:last-child>td,.table-responsive>.table-bordered>tfoot>tr:last-child>td{border-bottom:0}}fieldset{padding:0;margin:0;border:0;min-width:0}legend{display:block;width:100%;padding:0;margin-bottom:20px;font-size:21px;line-height:inherit;color:#333;border:0;border-bottom:1px solid #e5e5e5}label{display:inline-block;margin-bottom:5px;font-weight:700}input[type=search]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type=radio],input[type=checkbox]{margin:4px 0 0;margin-top:1px \9;line-height:normal}input[type=file]{display:block}input[type=range]{display:block;width:100%}select[multiple],select[size]{height:auto}input[type=file]:focus,input[type=radio]:focus,input[type=checkbox]:focus{outline:thin dotted;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}output{display:block;padding-top:7px;font-size:14px;line-height:1.428571429;color:#555}.form-control{display:block;width:100%;height:34px;padding:6px 12px;font-size:14px;line-height:1.428571429;color:#555;background-color:#fff;background-image:none;border:1px solid #ccc;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075);-webkit-transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s;transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s}.form-control:focus{border-color:#66afe9;outline:0;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 8px rgba(102,175,233,.6);box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 8px rgba(102,175,233,.6)}.form-control:-moz-placeholder{color:#999}.form-control::-moz-placeholder{color:#999;opacity:1}.form-control:-ms-input-placeholder{color:#999}.form-control::-webkit-input-placeholder{color:#999}.form-control[disabled],.form-control[readonly],fieldset[disabled] 
.form-control{cursor:not-allowed;background-color:#eee;opacity:1}textarea.form-control{height:auto}input[type=date]{line-height:34px}.form-group{margin-bottom:15px}.radio,.checkbox{display:block;min-height:20px;margin-top:10px;margin-bottom:10px;padding-left:20px}.radio label,.checkbox label{display:inline;font-weight:400;cursor:pointer}.radio input[type=radio],.radio-inline input[type=radio],.checkbox input[type=checkbox],.checkbox-inline input[type=checkbox]{float:left;margin-left:-20px}.radio+.radio,.checkbox+.checkbox{margin-top:-5px}.radio-inline,.checkbox-inline{display:inline-block;padding-left:20px;margin-bottom:0;vertical-align:middle;font-weight:400;cursor:pointer}.radio-inline+.radio-inline,.checkbox-inline+.checkbox-inline{margin-top:0;margin-left:10px}input[type=radio][disabled],input[type=checkbox][disabled],.radio[disabled],.radio-inline[disabled],.checkbox[disabled],.checkbox-inline[disabled],fieldset[disabled] input[type=radio],fieldset[disabled] input[type=checkbox],fieldset[disabled] .radio,fieldset[disabled] .radio-inline,fieldset[disabled] .checkbox,fieldset[disabled] .checkbox-inline{cursor:not-allowed}.input-sm{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-sm{height:30px;line-height:30px}textarea.input-sm,select[multiple].input-sm{height:auto}.input-lg{height:46px;padding:10px 16px;font-size:18px;line-height:1.33;border-radius:6px}select.input-lg{height:46px;line-height:46px}textarea.input-lg,select[multiple].input-lg{height:auto}.has-feedback{position:relative}.has-feedback .form-control{padding-right:42.5px}.has-feedback .form-control-feedback{position:absolute;top:25px;right:0;display:block;width:34px;height:34px;line-height:34px;text-align:center}.has-success .help-block,.has-success .control-label,.has-success .radio,.has-success .checkbox,.has-success .radio-inline,.has-success .checkbox-inline{color:#3c763d}.has-success .form-control{border-color:#3c763d;-webkit-box-shadow:inset 0 1px 1px 
rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-success .form-control:focus{border-color:#2b542c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #67b168;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #67b168}.has-success .input-group-addon{color:#3c763d;border-color:#3c763d;background-color:#dff0d8}.has-success .form-control-feedback{color:#3c763d}.has-warning .help-block,.has-warning .control-label,.has-warning .radio,.has-warning .checkbox,.has-warning .radio-inline,.has-warning .checkbox-inline{color:#8a6d3b}.has-warning .form-control{border-color:#8a6d3b;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-warning .form-control:focus{border-color:#66512c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #c0a16b;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #c0a16b}.has-warning .input-group-addon{color:#8a6d3b;border-color:#8a6d3b;background-color:#fcf8e3}.has-warning .form-control-feedback{color:#8a6d3b}.has-error .help-block,.has-error .control-label,.has-error .radio,.has-error .checkbox,.has-error .radio-inline,.has-error .checkbox-inline{color:#a94442}.has-error .form-control{border-color:#a94442;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-error .form-control:focus{border-color:#843534;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #ce8483;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #ce8483}.has-error .input-group-addon{color:#a94442;border-color:#a94442;background-color:#f2dede}.has-error .form-control-feedback{color:#a94442}.form-control-static{margin-bottom:0}.help-block{display:block;margin-top:5px;margin-bottom:10px;color:#737373}@media (min-width:768px){.form-inline .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.form-inline .form-control{display:inline-block;width:auto;vertical-align:middle}.form-inline 
.control-label{margin-bottom:0;vertical-align:middle}.form-inline .radio,.form-inline .checkbox{display:inline-block;margin-top:0;margin-bottom:0;padding-left:0;vertical-align:middle}.form-inline .radio input[type=radio],.form-inline .checkbox input[type=checkbox]{float:none;margin-left:0}.form-inline .has-feedback .form-control-feedback{top:0}}.form-horizontal .control-label,.form-horizontal .radio,.form-horizontal .checkbox,.form-horizontal .radio-inline,.form-horizontal .checkbox-inline{margin-top:0;margin-bottom:0;padding-top:7px}.form-horizontal .radio,.form-horizontal .checkbox{min-height:27px}.form-horizontal .form-group{margin-left:-15px;margin-right:-15px}.form-horizontal .form-control-static{padding-top:7px}@media (min-width:768px){.form-horizontal .control-label{text-align:right}}.form-horizontal .has-feedback .form-control-feedback{top:0;right:15px}.btn{display:inline-block;margin-bottom:0;font-weight:400;text-align:center;vertical-align:middle;cursor:pointer;background-image:none;border:1px solid transparent;white-space:nowrap;padding:6px 12px;font-size:14px;line-height:1.428571429;border-radius:4px;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;-o-user-select:none;user-select:none}.btn:focus{outline:thin dotted;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}.btn:hover,.btn:focus{color:#333;text-decoration:none}.btn:active,.btn.active{outline:0;background-image:none;-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn.disabled,.btn[disabled],fieldset[disabled] .btn{cursor:not-allowed;pointer-events:none;opacity:.65;filter:alpha(opacity=65);-webkit-box-shadow:none;box-shadow:none}.btn-default{color:#333;background-color:#fff;border-color:#ccc}.btn-default:hover,.btn-default:focus,.btn-default:active,.btn-default.active,.open .dropdown-toggle.btn-default{color:#333;background-color:#ebebeb;border-color:#adadad}.btn-default:active,.btn-default.active,.open 
.dropdown-toggle.btn-default{background-image:none}.btn-default.disabled,.btn-default[disabled],fieldset[disabled] .btn-default,.btn-default.disabled:hover,.btn-default[disabled]:hover,fieldset[disabled] .btn-default:hover,.btn-default.disabled:focus,.btn-default[disabled]:focus,fieldset[disabled] .btn-default:focus,.btn-default.disabled:active,.btn-default[disabled]:active,fieldset[disabled] .btn-default:active,.btn-default.disabled.active,.btn-default[disabled].active,fieldset[disabled] .btn-default.active{background-color:#fff;border-color:#ccc}.btn-default .badge{color:#fff;background-color:#333}.btn-primary{color:#fff;background-color:#428bca;border-color:#357ebd}.btn-primary:hover,.btn-primary:focus,.btn-primary:active,.btn-primary.active,.open .dropdown-toggle.btn-primary{color:#fff;background-color:#3276b1;border-color:#285e8e}.btn-primary:active,.btn-primary.active,.open .dropdown-toggle.btn-primary{background-image:none}.btn-primary.disabled,.btn-primary[disabled],fieldset[disabled] .btn-primary,.btn-primary.disabled:hover,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary:hover,.btn-primary.disabled:focus,.btn-primary[disabled]:focus,fieldset[disabled] .btn-primary:focus,.btn-primary.disabled:active,.btn-primary[disabled]:active,fieldset[disabled] .btn-primary:active,.btn-primary.disabled.active,.btn-primary[disabled].active,fieldset[disabled] .btn-primary.active{background-color:#428bca;border-color:#357ebd}.btn-primary .badge{color:#428bca;background-color:#fff}.btn-success{color:#fff;background-color:#5cb85c;border-color:#4cae4c}.btn-success:hover,.btn-success:focus,.btn-success:active,.btn-success.active,.open .dropdown-toggle.btn-success{color:#fff;background-color:#47a447;border-color:#398439}.btn-success:active,.btn-success.active,.open .dropdown-toggle.btn-success{background-image:none}.btn-success.disabled,.btn-success[disabled],fieldset[disabled] .btn-success,.btn-success.disabled:hover,.btn-success[disabled]:hover,fieldset[disabled] 
.btn-success:hover,.btn-success.disabled:focus,.btn-success[disabled]:focus,fieldset[disabled] .btn-success:focus,.btn-success.disabled:active,.btn-success[disabled]:active,fieldset[disabled] .btn-success:active,.btn-success.disabled.active,.btn-success[disabled].active,fieldset[disabled] .btn-success.active{background-color:#5cb85c;border-color:#4cae4c}.btn-success .badge{color:#5cb85c;background-color:#fff}.btn-info{color:#fff;background-color:#5bc0de;border-color:#46b8da}.btn-info:hover,.btn-info:focus,.btn-info:active,.btn-info.active,.open .dropdown-toggle.btn-info{color:#fff;background-color:#39b3d7;border-color:#269abc}.btn-info:active,.btn-info.active,.open .dropdown-toggle.btn-info{background-image:none}.btn-info.disabled,.btn-info[disabled],fieldset[disabled] .btn-info,.btn-info.disabled:hover,.btn-info[disabled]:hover,fieldset[disabled] .btn-info:hover,.btn-info.disabled:focus,.btn-info[disabled]:focus,fieldset[disabled] .btn-info:focus,.btn-info.disabled:active,.btn-info[disabled]:active,fieldset[disabled] .btn-info:active,.btn-info.disabled.active,.btn-info[disabled].active,fieldset[disabled] .btn-info.active{background-color:#5bc0de;border-color:#46b8da}.btn-info .badge{color:#5bc0de;background-color:#fff}.btn-warning{color:#fff;background-color:#f0ad4e;border-color:#eea236}.btn-warning:hover,.btn-warning:focus,.btn-warning:active,.btn-warning.active,.open .dropdown-toggle.btn-warning{color:#fff;background-color:#ed9c28;border-color:#d58512}.btn-warning:active,.btn-warning.active,.open .dropdown-toggle.btn-warning{background-image:none}.btn-warning.disabled,.btn-warning[disabled],fieldset[disabled] .btn-warning,.btn-warning.disabled:hover,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning:hover,.btn-warning.disabled:focus,.btn-warning[disabled]:focus,fieldset[disabled] .btn-warning:focus,.btn-warning.disabled:active,.btn-warning[disabled]:active,fieldset[disabled] 
.btn-warning:active,.btn-warning.disabled.active,.btn-warning[disabled].active,fieldset[disabled] .btn-warning.active{background-color:#f0ad4e;border-color:#eea236}.btn-warning .badge{color:#f0ad4e;background-color:#fff}.btn-danger{color:#fff;background-color:#d9534f;border-color:#d43f3a}.btn-danger:hover,.btn-danger:focus,.btn-danger:active,.btn-danger.active,.open .dropdown-toggle.btn-danger{color:#fff;background-color:#d2322d;border-color:#ac2925}.btn-danger:active,.btn-danger.active,.open .dropdown-toggle.btn-danger{background-image:none}.btn-danger.disabled,.btn-danger[disabled],fieldset[disabled] .btn-danger,.btn-danger.disabled:hover,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger:hover,.btn-danger.disabled:focus,.btn-danger[disabled]:focus,fieldset[disabled] .btn-danger:focus,.btn-danger.disabled:active,.btn-danger[disabled]:active,fieldset[disabled] .btn-danger:active,.btn-danger.disabled.active,.btn-danger[disabled].active,fieldset[disabled] .btn-danger.active{background-color:#d9534f;border-color:#d43f3a}.btn-danger .badge{color:#d9534f;background-color:#fff}.btn-link{color:#428bca;font-weight:400;cursor:pointer;border-radius:0}.btn-link,.btn-link:active,.btn-link[disabled],fieldset[disabled] .btn-link{background-color:transparent;-webkit-box-shadow:none;box-shadow:none}.btn-link,.btn-link:hover,.btn-link:focus,.btn-link:active{border-color:transparent}.btn-link:hover,.btn-link:focus{color:#2a6496;text-decoration:underline;background-color:transparent}.btn-link[disabled]:hover,fieldset[disabled] .btn-link:hover,.btn-link[disabled]:focus,fieldset[disabled] .btn-link:focus{color:#999;text-decoration:none}.btn-lg{padding:10px 16px;font-size:18px;line-height:1.33;border-radius:6px}.btn-sm{padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}.btn-xs{padding:1px 
5px;font-size:12px;line-height:1.5;border-radius:3px}.btn-block{display:block;width:100%;padding-left:0;padding-right:0}.btn-block+.btn-block{margin-top:5px}input[type=submit].btn-block,input[type=reset].btn-block,input[type=button].btn-block{width:100%}.fade{opacity:0;-webkit-transition:opacity .15s linear;transition:opacity .15s linear}.fade.in{opacity:1}.collapse{display:none}.collapse.in{display:block}.collapsing{position:relative;height:0;overflow:hidden;-webkit-transition:height .35s ease;transition:height .35s ease}@font-face{font-family:'Glyphicons Halflings';src:url(../fonts/glyphicons-halflings-regular.eot);src:url(../fonts/glyphicons-halflings-regular.eot?#iefix) format('embedded-opentype'),url(../fonts/glyphicons-halflings-regular.woff) format('woff'),url(../fonts/glyphicons-halflings-regular.ttf) format('truetype'),url(../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular) format('svg')}.glyphicon{position:relative;top:1px;display:inline-block;font-family:'Glyphicons 
Halflings';font-style:normal;font-weight:400;line-height:1;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.glyphicon-asterisk:before{content:"\2a"}.glyphicon-plus:before{content:"\2b"}.glyphicon-euro:before{content:"\20ac"}.glyphicon-minus:before{content:"\2212"}.glyphicon-cloud:before{content:"\2601"}.glyphicon-envelope:before{content:"\2709"}.glyphicon-pencil:before{content:"\270f"}.glyphicon-glass:before{content:"\e001"}.glyphicon-music:before{content:"\e002"}.glyphicon-search:before{content:"\e003"}.glyphicon-heart:before{content:"\e005"}.glyphicon-star:before{content:"\e006"}.glyphicon-star-empty:before{content:"\e007"}.glyphicon-user:before{content:"\e008"}.glyphicon-film:before{content:"\e009"}.glyphicon-th-large:before{content:"\e010"}.glyphicon-th:before{content:"\e011"}.glyphicon-th-list:before{content:"\e012"}.glyphicon-ok:before{content:"\e013"}.glyphicon-remove:before{content:"\e014"}.glyphicon-zoom-in:before{content:"\e015"}.glyphicon-zoom-out:before{content:"\e016"}.glyphicon-off:before{content:"\e017"}.glyphicon-signal:before{content:"\e018"}.glyphicon-cog:before{content:"\e019"}.glyphicon-trash:before{content:"\e020"}.glyphicon-home:before{content:"\e021"}.glyphicon-file:before{content:"\e022"}.glyphicon-time:before{content:"\e023"}.glyphicon-road:before{content:"\e024"}.glyphicon-download-alt:before{content:"\e025"}.glyphicon-download:before{content:"\e026"}.glyphicon-upload:before{content:"\e027"}.glyphicon-inbox:before{content:"\e028"}.glyphicon-play-circle:before{content:"\e029"}.glyphicon-repeat:before{content:"\e030"}.glyphicon-refresh:before{content:"\e031"}.glyphicon-list-alt:before{content:"\e032"}.glyphicon-lock:before{content:"\e033"}.glyphicon-flag:before{content:"\e034"}.glyphicon-headphones:before{content:"\e035"}.glyphicon-volume-off:before{content:"\e036"}.glyphicon-volume-down:before{content:"\e037"}.glyphicon-volume-up:before{content:"\e038"}.glyphicon-qrcode:before{content:"\e039"}.glyphicon-barcode:before{co
ntent:"\e040"}.glyphicon-tag:before{content:"\e041"}.glyphicon-tags:before{content:"\e042"}.glyphicon-book:before{content:"\e043"}.glyphicon-bookmark:before{content:"\e044"}.glyphicon-print:before{content:"\e045"}.glyphicon-camera:before{content:"\e046"}.glyphicon-font:before{content:"\e047"}.glyphicon-bold:before{content:"\e048"}.glyphicon-italic:before{content:"\e049"}.glyphicon-text-height:before{content:"\e050"}.glyphicon-text-width:before{content:"\e051"}.glyphicon-align-left:before{content:"\e052"}.glyphicon-align-center:before{content:"\e053"}.glyphicon-align-right:before{content:"\e054"}.glyphicon-align-justify:before{content:"\e055"}.glyphicon-list:before{content:"\e056"}.glyphicon-indent-left:before{content:"\e057"}.glyphicon-indent-right:before{content:"\e058"}.glyphicon-facetime-video:before{content:"\e059"}.glyphicon-picture:before{content:"\e060"}.glyphicon-map-marker:before{content:"\e062"}.glyphicon-adjust:before{content:"\e063"}.glyphicon-tint:before{content:"\e064"}.glyphicon-edit:before{content:"\e065"}.glyphicon-share:before{content:"\e066"}.glyphicon-check:before{content:"\e067"}.glyphicon-move:before{content:"\e068"}.glyphicon-step-backward:before{content:"\e069"}.glyphicon-fast-backward:before{content:"\e070"}.glyphicon-backward:before{content:"\e071"}.glyphicon-play:before{content:"\e072"}.glyphicon-pause:before{content:"\e073"}.glyphicon-stop:before{content:"\e074"}.glyphicon-forward:before{content:"\e075"}.glyphicon-fast-forward:before{content:"\e076"}.glyphicon-step-forward:before{content:"\e077"}.glyphicon-eject:before{content:"\e078"}.glyphicon-chevron-left:before{content:"\e079"}.glyphicon-chevron-right:before{content:"\e080"}.glyphicon-plus-sign:before{content:"\e081"}.glyphicon-minus-sign:before{content:"\e082"}.glyphicon-remove-sign:before{content:"\e083"}.glyphicon-ok-sign:before{content:"\e084"}.glyphicon-question-sign:before{content:"\e085"}.glyphicon-info-sign:before{content:"\e086"}.glyphicon-screenshot:before{content:"\e087"}.g
lyphicon-remove-circle:before{content:"\e088"}.glyphicon-ok-circle:before{content:"\e089"}.glyphicon-ban-circle:before{content:"\e090"}.glyphicon-arrow-left:before{content:"\e091"}.glyphicon-arrow-right:before{content:"\e092"}.glyphicon-arrow-up:before{content:"\e093"}.glyphicon-arrow-down:before{content:"\e094"}.glyphicon-share-alt:before{content:"\e095"}.glyphicon-resize-full:before{content:"\e096"}.glyphicon-resize-small:before{content:"\e097"}.glyphicon-exclamation-sign:before{content:"\e101"}.glyphicon-gift:before{content:"\e102"}.glyphicon-leaf:before{content:"\e103"}.glyphicon-fire:before{content:"\e104"}.glyphicon-eye-open:before{content:"\e105"}.glyphicon-eye-close:before{content:"\e106"}.glyphicon-warning-sign:before{content:"\e107"}.glyphicon-plane:before{content:"\e108"}.glyphicon-calendar:before{content:"\e109"}.glyphicon-random:before{content:"\e110"}.glyphicon-comment:before{content:"\e111"}.glyphicon-magnet:before{content:"\e112"}.glyphicon-chevron-up:before{content:"\e113"}.glyphicon-chevron-down:before{content:"\e114"}.glyphicon-retweet:before{content:"\e115"}.glyphicon-shopping-cart:before{content:"\e116"}.glyphicon-folder-close:before{content:"\e117"}.glyphicon-folder-open:before{content:"\e118"}.glyphicon-resize-vertical:before{content:"\e119"}.glyphicon-resize-horizontal:before{content:"\e120"}.glyphicon-hdd:before{content:"\e121"}.glyphicon-bullhorn:before{content:"\e122"}.glyphicon-bell:before{content:"\e123"}.glyphicon-certificate:before{content:"\e124"}.glyphicon-thumbs-up:before{content:"\e125"}.glyphicon-thumbs-down:before{content:"\e126"}.glyphicon-hand-right:before{content:"\e127"}.glyphicon-hand-left:before{content:"\e128"}.glyphicon-hand-up:before{content:"\e129"}.glyphicon-hand-down:before{content:"\e130"}.glyphicon-circle-arrow-right:before{content:"\e131"}.glyphicon-circle-arrow-left:before{content:"\e132"}.glyphicon-circle-arrow-up:before{content:"\e133"}.glyphicon-circle-arrow-down:before{content:"\e134"}.glyphicon-globe:before{c
ontent:"\e135"}.glyphicon-wrench:before{content:"\e136"}.glyphicon-tasks:before{content:"\e137"}.glyphicon-filter:before{content:"\e138"}.glyphicon-briefcase:before{content:"\e139"}.glyphicon-fullscreen:before{content:"\e140"}.glyphicon-dashboard:before{content:"\e141"}.glyphicon-paperclip:before{content:"\e142"}.glyphicon-heart-empty:before{content:"\e143"}.glyphicon-link:before{content:"\e144"}.glyphicon-phone:before{content:"\e145"}.glyphicon-pushpin:before{content:"\e146"}.glyphicon-usd:before{content:"\e148"}.glyphicon-gbp:before{content:"\e149"}.glyphicon-sort:before{content:"\e150"}.glyphicon-sort-by-alphabet:before{content:"\e151"}.glyphicon-sort-by-alphabet-alt:before{content:"\e152"}.glyphicon-sort-by-order:before{content:"\e153"}.glyphicon-sort-by-order-alt:before{content:"\e154"}.glyphicon-sort-by-attributes:before{content:"\e155"}.glyphicon-sort-by-attributes-alt:before{content:"\e156"}.glyphicon-unchecked:before{content:"\e157"}.glyphicon-expand:before{content:"\e158"}.glyphicon-collapse-down:before{content:"\e159"}.glyphicon-collapse-up:before{content:"\e160"}.glyphicon-log-in:before{content:"\e161"}.glyphicon-flash:before{content:"\e162"}.glyphicon-log-out:before{content:"\e163"}.glyphicon-new-window:before{content:"\e164"}.glyphicon-record:before{content:"\e165"}.glyphicon-save:before{content:"\e166"}.glyphicon-open:before{content:"\e167"}.glyphicon-saved:before{content:"\e168"}.glyphicon-import:before{content:"\e169"}.glyphicon-export:before{content:"\e170"}.glyphicon-send:before{content:"\e171"}.glyphicon-floppy-disk:before{content:"\e172"}.glyphicon-floppy-saved:before{content:"\e173"}.glyphicon-floppy-remove:before{content:"\e174"}.glyphicon-floppy-save:before{content:"\e175"}.glyphicon-floppy-open:before{content:"\e176"}.glyphicon-credit-card:before{content:"\e177"}.glyphicon-transfer:before{content:"\e178"}.glyphicon-cutlery:before{content:"\e179"}.glyphicon-header:before{content:"\e180"}.glyphicon-compressed:before{content:"\e181"}.glyphicon-
earphone:before{content:"\e182"}.glyphicon-phone-alt:before{content:"\e183"}.glyphicon-tower:before{content:"\e184"}.glyphicon-stats:before{content:"\e185"}.glyphicon-sd-video:before{content:"\e186"}.glyphicon-hd-video:before{content:"\e187"}.glyphicon-subtitles:before{content:"\e188"}.glyphicon-sound-stereo:before{content:"\e189"}.glyphicon-sound-dolby:before{content:"\e190"}.glyphicon-sound-5-1:before{content:"\e191"}.glyphicon-sound-6-1:before{content:"\e192"}.glyphicon-sound-7-1:before{content:"\e193"}.glyphicon-copyright-mark:before{content:"\e194"}.glyphicon-registration-mark:before{content:"\e195"}.glyphicon-cloud-download:before{content:"\e197"}.glyphicon-cloud-upload:before{content:"\e198"}.glyphicon-tree-conifer:before{content:"\e199"}.glyphicon-tree-deciduous:before{content:"\e200"}.caret{display:inline-block;width:0;height:0;margin-left:2px;vertical-align:middle;border-top:4px solid;border-right:4px solid transparent;border-left:4px solid transparent}.dropdown{position:relative}.dropdown-toggle:focus{outline:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:160px;padding:5px 0;margin:2px 0 0;list-style:none;font-size:14px;background-color:#fff;border:1px solid #ccc;border:1px solid rgba(0,0,0,.15);border-radius:4px;-webkit-box-shadow:0 6px 12px rgba(0,0,0,.175);box-shadow:0 6px 12px rgba(0,0,0,.175);background-clip:padding-box}.dropdown-menu.pull-right{right:0;left:auto}.dropdown-menu .divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.dropdown-menu>li>a{display:block;padding:3px 
20px;clear:both;font-weight:400;line-height:1.428571429;color:#333;white-space:nowrap}.dropdown-menu>li>a:hover,.dropdown-menu>li>a:focus{text-decoration:none;color:#262626;background-color:#f5f5f5}.dropdown-menu>.active>a,.dropdown-menu>.active>a:hover,.dropdown-menu>.active>a:focus{color:#fff;text-decoration:none;outline:0;background-color:#428bca}.dropdown-menu>.disabled>a,.dropdown-menu>.disabled>a:hover,.dropdown-menu>.disabled>a:focus{color:#999}.dropdown-menu>.disabled>a:hover,.dropdown-menu>.disabled>a:focus{text-decoration:none;background-color:transparent;background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled=false);cursor:not-allowed}.open>.dropdown-menu{display:block}.open>a{outline:0}.dropdown-menu-right{left:auto;right:0}.dropdown-menu-left{left:0;right:auto}.dropdown-header{display:block;padding:3px 20px;font-size:12px;line-height:1.428571429;color:#999}.dropdown-backdrop{position:fixed;left:0;right:0;bottom:0;top:0;z-index:990}.pull-right>.dropdown-menu{right:0;left:auto}.dropup .caret,.navbar-fixed-bottom .dropdown .caret{border-top:0;border-bottom:4px solid;content:""}.dropup .dropdown-menu,.navbar-fixed-bottom .dropdown .dropdown-menu{top:auto;bottom:100%;margin-bottom:1px}@media (min-width:768px){.navbar-right .dropdown-menu{left:auto;right:0}.navbar-right .dropdown-menu-left{left:0;right:auto}}.btn-group,.btn-group-vertical{position:relative;display:inline-block;vertical-align:middle}.btn-group>.btn,.btn-group-vertical>.btn{position:relative;float:left}.btn-group>.btn:hover,.btn-group-vertical>.btn:hover,.btn-group>.btn:focus,.btn-group-vertical>.btn:focus,.btn-group>.btn:active,.btn-group-vertical>.btn:active,.btn-group>.btn.active,.btn-group-vertical>.btn.active{z-index:2}.btn-group>.btn:focus,.btn-group-vertical>.btn:focus{outline:0}.btn-group .btn+.btn,.btn-group .btn+.btn-group,.btn-group .btn-group+.btn,.btn-group .btn-group+.btn-group{margin-left:-1px}.btn-toolbar{margin-left:-5px}.btn-toolbar 
.btn-group,.btn-toolbar .input-group{float:left}.btn-toolbar>.btn,.btn-toolbar>.btn-group,.btn-toolbar>.input-group{margin-left:5px}.btn-group>.btn:not(:first-child):not(:last-child):not(.dropdown-toggle){border-radius:0}.btn-group>.btn:first-child{margin-left:0}.btn-group>.btn:first-child:not(:last-child):not(.dropdown-toggle){border-bottom-right-radius:0;border-top-right-radius:0}.btn-group>.btn:last-child:not(:first-child),.btn-group>.dropdown-toggle:not(:first-child){border-bottom-left-radius:0;border-top-left-radius:0}.btn-group>.btn-group{float:left}.btn-group>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group>.btn-group:first-child>.btn:last-child,.btn-group>.btn-group:first-child>.dropdown-toggle{border-bottom-right-radius:0;border-top-right-radius:0}.btn-group>.btn-group:last-child>.btn:first-child{border-bottom-left-radius:0;border-top-left-radius:0}.btn-group .dropdown-toggle:active,.btn-group.open .dropdown-toggle{outline:0}.btn-group-xs>.btn{padding:1px 5px;font-size:12px;line-height:1.5;border-radius:3px}.btn-group-sm>.btn{padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}.btn-group-lg>.btn{padding:10px 16px;font-size:18px;line-height:1.33;border-radius:6px}.btn-group>.btn+.dropdown-toggle{padding-left:8px;padding-right:8px}.btn-group>.btn-lg+.dropdown-toggle{padding-left:12px;padding-right:12px}.btn-group.open .dropdown-toggle{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn-group.open .dropdown-toggle.btn-link{-webkit-box-shadow:none;box-shadow:none}.btn .caret{margin-left:0}.btn-lg .caret{border-width:5px 5px 0;border-bottom-width:0}.dropup .btn-lg .caret{border-width:0 5px 
5px}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group,.btn-group-vertical>.btn-group>.btn{display:block;float:none;width:100%;max-width:100%}.btn-group-vertical>.btn-group>.btn{float:none}.btn-group-vertical>.btn+.btn,.btn-group-vertical>.btn+.btn-group,.btn-group-vertical>.btn-group+.btn,.btn-group-vertical>.btn-group+.btn-group{margin-top:-1px;margin-left:0}.btn-group-vertical>.btn:not(:first-child):not(:last-child){border-radius:0}.btn-group-vertical>.btn:first-child:not(:last-child){border-top-right-radius:4px;border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn:last-child:not(:first-child){border-bottom-left-radius:4px;border-top-right-radius:0;border-top-left-radius:0}.btn-group-vertical>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group-vertical>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group-vertical>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn-group:last-child:not(:first-child)>.btn:first-child{border-top-right-radius:0;border-top-left-radius:0}.btn-group-justified{display:table;width:100%;table-layout:fixed;border-collapse:separate}.btn-group-justified>.btn,.btn-group-justified>.btn-group{float:none;display:table-cell;width:1%}.btn-group-justified>.btn-group .btn{width:100%}[data-toggle=buttons]>.btn>input[type=radio],[data-toggle=buttons]>.btn>input[type=checkbox]{display:none}.input-group{position:relative;display:table;border-collapse:separate}.input-group[class*=col-]{float:none;padding-left:0;padding-right:0}.input-group .form-control{float:left;width:100%;margin-bottom:0}.input-group-lg>.form-control,.input-group-lg>.input-group-addon,.input-group-lg>.input-group-btn>.btn{height:46px;padding:10px 
16px;font-size:18px;line-height:1.33;border-radius:6px}select.input-group-lg>.form-control,select.input-group-lg>.input-group-addon,select.input-group-lg>.input-group-btn>.btn{height:46px;line-height:46px}textarea.input-group-lg>.form-control,textarea.input-group-lg>.input-group-addon,textarea.input-group-lg>.input-group-btn>.btn,select[multiple].input-group-lg>.form-control,select[multiple].input-group-lg>.input-group-addon,select[multiple].input-group-lg>.input-group-btn>.btn{height:auto}.input-group-sm>.form-control,.input-group-sm>.input-group-addon,.input-group-sm>.input-group-btn>.btn{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-group-sm>.form-control,select.input-group-sm>.input-group-addon,select.input-group-sm>.input-group-btn>.btn{height:30px;line-height:30px}textarea.input-group-sm>.form-control,textarea.input-group-sm>.input-group-addon,textarea.input-group-sm>.input-group-btn>.btn,select[multiple].input-group-sm>.form-control,select[multiple].input-group-sm>.input-group-addon,select[multiple].input-group-sm>.input-group-btn>.btn{height:auto}.input-group-addon,.input-group-btn,.input-group .form-control{display:table-cell}.input-group-addon:not(:first-child):not(:last-child),.input-group-btn:not(:first-child):not(:last-child),.input-group .form-control:not(:first-child):not(:last-child){border-radius:0}.input-group-addon,.input-group-btn{width:1%;white-space:nowrap;vertical-align:middle}.input-group-addon{padding:6px 12px;font-size:14px;font-weight:400;line-height:1;color:#555;text-align:center;background-color:#eee;border:1px solid #ccc;border-radius:4px}.input-group-addon.input-sm{padding:5px 10px;font-size:12px;border-radius:3px}.input-group-addon.input-lg{padding:10px 16px;font-size:18px;border-radius:6px}.input-group-addon input[type=radio],.input-group-addon input[type=checkbox]{margin-top:0}.input-group 
.form-control:first-child,.input-group-addon:first-child,.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group>.btn,.input-group-btn:first-child>.dropdown-toggle,.input-group-btn:last-child>.btn:not(:last-child):not(.dropdown-toggle),.input-group-btn:last-child>.btn-group:not(:last-child)>.btn{border-bottom-right-radius:0;border-top-right-radius:0}.input-group-addon:first-child{border-right:0}.input-group .form-control:last-child,.input-group-addon:last-child,.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group>.btn,.input-group-btn:last-child>.dropdown-toggle,.input-group-btn:first-child>.btn:not(:first-child),.input-group-btn:first-child>.btn-group:not(:first-child)>.btn{border-bottom-left-radius:0;border-top-left-radius:0}.input-group-addon:last-child{border-left:0}.input-group-btn{position:relative;font-size:0;white-space:nowrap}.input-group-btn>.btn{position:relative}.input-group-btn>.btn+.btn{margin-left:-1px}.input-group-btn>.btn:hover,.input-group-btn>.btn:focus,.input-group-btn>.btn:active{z-index:2}.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group{margin-right:-1px}.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group{margin-left:-1px}.nav{margin-bottom:0;padding-left:0;list-style:none}.nav>li{position:relative;display:block}.nav>li>a{position:relative;display:block;padding:10px 15px}.nav>li>a:hover,.nav>li>a:focus{text-decoration:none;background-color:#eee}.nav>li.disabled>a{color:#999}.nav>li.disabled>a:hover,.nav>li.disabled>a:focus{color:#999;text-decoration:none;background-color:transparent;cursor:not-allowed}.nav .open>a,.nav .open>a:hover,.nav .open>a:focus{background-color:#eee;border-color:#428bca}.nav .nav-divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.nav>li>a>img{max-width:none}.nav-tabs{border-bottom:1px solid #ddd}.nav-tabs>li{float:left;margin-bottom:-1px}.nav-tabs>li>a{margin-right:2px;line-height:1.428571429;border:1px solid 
transparent;border-radius:4px 4px 0 0}.nav-tabs>li>a:hover{border-color:#eee #eee #ddd}.nav-tabs>li.active>a,.nav-tabs>li.active>a:hover,.nav-tabs>li.active>a:focus{color:#555;background-color:#fff;border:1px solid #ddd;border-bottom-color:transparent;cursor:default}.nav-tabs.nav-justified{width:100%;border-bottom:0}.nav-tabs.nav-justified>li{float:none}.nav-tabs.nav-justified>li>a{text-align:center;margin-bottom:5px}.nav-tabs.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-tabs.nav-justified>li{display:table-cell;width:1%}.nav-tabs.nav-justified>li>a{margin-bottom:0}}.nav-tabs.nav-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:hover,.nav-tabs.nav-justified>.active>a:focus{border:1px solid #ddd}@media (min-width:768px){.nav-tabs.nav-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 0}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:hover,.nav-tabs.nav-justified>.active>a:focus{border-bottom-color:#fff}}.nav-pills>li{float:left}.nav-pills>li>a{border-radius:4px}.nav-pills>li+li{margin-left:2px}.nav-pills>li.active>a,.nav-pills>li.active>a:hover,.nav-pills>li.active>a:focus{color:#fff;background-color:#428bca}.nav-stacked>li{float:none}.nav-stacked>li+li{margin-top:2px;margin-left:0}.nav-justified{width:100%}.nav-justified>li{float:none}.nav-justified>li>a{text-align:center;margin-bottom:5px}.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-justified>li{display:table-cell;width:1%}.nav-justified>li>a{margin-bottom:0}}.nav-tabs-justified{border-bottom:0}.nav-tabs-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:hover,.nav-tabs-justified>.active>a:focus{border:1px solid #ddd}@media (min-width:768px){.nav-tabs-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 
0}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:hover,.nav-tabs-justified>.active>a:focus{border-bottom-color:#fff}}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.nav-tabs .dropdown-menu{margin-top:-1px;border-top-right-radius:0;border-top-left-radius:0}.navbar{position:relative;min-height:50px;margin-bottom:20px;border:1px solid transparent}@media (min-width:768px){.navbar{border-radius:4px}}@media (min-width:768px){.navbar-header{float:left}}.navbar-collapse{max-height:340px;overflow-x:visible;padding-right:15px;padding-left:15px;border-top:1px solid transparent;box-shadow:inset 0 1px 0 rgba(255,255,255,.1);-webkit-overflow-scrolling:touch}.navbar-collapse.in{overflow-y:auto}@media (min-width:768px){.navbar-collapse{width:auto;border-top:0;box-shadow:none}.navbar-collapse.collapse{display:block!important;height:auto!important;padding-bottom:0;overflow:visible!important}.navbar-collapse.in{overflow-y:visible}.navbar-fixed-top .navbar-collapse,.navbar-static-top .navbar-collapse,.navbar-fixed-bottom .navbar-collapse{padding-left:0;padding-right:0}}.container>.navbar-header,.container-fluid>.navbar-header,.container>.navbar-collapse,.container-fluid>.navbar-collapse{margin-right:-15px;margin-left:-15px}@media (min-width:768px){.container>.navbar-header,.container-fluid>.navbar-header,.container>.navbar-collapse,.container-fluid>.navbar-collapse{margin-right:0;margin-left:0}}.navbar-static-top{z-index:1000;border-width:0 0 1px}@media (min-width:768px){.navbar-static-top{border-radius:0}}.navbar-fixed-top,.navbar-fixed-bottom{position:fixed;right:0;left:0;z-index:1030}@media (min-width:768px){.navbar-fixed-top,.navbar-fixed-bottom{border-radius:0}}.navbar-fixed-top{top:0;border-width:0 0 1px}.navbar-fixed-bottom{bottom:0;margin-bottom:0;border-width:1px 0 0}.navbar-brand{float:left;padding:15px;font-size:18px;line-height:20px;height:20px}.navbar-brand:hover,.navbar-brand:focus{text-decoration:none}@media 
(min-width:768px){.navbar>.container .navbar-brand,.navbar>.container-fluid .navbar-brand{margin-left:-15px}}.navbar-toggle{position:relative;float:right;margin-right:15px;padding:9px 10px;margin-top:8px;margin-bottom:8px;background-color:transparent;background-image:none;border:1px solid transparent;border-radius:4px}.navbar-toggle:focus{outline:0}.navbar-toggle .icon-bar{display:block;width:22px;height:2px;border-radius:1px}.navbar-toggle .icon-bar+.icon-bar{margin-top:4px}@media (min-width:768px){.navbar-toggle{display:none}}.navbar-nav{margin:7.5px -15px}.navbar-nav>li>a{padding-top:10px;padding-bottom:10px;line-height:20px}@media (max-width:767px){.navbar-nav .open .dropdown-menu{position:static;float:none;width:auto;margin-top:0;background-color:transparent;border:0;box-shadow:none}.navbar-nav .open .dropdown-menu>li>a,.navbar-nav .open .dropdown-menu .dropdown-header{padding:5px 15px 5px 25px}.navbar-nav .open .dropdown-menu>li>a{line-height:20px}.navbar-nav .open .dropdown-menu>li>a:hover,.navbar-nav .open .dropdown-menu>li>a:focus{background-image:none}}@media (min-width:768px){.navbar-nav{float:left;margin:0}.navbar-nav>li{float:left}.navbar-nav>li>a{padding-top:15px;padding-bottom:15px}.navbar-nav.navbar-right:last-child{margin-right:-15px}}@media (min-width:768px){.navbar-left{float:left!important}.navbar-right{float:right!important}}.navbar-form{margin-left:-15px;margin-right:-15px;padding:10px 15px;border-top:1px solid transparent;border-bottom:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1);margin-top:8px;margin-bottom:8px}@media (min-width:768px){.navbar-form .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.navbar-form .form-control{display:inline-block;width:auto;vertical-align:middle}.navbar-form .control-label{margin-bottom:0;vertical-align:middle}.navbar-form .radio,.navbar-form 
.checkbox{display:inline-block;margin-top:0;margin-bottom:0;padding-left:0;vertical-align:middle}.navbar-form .radio input[type=radio],.navbar-form .checkbox input[type=checkbox]{float:none;margin-left:0}.navbar-form .has-feedback .form-control-feedback{top:0}}@media (max-width:767px){.navbar-form .form-group{margin-bottom:5px}}@media (min-width:768px){.navbar-form{width:auto;border:0;margin-left:0;margin-right:0;padding-top:0;padding-bottom:0;-webkit-box-shadow:none;box-shadow:none}.navbar-form.navbar-right:last-child{margin-right:-15px}}.navbar-nav>li>.dropdown-menu{margin-top:0;border-top-right-radius:0;border-top-left-radius:0}.navbar-fixed-bottom .navbar-nav>li>.dropdown-menu{border-bottom-right-radius:0;border-bottom-left-radius:0}.navbar-btn{margin-top:8px;margin-bottom:8px}.navbar-btn.btn-sm{margin-top:10px;margin-bottom:10px}.navbar-btn.btn-xs{margin-top:14px;margin-bottom:14px}.navbar-text{margin-top:15px;margin-bottom:15px}@media (min-width:768px){.navbar-text{float:left;margin-left:15px;margin-right:15px}.navbar-text.navbar-right:last-child{margin-right:0}}.navbar-default{background-color:#f8f8f8;border-color:#e7e7e7}.navbar-default .navbar-brand{color:#777}.navbar-default .navbar-brand:hover,.navbar-default .navbar-brand:focus{color:#5e5e5e;background-color:transparent}.navbar-default .navbar-text{color:#777}.navbar-default .navbar-nav>li>a{color:#777}.navbar-default .navbar-nav>li>a:hover,.navbar-default .navbar-nav>li>a:focus{color:#333;background-color:transparent}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.active>a:hover,.navbar-default .navbar-nav>.active>a:focus{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav>.disabled>a,.navbar-default .navbar-nav>.disabled>a:hover,.navbar-default .navbar-nav>.disabled>a:focus{color:#ccc;background-color:transparent}.navbar-default .navbar-toggle{border-color:#ddd}.navbar-default .navbar-toggle:hover,.navbar-default .navbar-toggle:focus{background-color:#ddd}.navbar-default 
.navbar-toggle .icon-bar{background-color:#888}.navbar-default .navbar-collapse,.navbar-default .navbar-form{border-color:#e7e7e7}.navbar-default .navbar-nav>.open>a,.navbar-default .navbar-nav>.open>a:hover,.navbar-default .navbar-nav>.open>a:focus{background-color:#e7e7e7;color:#555}@media (max-width:767px){.navbar-default .navbar-nav .open .dropdown-menu>li>a{color:#777}.navbar-default .navbar-nav .open .dropdown-menu>li>a:hover,.navbar-default .navbar-nav .open .dropdown-menu>li>a:focus{color:#333;background-color:transparent}.navbar-default .navbar-nav .open .dropdown-menu>.active>a,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:hover,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:focus{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:hover,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:focus{color:#ccc;background-color:transparent}}.navbar-default .navbar-link{color:#777}.navbar-default .navbar-link:hover{color:#333}.navbar-inverse{background-color:#222;border-color:#080808}.navbar-inverse .navbar-brand{color:#999}.navbar-inverse .navbar-brand:hover,.navbar-inverse .navbar-brand:focus{color:#fff;background-color:transparent}.navbar-inverse .navbar-text{color:#999}.navbar-inverse .navbar-nav>li>a{color:#999}.navbar-inverse .navbar-nav>li>a:hover,.navbar-inverse .navbar-nav>li>a:focus{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse .navbar-nav>.active>a:hover,.navbar-inverse .navbar-nav>.active>a:focus{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav>.disabled>a,.navbar-inverse .navbar-nav>.disabled>a:hover,.navbar-inverse .navbar-nav>.disabled>a:focus{color:#444;background-color:transparent}.navbar-inverse .navbar-toggle{border-color:#333}.navbar-inverse .navbar-toggle:hover,.navbar-inverse .navbar-toggle:focus{background-color:#333}.navbar-inverse 
.navbar-toggle .icon-bar{background-color:#fff}.navbar-inverse .navbar-collapse,.navbar-inverse .navbar-form{border-color:#101010}.navbar-inverse .navbar-nav>.open>a,.navbar-inverse .navbar-nav>.open>a:hover,.navbar-inverse .navbar-nav>.open>a:focus{background-color:#080808;color:#fff}@media (max-width:767px){.navbar-inverse .navbar-nav .open .dropdown-menu>.dropdown-header{border-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu .divider{background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a{color:#999}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:hover,.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:focus{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:hover,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:focus{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:hover,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:focus{color:#444;background-color:transparent}}.navbar-inverse .navbar-link{color:#999}.navbar-inverse .navbar-link:hover{color:#fff}.breadcrumb{padding:8px 15px;margin-bottom:20px;list-style:none;background-color:#f5f5f5;border-radius:4px}.breadcrumb>li{display:inline-block}.breadcrumb>li+li:before{content:"/\00a0";padding:0 5px;color:#ccc}.breadcrumb>.active{color:#999}.pagination{display:inline-block;padding-left:0;margin:20px 0;border-radius:4px}.pagination>li{display:inline}.pagination>li>a,.pagination>li>span{position:relative;float:left;padding:6px 12px;line-height:1.428571429;text-decoration:none;color:#428bca;background-color:#fff;border:1px solid 
#ddd;margin-left:-1px}.pagination>li:first-child>a,.pagination>li:first-child>span{margin-left:0;border-bottom-left-radius:4px;border-top-left-radius:4px}.pagination>li:last-child>a,.pagination>li:last-child>span{border-bottom-right-radius:4px;border-top-right-radius:4px}.pagination>li>a:hover,.pagination>li>span:hover,.pagination>li>a:focus,.pagination>li>span:focus{color:#2a6496;background-color:#eee;border-color:#ddd}.pagination>.active>a,.pagination>.active>span,.pagination>.active>a:hover,.pagination>.active>span:hover,.pagination>.active>a:focus,.pagination>.active>span:focus{z-index:2;color:#fff;background-color:#428bca;border-color:#428bca;cursor:default}.pagination>.disabled>span,.pagination>.disabled>span:hover,.pagination>.disabled>span:focus,.pagination>.disabled>a,.pagination>.disabled>a:hover,.pagination>.disabled>a:focus{color:#999;background-color:#fff;border-color:#ddd;cursor:not-allowed}.pagination-lg>li>a,.pagination-lg>li>span{padding:10px 16px;font-size:18px}.pagination-lg>li:first-child>a,.pagination-lg>li:first-child>span{border-bottom-left-radius:6px;border-top-left-radius:6px}.pagination-lg>li:last-child>a,.pagination-lg>li:last-child>span{border-bottom-right-radius:6px;border-top-right-radius:6px}.pagination-sm>li>a,.pagination-sm>li>span{padding:5px 10px;font-size:12px}.pagination-sm>li:first-child>a,.pagination-sm>li:first-child>span{border-bottom-left-radius:3px;border-top-left-radius:3px}.pagination-sm>li:last-child>a,.pagination-sm>li:last-child>span{border-bottom-right-radius:3px;border-top-right-radius:3px}.pager{padding-left:0;margin:20px 0;list-style:none;text-align:center}.pager li{display:inline}.pager li>a,.pager li>span{display:inline-block;padding:5px 14px;background-color:#fff;border:1px solid #ddd;border-radius:15px}.pager li>a:hover,.pager li>a:focus{text-decoration:none;background-color:#eee}.pager .next>a,.pager .next>span{float:right}.pager .previous>a,.pager .previous>span{float:left}.pager .disabled>a,.pager 
.disabled>a:hover,.pager .disabled>a:focus,.pager .disabled>span{color:#999;background-color:#fff;cursor:not-allowed}.label{display:inline;padding:.2em .6em .3em;font-size:75%;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25em}.label[href]:hover,.label[href]:focus{color:#fff;text-decoration:none;cursor:pointer}.label:empty{display:none}.btn .label{position:relative;top:-1px}.label-default{background-color:#999}.label-default[href]:hover,.label-default[href]:focus{background-color:gray}.label-primary{background-color:#428bca}.label-primary[href]:hover,.label-primary[href]:focus{background-color:#3071a9}.label-success{background-color:#5cb85c}.label-success[href]:hover,.label-success[href]:focus{background-color:#449d44}.label-info{background-color:#5bc0de}.label-info[href]:hover,.label-info[href]:focus{background-color:#31b0d5}.label-warning{background-color:#f0ad4e}.label-warning[href]:hover,.label-warning[href]:focus{background-color:#ec971f}.label-danger{background-color:#d9534f}.label-danger[href]:hover,.label-danger[href]:focus{background-color:#c9302c}.badge{display:inline-block;min-width:10px;padding:3px 7px;font-size:12px;font-weight:700;color:#fff;line-height:1;vertical-align:baseline;white-space:nowrap;text-align:center;background-color:#999;border-radius:10px}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.btn-xs .badge{top:0;padding:1px 5px}a.badge:hover,a.badge:focus{color:#fff;text-decoration:none;cursor:pointer}a.list-group-item.active>.badge,.nav-pills>.active>a>.badge{color:#428bca;background-color:#fff}.nav-pills>li>a>.badge{margin-left:3px}.jumbotron{padding:30px;margin-bottom:30px;color:inherit;background-color:#eee}.jumbotron h1,.jumbotron .h1{color:inherit}.jumbotron p{margin-bottom:15px;font-size:21px;font-weight:200}.container .jumbotron{border-radius:6px}.jumbotron .container{max-width:100%}@media screen and 
(min-width:768px){.jumbotron{padding-top:48px;padding-bottom:48px}.container .jumbotron{padding-left:60px;padding-right:60px}.jumbotron h1,.jumbotron .h1{font-size:63px}}.thumbnail{display:block;padding:4px;margin-bottom:20px;line-height:1.428571429;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.thumbnail>img,.thumbnail a>img{display:block;max-width:100%;height:auto;margin-left:auto;margin-right:auto}a.thumbnail:hover,a.thumbnail:focus,a.thumbnail.active{border-color:#428bca}.thumbnail .caption{padding:9px;color:#333}.alert{padding:15px;margin-bottom:20px;border:1px solid transparent;border-radius:4px}.alert h4{margin-top:0;color:inherit}.alert .alert-link{font-weight:700}.alert>p,.alert>ul{margin-bottom:0}.alert>p+p{margin-top:5px}.alert-dismissable{padding-right:35px}.alert-dismissable .close{position:relative;top:-2px;right:-21px;color:inherit}.alert-success{background-color:#dff0d8;border-color:#d6e9c6;color:#3c763d}.alert-success hr{border-top-color:#c9e2b3}.alert-success .alert-link{color:#2b542c}.alert-info{background-color:#d9edf7;border-color:#bce8f1;color:#31708f}.alert-info hr{border-top-color:#a6e1ec}.alert-info .alert-link{color:#245269}.alert-warning{background-color:#fcf8e3;border-color:#faebcc;color:#8a6d3b}.alert-warning hr{border-top-color:#f7e1b5}.alert-warning .alert-link{color:#66512c}.alert-danger{background-color:#f2dede;border-color:#ebccd1;color:#a94442}.alert-danger hr{border-top-color:#e4b9c0}.alert-danger .alert-link{color:#843534}@-webkit-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}.progress{overflow:hidden;height:20px;margin-bottom:20px;background-color:#f5f5f5;border-radius:4px;-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,.1);box-shadow:inset 0 1px 2px 
rgba(0,0,0,.1)}.progress-bar{float:left;width:0;height:100%;font-size:12px;line-height:20px;color:#fff;text-align:center;background-color:#428bca;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);-webkit-transition:width .6s ease;transition:width .6s ease}.progress-striped .progress-bar{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-size:40px 40px}.progress.active .progress-bar{-webkit-animation:progress-bar-stripes 2s linear infinite;animation:progress-bar-stripes 2s linear infinite}.progress-bar-success{background-color:#5cb85c}.progress-striped .progress-bar-success{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-info{background-color:#5bc0de}.progress-striped .progress-bar-info{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-warning{background-color:#f0ad4e}.progress-striped .progress-bar-warning{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 
75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-danger{background-color:#d9534f}.progress-striped .progress-bar-danger{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.media,.media-body{overflow:hidden;zoom:1}.media,.media .media{margin-top:15px}.media:first-child{margin-top:0}.media-object{display:block}.media-heading{margin:0 0 5px}.media>.pull-left{margin-right:10px}.media>.pull-right{margin-left:10px}.media-list{padding-left:0;list-style:none}.list-group{margin-bottom:20px;padding-left:0}.list-group-item{position:relative;display:block;padding:10px 15px;margin-bottom:-1px;background-color:#fff;border:1px solid #ddd}.list-group-item:first-child{border-top-right-radius:4px;border-top-left-radius:4px}.list-group-item:last-child{margin-bottom:0;border-bottom-right-radius:4px;border-bottom-left-radius:4px}.list-group-item>.badge{float:right}.list-group-item>.badge+.badge{margin-right:5px}a.list-group-item{color:#555}a.list-group-item .list-group-item-heading{color:#333}a.list-group-item:hover,a.list-group-item:focus{text-decoration:none;background-color:#f5f5f5}a.list-group-item.active,a.list-group-item.active:hover,a.list-group-item.active:focus{z-index:2;color:#fff;background-color:#428bca;border-color:#428bca}a.list-group-item.active .list-group-item-heading,a.list-group-item.active:hover .list-group-item-heading,a.list-group-item.active:focus .list-group-item-heading{color:inherit}a.list-group-item.active .list-group-item-text,a.list-group-item.active:hover 
.list-group-item-text,a.list-group-item.active:focus .list-group-item-text{color:#e1edf7}.list-group-item-success{color:#3c763d;background-color:#dff0d8}a.list-group-item-success{color:#3c763d}a.list-group-item-success .list-group-item-heading{color:inherit}a.list-group-item-success:hover,a.list-group-item-success:focus{color:#3c763d;background-color:#d0e9c6}a.list-group-item-success.active,a.list-group-item-success.active:hover,a.list-group-item-success.active:focus{color:#fff;background-color:#3c763d;border-color:#3c763d}.list-group-item-info{color:#31708f;background-color:#d9edf7}a.list-group-item-info{color:#31708f}a.list-group-item-info .list-group-item-heading{color:inherit}a.list-group-item-info:hover,a.list-group-item-info:focus{color:#31708f;background-color:#c4e3f3}a.list-group-item-info.active,a.list-group-item-info.active:hover,a.list-group-item-info.active:focus{color:#fff;background-color:#31708f;border-color:#31708f}.list-group-item-warning{color:#8a6d3b;background-color:#fcf8e3}a.list-group-item-warning{color:#8a6d3b}a.list-group-item-warning .list-group-item-heading{color:inherit}a.list-group-item-warning:hover,a.list-group-item-warning:focus{color:#8a6d3b;background-color:#faf2cc}a.list-group-item-warning.active,a.list-group-item-warning.active:hover,a.list-group-item-warning.active:focus{color:#fff;background-color:#8a6d3b;border-color:#8a6d3b}.list-group-item-danger{color:#a94442;background-color:#f2dede}a.list-group-item-danger{color:#a94442}a.list-group-item-danger .list-group-item-heading{color:inherit}a.list-group-item-danger:hover,a.list-group-item-danger:focus{color:#a94442;background-color:#ebcccc}a.list-group-item-danger.active,a.list-group-item-danger.active:hover,a.list-group-item-danger.active:focus{color:#fff;background-color:#a94442;border-color:#a94442}.list-group-item-heading{margin-top:0;margin-bottom:5px}.list-group-item-text{margin-bottom:0;line-height:1.3}.panel{margin-bottom:20px;background-color:#fff;border:1px solid 
transparent;border-radius:4px;-webkit-box-shadow:0 1px 1px rgba(0,0,0,.05);box-shadow:0 1px 1px rgba(0,0,0,.05)}.panel-body{padding:15px}.panel>.list-group{margin-bottom:0}.panel>.list-group .list-group-item{border-width:1px 0;border-radius:0}.panel>.list-group .list-group-item:first-child{border-top:0}.panel>.list-group .list-group-item:last-child{border-bottom:0}.panel>.list-group:first-child .list-group-item:first-child{border-top-right-radius:3px;border-top-left-radius:3px}.panel>.list-group:last-child .list-group-item:last-child{border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel-heading+.list-group .list-group-item:first-child{border-top-width:0}.panel>.table,.panel>.table-responsive>.table{margin-bottom:0}.panel>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table:first-child>thead:first-child>tr:first-child th:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:first-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:first-child{border-top-left-radius:3px}.panel>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table:first-child>thead:first-child>tr:first-child th:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child 
th:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:last-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:last-child{border-top-right-radius:3px}.panel>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child th:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:first-child{border-bottom-left-radius:3px}.panel>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child th:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:last-child{border-bottom-right-radius:3px}.panel>.panel-body+.table,.panel>.panel-body+.table-responsive{border-top:1px solid #ddd}.panel>.table>tbody:first-child>tr:first-child th,.panel>.table>tbody:first-child>tr:first-child 
td{border-top:0}.panel>.table-bordered,.panel>.table-responsive>.table-bordered{border:0}.panel>.table-bordered>thead>tr>th:first-child,.panel>.table-responsive>.table-bordered>thead>tr>th:first-child,.panel>.table-bordered>tbody>tr>th:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:first-child,.panel>.table-bordered>tfoot>tr>th:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:first-child,.panel>.table-bordered>thead>tr>td:first-child,.panel>.table-responsive>.table-bordered>thead>tr>td:first-child,.panel>.table-bordered>tbody>tr>td:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:first-child,.panel>.table-bordered>tfoot>tr>td:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:first-child{border-left:0}.panel>.table-bordered>thead>tr>th:last-child,.panel>.table-responsive>.table-bordered>thead>tr>th:last-child,.panel>.table-bordered>tbody>tr>th:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:last-child,.panel>.table-bordered>tfoot>tr>th:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:last-child,.panel>.table-bordered>thead>tr>td:last-child,.panel>.table-responsive>.table-bordered>thead>tr>td:last-child,.panel>.table-bordered>tbody>tr>td:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:last-child,.panel>.table-bordered>tfoot>tr>td:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:last-child{border-right:0}.panel>.table-bordered>thead>tr:first-child>th,.panel>.table-responsive>.table-bordered>thead>tr:first-child>th,.panel>.table-bordered>tbody>tr:first-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>th,.panel>.table-bordered>tfoot>tr:first-child>th,.panel>.table-responsive>.table-bordered>tfoot>tr:first-child>th,.panel>.table-bordered>thead>tr:first-child>td,.panel>.table-responsive>.table-bordered>thead>tr:first-child>td,.panel>.table-bordered>tbody>tr:first-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:first
-child>td,.panel>.table-bordered>tfoot>tr:first-child>td,.panel>.table-responsive>.table-bordered>tfoot>tr:first-child>td{border-top:0}.panel>.table-bordered>thead>tr:last-child>th,.panel>.table-responsive>.table-bordered>thead>tr:last-child>th,.panel>.table-bordered>tbody>tr:last-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>th,.panel>.table-bordered>tfoot>tr:last-child>th,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>th,.panel>.table-bordered>thead>tr:last-child>td,.panel>.table-responsive>.table-bordered>thead>tr:last-child>td,.panel>.table-bordered>tbody>tr:last-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>td,.panel>.table-bordered>tfoot>tr:last-child>td,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>td{border-bottom:0}.panel>.table-responsive{border:0;margin-bottom:0}.panel-heading{padding:10px 15px;border-bottom:1px solid transparent;border-top-right-radius:3px;border-top-left-radius:3px}.panel-heading>.dropdown .dropdown-toggle{color:inherit}.panel-title{margin-top:0;margin-bottom:0;font-size:16px;color:inherit}.panel-title>a{color:inherit}.panel-footer{padding:10px 15px;background-color:#f5f5f5;border-top:1px solid #ddd;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel-group{margin-bottom:20px}.panel-group .panel{margin-bottom:0;border-radius:4px;overflow:hidden}.panel-group .panel+.panel{margin-top:5px}.panel-group .panel-heading{border-bottom:0}.panel-group .panel-heading+.panel-collapse .panel-body{border-top:1px solid #ddd}.panel-group .panel-footer{border-top:0}.panel-group .panel-footer+.panel-collapse .panel-body{border-bottom:1px solid #ddd}.panel-default{border-color:#ddd}.panel-default>.panel-heading{color:#333;background-color:#f5f5f5;border-color:#ddd}.panel-default>.panel-heading+.panel-collapse .panel-body{border-top-color:#ddd}.panel-default>.panel-footer+.panel-collapse 
.panel-body{border-bottom-color:#ddd}.panel-primary{border-color:#428bca}.panel-primary>.panel-heading{color:#fff;background-color:#428bca;border-color:#428bca}.panel-primary>.panel-heading+.panel-collapse .panel-body{border-top-color:#428bca}.panel-primary>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#428bca}.panel-success{border-color:#d6e9c6}.panel-success>.panel-heading{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.panel-success>.panel-heading+.panel-collapse .panel-body{border-top-color:#d6e9c6}.panel-success>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#d6e9c6}.panel-info{border-color:#bce8f1}.panel-info>.panel-heading{color:#31708f;background-color:#d9edf7;border-color:#bce8f1}.panel-info>.panel-heading+.panel-collapse .panel-body{border-top-color:#bce8f1}.panel-info>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#bce8f1}.panel-warning{border-color:#faebcc}.panel-warning>.panel-heading{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.panel-warning>.panel-heading+.panel-collapse .panel-body{border-top-color:#faebcc}.panel-warning>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#faebcc}.panel-danger{border-color:#ebccd1}.panel-danger>.panel-heading{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.panel-danger>.panel-heading+.panel-collapse .panel-body{border-top-color:#ebccd1}.panel-danger>.panel-footer+.panel-collapse .panel-body{border-bottom-color:#ebccd1}.well{min-height:20px;padding:19px;margin-bottom:20px;background-color:#f5f5f5;border:1px solid #e3e3e3;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.05);box-shadow:inset 0 1px 1px rgba(0,0,0,.05)}.well blockquote{border-color:#ddd;border-color:rgba(0,0,0,.15)}.well-lg{padding:24px;border-radius:6px}.well-sm{padding:9px;border-radius:3px}.close{float:right;font-size:21px;font-weight:700;line-height:1;color:#000;text-shadow:0 1px 0 
#fff;opacity:.2;filter:alpha(opacity=20)}.close:hover,.close:focus{color:#000;text-decoration:none;cursor:pointer;opacity:.5;filter:alpha(opacity=50)}button.close{padding:0;cursor:pointer;background:0 0;border:0;-webkit-appearance:none}.modal-open{overflow:hidden}.modal{display:none;overflow:auto;overflow-y:scroll;position:fixed;top:0;right:0;bottom:0;left:0;z-index:1050;-webkit-overflow-scrolling:touch;outline:0}.modal.fade .modal-dialog{-webkit-transform:translate(0,-25%);-ms-transform:translate(0,-25%);transform:translate(0,-25%);-webkit-transition:-webkit-transform .3s ease-out;-moz-transition:-moz-transform .3s ease-out;-o-transition:-o-transform .3s ease-out;transition:transform .3s ease-out}.modal.in .modal-dialog{-webkit-transform:translate(0,0);-ms-transform:translate(0,0);transform:translate(0,0)}.modal-dialog{position:relative;width:auto;margin:10px}.modal-content{position:relative;background-color:#fff;border:1px solid #999;border:1px solid rgba(0,0,0,.2);border-radius:6px;-webkit-box-shadow:0 3px 9px rgba(0,0,0,.5);box-shadow:0 3px 9px rgba(0,0,0,.5);background-clip:padding-box;outline:0}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;background-color:#000}.modal-backdrop.fade{opacity:0;filter:alpha(opacity=0)}.modal-backdrop.in{opacity:.5;filter:alpha(opacity=50)}.modal-header{padding:15px;border-bottom:1px solid #e5e5e5;min-height:16.428571429px}.modal-header .close{margin-top:-2px}.modal-title{margin:0;line-height:1.428571429}.modal-body{position:relative;padding:20px}.modal-footer{margin-top:15px;padding:19px 20px 20px;text-align:right;border-top:1px solid #e5e5e5}.modal-footer .btn+.btn{margin-left:5px;margin-bottom:0}.modal-footer .btn-group .btn+.btn{margin-left:-1px}.modal-footer .btn-block+.btn-block{margin-left:0}@media (min-width:768px){.modal-dialog{width:600px;margin:30px auto}.modal-content{-webkit-box-shadow:0 5px 15px rgba(0,0,0,.5);box-shadow:0 5px 15px 
rgba(0,0,0,.5)}.modal-sm{width:300px}.modal-lg{width:900px}}.tooltip{position:absolute;z-index:1030;display:block;visibility:visible;font-size:12px;line-height:1.4;opacity:0;filter:alpha(opacity=0)}.tooltip.in{opacity:.9;filter:alpha(opacity=90)}.tooltip.top{margin-top:-3px;padding:5px 0}.tooltip.right{margin-left:3px;padding:0 5px}.tooltip.bottom{margin-top:3px;padding:5px 0}.tooltip.left{margin-left:-3px;padding:0 5px}.tooltip-inner{max-width:200px;padding:3px 8px;color:#fff;text-align:center;text-decoration:none;background-color:#000;border-radius:4px}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-left .tooltip-arrow{bottom:0;left:5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-right .tooltip-arrow{bottom:0;right:5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-width:5px 5px 5px 0;border-right-color:#000}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-width:5px 0 5px 5px;border-left-color:#000}.tooltip.bottom .tooltip-arrow{top:0;left:50%;margin-left:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-left .tooltip-arrow{top:0;left:5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-right .tooltip-arrow{top:0;right:5px;border-width:0 5px 5px;border-bottom-color:#000}.popover{position:absolute;top:0;left:0;z-index:1010;display:none;max-width:276px;padding:1px;text-align:left;background-color:#fff;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.2);border-radius:6px;-webkit-box-shadow:0 5px 10px rgba(0,0,0,.2);box-shadow:0 5px 10px rgba(0,0,0,.2);white-space:normal}.popover.top{margin-top:-10px}.popover.right{margin-left:10px}.popover.bottom{margin-top:10px}.popover.left{margin-left:-10px}.popover-title{margin:0;padding:8px 
14px;font-size:14px;font-weight:400;line-height:18px;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;border-radius:5px 5px 0 0}.popover-content{padding:9px 14px}.popover .arrow,.popover .arrow:after{position:absolute;display:block;width:0;height:0;border-color:transparent;border-style:solid}.popover .arrow{border-width:11px}.popover .arrow:after{border-width:10px;content:""}.popover.top .arrow{left:50%;margin-left:-11px;border-bottom-width:0;border-top-color:#999;border-top-color:rgba(0,0,0,.25);bottom:-11px}.popover.top .arrow:after{content:" ";bottom:1px;margin-left:-10px;border-bottom-width:0;border-top-color:#fff}.popover.right .arrow{top:50%;left:-11px;margin-top:-11px;border-left-width:0;border-right-color:#999;border-right-color:rgba(0,0,0,.25)}.popover.right .arrow:after{content:" ";left:1px;bottom:-10px;border-left-width:0;border-right-color:#fff}.popover.bottom .arrow{left:50%;margin-left:-11px;border-top-width:0;border-bottom-color:#999;border-bottom-color:rgba(0,0,0,.25);top:-11px}.popover.bottom .arrow:after{content:" ";top:1px;margin-left:-10px;border-top-width:0;border-bottom-color:#fff}.popover.left .arrow{top:50%;right:-11px;margin-top:-11px;border-right-width:0;border-left-color:#999;border-left-color:rgba(0,0,0,.25)}.popover.left .arrow:after{content:" ";right:1px;border-right-width:0;border-left-color:#fff;bottom:-10px}.carousel{position:relative}.carousel-inner{position:relative;overflow:hidden;width:100%}.carousel-inner>.item{display:none;position:relative;-webkit-transition:.6s ease-in-out left;transition:.6s ease-in-out 
left}.carousel-inner>.item>img,.carousel-inner>.item>a>img{display:block;max-width:100%;height:auto;line-height:1}.carousel-inner>.active,.carousel-inner>.next,.carousel-inner>.prev{display:block}.carousel-inner>.active{left:0}.carousel-inner>.next,.carousel-inner>.prev{position:absolute;top:0;width:100%}.carousel-inner>.next{left:100%}.carousel-inner>.prev{left:-100%}.carousel-inner>.next.left,.carousel-inner>.prev.right{left:0}.carousel-inner>.active.left{left:-100%}.carousel-inner>.active.right{left:100%}.carousel-control{position:absolute;top:0;left:0;bottom:0;width:15%;opacity:.5;filter:alpha(opacity=50);font-size:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6)}.carousel-control.left{background-image:-webkit-linear-gradient(left,color-stop(rgba(0,0,0,.5) 0),color-stop(rgba(0,0,0,.0001) 100%));background-image:linear-gradient(to right,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1)}.carousel-control.right{left:auto;right:0;background-image:-webkit-linear-gradient(left,color-stop(rgba(0,0,0,.0001) 0),color-stop(rgba(0,0,0,.5) 100%));background-image:linear-gradient(to right,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-repeat:repeat-x;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1)}.carousel-control:hover,.carousel-control:focus{outline:0;color:#fff;text-decoration:none;opacity:.9;filter:alpha(opacity=90)}.carousel-control .icon-prev,.carousel-control .icon-next,.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right{position:absolute;top:50%;z-index:5;display:inline-block}.carousel-control .icon-prev,.carousel-control .glyphicon-chevron-left{left:50%}.carousel-control .icon-next,.carousel-control .glyphicon-chevron-right{right:50%}.carousel-control .icon-prev,.carousel-control 
.icon-next{width:20px;height:20px;margin-top:-10px;margin-left:-10px;font-family:serif}.carousel-control .icon-prev:before{content:'\2039'}.carousel-control .icon-next:before{content:'\203a'}.carousel-indicators{position:absolute;bottom:10px;left:50%;z-index:15;width:60%;margin-left:-30%;padding-left:0;list-style:none;text-align:center}.carousel-indicators li{display:inline-block;width:10px;height:10px;margin:1px;text-indent:-999px;border:1px solid #fff;border-radius:10px;cursor:pointer;background-color:#000 \9;background-color:rgba(0,0,0,0)}.carousel-indicators .active{margin:0;width:12px;height:12px;background-color:#fff}.carousel-caption{position:absolute;left:15%;right:15%;bottom:20px;z-index:10;padding-top:20px;padding-bottom:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6)}.carousel-caption .btn{text-shadow:none}@media screen and (min-width:768px){.carousel-control .glyphicons-chevron-left,.carousel-control .glyphicons-chevron-right,.carousel-control .icon-prev,.carousel-control .icon-next{width:30px;height:30px;margin-top:-15px;margin-left:-15px;font-size:30px}.carousel-caption{left:20%;right:20%;padding-bottom:30px}.carousel-indicators{bottom:20px}}.clearfix:before,.clearfix:after,.container:before,.container:after,.container-fluid:before,.container-fluid:after,.row:before,.row:after,.form-horizontal .form-group:before,.form-horizontal .form-group:after,.btn-toolbar:before,.btn-toolbar:after,.btn-group-vertical>.btn-group:before,.btn-group-vertical>.btn-group:after,.nav:before,.nav:after,.navbar:before,.navbar:after,.navbar-header:before,.navbar-header:after,.navbar-collapse:before,.navbar-collapse:after,.pager:before,.pager:after,.panel-body:before,.panel-body:after,.modal-footer:before,.modal-footer:after{content:" ";display:table}.clearfix:after,.container:after,.container-fluid:after,.row:after,.form-horizontal 
.form-group:after,.btn-toolbar:after,.btn-group-vertical>.btn-group:after,.nav:after,.navbar:after,.navbar-header:after,.navbar-collapse:after,.pager:after,.panel-body:after,.modal-footer:after{clear:both}.center-block{display:block;margin-left:auto;margin-right:auto}.pull-right{float:right!important}.pull-left{float:left!important}.hide{display:none!important}.show{display:block!important}.invisible{visibility:hidden}.text-hide{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.hidden{display:none!important;visibility:hidden!important}.affix{position:fixed}@-ms-viewport{width:device-width}.visible-xs,tr.visible-xs,th.visible-xs,td.visible-xs{display:none!important}@media (max-width:767px){.visible-xs{display:block!important}table.visible-xs{display:table}tr.visible-xs{display:table-row!important}th.visible-xs,td.visible-xs{display:table-cell!important}}.visible-sm,tr.visible-sm,th.visible-sm,td.visible-sm{display:none!important}@media (min-width:768px) and (max-width:991px){.visible-sm{display:block!important}table.visible-sm{display:table}tr.visible-sm{display:table-row!important}th.visible-sm,td.visible-sm{display:table-cell!important}}.visible-md,tr.visible-md,th.visible-md,td.visible-md{display:none!important}@media (min-width:992px) and (max-width:1199px){.visible-md{display:block!important}table.visible-md{display:table}tr.visible-md{display:table-row!important}th.visible-md,td.visible-md{display:table-cell!important}}.visible-lg,tr.visible-lg,th.visible-lg,td.visible-lg{display:none!important}@media (min-width:1200px){.visible-lg{display:block!important}table.visible-lg{display:table}tr.visible-lg{display:table-row!important}th.visible-lg,td.visible-lg{display:table-cell!important}}@media (max-width:767px){.hidden-xs,tr.hidden-xs,th.hidden-xs,td.hidden-xs{display:none!important}}@media (min-width:768px) and (max-width:991px){.hidden-sm,tr.hidden-sm,th.hidden-sm,td.hidden-sm{display:none!important}}@media (min-width:992px) 
and (max-width:1199px){.hidden-md,tr.hidden-md,th.hidden-md,td.hidden-md{display:none!important}}@media (min-width:1200px){.hidden-lg,tr.hidden-lg,th.hidden-lg,td.hidden-lg{display:none!important}}.visible-print,tr.visible-print,th.visible-print,td.visible-print{display:none!important}@media print{.visible-print{display:block!important}table.visible-print{display:table}tr.visible-print{display:table-row!important}th.visible-print,td.visible-print{display:table-cell!important}}@media print{.hidden-print,tr.hidden-print,th.hidden-print,td.hidden-print{display:none!important}}
\ No newline at end of file
diff --git a/doc/css/code.css b/doc/css/code.css
new file mode 100644 (file)
index 0000000..59dc1be
--- /dev/null
@@ -0,0 +1,27 @@
+table.code {
+    font-family: Menlo,Monaco,Consolas,"Courier New",monospace;
+    display: block;
+    padding: 9.5px;
+    margin: 0px 0px 10px;
+    font-size: 13px;
+    line-height: 1.42857;
+    color: rgb(51, 51, 51);
+    word-break: break-all;
+    word-wrap: break-word;
+    background-color: rgb(245, 245, 245);
+    border: 1px solid rgb(204, 204, 204);
+    border-radius: 4px 4px 4px 4px;
+}
+
+table.code tr td {
+    white-space: pre;
+}
+
+table.code tr td:nth-child(2) {
+    color: #d14;
+    padding-left: .5em;
+}
+
+.userinput {
+    color: #d14;
+}
diff --git a/doc/css/font-awesome.css b/doc/css/font-awesome.css
new file mode 100644 (file)
index 0000000..eb4127b
--- /dev/null
@@ -0,0 +1,1566 @@
+/*!
+ *  Font Awesome 4.1.0 by @davegandy - http://fontawesome.io - @fontawesome
+ *  License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)
+ */
+/* FONT PATH
+ * -------------------------- */
+@font-face {
+  font-family: 'FontAwesome';
+  src: url('../fonts/fontawesome-webfont.eot?v=4.1.0');
+  src: url('../fonts/fontawesome-webfont.eot?#iefix&v=4.1.0') format('embedded-opentype'), url('../fonts/fontawesome-webfont.woff?v=4.1.0') format('woff'), url('../fonts/fontawesome-webfont.ttf?v=4.1.0') format('truetype'), url('../fonts/fontawesome-webfont.svg?v=4.1.0#fontawesomeregular') format('svg');
+  font-weight: normal;
+  font-style: normal;
+}
+.fa {
+  display: inline-block;
+  font-family: FontAwesome;
+  font-style: normal;
+  font-weight: normal;
+  line-height: 1;
+  -webkit-font-smoothing: antialiased;
+  -moz-osx-font-smoothing: grayscale;
+}
+/* makes the font 33% larger relative to the icon container */
+.fa-lg {
+  font-size: 1.33333333em;
+  line-height: 0.75em;
+  vertical-align: -15%;
+}
+.fa-2x {
+  font-size: 2em;
+}
+.fa-3x {
+  font-size: 3em;
+}
+.fa-4x {
+  font-size: 4em;
+}
+.fa-5x {
+  font-size: 5em;
+}
+.fa-fw {
+  width: 1.28571429em;
+  text-align: center;
+}
+.fa-ul {
+  padding-left: 0;
+  margin-left: 2.14285714em;
+  list-style-type: none;
+}
+.fa-ul > li {
+  position: relative;
+}
+.fa-li {
+  position: absolute;
+  left: -2.14285714em;
+  width: 2.14285714em;
+  top: 0.14285714em;
+  text-align: center;
+}
+.fa-li.fa-lg {
+  left: -1.85714286em;
+}
+.fa-border {
+  padding: .2em .25em .15em;
+  border: solid 0.08em #eeeeee;
+  border-radius: .1em;
+}
+.pull-right {
+  float: right;
+}
+.pull-left {
+  float: left;
+}
+.fa.pull-left {
+  margin-right: .3em;
+}
+.fa.pull-right {
+  margin-left: .3em;
+}
+.fa-spin {
+  -webkit-animation: spin 2s infinite linear;
+  -moz-animation: spin 2s infinite linear;
+  -o-animation: spin 2s infinite linear;
+  animation: spin 2s infinite linear;
+}
+@-moz-keyframes spin {
+  0% {
+    -moz-transform: rotate(0deg);
+  }
+  100% {
+    -moz-transform: rotate(359deg);
+  }
+}
+@-webkit-keyframes spin {
+  0% {
+    -webkit-transform: rotate(0deg);
+  }
+  100% {
+    -webkit-transform: rotate(359deg);
+  }
+}
+@-o-keyframes spin {
+  0% {
+    -o-transform: rotate(0deg);
+  }
+  100% {
+    -o-transform: rotate(359deg);
+  }
+}
+@keyframes spin {
+  0% {
+    -webkit-transform: rotate(0deg);
+    transform: rotate(0deg);
+  }
+  100% {
+    -webkit-transform: rotate(359deg);
+    transform: rotate(359deg);
+  }
+}
+.fa-rotate-90 {
+  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=1);
+  -webkit-transform: rotate(90deg);
+  -moz-transform: rotate(90deg);
+  -ms-transform: rotate(90deg);
+  -o-transform: rotate(90deg);
+  transform: rotate(90deg);
+}
+.fa-rotate-180 {
+  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2);
+  -webkit-transform: rotate(180deg);
+  -moz-transform: rotate(180deg);
+  -ms-transform: rotate(180deg);
+  -o-transform: rotate(180deg);
+  transform: rotate(180deg);
+}
+.fa-rotate-270 {
+  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=3);
+  -webkit-transform: rotate(270deg);
+  -moz-transform: rotate(270deg);
+  -ms-transform: rotate(270deg);
+  -o-transform: rotate(270deg);
+  transform: rotate(270deg);
+}
+.fa-flip-horizontal {
+  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1);
+  -webkit-transform: scale(-1, 1);
+  -moz-transform: scale(-1, 1);
+  -ms-transform: scale(-1, 1);
+  -o-transform: scale(-1, 1);
+  transform: scale(-1, 1);
+}
+.fa-flip-vertical {
+  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1);
+  -webkit-transform: scale(1, -1);
+  -moz-transform: scale(1, -1);
+  -ms-transform: scale(1, -1);
+  -o-transform: scale(1, -1);
+  transform: scale(1, -1);
+}
+.fa-stack {
+  position: relative;
+  display: inline-block;
+  width: 2em;
+  height: 2em;
+  line-height: 2em;
+  vertical-align: middle;
+}
+.fa-stack-1x,
+.fa-stack-2x {
+  position: absolute;
+  left: 0;
+  width: 100%;
+  text-align: center;
+}
+.fa-stack-1x {
+  line-height: inherit;
+}
+.fa-stack-2x {
+  font-size: 2em;
+}
+.fa-inverse {
+  color: #ffffff;
+}
+/* Font Awesome uses the Unicode Private Use Area (PUA) to ensure screen
+   readers do not read off random characters that represent icons */
+.fa-glass:before {
+  content: "\f000";
+}
+.fa-music:before {
+  content: "\f001";
+}
+.fa-search:before {
+  content: "\f002";
+}
+.fa-envelope-o:before {
+  content: "\f003";
+}
+.fa-heart:before {
+  content: "\f004";
+}
+.fa-star:before {
+  content: "\f005";
+}
+.fa-star-o:before {
+  content: "\f006";
+}
+.fa-user:before {
+  content: "\f007";
+}
+.fa-film:before {
+  content: "\f008";
+}
+.fa-th-large:before {
+  content: "\f009";
+}
+.fa-th:before {
+  content: "\f00a";
+}
+.fa-th-list:before {
+  content: "\f00b";
+}
+.fa-check:before {
+  content: "\f00c";
+}
+.fa-times:before {
+  content: "\f00d";
+}
+.fa-search-plus:before {
+  content: "\f00e";
+}
+.fa-search-minus:before {
+  content: "\f010";
+}
+.fa-power-off:before {
+  content: "\f011";
+}
+.fa-signal:before {
+  content: "\f012";
+}
+.fa-gear:before,
+.fa-cog:before {
+  content: "\f013";
+}
+.fa-trash-o:before {
+  content: "\f014";
+}
+.fa-home:before {
+  content: "\f015";
+}
+.fa-file-o:before {
+  content: "\f016";
+}
+.fa-clock-o:before {
+  content: "\f017";
+}
+.fa-road:before {
+  content: "\f018";
+}
+.fa-download:before {
+  content: "\f019";
+}
+.fa-arrow-circle-o-down:before {
+  content: "\f01a";
+}
+.fa-arrow-circle-o-up:before {
+  content: "\f01b";
+}
+.fa-inbox:before {
+  content: "\f01c";
+}
+.fa-play-circle-o:before {
+  content: "\f01d";
+}
+.fa-rotate-right:before,
+.fa-repeat:before {
+  content: "\f01e";
+}
+.fa-refresh:before {
+  content: "\f021";
+}
+.fa-list-alt:before {
+  content: "\f022";
+}
+.fa-lock:before {
+  content: "\f023";
+}
+.fa-flag:before {
+  content: "\f024";
+}
+.fa-headphones:before {
+  content: "\f025";
+}
+.fa-volume-off:before {
+  content: "\f026";
+}
+.fa-volume-down:before {
+  content: "\f027";
+}
+.fa-volume-up:before {
+  content: "\f028";
+}
+.fa-qrcode:before {
+  content: "\f029";
+}
+.fa-barcode:before {
+  content: "\f02a";
+}
+.fa-tag:before {
+  content: "\f02b";
+}
+.fa-tags:before {
+  content: "\f02c";
+}
+.fa-book:before {
+  content: "\f02d";
+}
+.fa-bookmark:before {
+  content: "\f02e";
+}
+.fa-print:before {
+  content: "\f02f";
+}
+.fa-camera:before {
+  content: "\f030";
+}
+.fa-font:before {
+  content: "\f031";
+}
+.fa-bold:before {
+  content: "\f032";
+}
+.fa-italic:before {
+  content: "\f033";
+}
+.fa-text-height:before {
+  content: "\f034";
+}
+.fa-text-width:before {
+  content: "\f035";
+}
+.fa-align-left:before {
+  content: "\f036";
+}
+.fa-align-center:before {
+  content: "\f037";
+}
+.fa-align-right:before {
+  content: "\f038";
+}
+.fa-align-justify:before {
+  content: "\f039";
+}
+.fa-list:before {
+  content: "\f03a";
+}
+.fa-dedent:before,
+.fa-outdent:before {
+  content: "\f03b";
+}
+.fa-indent:before {
+  content: "\f03c";
+}
+.fa-video-camera:before {
+  content: "\f03d";
+}
+.fa-photo:before,
+.fa-image:before,
+.fa-picture-o:before {
+  content: "\f03e";
+}
+.fa-pencil:before {
+  content: "\f040";
+}
+.fa-map-marker:before {
+  content: "\f041";
+}
+.fa-adjust:before {
+  content: "\f042";
+}
+.fa-tint:before {
+  content: "\f043";
+}
+.fa-edit:before,
+.fa-pencil-square-o:before {
+  content: "\f044";
+}
+.fa-share-square-o:before {
+  content: "\f045";
+}
+.fa-check-square-o:before {
+  content: "\f046";
+}
+.fa-arrows:before {
+  content: "\f047";
+}
+.fa-step-backward:before {
+  content: "\f048";
+}
+.fa-fast-backward:before {
+  content: "\f049";
+}
+.fa-backward:before {
+  content: "\f04a";
+}
+.fa-play:before {
+  content: "\f04b";
+}
+.fa-pause:before {
+  content: "\f04c";
+}
+.fa-stop:before {
+  content: "\f04d";
+}
+.fa-forward:before {
+  content: "\f04e";
+}
+.fa-fast-forward:before {
+  content: "\f050";
+}
+.fa-step-forward:before {
+  content: "\f051";
+}
+.fa-eject:before {
+  content: "\f052";
+}
+.fa-chevron-left:before {
+  content: "\f053";
+}
+.fa-chevron-right:before {
+  content: "\f054";
+}
+.fa-plus-circle:before {
+  content: "\f055";
+}
+.fa-minus-circle:before {
+  content: "\f056";
+}
+.fa-times-circle:before {
+  content: "\f057";
+}
+.fa-check-circle:before {
+  content: "\f058";
+}
+.fa-question-circle:before {
+  content: "\f059";
+}
+.fa-info-circle:before {
+  content: "\f05a";
+}
+.fa-crosshairs:before {
+  content: "\f05b";
+}
+.fa-times-circle-o:before {
+  content: "\f05c";
+}
+.fa-check-circle-o:before {
+  content: "\f05d";
+}
+.fa-ban:before {
+  content: "\f05e";
+}
+.fa-arrow-left:before {
+  content: "\f060";
+}
+.fa-arrow-right:before {
+  content: "\f061";
+}
+.fa-arrow-up:before {
+  content: "\f062";
+}
+.fa-arrow-down:before {
+  content: "\f063";
+}
+.fa-mail-forward:before,
+.fa-share:before {
+  content: "\f064";
+}
+.fa-expand:before {
+  content: "\f065";
+}
+.fa-compress:before {
+  content: "\f066";
+}
+.fa-plus:before {
+  content: "\f067";
+}
+.fa-minus:before {
+  content: "\f068";
+}
+.fa-asterisk:before {
+  content: "\f069";
+}
+.fa-exclamation-circle:before {
+  content: "\f06a";
+}
+.fa-gift:before {
+  content: "\f06b";
+}
+.fa-leaf:before {
+  content: "\f06c";
+}
+.fa-fire:before {
+  content: "\f06d";
+}
+.fa-eye:before {
+  content: "\f06e";
+}
+.fa-eye-slash:before {
+  content: "\f070";
+}
+.fa-warning:before,
+.fa-exclamation-triangle:before {
+  content: "\f071";
+}
+.fa-plane:before {
+  content: "\f072";
+}
+.fa-calendar:before {
+  content: "\f073";
+}
+.fa-random:before {
+  content: "\f074";
+}
+.fa-comment:before {
+  content: "\f075";
+}
+.fa-magnet:before {
+  content: "\f076";
+}
+.fa-chevron-up:before {
+  content: "\f077";
+}
+.fa-chevron-down:before {
+  content: "\f078";
+}
+.fa-retweet:before {
+  content: "\f079";
+}
+.fa-shopping-cart:before {
+  content: "\f07a";
+}
+.fa-folder:before {
+  content: "\f07b";
+}
+.fa-folder-open:before {
+  content: "\f07c";
+}
+.fa-arrows-v:before {
+  content: "\f07d";
+}
+.fa-arrows-h:before {
+  content: "\f07e";
+}
+.fa-bar-chart-o:before {
+  content: "\f080";
+}
+.fa-twitter-square:before {
+  content: "\f081";
+}
+.fa-facebook-square:before {
+  content: "\f082";
+}
+.fa-camera-retro:before {
+  content: "\f083";
+}
+.fa-key:before {
+  content: "\f084";
+}
+.fa-gears:before,
+.fa-cogs:before {
+  content: "\f085";
+}
+.fa-comments:before {
+  content: "\f086";
+}
+.fa-thumbs-o-up:before {
+  content: "\f087";
+}
+.fa-thumbs-o-down:before {
+  content: "\f088";
+}
+.fa-star-half:before {
+  content: "\f089";
+}
+.fa-heart-o:before {
+  content: "\f08a";
+}
+.fa-sign-out:before {
+  content: "\f08b";
+}
+.fa-linkedin-square:before {
+  content: "\f08c";
+}
+.fa-thumb-tack:before {
+  content: "\f08d";
+}
+.fa-external-link:before {
+  content: "\f08e";
+}
+.fa-sign-in:before {
+  content: "\f090";
+}
+.fa-trophy:before {
+  content: "\f091";
+}
+.fa-github-square:before {
+  content: "\f092";
+}
+.fa-upload:before {
+  content: "\f093";
+}
+.fa-lemon-o:before {
+  content: "\f094";
+}
+.fa-phone:before {
+  content: "\f095";
+}
+.fa-square-o:before {
+  content: "\f096";
+}
+.fa-bookmark-o:before {
+  content: "\f097";
+}
+.fa-phone-square:before {
+  content: "\f098";
+}
+.fa-twitter:before {
+  content: "\f099";
+}
+.fa-facebook:before {
+  content: "\f09a";
+}
+.fa-github:before {
+  content: "\f09b";
+}
+.fa-unlock:before {
+  content: "\f09c";
+}
+.fa-credit-card:before {
+  content: "\f09d";
+}
+.fa-rss:before {
+  content: "\f09e";
+}
+.fa-hdd-o:before {
+  content: "\f0a0";
+}
+.fa-bullhorn:before {
+  content: "\f0a1";
+}
+.fa-bell:before {
+  content: "\f0f3";
+}
+.fa-certificate:before {
+  content: "\f0a3";
+}
+.fa-hand-o-right:before {
+  content: "\f0a4";
+}
+.fa-hand-o-left:before {
+  content: "\f0a5";
+}
+.fa-hand-o-up:before {
+  content: "\f0a6";
+}
+.fa-hand-o-down:before {
+  content: "\f0a7";
+}
+.fa-arrow-circle-left:before {
+  content: "\f0a8";
+}
+.fa-arrow-circle-right:before {
+  content: "\f0a9";
+}
+.fa-arrow-circle-up:before {
+  content: "\f0aa";
+}
+.fa-arrow-circle-down:before {
+  content: "\f0ab";
+}
+.fa-globe:before {
+  content: "\f0ac";
+}
+.fa-wrench:before {
+  content: "\f0ad";
+}
+.fa-tasks:before {
+  content: "\f0ae";
+}
+.fa-filter:before {
+  content: "\f0b0";
+}
+.fa-briefcase:before {
+  content: "\f0b1";
+}
+.fa-arrows-alt:before {
+  content: "\f0b2";
+}
+.fa-group:before,
+.fa-users:before {
+  content: "\f0c0";
+}
+.fa-chain:before,
+.fa-link:before {
+  content: "\f0c1";
+}
+.fa-cloud:before {
+  content: "\f0c2";
+}
+.fa-flask:before {
+  content: "\f0c3";
+}
+.fa-cut:before,
+.fa-scissors:before {
+  content: "\f0c4";
+}
+.fa-copy:before,
+.fa-files-o:before {
+  content: "\f0c5";
+}
+.fa-paperclip:before {
+  content: "\f0c6";
+}
+.fa-save:before,
+.fa-floppy-o:before {
+  content: "\f0c7";
+}
+.fa-square:before {
+  content: "\f0c8";
+}
+.fa-navicon:before,
+.fa-reorder:before,
+.fa-bars:before {
+  content: "\f0c9";
+}
+.fa-list-ul:before {
+  content: "\f0ca";
+}
+.fa-list-ol:before {
+  content: "\f0cb";
+}
+.fa-strikethrough:before {
+  content: "\f0cc";
+}
+.fa-underline:before {
+  content: "\f0cd";
+}
+.fa-table:before {
+  content: "\f0ce";
+}
+.fa-magic:before {
+  content: "\f0d0";
+}
+.fa-truck:before {
+  content: "\f0d1";
+}
+.fa-pinterest:before {
+  content: "\f0d2";
+}
+.fa-pinterest-square:before {
+  content: "\f0d3";
+}
+.fa-google-plus-square:before {
+  content: "\f0d4";
+}
+.fa-google-plus:before {
+  content: "\f0d5";
+}
+.fa-money:before {
+  content: "\f0d6";
+}
+.fa-caret-down:before {
+  content: "\f0d7";
+}
+.fa-caret-up:before {
+  content: "\f0d8";
+}
+.fa-caret-left:before {
+  content: "\f0d9";
+}
+.fa-caret-right:before {
+  content: "\f0da";
+}
+.fa-columns:before {
+  content: "\f0db";
+}
+.fa-unsorted:before,
+.fa-sort:before {
+  content: "\f0dc";
+}
+.fa-sort-down:before,
+.fa-sort-desc:before {
+  content: "\f0dd";
+}
+.fa-sort-up:before,
+.fa-sort-asc:before {
+  content: "\f0de";
+}
+.fa-envelope:before {
+  content: "\f0e0";
+}
+.fa-linkedin:before {
+  content: "\f0e1";
+}
+.fa-rotate-left:before,
+.fa-undo:before {
+  content: "\f0e2";
+}
+.fa-legal:before,
+.fa-gavel:before {
+  content: "\f0e3";
+}
+.fa-dashboard:before,
+.fa-tachometer:before {
+  content: "\f0e4";
+}
+.fa-comment-o:before {
+  content: "\f0e5";
+}
+.fa-comments-o:before {
+  content: "\f0e6";
+}
+.fa-flash:before,
+.fa-bolt:before {
+  content: "\f0e7";
+}
+.fa-sitemap:before {
+  content: "\f0e8";
+}
+.fa-umbrella:before {
+  content: "\f0e9";
+}
+.fa-paste:before,
+.fa-clipboard:before {
+  content: "\f0ea";
+}
+.fa-lightbulb-o:before {
+  content: "\f0eb";
+}
+.fa-exchange:before {
+  content: "\f0ec";
+}
+.fa-cloud-download:before {
+  content: "\f0ed";
+}
+.fa-cloud-upload:before {
+  content: "\f0ee";
+}
+.fa-user-md:before {
+  content: "\f0f0";
+}
+.fa-stethoscope:before {
+  content: "\f0f1";
+}
+.fa-suitcase:before {
+  content: "\f0f2";
+}
+.fa-bell-o:before {
+  content: "\f0a2";
+}
+.fa-coffee:before {
+  content: "\f0f4";
+}
+.fa-cutlery:before {
+  content: "\f0f5";
+}
+.fa-file-text-o:before {
+  content: "\f0f6";
+}
+.fa-building-o:before {
+  content: "\f0f7";
+}
+.fa-hospital-o:before {
+  content: "\f0f8";
+}
+.fa-ambulance:before {
+  content: "\f0f9";
+}
+.fa-medkit:before {
+  content: "\f0fa";
+}
+.fa-fighter-jet:before {
+  content: "\f0fb";
+}
+.fa-beer:before {
+  content: "\f0fc";
+}
+.fa-h-square:before {
+  content: "\f0fd";
+}
+.fa-plus-square:before {
+  content: "\f0fe";
+}
+.fa-angle-double-left:before {
+  content: "\f100";
+}
+.fa-angle-double-right:before {
+  content: "\f101";
+}
+.fa-angle-double-up:before {
+  content: "\f102";
+}
+.fa-angle-double-down:before {
+  content: "\f103";
+}
+.fa-angle-left:before {
+  content: "\f104";
+}
+.fa-angle-right:before {
+  content: "\f105";
+}
+.fa-angle-up:before {
+  content: "\f106";
+}
+.fa-angle-down:before {
+  content: "\f107";
+}
+.fa-desktop:before {
+  content: "\f108";
+}
+.fa-laptop:before {
+  content: "\f109";
+}
+.fa-tablet:before {
+  content: "\f10a";
+}
+.fa-mobile-phone:before,
+.fa-mobile:before {
+  content: "\f10b";
+}
+.fa-circle-o:before {
+  content: "\f10c";
+}
+.fa-quote-left:before {
+  content: "\f10d";
+}
+.fa-quote-right:before {
+  content: "\f10e";
+}
+.fa-spinner:before {
+  content: "\f110";
+}
+.fa-circle:before {
+  content: "\f111";
+}
+.fa-mail-reply:before,
+.fa-reply:before {
+  content: "\f112";
+}
+.fa-github-alt:before {
+  content: "\f113";
+}
+.fa-folder-o:before {
+  content: "\f114";
+}
+.fa-folder-open-o:before {
+  content: "\f115";
+}
+.fa-smile-o:before {
+  content: "\f118";
+}
+.fa-frown-o:before {
+  content: "\f119";
+}
+.fa-meh-o:before {
+  content: "\f11a";
+}
+.fa-gamepad:before {
+  content: "\f11b";
+}
+.fa-keyboard-o:before {
+  content: "\f11c";
+}
+.fa-flag-o:before {
+  content: "\f11d";
+}
+.fa-flag-checkered:before {
+  content: "\f11e";
+}
+.fa-terminal:before {
+  content: "\f120";
+}
+.fa-code:before {
+  content: "\f121";
+}
+.fa-mail-reply-all:before,
+.fa-reply-all:before {
+  content: "\f122";
+}
+.fa-star-half-empty:before,
+.fa-star-half-full:before,
+.fa-star-half-o:before {
+  content: "\f123";
+}
+.fa-location-arrow:before {
+  content: "\f124";
+}
+.fa-crop:before {
+  content: "\f125";
+}
+.fa-code-fork:before {
+  content: "\f126";
+}
+.fa-unlink:before,
+.fa-chain-broken:before {
+  content: "\f127";
+}
+.fa-question:before {
+  content: "\f128";
+}
+.fa-info:before {
+  content: "\f129";
+}
+.fa-exclamation:before {
+  content: "\f12a";
+}
+.fa-superscript:before {
+  content: "\f12b";
+}
+.fa-subscript:before {
+  content: "\f12c";
+}
+.fa-eraser:before {
+  content: "\f12d";
+}
+.fa-puzzle-piece:before {
+  content: "\f12e";
+}
+.fa-microphone:before {
+  content: "\f130";
+}
+.fa-microphone-slash:before {
+  content: "\f131";
+}
+.fa-shield:before {
+  content: "\f132";
+}
+.fa-calendar-o:before {
+  content: "\f133";
+}
+.fa-fire-extinguisher:before {
+  content: "\f134";
+}
+.fa-rocket:before {
+  content: "\f135";
+}
+.fa-maxcdn:before {
+  content: "\f136";
+}
+.fa-chevron-circle-left:before {
+  content: "\f137";
+}
+.fa-chevron-circle-right:before {
+  content: "\f138";
+}
+.fa-chevron-circle-up:before {
+  content: "\f139";
+}
+.fa-chevron-circle-down:before {
+  content: "\f13a";
+}
+.fa-html5:before {
+  content: "\f13b";
+}
+.fa-css3:before {
+  content: "\f13c";
+}
+.fa-anchor:before {
+  content: "\f13d";
+}
+.fa-unlock-alt:before {
+  content: "\f13e";
+}
+.fa-bullseye:before {
+  content: "\f140";
+}
+.fa-ellipsis-h:before {
+  content: "\f141";
+}
+.fa-ellipsis-v:before {
+  content: "\f142";
+}
+.fa-rss-square:before {
+  content: "\f143";
+}
+.fa-play-circle:before {
+  content: "\f144";
+}
+.fa-ticket:before {
+  content: "\f145";
+}
+.fa-minus-square:before {
+  content: "\f146";
+}
+.fa-minus-square-o:before {
+  content: "\f147";
+}
+.fa-level-up:before {
+  content: "\f148";
+}
+.fa-level-down:before {
+  content: "\f149";
+}
+.fa-check-square:before {
+  content: "\f14a";
+}
+.fa-pencil-square:before {
+  content: "\f14b";
+}
+.fa-external-link-square:before {
+  content: "\f14c";
+}
+.fa-share-square:before {
+  content: "\f14d";
+}
+.fa-compass:before {
+  content: "\f14e";
+}
+.fa-toggle-down:before,
+.fa-caret-square-o-down:before {
+  content: "\f150";
+}
+.fa-toggle-up:before,
+.fa-caret-square-o-up:before {
+  content: "\f151";
+}
+.fa-toggle-right:before,
+.fa-caret-square-o-right:before {
+  content: "\f152";
+}
+.fa-euro:before,
+.fa-eur:before {
+  content: "\f153";
+}
+.fa-gbp:before {
+  content: "\f154";
+}
+.fa-dollar:before,
+.fa-usd:before {
+  content: "\f155";
+}
+.fa-rupee:before,
+.fa-inr:before {
+  content: "\f156";
+}
+.fa-cny:before,
+.fa-rmb:before,
+.fa-yen:before,
+.fa-jpy:before {
+  content: "\f157";
+}
+.fa-ruble:before,
+.fa-rouble:before,
+.fa-rub:before {
+  content: "\f158";
+}
+.fa-won:before,
+.fa-krw:before {
+  content: "\f159";
+}
+.fa-bitcoin:before,
+.fa-btc:before {
+  content: "\f15a";
+}
+.fa-file:before {
+  content: "\f15b";
+}
+.fa-file-text:before {
+  content: "\f15c";
+}
+.fa-sort-alpha-asc:before {
+  content: "\f15d";
+}
+.fa-sort-alpha-desc:before {
+  content: "\f15e";
+}
+.fa-sort-amount-asc:before {
+  content: "\f160";
+}
+.fa-sort-amount-desc:before {
+  content: "\f161";
+}
+.fa-sort-numeric-asc:before {
+  content: "\f162";
+}
+.fa-sort-numeric-desc:before {
+  content: "\f163";
+}
+.fa-thumbs-up:before {
+  content: "\f164";
+}
+.fa-thumbs-down:before {
+  content: "\f165";
+}
+.fa-youtube-square:before {
+  content: "\f166";
+}
+.fa-youtube:before {
+  content: "\f167";
+}
+.fa-xing:before {
+  content: "\f168";
+}
+.fa-xing-square:before {
+  content: "\f169";
+}
+.fa-youtube-play:before {
+  content: "\f16a";
+}
+.fa-dropbox:before {
+  content: "\f16b";
+}
+.fa-stack-overflow:before {
+  content: "\f16c";
+}
+.fa-instagram:before {
+  content: "\f16d";
+}
+.fa-flickr:before {
+  content: "\f16e";
+}
+.fa-adn:before {
+  content: "\f170";
+}
+.fa-bitbucket:before {
+  content: "\f171";
+}
+.fa-bitbucket-square:before {
+  content: "\f172";
+}
+.fa-tumblr:before {
+  content: "\f173";
+}
+.fa-tumblr-square:before {
+  content: "\f174";
+}
+.fa-long-arrow-down:before {
+  content: "\f175";
+}
+.fa-long-arrow-up:before {
+  content: "\f176";
+}
+.fa-long-arrow-left:before {
+  content: "\f177";
+}
+.fa-long-arrow-right:before {
+  content: "\f178";
+}
+.fa-apple:before {
+  content: "\f179";
+}
+.fa-windows:before {
+  content: "\f17a";
+}
+.fa-android:before {
+  content: "\f17b";
+}
+.fa-linux:before {
+  content: "\f17c";
+}
+.fa-dribbble:before {
+  content: "\f17d";
+}
+.fa-skype:before {
+  content: "\f17e";
+}
+.fa-foursquare:before {
+  content: "\f180";
+}
+.fa-trello:before {
+  content: "\f181";
+}
+.fa-female:before {
+  content: "\f182";
+}
+.fa-male:before {
+  content: "\f183";
+}
+.fa-gittip:before {
+  content: "\f184";
+}
+.fa-sun-o:before {
+  content: "\f185";
+}
+.fa-moon-o:before {
+  content: "\f186";
+}
+.fa-archive:before {
+  content: "\f187";
+}
+.fa-bug:before {
+  content: "\f188";
+}
+.fa-vk:before {
+  content: "\f189";
+}
+.fa-weibo:before {
+  content: "\f18a";
+}
+.fa-renren:before {
+  content: "\f18b";
+}
+.fa-pagelines:before {
+  content: "\f18c";
+}
+.fa-stack-exchange:before {
+  content: "\f18d";
+}
+.fa-arrow-circle-o-right:before {
+  content: "\f18e";
+}
+.fa-arrow-circle-o-left:before {
+  content: "\f190";
+}
+.fa-toggle-left:before,
+.fa-caret-square-o-left:before {
+  content: "\f191";
+}
+.fa-dot-circle-o:before {
+  content: "\f192";
+}
+.fa-wheelchair:before {
+  content: "\f193";
+}
+.fa-vimeo-square:before {
+  content: "\f194";
+}
+.fa-turkish-lira:before,
+.fa-try:before {
+  content: "\f195";
+}
+.fa-plus-square-o:before {
+  content: "\f196";
+}
+.fa-space-shuttle:before {
+  content: "\f197";
+}
+.fa-slack:before {
+  content: "\f198";
+}
+.fa-envelope-square:before {
+  content: "\f199";
+}
+.fa-wordpress:before {
+  content: "\f19a";
+}
+.fa-openid:before {
+  content: "\f19b";
+}
+.fa-institution:before,
+.fa-bank:before,
+.fa-university:before {
+  content: "\f19c";
+}
+.fa-mortar-board:before,
+.fa-graduation-cap:before {
+  content: "\f19d";
+}
+.fa-yahoo:before {
+  content: "\f19e";
+}
+.fa-google:before {
+  content: "\f1a0";
+}
+.fa-reddit:before {
+  content: "\f1a1";
+}
+.fa-reddit-square:before {
+  content: "\f1a2";
+}
+.fa-stumbleupon-circle:before {
+  content: "\f1a3";
+}
+.fa-stumbleupon:before {
+  content: "\f1a4";
+}
+.fa-delicious:before {
+  content: "\f1a5";
+}
+.fa-digg:before {
+  content: "\f1a6";
+}
+.fa-pied-piper-square:before,
+.fa-pied-piper:before {
+  content: "\f1a7";
+}
+.fa-pied-piper-alt:before {
+  content: "\f1a8";
+}
+.fa-drupal:before {
+  content: "\f1a9";
+}
+.fa-joomla:before {
+  content: "\f1aa";
+}
+.fa-language:before {
+  content: "\f1ab";
+}
+.fa-fax:before {
+  content: "\f1ac";
+}
+.fa-building:before {
+  content: "\f1ad";
+}
+.fa-child:before {
+  content: "\f1ae";
+}
+.fa-paw:before {
+  content: "\f1b0";
+}
+.fa-spoon:before {
+  content: "\f1b1";
+}
+.fa-cube:before {
+  content: "\f1b2";
+}
+.fa-cubes:before {
+  content: "\f1b3";
+}
+.fa-behance:before {
+  content: "\f1b4";
+}
+.fa-behance-square:before {
+  content: "\f1b5";
+}
+.fa-steam:before {
+  content: "\f1b6";
+}
+.fa-steam-square:before {
+  content: "\f1b7";
+}
+.fa-recycle:before {
+  content: "\f1b8";
+}
+.fa-automobile:before,
+.fa-car:before {
+  content: "\f1b9";
+}
+.fa-cab:before,
+.fa-taxi:before {
+  content: "\f1ba";
+}
+.fa-tree:before {
+  content: "\f1bb";
+}
+.fa-spotify:before {
+  content: "\f1bc";
+}
+.fa-deviantart:before {
+  content: "\f1bd";
+}
+.fa-soundcloud:before {
+  content: "\f1be";
+}
+.fa-database:before {
+  content: "\f1c0";
+}
+.fa-file-pdf-o:before {
+  content: "\f1c1";
+}
+.fa-file-word-o:before {
+  content: "\f1c2";
+}
+.fa-file-excel-o:before {
+  content: "\f1c3";
+}
+.fa-file-powerpoint-o:before {
+  content: "\f1c4";
+}
+.fa-file-photo-o:before,
+.fa-file-picture-o:before,
+.fa-file-image-o:before {
+  content: "\f1c5";
+}
+.fa-file-zip-o:before,
+.fa-file-archive-o:before {
+  content: "\f1c6";
+}
+.fa-file-sound-o:before,
+.fa-file-audio-o:before {
+  content: "\f1c7";
+}
+.fa-file-movie-o:before,
+.fa-file-video-o:before {
+  content: "\f1c8";
+}
+.fa-file-code-o:before {
+  content: "\f1c9";
+}
+.fa-vine:before {
+  content: "\f1ca";
+}
+.fa-codepen:before {
+  content: "\f1cb";
+}
+.fa-jsfiddle:before {
+  content: "\f1cc";
+}
+.fa-life-bouy:before,
+.fa-life-saver:before,
+.fa-support:before,
+.fa-life-ring:before {
+  content: "\f1cd";
+}
+.fa-circle-o-notch:before {
+  content: "\f1ce";
+}
+.fa-ra:before,
+.fa-rebel:before {
+  content: "\f1d0";
+}
+.fa-ge:before,
+.fa-empire:before {
+  content: "\f1d1";
+}
+.fa-git-square:before {
+  content: "\f1d2";
+}
+.fa-git:before {
+  content: "\f1d3";
+}
+.fa-hacker-news:before {
+  content: "\f1d4";
+}
+.fa-tencent-weibo:before {
+  content: "\f1d5";
+}
+.fa-qq:before {
+  content: "\f1d6";
+}
+.fa-wechat:before,
+.fa-weixin:before {
+  content: "\f1d7";
+}
+.fa-send:before,
+.fa-paper-plane:before {
+  content: "\f1d8";
+}
+.fa-send-o:before,
+.fa-paper-plane-o:before {
+  content: "\f1d9";
+}
+.fa-history:before {
+  content: "\f1da";
+}
+.fa-circle-thin:before {
+  content: "\f1db";
+}
+.fa-header:before {
+  content: "\f1dc";
+}
+.fa-paragraph:before {
+  content: "\f1dd";
+}
+.fa-sliders:before {
+  content: "\f1de";
+}
+.fa-share-alt:before {
+  content: "\f1e0";
+}
+.fa-share-alt-square:before {
+  content: "\f1e1";
+}
+.fa-bomb:before {
+  content: "\f1e2";
+}
diff --git a/doc/css/nav-list.css b/doc/css/nav-list.css
new file mode 100644 (file)
index 0000000..a053514
--- /dev/null
@@ -0,0 +1,33 @@
+// NAV LIST
+// --------
+
+.nav-list {
+  padding-left: 15px;
+  padding-right: 15px;
+  margin-bottom: 0;
+}
+.nav-list > li > a,
+.nav-list .nav-header {
+  margin-left:  -15px;
+  margin-right: -15px;
+  text-shadow: 0 1px 0 rgba(255,255,255,.5);
+}
+.nav-list > li > a {
+  padding: 3px 15px;
+}
+.nav-list > .active > a,
+.nav-list > .active > a:hover,
+.nav-list > .active > a:focus {
+  color: white;
+  text-shadow: 0 -1px 0 rgba(0,0,0,.2);
+  background-color: rgb(66, 139, 202);
+}
+
+.spaced-out li {
+   padding-bottom: 1em;
+}
+
+.inside-list ul {
+    list-style-position: inside;
+    padding-left: 0;
+}
\ No newline at end of file
diff --git a/doc/examples/pipeline_templates/gatk-exome-fq-snp.json b/doc/examples/pipeline_templates/gatk-exome-fq-snp.json
new file mode 100644 (file)
index 0000000..481dda3
--- /dev/null
@@ -0,0 +1,175 @@
+{
+ "name":"GATK / exome PE fastq to snp",
+ "components":{
+  "extract-reference":{
+   "repository":"arvados",
+   "script_version":"e820bd1c6890f93ea1a84ffd5730bbf0e3d8e153",
+   "script":"file-select",
+   "script_parameters":{
+    "names":[
+     "human_g1k_v37.fasta.gz",
+     "human_g1k_v37.fasta.fai.gz",
+     "human_g1k_v37.dict.gz"
+    ],
+    "input":"d237a90bae3870b3b033aea1e99de4a9+10820+K@qr1hi"
+   },
+   "output_name":false
+  },
+  "bwa-index":{
+   "repository":"arvados",
+   "script_version":"e820bd1c6890f93ea1a84ffd5730bbf0e3d8e153",
+   "script":"bwa-index",
+   "script_parameters":{
+    "input":{
+     "output_of":"extract-reference"
+    },
+    "bwa_tbz":{
+     "value":"8b6e2c4916133e1d859c9e812861ce13+70",
+     "required":true
+    }
+   },
+   "output_name":false
+  },
+  "bwa-aln":{
+   "repository":"arvados",
+   "script_version":"e820bd1c6890f93ea1a84ffd5730bbf0e3d8e153",
+   "script":"bwa-aln",
+   "script_parameters":{
+    "input":{
+     "dataclass":"Collection",
+     "required":"true"
+    },
+    "reference_index":{
+     "output_of":"bwa-index"
+    },
+    "samtools_tgz":{
+     "value":"c777e23cf13e5d5906abfdc08d84bfdb+74",
+     "required":true
+    },
+    "bwa_tbz":{
+     "value":"8b6e2c4916133e1d859c9e812861ce13+70",
+     "required":true
+    }
+   },
+   "runtime_constraints":{
+    "max_tasks_per_node":1
+   },
+   "output_name":false
+  },
+  "picard-gatk2-prep":{
+   "repository":"arvados",
+   "script_version":"e820bd1c6890f93ea1a84ffd5730bbf0e3d8e153",
+   "script":"picard-gatk2-prep",
+   "script_parameters":{
+    "input":{
+     "output_of":"bwa-aln"
+    },
+    "reference":{
+     "output_of":"extract-reference"
+    },
+    "picard_zip":{
+     "value":"687f74675c6a0e925dec619cc2bec25f+77",
+     "required":true
+    }
+   },
+   "runtime_constraints":{
+    "max_tasks_per_node":1
+   },
+   "output_name":false
+  },
+  "GATK2-realign":{
+   "repository":"arvados",
+   "script_version":"e820bd1c6890f93ea1a84ffd5730bbf0e3d8e153",
+   "script":"GATK2-realign",
+   "script_parameters":{
+    "input":{
+     "output_of":"picard-gatk2-prep"
+    },
+    "gatk_bundle":{
+     "value":"d237a90bae3870b3b033aea1e99de4a9+10820+K@qr1hi",
+     "required":true
+    },
+    "picard_zip":{
+     "value":"687f74675c6a0e925dec619cc2bec25f+77",
+     "required":true
+    },
+    "gatk_tbz":{
+     "value":"7e0a277d6d2353678a11f56bab3b13f2+87",
+     "required":true
+    },
+    "regions":{
+     "value":"13b53dbe1ec032dfc495fd974aa5dd4a+87/S02972011_Covered_sort_merged.bed"
+    },
+    "region_padding":{
+     "value":10
+    }
+   },
+   "runtime_constraints":{
+    "max_tasks_per_node":2
+   },
+   "output_name":false
+  },
+  "GATK2-bqsr":{
+   "repository":"arvados",
+   "script_version":"e820bd1c6890f93ea1a84ffd5730bbf0e3d8e153",
+   "script":"GATK2-bqsr",
+   "script_parameters":{
+    "input":{
+     "output_of":"GATK2-realign"
+    },
+    "gatk_bundle":{
+     "value":"d237a90bae3870b3b033aea1e99de4a9+10820+K@qr1hi",
+     "required":true
+    },
+    "picard_zip":{
+     "value":"687f74675c6a0e925dec619cc2bec25f+77",
+     "required":true
+    },
+    "gatk_tbz":{
+     "value":"7e0a277d6d2353678a11f56bab3b13f2+87",
+     "required":true
+    }
+   },
+   "output_name":false
+  },
+  "GATK2-merge-call":{
+   "repository":"arvados",
+   "script_version":"e820bd1c6890f93ea1a84ffd5730bbf0e3d8e153",
+   "script":"GATK2-merge-call",
+   "script_parameters":{
+    "input":{
+     "output_of":"GATK2-bqsr"
+    },
+    "gatk_bundle":{
+     "value":"d237a90bae3870b3b033aea1e99de4a9+10820+K@qr1hi",
+     "required":true
+    },
+    "picard_zip":{
+     "value":"687f74675c6a0e925dec619cc2bec25f+77",
+     "required":true
+    },
+    "gatk_tbz":{
+     "value":"7e0a277d6d2353678a11f56bab3b13f2+87",
+     "required":true
+    },
+    "regions":{
+     "value":"13b53dbe1ec032dfc495fd974aa5dd4a+87/S02972011_Covered_sort_merged.bed"
+    },
+    "region_padding":{
+     "value":10
+    },
+    "GATK2_UnifiedGenotyper_args":{
+     "default":[
+      "-stand_call_conf",
+      "30.0",
+      "-stand_emit_conf",
+      "30.0",
+      "-dcov",
+      "200"
+     ]
+    }
+   },
+   "output_name":"Variant calls from UnifiedGenotyper"
+  }
+ }
+}
diff --git a/doc/examples/pipeline_templates/rtg-fq-snp.json b/doc/examples/pipeline_templates/rtg-fq-snp.json
new file mode 100644 (file)
index 0000000..c951c4c
--- /dev/null
@@ -0,0 +1,76 @@
+{
+ "name":"Real Time Genomics / PE fastq to snp",
+ "components":{
+  "extract_reference":{
+   "script":"file-select",
+   "script_parameters":{
+    "names":[
+     "human_g1k_v37.fasta.gz"
+    ],
+    "input":"d237a90bae3870b3b033aea1e99de4a9+10820+K@qr1hi"
+   },
+   "script_version":"4c1f8cd1431ece2ef11c130d48bb2edfd2f00ec2"
+  },
+  "reformat_reference":{
+   "script_version":"4c1f8cd1431ece2ef11c130d48bb2edfd2f00ec2",
+   "script":"rtg-fasta2sdf",
+   "script_parameters":{
+    "input":{
+     "output_of":"extract_reference"
+    },
+    "rtg_binary_zip":"5d33618193f763b7dc3a3fdfa11d452e+95+K@qr1hi",
+    "rtg_license":{
+     "optional":false
+    }
+   }
+  },
+  "reformat_reads":{
+   "script_version":"4c1f8cd1431ece2ef11c130d48bb2edfd2f00ec2",
+   "script":"rtg-fastq2sdf",
+   "script_parameters":{
+    "input":{
+     "optional":false
+    },
+    "rtg_binary_zip":"5d33618193f763b7dc3a3fdfa11d452e+95+K@qr1hi",
+    "rtg_license":{
+     "optional":false
+    }
+   }
+  },
+  "map_reads":{
+   "script_version":"4c1f8cd1431ece2ef11c130d48bb2edfd2f00ec2",
+   "script":"rtg-map",
+   "script_parameters":{
+    "input":{
+     "output_of":"reformat_reads"
+    },
+    "reference":{
+     "output_of":"reformat_reference"
+    },
+    "rtg_binary_zip":"5d33618193f763b7dc3a3fdfa11d452e+95+K@qr1hi",
+    "rtg_license":{
+     "optional":false
+    }
+   },
+   "runtime_constraints":{
+    "max_tasks_per_node":1
+   }
+  },
+  "report_snp":{
+   "script_version":"4c1f8cd1431ece2ef11c130d48bb2edfd2f00ec2",
+   "script":"rtg-snp",
+   "script_parameters":{
+    "input":{
+     "output_of":"map_reads"
+    },
+    "reference":{
+     "output_of":"reformat_reference"
+    },
+    "rtg_binary_zip":"5d33618193f763b7dc3a3fdfa11d452e+95+K@qr1hi",
+    "rtg_license":{
+     "optional":false
+    }
+   }
+  }
+ }
+}
diff --git a/doc/examples/ruby/list-active-nodes.rb b/doc/examples/ruby/list-active-nodes.rb
new file mode 100755 (executable)
index 0000000..472a0c3
--- /dev/null
@@ -0,0 +1,13 @@
+#!/usr/bin/env ruby
+
+abort 'Error: Ruby >= 1.9.3 required.' if RUBY_VERSION < '1.9.3'
+
+require 'arvados'
+
+arv = Arvados.new(api_version: 'v1')
+arv.node.list[:items].each do |node|
+  if node[:crunch_worker_state] != 'down'
+    ping_age = (Time.now - Time.parse(node[:last_ping_at])).to_i rescue -1
+    puts "#{node[:uuid]} #{node[:crunch_worker_state]} #{ping_age}"
+  end
+end
diff --git a/doc/fonts/FontAwesome.otf b/doc/fonts/FontAwesome.otf
new file mode 100644 (file)
index 0000000..3461e3f
Binary files /dev/null and b/doc/fonts/FontAwesome.otf differ
diff --git a/doc/fonts/fontawesome-webfont.eot b/doc/fonts/fontawesome-webfont.eot
new file mode 100755 (executable)
index 0000000..6cfd566
Binary files /dev/null and b/doc/fonts/fontawesome-webfont.eot differ
diff --git a/doc/fonts/fontawesome-webfont.svg b/doc/fonts/fontawesome-webfont.svg
new file mode 100755 (executable)
index 0000000..a9f8469
--- /dev/null
@@ -0,0 +1,504 @@
+<?xml version="1.0" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" >
+<svg xmlns="http://www.w3.org/2000/svg">
+<metadata></metadata>
+<defs>
+<font id="fontawesomeregular" horiz-adv-x="1536" >
+<font-face units-per-em="1792" ascent="1536" descent="-256" />
+<missing-glyph horiz-adv-x="448" />
+<glyph unicode=" "  horiz-adv-x="448" />
+<glyph unicode="&#x09;" horiz-adv-x="448" />
+<glyph unicode="&#xa0;" horiz-adv-x="448" />
+<glyph unicode="&#xa8;" horiz-adv-x="1792" />
+<glyph unicode="&#xa9;" horiz-adv-x="1792" />
+<glyph unicode="&#xae;" horiz-adv-x="1792" />
+<glyph unicode="&#xb4;" horiz-adv-x="1792" />
+<glyph unicode="&#xc6;" horiz-adv-x="1792" />
+<glyph unicode="&#xd8;" horiz-adv-x="1792" />
+<glyph unicode="&#x2000;" horiz-adv-x="768" />
+<glyph unicode="&#x2001;" horiz-adv-x="1537" />
+<glyph unicode="&#x2002;" horiz-adv-x="768" />
+<glyph unicode="&#x2003;" horiz-adv-x="1537" />
+<glyph unicode="&#x2004;" horiz-adv-x="512" />
+<glyph unicode="&#x2005;" horiz-adv-x="384" />
+<glyph unicode="&#x2006;" horiz-adv-x="256" />
+<glyph unicode="&#x2007;" horiz-adv-x="256" />
+<glyph unicode="&#x2008;" horiz-adv-x="192" />
+<glyph unicode="&#x2009;" horiz-adv-x="307" />
+<glyph unicode="&#x200a;" horiz-adv-x="85" />
+<glyph unicode="&#x202f;" horiz-adv-x="307" />
+<glyph unicode="&#x205f;" horiz-adv-x="384" />
+<glyph unicode="&#x2122;" horiz-adv-x="1792" />
+<glyph unicode="&#x221e;" horiz-adv-x="1792" />
+<glyph unicode="&#x2260;" horiz-adv-x="1792" />
+<glyph unicode="&#x25fc;" horiz-adv-x="500" d="M0 0z" />
+<glyph unicode="&#xf000;" horiz-adv-x="1792" d="M93 1350q0 23 18 36.5t38 17.5t43 4h1408q23 0 43 -4t38 -17.5t18 -36.5q0 -35 -43 -78l-632 -632v-768h320q26 0 45 -19t19 -45t-19 -45t-45 -19h-896q-26 0 -45 19t-19 45t19 45t45 19h320v768l-632 632q-43 43 -43 78z" />
+<glyph unicode="&#xf001;" d="M0 -64q0 50 34 89t86 60.5t103.5 32t96.5 10.5q105 0 192 -39v967q0 31 19 56.5t49 35.5l832 256q12 4 28 4q40 0 68 -28t28 -68v-1120q0 -50 -34 -89t-86 -60.5t-103.5 -32t-96.5 -10.5t-96.5 10.5t-103.5 32t-86 60.5t-34 89t34 89t86 60.5t103.5 32t96.5 10.5 q105 0 192 -39v537l-768 -237v-709q0 -50 -34 -89t-86 -60.5t-103.5 -32t-96.5 -10.5t-96.5 10.5t-103.5 32t-86 60.5t-34 89z" />
+<glyph unicode="&#xf002;" horiz-adv-x="1664" d="M0 704q0 143 55.5 273.5t150 225t225 150t273.5 55.5t273.5 -55.5t225 -150t150 -225t55.5 -273.5q0 -220 -124 -399l343 -343q37 -37 37 -90q0 -52 -38 -90t-90 -38q-54 0 -90 38l-343 342q-179 -124 -399 -124q-143 0 -273.5 55.5t-225 150t-150 225t-55.5 273.5z M256 704q0 -185 131.5 -316.5t316.5 -131.5t316.5 131.5t131.5 316.5t-131.5 316.5t-316.5 131.5t-316.5 -131.5t-131.5 -316.5z" />
+<glyph unicode="&#xf003;" horiz-adv-x="1792" d="M0 32v1088q0 66 47 113t113 47h1472q66 0 113 -47t47 -113v-1088q0 -66 -47 -113t-113 -47h-1472q-66 0 -113 47t-47 113zM128 32q0 -13 9.5 -22.5t22.5 -9.5h1472q13 0 22.5 9.5t9.5 22.5v768q-32 -36 -69 -66q-268 -206 -426 -338q-51 -43 -83 -67t-86.5 -48.5 t-102.5 -24.5h-1h-1q-48 0 -102.5 24.5t-86.5 48.5t-83 67q-158 132 -426 338q-37 30 -69 66v-768zM128 1120q0 -168 147 -284q193 -152 401 -317q6 -5 35 -29.5t46 -37.5t44.5 -31.5t50.5 -27.5t43 -9h1h1q20 0 43 9t50.5 27.5t44.5 31.5t46 37.5t35 29.5q208 165 401 317 q54 43 100.5 115.5t46.5 131.5v11v13.5t-0.5 13t-3 12.5t-5.5 9t-9 7.5t-14 2.5h-1472q-13 0 -22.5 -9.5t-9.5 -22.5z" />
+<glyph unicode="&#xf004;" horiz-adv-x="1792" d="M0 940q0 220 127 344t351 124q62 0 126.5 -21.5t120 -58t95.5 -68.5t76 -68q36 36 76 68t95.5 68.5t120 58t126.5 21.5q224 0 351 -124t127 -344q0 -221 -229 -450l-623 -600q-18 -18 -44 -18t-44 18l-624 602q-10 8 -27.5 26t-55.5 65.5t-68 97.5t-53.5 121t-23.5 138z " />
+<glyph unicode="&#xf005;" horiz-adv-x="1664" d="M0 889q0 37 56 46l502 73l225 455q19 41 49 41t49 -41l225 -455l502 -73q56 -9 56 -46q0 -22 -26 -48l-363 -354l86 -500q1 -7 1 -20q0 -21 -10.5 -35.5t-30.5 -14.5q-19 0 -40 12l-449 236l-449 -236q-22 -12 -40 -12q-21 0 -31.5 14.5t-10.5 35.5q0 6 2 20l86 500 l-364 354q-25 27 -25 48z" />
+<glyph unicode="&#xf006;" horiz-adv-x="1664" d="M0 889q0 37 56 46l502 73l225 455q19 41 49 41t49 -41l225 -455l502 -73q56 -9 56 -46q0 -22 -26 -48l-363 -354l86 -500q1 -7 1 -20q0 -50 -41 -50q-19 0 -40 12l-449 236l-449 -236q-22 -12 -40 -12q-21 0 -31.5 14.5t-10.5 35.5q0 6 2 20l86 500l-364 354 q-25 27 -25 48zM221 829l306 -297l-73 -421l378 199l377 -199l-72 421l306 297l-422 62l-189 382l-189 -382z" />
+<glyph unicode="&#xf007;" horiz-adv-x="1408" d="M0 131q0 53 3.5 103.5t14 109t26.5 108.5t43 97.5t62 81t85.5 53.5t111.5 20q9 0 42 -21.5t74.5 -48t108 -48t133.5 -21.5t133.5 21.5t108 48t74.5 48t42 21.5q61 0 111.5 -20t85.5 -53.5t62 -81t43 -97.5t26.5 -108.5t14 -109t3.5 -103.5q0 -120 -73 -189.5t-194 -69.5 h-874q-121 0 -194 69.5t-73 189.5zM320 1024q0 159 112.5 271.5t271.5 112.5t271.5 -112.5t112.5 -271.5t-112.5 -271.5t-271.5 -112.5t-271.5 112.5t-112.5 271.5z" />
+<glyph unicode="&#xf008;" horiz-adv-x="1920" d="M0 -96v1344q0 66 47 113t113 47h1600q66 0 113 -47t47 -113v-1344q0 -66 -47 -113t-113 -47h-1600q-66 0 -113 47t-47 113zM128 64v-128q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45zM128 320q0 -26 19 -45t45 -19h128 q26 0 45 19t19 45v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-128zM128 704q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-128zM128 1088q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v128q0 26 -19 45t-45 19 h-128q-26 0 -45 -19t-19 -45v-128zM512 -64q0 -26 19 -45t45 -19h768q26 0 45 19t19 45v512q0 26 -19 45t-45 19h-768q-26 0 -45 -19t-19 -45v-512zM512 704q0 -26 19 -45t45 -19h768q26 0 45 19t19 45v512q0 26 -19 45t-45 19h-768q-26 0 -45 -19t-19 -45v-512zM1536 64 v-128q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45zM1536 320q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-128zM1536 704q0 -26 19 -45t45 -19h128q26 0 45 19t19 45 v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-128zM1536 1088q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-128z" />
+<glyph unicode="&#xf009;" horiz-adv-x="1664" d="M0 128v384q0 52 38 90t90 38h512q52 0 90 -38t38 -90v-384q0 -52 -38 -90t-90 -38h-512q-52 0 -90 38t-38 90zM0 896v384q0 52 38 90t90 38h512q52 0 90 -38t38 -90v-384q0 -52 -38 -90t-90 -38h-512q-52 0 -90 38t-38 90zM896 128v384q0 52 38 90t90 38h512q52 0 90 -38 t38 -90v-384q0 -52 -38 -90t-90 -38h-512q-52 0 -90 38t-38 90zM896 896v384q0 52 38 90t90 38h512q52 0 90 -38t38 -90v-384q0 -52 -38 -90t-90 -38h-512q-52 0 -90 38t-38 90z" />
+<glyph unicode="&#xf00a;" horiz-adv-x="1792" d="M0 96v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM0 608v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM0 1120v192q0 40 28 68t68 28h320q40 0 68 -28 t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM640 96v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM640 608v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320 q-40 0 -68 28t-28 68zM640 1120v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM1280 96v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM1280 608v192 q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM1280 1120v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68z" />
+<glyph unicode="&#xf00b;" horiz-adv-x="1792" d="M0 96v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM0 608v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM0 1120v192q0 40 28 68t68 28h320q40 0 68 -28 t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM640 96v192q0 40 28 68t68 28h960q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68zM640 608v192q0 40 28 68t68 28h960q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-960 q-40 0 -68 28t-28 68zM640 1120v192q0 40 28 68t68 28h960q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68z" />
+<glyph unicode="&#xf00c;" horiz-adv-x="1792" d="M121 608q0 40 28 68l136 136q28 28 68 28t68 -28l294 -295l656 657q28 28 68 28t68 -28l136 -136q28 -28 28 -68t-28 -68l-724 -724l-136 -136q-28 -28 -68 -28t-68 28l-136 136l-362 362q-28 28 -28 68z" />
+<glyph unicode="&#xf00d;" horiz-adv-x="1408" d="M110 214q0 40 28 68l294 294l-294 294q-28 28 -28 68t28 68l136 136q28 28 68 28t68 -28l294 -294l294 294q28 28 68 28t68 -28l136 -136q28 -28 28 -68t-28 -68l-294 -294l294 -294q28 -28 28 -68t-28 -68l-136 -136q-28 -28 -68 -28t-68 28l-294 294l-294 -294 q-28 -28 -68 -28t-68 28l-136 136q-28 28 -28 68z" />
+<glyph unicode="&#xf00e;" horiz-adv-x="1664" d="M0 704q0 143 55.5 273.5t150 225t225 150t273.5 55.5t273.5 -55.5t225 -150t150 -225t55.5 -273.5q0 -220 -124 -399l343 -343q37 -37 37 -90t-37.5 -90.5t-90.5 -37.5q-54 0 -90 38l-343 342q-179 -124 -399 -124q-143 0 -273.5 55.5t-225 150t-150 225t-55.5 273.5z M256 704q0 -185 131.5 -316.5t316.5 -131.5t316.5 131.5t131.5 316.5t-131.5 316.5t-316.5 131.5t-316.5 -131.5t-131.5 -316.5zM384 672v64q0 13 9.5 22.5t22.5 9.5h224v224q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-224h224q13 0 22.5 -9.5t9.5 -22.5v-64 q0 -13 -9.5 -22.5t-22.5 -9.5h-224v-224q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v224h-224q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf010;" horiz-adv-x="1664" d="M0 704q0 143 55.5 273.5t150 225t225 150t273.5 55.5t273.5 -55.5t225 -150t150 -225t55.5 -273.5q0 -220 -124 -399l343 -343q37 -37 37 -90t-37.5 -90.5t-90.5 -37.5q-54 0 -90 38l-343 342q-179 -124 -399 -124q-143 0 -273.5 55.5t-225 150t-150 225t-55.5 273.5z M256 704q0 -185 131.5 -316.5t316.5 -131.5t316.5 131.5t131.5 316.5t-131.5 316.5t-316.5 131.5t-316.5 -131.5t-131.5 -316.5zM384 672v64q0 13 9.5 22.5t22.5 9.5h576q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-576q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf011;" d="M0 640q0 182 80.5 343t226.5 270q43 32 95.5 25t83.5 -50q32 -42 24.5 -94.5t-49.5 -84.5q-98 -74 -151.5 -181t-53.5 -228q0 -104 40.5 -198.5t109.5 -163.5t163.5 -109.5t198.5 -40.5t198.5 40.5t163.5 109.5t109.5 163.5t40.5 198.5q0 121 -53.5 228t-151.5 181 q-42 32 -49.5 84.5t24.5 94.5q31 43 84 50t95 -25q146 -109 226.5 -270t80.5 -343q0 -156 -61 -298t-164 -245t-245 -164t-298 -61t-298 61t-245 164t-164 245t-61 298zM640 768v640q0 52 38 90t90 38t90 -38t38 -90v-640q0 -52 -38 -90t-90 -38t-90 38t-38 90z" />
+<glyph unicode="&#xf012;" horiz-adv-x="1792" d="M0 -96v192q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-192q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23zM384 -96v320q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-320q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23zM768 -96v576q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-576 q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23zM1152 -96v960q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-960q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23zM1536 -96v1472q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1472q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf013;" d="M0 531v222q0 12 8 23t19 13l186 28q14 46 39 92q-40 57 -107 138q-10 12 -10 24q0 10 9 23q26 36 98.5 107.5t94.5 71.5q13 0 26 -10l138 -107q44 23 91 38q16 136 29 186q7 28 36 28h222q14 0 24.5 -8.5t11.5 -21.5l28 -184q49 -16 90 -37l142 107q9 9 24 9q13 0 25 -10 q129 -119 165 -170q7 -8 7 -22q0 -12 -8 -23q-15 -21 -51 -66.5t-54 -70.5q26 -50 41 -98l183 -28q13 -2 21 -12.5t8 -23.5v-222q0 -12 -8 -23t-20 -13l-185 -28q-19 -54 -39 -91q35 -50 107 -138q10 -12 10 -25t-9 -23q-27 -37 -99 -108t-94 -71q-12 0 -26 9l-138 108 q-44 -23 -91 -38q-16 -136 -29 -186q-7 -28 -36 -28h-222q-14 0 -24.5 8.5t-11.5 21.5l-28 184q-49 16 -90 37l-141 -107q-10 -9 -25 -9q-14 0 -25 11q-126 114 -165 168q-7 10 -7 23q0 12 8 23q15 21 51 66.5t54 70.5q-27 50 -41 99l-183 27q-13 2 -21 12.5t-8 23.5z M512 640q0 -106 75 -181t181 -75t181 75t75 181t-75 181t-181 75t-181 -75t-75 -181z" />
+<glyph unicode="&#xf014;" horiz-adv-x="1408" d="M0 1056v64q0 14 9 23t23 9h309l70 167q15 37 54 63t79 26h320q40 0 79 -26t54 -63l70 -167h309q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-96v-948q0 -83 -47 -143.5t-113 -60.5h-832q-66 0 -113 58.5t-47 141.5v952h-96q-14 0 -23 9t-9 23zM256 76q0 -22 7 -40.5 t14.5 -27t10.5 -8.5h832q3 0 10.5 8.5t14.5 27t7 40.5v948h-896v-948zM384 224v576q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-576q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23zM480 1152h448l-48 117q-7 9 -17 11h-317q-10 -2 -17 -11zM640 224v576q0 14 9 23t23 9h64 q14 0 23 -9t9 -23v-576q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23zM896 224v576q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-576q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf015;" horiz-adv-x="1664" d="M26 636.5q1 13.5 11 21.5l719 599q32 26 76 26t76 -26l244 -204v195q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-408l219 -182q10 -8 11 -21.5t-7 -23.5l-62 -74q-8 -9 -21 -11h-3q-13 0 -21 7l-692 577l-692 -577q-12 -8 -24 -7q-13 2 -21 11l-62 74q-8 10 -7 23.5zM256 64 v480q0 1 0.5 3t0.5 3l575 474l575 -474q1 -2 1 -6v-480q0 -26 -19 -45t-45 -19h-384v384h-256v-384h-384q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf016;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM1024 1024h376q-10 29 -22 41l-313 313q-12 12 -41 22 v-376z" />
+<glyph unicode="&#xf017;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM512 544v64q0 14 9 23t23 9h224v352q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-448q0 -14 -9 -23t-23 -9h-320q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf018;" horiz-adv-x="1920" d="M50 73q0 54 26 116l417 1044q8 19 26 33t38 14h339q-13 0 -23 -9.5t-11 -22.5l-15 -192q-1 -14 8 -23t22 -9h166q13 0 22 9t8 23l-15 192q-1 13 -11 22.5t-23 9.5h339q20 0 38 -14t26 -33l417 -1044q26 -62 26 -116q0 -73 -46 -73h-704q13 0 22 9.5t8 22.5l-20 256 q-1 13 -11 22.5t-23 9.5h-272q-13 0 -23 -9.5t-11 -22.5l-20 -256q-1 -13 8 -22.5t22 -9.5h-704q-46 0 -46 73zM809 540q-1 -12 8 -20t21 -8h244q12 0 21 8t8 20v4l-24 320q-1 13 -11 22.5t-23 9.5h-186q-13 0 -23 -9.5t-11 -22.5l-24 -320v-4z" />
+<glyph unicode="&#xf019;" horiz-adv-x="1664" d="M0 96v320q0 40 28 68t68 28h465l135 -136q58 -56 136 -56t136 56l136 136h464q40 0 68 -28t28 -68v-320q0 -40 -28 -68t-68 -28h-1472q-40 0 -68 28t-28 68zM325 985q17 39 59 39h256v448q0 26 19 45t45 19h256q26 0 45 -19t19 -45v-448h256q42 0 59 -39q17 -41 -14 -70 l-448 -448q-18 -19 -45 -19t-45 19l-448 448q-31 29 -14 70zM1152 192q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45zM1408 192q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf01a;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM418 620q8 20 30 20h192v352q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-352h192q14 0 23 -9t9 -23q0 -12 -10 -24l-319 -319q-11 -9 -23 -9t-23 9l-320 320q-15 16 -7 35z" />
+<glyph unicode="&#xf01b;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM416 672q0 12 10 24l319 319q11 9 23 9t23 -9l320 -320q15 -16 7 -35q-8 -20 -30 -20h-192v-352q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v352h-192q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf01c;" d="M0 64v482q0 62 25 123l238 552q10 25 36.5 42t52.5 17h832q26 0 52.5 -17t36.5 -42l238 -552q25 -61 25 -123v-482q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45zM197 576h316l95 -192h320l95 192h316q-1 3 -2.5 8t-2.5 8l-212 496h-708l-212 -496q-1 -2 -2.5 -8 t-2.5 -8z" />
+<glyph unicode="&#xf01d;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM512 320v640q0 37 32 56q33 18 64 -1l544 -320q32 -18 32 -55t-32 -55l-544 -320q-15 -9 -32 -9q-16 0 -32 8q-32 19 -32 56z" />
+<glyph unicode="&#xf01e;" d="M0 640q0 156 61 298t164 245t245 164t298 61q147 0 284.5 -55.5t244.5 -156.5l130 129q29 31 70 14q39 -17 39 -59v-448q0 -26 -19 -45t-45 -19h-448q-42 0 -59 40q-17 39 14 69l138 138q-148 137 -349 137q-104 0 -198.5 -40.5t-163.5 -109.5t-109.5 -163.5 t-40.5 -198.5t40.5 -198.5t109.5 -163.5t163.5 -109.5t198.5 -40.5q119 0 225 52t179 147q7 10 23 12q14 0 25 -9l137 -138q9 -8 9.5 -20.5t-7.5 -22.5q-109 -132 -264 -204.5t-327 -72.5q-156 0 -298 61t-245 164t-164 245t-61 298z" />
+<glyph unicode="&#xf021;" d="M0 0v448q0 26 19 45t45 19h448q26 0 45 -19t19 -45t-19 -45l-137 -137q71 -66 161 -102t187 -36q134 0 250 65t186 179q11 17 53 117q8 23 30 23h192q13 0 22.5 -9.5t9.5 -22.5q0 -5 -1 -7q-64 -268 -268 -434.5t-478 -166.5q-146 0 -282.5 55t-243.5 157l-129 -129 q-19 -19 -45 -19t-45 19t-19 45zM18 800v7q65 268 270 434.5t480 166.5q146 0 284 -55.5t245 -156.5l130 129q19 19 45 19t45 -19t19 -45v-448q0 -26 -19 -45t-45 -19h-448q-26 0 -45 19t-19 45t19 45l138 138q-148 137 -349 137q-134 0 -250 -65t-186 -179 q-11 -17 -53 -117q-8 -23 -30 -23h-199q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf022;" horiz-adv-x="1792" d="M0 160v1088q0 66 47 113t113 47h1472q66 0 113 -47t47 -113v-1088q0 -66 -47 -113t-113 -47h-1472q-66 0 -113 47t-47 113zM128 160q0 -13 9.5 -22.5t22.5 -9.5h1472q13 0 22.5 9.5t9.5 22.5v832q0 13 -9.5 22.5t-22.5 9.5h-1472q-13 0 -22.5 -9.5t-9.5 -22.5v-832z M256 288v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM256 544v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5z M256 800v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM512 288v64q0 13 9.5 22.5t22.5 9.5h960q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-960q-13 0 -22.5 9.5t-9.5 22.5z M512 544v64q0 13 9.5 22.5t22.5 9.5h960q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-960q-13 0 -22.5 9.5t-9.5 22.5zM512 800v64q0 13 9.5 22.5t22.5 9.5h960q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-960q-13 0 -22.5 9.5t-9.5 22.5z " />
+<glyph unicode="&#xf023;" horiz-adv-x="1152" d="M0 96v576q0 40 28 68t68 28h32v192q0 184 132 316t316 132t316 -132t132 -316v-192h32q40 0 68 -28t28 -68v-576q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68zM320 768h512v192q0 106 -75 181t-181 75t-181 -75t-75 -181v-192z" />
+<glyph unicode="&#xf024;" horiz-adv-x="1792" d="M64 1280q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5q0 -72 -64 -110v-1266q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v1266q-64 38 -64 110zM320 320v742q0 32 31 55q21 14 79 43q236 120 421 120q107 0 200 -29t219 -88q38 -19 88 -19 q54 0 117.5 21t110 47t88 47t54.5 21q26 0 45 -19t19 -45v-763q0 -25 -12.5 -38.5t-39.5 -27.5q-215 -116 -369 -116q-61 0 -123.5 22t-108.5 48t-115.5 48t-142.5 22q-192 0 -464 -146q-17 -9 -33 -9q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf025;" horiz-adv-x="1664" d="M0 650q0 151 67 291t179 242.5t266 163.5t320 61t320 -61t266 -163.5t179 -242.5t67 -291q0 -166 -60 -314l-20 -49l-185 -33q-22 -83 -90.5 -136.5t-156.5 -53.5v-32q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v576q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-32 q71 0 130 -35.5t93 -95.5l68 12q29 95 29 193q0 148 -88 279t-236.5 209t-315.5 78t-315.5 -78t-236.5 -209t-88 -279q0 -98 29 -193l68 -12q34 60 93 95.5t130 35.5v32q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-576q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v32 q-88 0 -156.5 53.5t-90.5 136.5l-185 33l-20 49q-60 148 -60 314z" />
+<glyph unicode="&#xf026;" horiz-adv-x="768" d="M0 448v384q0 26 19 45t45 19h262l333 333q19 19 45 19t45 -19t19 -45v-1088q0 -26 -19 -45t-45 -19t-45 19l-333 333h-262q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf027;" horiz-adv-x="1152" d="M0 448v384q0 26 19 45t45 19h262l333 333q19 19 45 19t45 -19t19 -45v-1088q0 -26 -19 -45t-45 -19t-45 19l-333 333h-262q-26 0 -45 19t-19 45zM908 464q0 21 12 35.5t29 25t34 23t29 35.5t12 57t-12 57t-29 35.5t-34 23t-29 25t-12 35.5q0 27 19 45.5t45 18.5 q15 0 25 -5q70 -27 112.5 -93t42.5 -142t-42.5 -141.5t-112.5 -93.5q-10 -5 -25 -5q-26 0 -45 18.5t-19 45.5z" />
+<glyph unicode="&#xf028;" horiz-adv-x="1664" d="M0 448v384q0 26 19 45t45 19h262l333 333q19 19 45 19t45 -19t19 -45v-1088q0 -26 -19 -45t-45 -19t-45 19l-333 333h-262q-26 0 -45 19t-19 45zM908 464q0 21 12 35.5t29 25t34 23t29 35.5t12 57t-12 57t-29 35.5t-34 23t-29 25t-12 35.5q0 27 19 45.5t45 18.5 q15 0 25 -5q70 -27 112.5 -93t42.5 -142t-42.5 -141.5t-112.5 -93.5q-10 -5 -25 -5q-26 0 -45 18.5t-19 45.5zM1008 228q0 39 39 59q56 29 76 44q74 54 115.5 135.5t41.5 173.5t-41.5 173.5t-115.5 135.5q-20 15 -76 44q-39 20 -39 59q0 26 19 45t45 19q13 0 26 -5 q140 -59 225 -188.5t85 -282.5t-85 -282.5t-225 -188.5q-13 -5 -25 -5q-27 0 -46 19t-19 45zM1109 -7q0 36 39 59q7 4 22.5 10.5t22.5 10.5q46 25 82 51q123 91 192 227t69 289t-69 289t-192 227q-36 26 -82 51q-7 4 -22.5 10.5t-22.5 10.5q-39 23 -39 59q0 26 19 45t45 19 q13 0 26 -5q211 -91 338 -283.5t127 -422.5t-127 -422.5t-338 -283.5q-13 -5 -26 -5q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf029;" horiz-adv-x="1408" d="M0 0v640h640v-640h-640zM0 768v640h640v-640h-640zM128 129h384v383h-384v-383zM128 896h384v384h-384v-384zM256 256v128h128v-128h-128zM256 1024v128h128v-128h-128zM768 0v640h384v-128h128v128h128v-384h-384v128h-128v-384h-128zM768 768v640h640v-640h-640z M896 896h384v384h-384v-384zM1024 0v128h128v-128h-128zM1024 1024v128h128v-128h-128zM1280 0v128h128v-128h-128z" />
+<glyph unicode="&#xf02a;" horiz-adv-x="1792" d="M0 0v1408h63v-1408h-63zM94 1v1407h32v-1407h-32zM189 1v1407h31v-1407h-31zM346 1v1407h31v-1407h-31zM472 1v1407h62v-1407h-62zM629 1v1407h31v-1407h-31zM692 1v1407h31v-1407h-31zM755 1v1407h31v-1407h-31zM880 1v1407h63v-1407h-63zM1037 1v1407h63v-1407h-63z M1163 1v1407h63v-1407h-63zM1289 1v1407h63v-1407h-63zM1383 1v1407h63v-1407h-63zM1541 1v1407h94v-1407h-94zM1666 1v1407h32v-1407h-32zM1729 0v1408h63v-1408h-63z" />
+<glyph unicode="&#xf02b;" d="M0 864v416q0 52 38 90t90 38h416q53 0 117 -26.5t102 -64.5l715 -714q37 -39 37 -91q0 -53 -37 -90l-491 -492q-39 -37 -91 -37q-53 0 -90 37l-715 716q-38 37 -64.5 101t-26.5 117zM192 1088q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5 t-90.5 -37.5t-37.5 -90.5z" />
+<glyph unicode="&#xf02c;" horiz-adv-x="1920" d="M0 864v416q0 52 38 90t90 38h416q53 0 117 -26.5t102 -64.5l715 -714q37 -39 37 -91q0 -53 -37 -90l-491 -492q-39 -37 -91 -37q-53 0 -90 37l-715 716q-38 37 -64.5 101t-26.5 117zM192 1088q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5 t-90.5 -37.5t-37.5 -90.5zM704 1408h224q53 0 117 -26.5t102 -64.5l715 -714q37 -39 37 -91q0 -53 -37 -90l-491 -492q-39 -37 -91 -37q-36 0 -59 14t-53 45l470 470q37 37 37 90q0 52 -37 91l-715 714q-38 38 -102 64.5t-117 26.5z" />
+<glyph unicode="&#xf02d;" horiz-adv-x="1664" d="M10 184q0 4 3 27t4 37q1 8 -3 21.5t-3 19.5q2 11 8 21t16.5 23.5t16.5 23.5q23 38 45 91.5t30 91.5q3 10 0.5 30t-0.5 28q3 11 17 28t17 23q21 36 42 92t25 90q1 9 -2.5 32t0.5 28q4 13 22 30.5t22 22.5q19 26 42.5 84.5t27.5 96.5q1 8 -3 25.5t-2 26.5q2 8 9 18t18 23 t17 21q8 12 16.5 30.5t15 35t16 36t19.5 32t26.5 23.5t36 11.5t47.5 -5.5l-1 -3q38 9 51 9h761q74 0 114 -56t18 -130l-274 -906q-36 -119 -71.5 -153.5t-128.5 -34.5h-869q-27 0 -38 -15q-11 -16 -1 -43q24 -70 144 -70h923q29 0 56 15.5t35 41.5l300 987q7 22 5 57 q38 -15 59 -43q40 -57 18 -129l-275 -906q-19 -64 -76.5 -107.5t-122.5 -43.5h-923q-77 0 -148.5 53.5t-99.5 131.5q-24 67 -2 127zM492 800q-4 -13 2 -22.5t20 -9.5h608q13 0 25.5 9.5t16.5 22.5l21 64q4 13 -2 22.5t-20 9.5h-608q-13 0 -25.5 -9.5t-16.5 -22.5zM575 1056 q-4 -13 2 -22.5t20 -9.5h608q13 0 25.5 9.5t16.5 22.5l21 64q4 13 -2 22.5t-20 9.5h-608q-13 0 -25.5 -9.5t-16.5 -22.5z" />
+<glyph unicode="&#xf02e;" horiz-adv-x="1280" d="M0 7v1289q0 34 19.5 62t52.5 41q21 9 44 9h1048q23 0 44 -9q33 -13 52.5 -41t19.5 -62v-1289q0 -34 -19.5 -62t-52.5 -41q-19 -8 -44 -8q-48 0 -83 32l-441 424l-441 -424q-36 -33 -83 -33q-23 0 -44 9q-33 13 -52.5 41t-19.5 62z" />
+<glyph unicode="&#xf02f;" horiz-adv-x="1664" d="M0 160v416q0 79 56.5 135.5t135.5 56.5h64v544q0 40 28 68t68 28h672q40 0 88 -20t76 -48l152 -152q28 -28 48 -76t20 -88v-256h64q79 0 135.5 -56.5t56.5 -135.5v-416q0 -13 -9.5 -22.5t-22.5 -9.5h-224v-160q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68v160h-224 q-13 0 -22.5 9.5t-9.5 22.5zM384 0h896v256h-896v-256zM384 640h896v384h-160q-40 0 -68 28t-28 68v160h-640v-640zM1408 576q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf030;" horiz-adv-x="1920" d="M0 128v896q0 106 75 181t181 75h224l51 136q19 49 69.5 84.5t103.5 35.5h512q53 0 103.5 -35.5t69.5 -84.5l51 -136h224q106 0 181 -75t75 -181v-896q0 -106 -75 -181t-181 -75h-1408q-106 0 -181 75t-75 181zM512 576q0 -185 131.5 -316.5t316.5 -131.5t316.5 131.5 t131.5 316.5t-131.5 316.5t-316.5 131.5t-316.5 -131.5t-131.5 -316.5zM672 576q0 119 84.5 203.5t203.5 84.5t203.5 -84.5t84.5 -203.5t-84.5 -203.5t-203.5 -84.5t-203.5 84.5t-84.5 203.5z" />
+<glyph unicode="&#xf031;" horiz-adv-x="1664" d="M0 -128l2 79q23 7 56 12.5t57 10.5t49.5 14.5t44.5 29t31 50.5l237 616l280 724h75h53q8 -14 11 -21l205 -480q33 -78 106 -257.5t114 -274.5q15 -34 58 -144.5t72 -168.5q20 -45 35 -57q19 -15 88 -29.5t84 -20.5q6 -38 6 -57q0 -4 -0.5 -13t-0.5 -13q-63 0 -190 8 t-191 8q-76 0 -215 -7t-178 -8q0 43 4 78l131 28q1 0 12.5 2.5t15.5 3.5t14.5 4.5t15 6.5t11 8t9 11t2.5 14q0 16 -31 96.5t-72 177.5t-42 100l-450 2q-26 -58 -76.5 -195.5t-50.5 -162.5q0 -22 14 -37.5t43.5 -24.5t48.5 -13.5t57 -8.5t41 -4q1 -19 1 -58q0 -9 -2 -27 q-58 0 -174.5 10t-174.5 10q-8 0 -26.5 -4t-21.5 -4q-80 -14 -188 -14zM555 527q33 0 136.5 -2t160.5 -2q19 0 57 2q-87 253 -184 452z" />
+<glyph unicode="&#xf032;" horiz-adv-x="1408" d="M0 -128l2 94q15 4 85 16t106 27q7 12 12.5 27t8.5 33.5t5.5 32.5t3 37.5t0.5 34v35.5v30q0 982 -22 1025q-4 8 -22 14.5t-44.5 11t-49.5 7t-48.5 4.5t-30.5 3l-4 83q98 2 340 11.5t373 9.5q23 0 68.5 -0.5t67.5 -0.5q70 0 136.5 -13t128.5 -42t108 -71t74 -104.5 t28 -137.5q0 -52 -16.5 -95.5t-39 -72t-64.5 -57.5t-73 -45t-84 -40q154 -35 256.5 -134t102.5 -248q0 -100 -35 -179.5t-93.5 -130.5t-138 -85.5t-163.5 -48.5t-176 -14q-44 0 -132 3t-132 3q-106 0 -307 -11t-231 -12zM533 1292q0 -50 4 -151t4 -152q0 -27 -0.5 -80 t-0.5 -79q0 -46 1 -69q42 -7 109 -7q82 0 143 13t110 44.5t74.5 89.5t25.5 142q0 70 -29 122.5t-79 82t-108 43.5t-124 14q-50 0 -130 -13zM538.5 165q0.5 -37 4.5 -83.5t12 -66.5q74 -32 140 -32q376 0 376 335q0 114 -41 180q-27 44 -61.5 74t-67.5 46.5t-80.5 25 t-84 10.5t-94.5 2q-73 0 -101 -10q0 -53 -0.5 -159t-0.5 -158q0 -8 -1 -67.5t-0.5 -96.5z" />
+<glyph unicode="&#xf033;" horiz-adv-x="1024" d="M0 -126l17 85q6 2 81.5 21.5t111.5 37.5q28 35 41 101q1 7 62 289t114 543.5t52 296.5v25q-24 13 -54.5 18.5t-69.5 8t-58 5.5l19 103q33 -2 120 -6.5t149.5 -7t120.5 -2.5q48 0 98.5 2.5t121 7t98.5 6.5q-5 -39 -19 -89q-30 -10 -101.5 -28.5t-108.5 -33.5 q-8 -19 -14 -42.5t-9 -40t-7.5 -45.5t-6.5 -42q-27 -148 -87.5 -419.5t-77.5 -355.5q-2 -9 -13 -58t-20 -90t-16 -83.5t-6 -57.5l1 -18q17 -4 185 -31q-3 -44 -16 -99q-11 0 -32.5 -1.5t-32.5 -1.5q-29 0 -87 10t-86 10q-138 2 -206 2q-51 0 -143 -9t-121 -11z" />
+<glyph unicode="&#xf034;" horiz-adv-x="1792" d="M0 1023v383l81 1l54 -27q12 -5 211 -5q44 0 132 2t132 2q36 0 107.5 -0.5t107.5 -0.5h293q6 0 21 -0.5t20.5 0t16 3t17.5 9t15 17.5l42 1q4 0 14 -0.5t14 -0.5q2 -112 2 -336q0 -80 -5 -109q-39 -14 -68 -18q-25 44 -54 128q-3 9 -11 48t-14.5 73.5t-7.5 35.5 q-6 8 -12 12.5t-15.5 6t-13 2.5t-18 0.5t-16.5 -0.5q-17 0 -66.5 0.5t-74.5 0.5t-64 -2t-71 -6q-9 -81 -8 -136q0 -94 2 -388t2 -455q0 -16 -2.5 -71.5t0 -91.5t12.5 -69q40 -21 124 -42.5t120 -37.5q5 -40 5 -50q0 -14 -3 -29l-34 -1q-76 -2 -218 8t-207 10q-50 0 -151 -9 t-152 -9q-3 51 -3 52v9q17 27 61.5 43t98.5 29t78 27q19 42 19 383q0 101 -3 303t-3 303v117q0 2 0.5 15.5t0.5 25t-1 25.5t-3 24t-5 14q-11 12 -162 12q-33 0 -93 -12t-80 -26q-19 -13 -34 -72.5t-31.5 -111t-42.5 -53.5q-42 26 -56 44zM1414 109.5q9 18.5 42 18.5h80v1024 h-80q-33 0 -42 18.5t11 44.5l126 162q20 26 49 26t49 -26l126 -162q20 -26 11 -44.5t-42 -18.5h-80v-1024h80q33 0 42 -18.5t-11 -44.5l-126 -162q-20 -26 -49 -26t-49 26l-126 162q-20 26 -11 44.5z" />
+<glyph unicode="&#xf035;" d="M0 1023v383l81 1l54 -27q12 -5 211 -5q44 0 132 2t132 2q70 0 246.5 1t304.5 0.5t247 -4.5q33 -1 56 31l42 1q4 0 14 -0.5t14 -0.5q2 -112 2 -336q0 -80 -5 -109q-39 -14 -68 -18q-25 44 -54 128q-3 9 -11 47.5t-15 73.5t-7 36q-10 13 -27 19q-5 2 -66 2q-30 0 -93 1 t-103 1t-94 -2t-96 -7q-9 -81 -8 -136l1 -152v52q0 -55 1 -154t1.5 -180t0.5 -153q0 -16 -2.5 -71.5t0 -91.5t12.5 -69q40 -21 124 -42.5t120 -37.5q5 -40 5 -50q0 -14 -3 -29l-34 -1q-76 -2 -218 8t-207 10q-50 0 -151 -9t-152 -9q-3 51 -3 52v9q17 27 61.5 43t98.5 29 t78 27q7 16 11.5 74t6 145.5t1.5 155t-0.5 153.5t-0.5 89q0 7 -2.5 21.5t-2.5 22.5q0 7 0.5 44t1 73t0 76.5t-3 67.5t-6.5 32q-11 12 -162 12q-41 0 -163 -13.5t-138 -24.5q-19 -12 -34 -71.5t-31.5 -111.5t-42.5 -54q-42 26 -56 44zM5 -64q0 28 26 49q4 3 36 30t59.5 49 t57.5 41.5t42 19.5q13 0 20.5 -10.5t10 -28.5t2.5 -33.5t-1.5 -33t-1.5 -19.5h1024q0 2 -1.5 19.5t-1.5 33t2.5 33.5t10 28.5t20.5 10.5q12 0 42 -19.5t57.5 -41.5t59.5 -49t36 -30q26 -21 26 -49t-26 -49q-4 -3 -36 -30t-59.5 -49t-57.5 -41.5t-42 -19.5q-13 0 -20.5 10.5 t-10 28.5t-2.5 33.5t1.5 33t1.5 19.5h-1024q0 -2 1.5 -19.5t1.5 -33t-2.5 -33.5t-10 -28.5t-20.5 -10.5q-12 0 -42 19.5t-57.5 41.5t-59.5 49t-36 30q-26 21 -26 49z" />
+<glyph unicode="&#xf036;" horiz-adv-x="1792" d="M0 64v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45zM0 448v128q0 26 19 45t45 19h1280q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1280q-26 0 -45 19t-19 45zM0 832v128q0 26 19 45t45 19h1536 q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1536q-26 0 -45 19t-19 45zM0 1216v128q0 26 19 45t45 19h1152q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1152q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf037;" horiz-adv-x="1792" d="M0 64v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45zM128 832v128q0 26 19 45t45 19h1408q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45zM384 448v128q0 26 19 45t45 19h896 q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-896q-26 0 -45 19t-19 45zM512 1216v128q0 26 19 45t45 19h640q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-640q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf038;" horiz-adv-x="1792" d="M0 64v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45zM128 832v128q0 26 19 45t45 19h1536q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1536q-26 0 -45 19t-19 45zM384 448v128q0 26 19 45t45 19h1280 q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1280q-26 0 -45 19t-19 45zM512 1216v128q0 26 19 45t45 19h1152q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1152q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf039;" horiz-adv-x="1792" d="M0 64v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45zM0 448v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45zM0 832v128q0 26 19 45t45 19h1664 q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45zM0 1216v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf03a;" horiz-adv-x="1792" d="M0 32v192q0 13 9.5 22.5t22.5 9.5h192q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-192q-13 0 -22.5 9.5t-9.5 22.5zM0 416v192q0 13 9.5 22.5t22.5 9.5h192q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-192q-13 0 -22.5 9.5 t-9.5 22.5zM0 800v192q0 13 9.5 22.5t22.5 9.5h192q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-192q-13 0 -22.5 9.5t-9.5 22.5zM0 1184v192q0 13 9.5 22.5t22.5 9.5h192q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-192 q-13 0 -22.5 9.5t-9.5 22.5zM384 32v192q0 13 9.5 22.5t22.5 9.5h1344q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1344q-13 0 -22.5 9.5t-9.5 22.5zM384 416v192q0 13 9.5 22.5t22.5 9.5h1344q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5 t-22.5 -9.5h-1344q-13 0 -22.5 9.5t-9.5 22.5zM384 800v192q0 13 9.5 22.5t22.5 9.5h1344q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1344q-13 0 -22.5 9.5t-9.5 22.5zM384 1184v192q0 13 9.5 22.5t22.5 9.5h1344q13 0 22.5 -9.5t9.5 -22.5v-192 q0 -13 -9.5 -22.5t-22.5 -9.5h-1344q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf03b;" horiz-adv-x="1792" d="M0 32v192q0 13 9.5 22.5t22.5 9.5h1728q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1728q-13 0 -22.5 9.5t-9.5 22.5zM0 1184v192q0 13 9.5 22.5t22.5 9.5h1728q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1728q-13 0 -22.5 9.5 t-9.5 22.5zM32 704q0 14 9 23l288 288q9 9 23 9q13 0 22.5 -9.5t9.5 -22.5v-576q0 -13 -9.5 -22.5t-22.5 -9.5q-14 0 -23 9l-288 288q-9 9 -9 23zM640 416v192q0 13 9.5 22.5t22.5 9.5h1088q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1088 q-13 0 -22.5 9.5t-9.5 22.5zM640 800v192q0 13 9.5 22.5t22.5 9.5h1088q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1088q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf03c;" horiz-adv-x="1792" d="M0 32v192q0 13 9.5 22.5t22.5 9.5h1728q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1728q-13 0 -22.5 9.5t-9.5 22.5zM0 416v576q0 13 9.5 22.5t22.5 9.5q14 0 23 -9l288 -288q9 -9 9 -23t-9 -23l-288 -288q-9 -9 -23 -9q-13 0 -22.5 9.5t-9.5 22.5z M0 1184v192q0 13 9.5 22.5t22.5 9.5h1728q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1728q-13 0 -22.5 9.5t-9.5 22.5zM640 416v192q0 13 9.5 22.5t22.5 9.5h1088q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1088q-13 0 -22.5 9.5 t-9.5 22.5zM640 800v192q0 13 9.5 22.5t22.5 9.5h1088q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1088q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf03d;" horiz-adv-x="1792" d="M0 288v704q0 119 84.5 203.5t203.5 84.5h704q119 0 203.5 -84.5t84.5 -203.5v-165l403 402q18 19 45 19q12 0 25 -5q39 -17 39 -59v-1088q0 -42 -39 -59q-13 -5 -25 -5q-27 0 -45 19l-403 403v-166q0 -119 -84.5 -203.5t-203.5 -84.5h-704q-119 0 -203.5 84.5 t-84.5 203.5z" />
+<glyph unicode="&#xf03e;" horiz-adv-x="1920" d="M0 32v1216q0 66 47 113t113 47h1600q66 0 113 -47t47 -113v-1216q0 -66 -47 -113t-113 -47h-1600q-66 0 -113 47t-47 113zM128 32q0 -13 9.5 -22.5t22.5 -9.5h1600q13 0 22.5 9.5t9.5 22.5v1216q0 13 -9.5 22.5t-22.5 9.5h-1600q-13 0 -22.5 -9.5t-9.5 -22.5v-1216z M256 128v192l320 320l160 -160l512 512l416 -416v-448h-1408zM256 960q0 80 56 136t136 56t136 -56t56 -136t-56 -136t-136 -56t-136 56t-56 136z" />
+<glyph unicode="&#xf040;" d="M0 -128v416l832 832l416 -416l-832 -832h-416zM128 128h128v-128h107l91 91l-235 235l-91 -91v-107zM298 384q0 -22 22 -22q10 0 17 7l542 542q7 7 7 17q0 22 -22 22q-10 0 -17 -7l-542 -542q-7 -7 -7 -17zM896 1184l166 165q36 38 90 38q53 0 91 -38l235 -234 q37 -39 37 -91q0 -53 -37 -90l-166 -166z" />
+<glyph unicode="&#xf041;" horiz-adv-x="1024" d="M0 896q0 212 150 362t362 150t362 -150t150 -362q0 -109 -33 -179l-364 -774q-16 -33 -47.5 -52t-67.5 -19t-67.5 19t-46.5 52l-365 774q-33 70 -33 179zM256 896q0 -106 75 -181t181 -75t181 75t75 181t-75 181t-181 75t-181 -75t-75 -181z" />
+<glyph unicode="&#xf042;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73v1088q-148 0 -273 -73t-198 -198t-73 -273z" />
+<glyph unicode="&#xf043;" horiz-adv-x="1024" d="M0 512q0 145 81 275q6 9 62.5 90.5t101 151t99.5 178t83 201.5q9 30 34 47t51 17t51.5 -17t33.5 -47q28 -93 83 -201.5t99.5 -178t101 -151t62.5 -90.5q81 -127 81 -275q0 -212 -150 -362t-362 -150t-362 150t-150 362zM256 384q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5 t37.5 90.5q0 36 -20 69q-1 1 -15.5 22.5t-25.5 38t-25 44t-21 50.5q-4 16 -21 16t-21 -16q-7 -23 -21 -50.5t-25 -44t-25.5 -38t-15.5 -22.5q-20 -33 -20 -69z" />
+<glyph unicode="&#xf044;" horiz-adv-x="1792" d="M0 288v832q0 119 84.5 203.5t203.5 84.5h832q63 0 117 -25q15 -7 18 -23q3 -17 -9 -29l-49 -49q-14 -14 -32 -8q-23 6 -45 6h-832q-66 0 -113 -47t-47 -113v-832q0 -66 47 -113t113 -47h832q66 0 113 47t47 113v126q0 13 9 22l64 64q15 15 35 7t20 -29v-190 q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5zM640 256v288l672 672l288 -288l-672 -672h-288zM736 448h96v-96h56l116 116l-152 152l-116 -116v-56zM944 688q16 -16 33 1l350 350q17 17 1 33t-33 -1l-350 -350q-17 -17 -1 -33zM1376 1280l92 92 q28 28 68 28t68 -28l152 -152q28 -28 28 -68t-28 -68l-92 -92z" />
+<glyph unicode="&#xf045;" horiz-adv-x="1664" d="M0 288v832q0 119 84.5 203.5t203.5 84.5h255q13 0 22.5 -9.5t9.5 -22.5q0 -27 -26 -32q-77 -26 -133 -60q-10 -4 -16 -4h-112q-66 0 -113 -47t-47 -113v-832q0 -66 47 -113t113 -47h832q66 0 113 47t47 113v214q0 19 18 29q28 13 54 37q16 16 35 8q21 -9 21 -29v-259 q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5zM256 704q0 49 3.5 91t14 90t28 88t47 81.5t68.5 74t94.5 61.5t124.5 48.5t159.5 30.5t196.5 11h160v192q0 42 39 59q13 5 25 5q26 0 45 -19l384 -384q19 -19 19 -45t-19 -45l-384 -384 q-18 -19 -45 -19q-12 0 -25 5q-39 17 -39 59v192h-160q-323 0 -438 -131q-119 -137 -74 -473q3 -23 -20 -34q-8 -2 -12 -2q-16 0 -26 13q-10 14 -21 31t-39.5 68.5t-49.5 99.5t-38.5 114t-17.5 122z" />
+<glyph unicode="&#xf046;" horiz-adv-x="1664" d="M0 288v832q0 119 84.5 203.5t203.5 84.5h832q63 0 117 -25q15 -7 18 -23q3 -17 -9 -29l-49 -49q-10 -10 -23 -10q-3 0 -9 2q-23 6 -45 6h-832q-66 0 -113 -47t-47 -113v-832q0 -66 47 -113t113 -47h832q66 0 113 47t47 113v254q0 13 9 22l64 64q10 10 23 10q6 0 12 -3 q20 -8 20 -29v-318q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5zM257 768q0 33 24 57l110 110q24 24 57 24t57 -24l263 -263l647 647q24 24 57 24t57 -24l110 -110q24 -24 24 -57t-24 -57l-814 -814q-24 -24 -57 -24t-57 24l-430 430 q-24 24 -24 57z" />
+<glyph unicode="&#xf047;" horiz-adv-x="1792" d="M0 640q0 26 19 45l256 256q19 19 45 19t45 -19t19 -45v-128h384v384h-128q-26 0 -45 19t-19 45t19 45l256 256q19 19 45 19t45 -19l256 -256q19 -19 19 -45t-19 -45t-45 -19h-128v-384h384v128q0 26 19 45t45 19t45 -19l256 -256q19 -19 19 -45t-19 -45l-256 -256 q-19 -19 -45 -19t-45 19t-19 45v128h-384v-384h128q26 0 45 -19t19 -45t-19 -45l-256 -256q-19 -19 -45 -19t-45 19l-256 256q-19 19 -19 45t19 45t45 19h128v384h-384v-128q0 -26 -19 -45t-45 -19t-45 19l-256 256q-19 19 -19 45z" />
+<glyph unicode="&#xf048;" horiz-adv-x="1024" d="M0 -64v1408q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-678q4 11 13 19l710 710q19 19 32 13t13 -32v-1472q0 -26 -13 -32t-32 13l-710 710q-9 9 -13 19v-678q0 -26 -19 -45t-45 -19h-128q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf049;" horiz-adv-x="1792" d="M0 -64v1408q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-678q4 11 13 19l710 710q19 19 32 13t13 -32v-710q4 11 13 19l710 710q19 19 32 13t13 -32v-1472q0 -26 -13 -32t-32 13l-710 710q-9 9 -13 19v-710q0 -26 -13 -32t-32 13l-710 710q-9 9 -13 19v-678q0 -26 -19 -45 t-45 -19h-128q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf04a;" horiz-adv-x="1664" d="M122 640q0 26 19 45l710 710q19 19 32 13t13 -32v-710q5 11 13 19l710 710q19 19 32 13t13 -32v-1472q0 -26 -13 -32t-32 13l-710 710q-8 9 -13 19v-710q0 -26 -13 -32t-32 13l-710 710q-19 19 -19 45z" />
+<glyph unicode="&#xf04b;" horiz-adv-x="1408" d="M0 -96v1472q0 26 16.5 36t39.5 -3l1328 -738q23 -13 23 -31t-23 -31l-1328 -738q-23 -13 -39.5 -3t-16.5 36z" />
+<glyph unicode="&#xf04c;" d="M0 -64v1408q0 26 19 45t45 19h512q26 0 45 -19t19 -45v-1408q0 -26 -19 -45t-45 -19h-512q-26 0 -45 19t-19 45zM896 -64v1408q0 26 19 45t45 19h512q26 0 45 -19t19 -45v-1408q0 -26 -19 -45t-45 -19h-512q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf04d;" d="M0 -64v1408q0 26 19 45t45 19h1408q26 0 45 -19t19 -45v-1408q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf04e;" horiz-adv-x="1664" d="M0 -96v1472q0 26 13 32t32 -13l710 -710q8 -8 13 -19v710q0 26 13 32t32 -13l710 -710q19 -19 19 -45t-19 -45l-710 -710q-19 -19 -32 -13t-13 32v710q-5 -10 -13 -19l-710 -710q-19 -19 -32 -13t-13 32z" />
+<glyph unicode="&#xf050;" horiz-adv-x="1792" d="M0 -96v1472q0 26 13 32t32 -13l710 -710q8 -8 13 -19v710q0 26 13 32t32 -13l710 -710q8 -8 13 -19v678q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-1408q0 -26 -19 -45t-45 -19h-128q-26 0 -45 19t-19 45v678q-5 -10 -13 -19l-710 -710q-19 -19 -32 -13t-13 32v710 q-5 -10 -13 -19l-710 -710q-19 -19 -32 -13t-13 32z" />
+<glyph unicode="&#xf051;" horiz-adv-x="1024" d="M0 -96v1472q0 26 13 32t32 -13l710 -710q8 -8 13 -19v678q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-1408q0 -26 -19 -45t-45 -19h-128q-26 0 -45 19t-19 45v678q-5 -10 -13 -19l-710 -710q-19 -19 -32 -13t-13 32z" />
+<glyph unicode="&#xf052;" horiz-adv-x="1538" d="M1 64v256q0 26 19 45t45 19h1408q26 0 45 -19t19 -45v-256q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45zM1 525q-6 13 13 32l710 710q19 19 45 19t45 -19l710 -710q19 -19 13 -32t-32 -13h-1472q-26 0 -32 13z" />
+<glyph unicode="&#xf053;" horiz-adv-x="1280" d="M154 704q0 26 19 45l742 742q19 19 45 19t45 -19l166 -166q19 -19 19 -45t-19 -45l-531 -531l531 -531q19 -19 19 -45t-19 -45l-166 -166q-19 -19 -45 -19t-45 19l-742 742q-19 19 -19 45z" />
+<glyph unicode="&#xf054;" horiz-adv-x="1280" d="M90 128q0 26 19 45l531 531l-531 531q-19 19 -19 45t19 45l166 166q19 19 45 19t45 -19l742 -742q19 -19 19 -45t-19 -45l-742 -742q-19 -19 -45 -19t-45 19l-166 166q-19 19 -19 45z" />
+<glyph unicode="&#xf055;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM320 576q0 -26 19 -45t45 -19h256v-256q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v256h256q26 0 45 19 t19 45v128q0 26 -19 45t-45 19h-256v256q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-256h-256q-26 0 -45 -19t-19 -45v-128z" />
+<glyph unicode="&#xf056;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM320 576q0 -26 19 -45t45 -19h768q26 0 45 19t19 45v128q0 26 -19 45t-45 19h-768q-26 0 -45 -19 t-19 -45v-128z" />
+<glyph unicode="&#xf057;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM387 414q0 -27 19 -46l90 -90q19 -19 46 -19q26 0 45 19l181 181l181 -181q19 -19 45 -19q27 0 46 19 l90 90q19 19 19 46q0 26 -19 45l-181 181l181 181q19 19 19 45q0 27 -19 46l-90 90q-19 19 -46 19q-26 0 -45 -19l-181 -181l-181 181q-19 19 -45 19q-27 0 -46 -19l-90 -90q-19 -19 -19 -46q0 -26 19 -45l181 -181l-181 -181q-19 -19 -19 -45z" />
+<glyph unicode="&#xf058;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM252 621q0 -27 18 -45l362 -362q19 -19 45 -19q27 0 46 19l543 543q18 18 18 45q0 28 -18 46l-91 90 q-19 19 -45 19t-45 -19l-408 -407l-226 226q-19 19 -45 19t-45 -19l-91 -90q-18 -18 -18 -46z" />
+<glyph unicode="&#xf059;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM417 939q-15 -24 8 -42l132 -100q7 -6 19 -6q16 0 25 12q53 68 86 92q34 24 86 24q48 0 85.5 -26 t37.5 -59q0 -38 -20 -61t-68 -45q-63 -28 -115.5 -86.5t-52.5 -125.5v-36q0 -14 9 -23t23 -9h192q14 0 23 9t9 23q0 19 21.5 49.5t54.5 49.5q32 18 49 28.5t46 35t44.5 48t28 60.5t12.5 81q0 88 -55.5 163t-138.5 116t-170 41q-243 0 -371 -213zM640 160q0 -14 9 -23t23 -9 h192q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-192q-14 0 -23 -9t-9 -23v-192z" />
+<glyph unicode="&#xf05a;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM512 160q0 -14 9 -23t23 -9h448q14 0 23 9t9 23v160q0 14 -9 23t-23 9h-96v512q0 14 -9 23t-23 9h-320 q-14 0 -23 -9t-9 -23v-160q0 -14 9 -23t23 -9h96v-320h-96q-14 0 -23 -9t-9 -23v-160zM640 1056q0 -14 9 -23t23 -9h192q14 0 23 9t9 23v160q0 14 -9 23t-23 9h-192q-14 0 -23 -9t-9 -23v-160z" />
+<glyph unicode="&#xf05b;" d="M0 576v128q0 26 19 45t45 19h143q37 161 154.5 278.5t278.5 154.5v143q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-143q161 -37 278.5 -154.5t154.5 -278.5h143q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-143q-37 -161 -154.5 -278.5t-278.5 -154.5v-143 q0 -26 -19 -45t-45 -19h-128q-26 0 -45 19t-19 45v143q-161 37 -278.5 154.5t-154.5 278.5h-143q-26 0 -45 19t-19 45zM339 512q32 -108 112.5 -188.5t188.5 -112.5v109q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-109q108 32 188.5 112.5t112.5 188.5h-109q-26 0 -45 19 t-19 45v128q0 26 19 45t45 19h109q-32 108 -112.5 188.5t-188.5 112.5v-109q0 -26 -19 -45t-45 -19h-128q-26 0 -45 19t-19 45v109q-108 -32 -188.5 -112.5t-112.5 -188.5h109q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-109z" />
+<glyph unicode="&#xf05c;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM429 480q0 13 10 23l137 137l-137 137q-10 10 -10 23t10 23l146 146q10 10 23 10t23 -10l137 -137l137 137q10 10 23 10t23 -10l146 -146q10 -10 10 -23t-10 -23l-137 -137l137 -137q10 -10 10 -23t-10 -23l-146 -146q-10 -10 -23 -10t-23 10l-137 137 l-137 -137q-10 -10 -23 -10t-23 10l-146 146q-10 10 -10 23z" />
+<glyph unicode="&#xf05d;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM346 640q0 26 19 45l102 102q19 19 45 19t45 -19l147 -147l275 275q19 19 45 19t45 -19l102 -102q19 -19 19 -45t-19 -45l-422 -422q-19 -19 -45 -19t-45 19l-294 294q-19 19 -19 45z" />
+<glyph unicode="&#xf05e;" d="M0 643q0 157 61 299.5t163.5 245.5t245 164t298.5 61t298.5 -61t245 -164t163.5 -245.5t61 -299.5t-61 -300t-163.5 -246t-245 -164t-298.5 -61t-298.5 61t-245 164t-163.5 246t-61 300zM224 643q0 -162 89 -299l755 754q-135 91 -300 91q-148 0 -273 -73t-198 -199 t-73 -274zM471 185q137 -89 297 -89q111 0 211.5 43.5t173.5 116.5t116 174.5t43 212.5q0 161 -87 295z" />
+<glyph unicode="&#xf060;" d="M64 576q0 52 37 91l651 650q38 38 91 38q52 0 90 -38l75 -74q38 -38 38 -91t-38 -91l-293 -293h704q52 0 84.5 -37.5t32.5 -90.5v-128q0 -53 -32.5 -90.5t-84.5 -37.5h-704l293 -294q38 -36 38 -90t-38 -90l-75 -76q-37 -37 -90 -37q-52 0 -91 37l-651 652q-37 37 -37 90 z" />
+<glyph unicode="&#xf061;" d="M0 512v128q0 53 32.5 90.5t84.5 37.5h704l-293 294q-38 36 -38 90t38 90l75 75q38 38 90 38q53 0 91 -38l651 -651q37 -35 37 -90q0 -54 -37 -91l-651 -651q-39 -37 -91 -37q-51 0 -90 37l-75 75q-38 38 -38 91t38 91l293 293h-704q-52 0 -84.5 37.5t-32.5 90.5z" />
+<glyph unicode="&#xf062;" horiz-adv-x="1664" d="M53 565q0 53 38 91l651 651q35 37 90 37q54 0 91 -37l651 -651q37 -39 37 -91q0 -51 -37 -90l-75 -75q-38 -38 -91 -38q-54 0 -90 38l-294 293v-704q0 -52 -37.5 -84.5t-90.5 -32.5h-128q-53 0 -90.5 32.5t-37.5 84.5v704l-294 -293q-36 -38 -90 -38t-90 38l-75 75 q-38 38 -38 90z" />
+<glyph unicode="&#xf063;" horiz-adv-x="1664" d="M53 704q0 53 38 91l74 75q39 37 91 37q53 0 90 -37l294 -294v704q0 52 38 90t90 38h128q52 0 90 -38t38 -90v-704l294 294q37 37 90 37q52 0 91 -37l75 -75q37 -39 37 -91q0 -53 -37 -90l-651 -652q-39 -37 -91 -37q-53 0 -90 37l-651 652q-38 36 -38 90z" />
+<glyph unicode="&#xf064;" horiz-adv-x="1792" d="M0 416q0 199 53 333q162 403 875 403h224v256q0 26 19 45t45 19t45 -19l512 -512q19 -19 19 -45t-19 -45l-512 -512q-19 -19 -45 -19t-45 19t-19 45v256h-224q-98 0 -175.5 -6t-154 -21.5t-133 -42.5t-105.5 -69.5t-80 -101t-48.5 -138.5t-17.5 -181q0 -55 5 -123 q0 -6 2.5 -23.5t2.5 -26.5q0 -15 -8.5 -25t-23.5 -10q-16 0 -28 17q-7 9 -13 22t-13.5 30t-10.5 24q-127 285 -127 451z" />
+<glyph unicode="&#xf065;" d="M0 -64v448q0 26 19 45t45 19t45 -19l144 -144l332 332q10 10 23 10t23 -10l114 -114q10 -10 10 -23t-10 -23l-332 -332l144 -144q19 -19 19 -45t-19 -45t-45 -19h-448q-26 0 -45 19t-19 45zM781 800q0 13 10 23l332 332l-144 144q-19 19 -19 45t19 45t45 19h448 q26 0 45 -19t19 -45v-448q0 -26 -19 -45t-45 -19t-45 19l-144 144l-332 -332q-10 -10 -23 -10t-23 10l-114 114q-10 10 -10 23z" />
+<glyph unicode="&#xf066;" d="M13 32q0 13 10 23l332 332l-144 144q-19 19 -19 45t19 45t45 19h448q26 0 45 -19t19 -45v-448q0 -26 -19 -45t-45 -19t-45 19l-144 144l-332 -332q-10 -10 -23 -10t-23 10l-114 114q-10 10 -10 23zM768 704v448q0 26 19 45t45 19t45 -19l144 -144l332 332q10 10 23 10 t23 -10l114 -114q10 -10 10 -23t-10 -23l-332 -332l144 -144q19 -19 19 -45t-19 -45t-45 -19h-448q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf067;" horiz-adv-x="1408" d="M0 608v192q0 40 28 68t68 28h416v416q0 40 28 68t68 28h192q40 0 68 -28t28 -68v-416h416q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-416v-416q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68v416h-416q-40 0 -68 28t-28 68z" />
+<glyph unicode="&#xf068;" horiz-adv-x="1408" d="M0 608v192q0 40 28 68t68 28h1216q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-1216q-40 0 -68 28t-28 68z" />
+<glyph unicode="&#xf069;" horiz-adv-x="1664" d="M122.5 408.5q13.5 51.5 59.5 77.5l266 154l-266 154q-46 26 -59.5 77.5t12.5 97.5l64 110q26 46 77.5 59.5t97.5 -12.5l266 -153v307q0 52 38 90t90 38h128q52 0 90 -38t38 -90v-307l266 153q46 26 97.5 12.5t77.5 -59.5l64 -110q26 -46 12.5 -97.5t-59.5 -77.5 l-266 -154l266 -154q46 -26 59.5 -77.5t-12.5 -97.5l-64 -110q-26 -46 -77.5 -59.5t-97.5 12.5l-266 153v-307q0 -52 -38 -90t-90 -38h-128q-52 0 -90 38t-38 90v307l-266 -153q-46 -26 -97.5 -12.5t-77.5 59.5l-64 110q-26 46 -12.5 97.5z" />
+<glyph unicode="&#xf06a;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM624 1126l17 -621q0 -10 10 -17.5t24 -7.5h185q14 0 23.5 7.5t10.5 17.5l18 621q0 12 -10 18 q-10 8 -24 8h-220q-14 0 -24 -8q-10 -6 -10 -18zM640 161q0 -13 10 -23t23 -10h192q13 0 22 9.5t9 23.5v190q0 14 -9 23.5t-22 9.5h-192q-13 0 -23 -10t-10 -23v-190z" />
+<glyph unicode="&#xf06b;" d="M0 544v320q0 14 9 23t23 9h440q-93 0 -158.5 65.5t-65.5 158.5t65.5 158.5t158.5 65.5q107 0 168 -77l128 -165l128 165q61 77 168 77q93 0 158.5 -65.5t65.5 -158.5t-65.5 -158.5t-158.5 -65.5h440q14 0 23 -9t9 -23v-320q0 -14 -9 -23t-23 -9h-96v-416q0 -40 -28 -68 t-68 -28h-1088q-40 0 -68 28t-28 68v416h-96q-14 0 -23 9t-9 23zM376 1120q0 -40 28 -68t68 -28h195l-126 161q-26 31 -69 31q-40 0 -68 -28t-28 -68zM608 180q0 -25 18 -38.5t46 -13.5h192q28 0 46 13.5t18 38.5v56v468v192h-320v-192v-468v-56zM870 1024h194q40 0 68 28 t28 68t-28 68t-68 28q-43 0 -69 -31z" />
+<glyph unicode="&#xf06c;" horiz-adv-x="1792" d="M0 121q0 35 31 73.5t68 65.5t68 56t31 48q0 4 -14 38t-16 44q-9 51 -9 104q0 115 43.5 220t119 184.5t170.5 139t204 95.5q55 18 145 25.5t179.5 9t178.5 6t163.5 24t113.5 56.5l29.5 29.5t29.5 28t27 20t36.5 16t43.5 4.5q39 0 70.5 -46t47.5 -112t24 -124t8 -96 q0 -95 -20 -193q-46 -224 -184.5 -383t-357.5 -268q-214 -108 -438 -108q-148 0 -286 47q-15 5 -88 42t-96 37q-16 0 -39.5 -32t-45 -70t-52.5 -70t-60 -32q-30 0 -51 11t-31 24t-27 42q-2 4 -6 11t-5.5 10t-3 9.5t-1.5 13.5zM384 448q0 -26 19 -45t45 -19q24 0 45 19 q27 24 74 71t67 66q137 124 268.5 176t313.5 52q26 0 45 19t19 45t-19 45t-45 19q-172 0 -318 -49.5t-259.5 -134t-235.5 -219.5q-19 -21 -19 -45z" />
+<glyph unicode="&#xf06d;" horiz-adv-x="1408" d="M0 -160q0 13 9.5 22.5t22.5 9.5h1344q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-1344q-13 0 -22.5 9.5t-9.5 22.5v64zM256 640q0 78 24.5 144t64 112.5t87.5 88t96 77.5t87.5 72t64 81.5t24.5 96.5q0 94 -66 224l3 -1l-1 1q90 -41 160 -83t138.5 -100 t113.5 -122.5t72.5 -150.5t27.5 -184q0 -78 -24.5 -144t-64 -112.5t-87.5 -88t-96 -77.5t-87.5 -72t-64 -81.5t-24.5 -96.5q0 -96 67 -224l-4 1l1 -1q-90 41 -160 83t-138.5 100t-113.5 122.5t-72.5 150.5t-27.5 184z" />
+<glyph unicode="&#xf06e;" horiz-adv-x="1792" d="M0 576q0 34 20 69q140 229 376.5 368t499.5 139t499.5 -139t376.5 -368q20 -35 20 -69t-20 -69q-140 -230 -376.5 -368.5t-499.5 -138.5t-499.5 139t-376.5 368q-20 35 -20 69zM128 576q133 -205 333.5 -326.5t434.5 -121.5t434.5 121.5t333.5 326.5q-152 236 -381 353 q61 -104 61 -225q0 -185 -131.5 -316.5t-316.5 -131.5t-316.5 131.5t-131.5 316.5q0 121 61 225q-229 -117 -381 -353zM592 704q0 -20 14 -34t34 -14t34 14t14 34q0 86 61 147t147 61q20 0 34 14t14 34t-14 34t-34 14q-125 0 -214.5 -89.5t-89.5 -214.5z" />
+<glyph unicode="&#xf070;" horiz-adv-x="1792" d="M0 576q0 38 20 69q153 235 380 371t496 136q89 0 180 -17l54 97q10 16 28 16q5 0 18 -6t31 -15.5t33 -18.5t31.5 -18.5t19.5 -11.5q16 -10 16 -27q0 -7 -1 -9q-105 -188 -315 -566t-316 -567l-49 -89q-10 -16 -28 -16q-12 0 -134 70q-16 10 -16 28q0 12 44 87 q-143 65 -263.5 173t-208.5 245q-20 31 -20 69zM128 576q167 -258 427 -375l78 141q-87 63 -136 159t-49 203q0 121 61 225q-229 -117 -381 -353zM592 704q0 -20 14 -34t34 -14t34 14t14 34q0 86 61 147t147 61q20 0 34 14t14 34t-14 34t-34 14q-125 0 -214.5 -89.5 t-89.5 -214.5zM896 0l74 132q212 18 392.5 137t301.5 307q-115 179 -282 294l63 112q95 -64 182.5 -153t144.5 -184q20 -34 20 -69t-20 -69q-39 -64 -109 -145q-150 -172 -347.5 -267t-419.5 -95zM1056 286l280 502q8 -45 8 -84q0 -139 -79 -253.5t-209 -164.5z" />
+<glyph unicode="&#xf071;" horiz-adv-x="1792" d="M16 61l768 1408q17 31 47 49t65 18t65 -18t47 -49l768 -1408q35 -63 -2 -126q-17 -29 -46.5 -46t-63.5 -17h-1536q-34 0 -63.5 17t-46.5 46q-37 63 -2 126zM752 992l17 -457q0 -10 10 -16.5t24 -6.5h185q14 0 23.5 6.5t10.5 16.5l18 459q0 12 -10 19q-13 11 -24 11h-220 q-11 0 -24 -11q-10 -7 -10 -21zM768 161q0 -14 9.5 -23.5t22.5 -9.5h192q13 0 22.5 9.5t9.5 23.5v190q0 14 -9.5 23.5t-22.5 9.5h-192q-13 0 -22.5 -9.5t-9.5 -23.5v-190z" />
+<glyph unicode="&#xf072;" horiz-adv-x="1408" d="M0 477q-1 13 9 25l96 97q9 9 23 9q6 0 8 -1l194 -53l259 259l-508 279q-14 8 -17 24q-2 16 9 27l128 128q14 13 30 8l665 -159l160 160q76 76 172 108t148 -12q44 -52 12 -148t-108 -172l-161 -161l160 -696q5 -19 -12 -33l-128 -96q-7 -6 -19 -6q-4 0 -7 1q-15 3 -21 16 l-279 508l-259 -259l53 -194q5 -17 -8 -31l-96 -96q-9 -9 -23 -9h-2q-15 2 -24 13l-189 252l-252 189q-11 7 -13 23z" />
+<glyph unicode="&#xf073;" horiz-adv-x="1664" d="M0 -128v1280q0 52 38 90t90 38h128v96q0 66 47 113t113 47h64q66 0 113 -47t47 -113v-96h384v96q0 66 47 113t113 47h64q66 0 113 -47t47 -113v-96h128q52 0 90 -38t38 -90v-1280q0 -52 -38 -90t-90 -38h-1408q-52 0 -90 38t-38 90zM128 -128h288v288h-288v-288zM128 224 h288v320h-288v-320zM128 608h288v288h-288v-288zM384 1088q0 -13 9.5 -22.5t22.5 -9.5h64q13 0 22.5 9.5t9.5 22.5v288q0 13 -9.5 22.5t-22.5 9.5h-64q-13 0 -22.5 -9.5t-9.5 -22.5v-288zM480 -128h320v288h-320v-288zM480 224h320v320h-320v-320zM480 608h320v288h-320 v-288zM864 -128h320v288h-320v-288zM864 224h320v320h-320v-320zM864 608h320v288h-320v-288zM1152 1088q0 -13 9.5 -22.5t22.5 -9.5h64q13 0 22.5 9.5t9.5 22.5v288q0 13 -9.5 22.5t-22.5 9.5h-64q-13 0 -22.5 -9.5t-9.5 -22.5v-288zM1248 -128h288v288h-288v-288z M1248 224h288v320h-288v-320zM1248 608h288v288h-288v-288z" />
+<glyph unicode="&#xf074;" horiz-adv-x="1792" d="M0 160v192q0 14 9 23t23 9h224q48 0 87 15t69 45t51 61.5t45 77.5q32 62 78 171q29 66 49.5 111t54 105t64 100t74 83t90 68.5t106.5 42t128 16.5h256v192q0 14 9 23t23 9q12 0 24 -10l319 -319q9 -9 9 -23t-9 -23l-320 -320q-9 -9 -23 -9q-13 0 -22.5 9.5t-9.5 22.5v192 h-256q-48 0 -87 -15t-69 -45t-51 -61.5t-45 -77.5q-32 -62 -78 -171q-29 -66 -49.5 -111t-54 -105t-64 -100t-74 -83t-90 -68.5t-106.5 -42t-128 -16.5h-224q-14 0 -23 9t-9 23zM0 1056v192q0 14 9 23t23 9h224q250 0 410 -225q-60 -92 -137 -273q-22 45 -37 72.5 t-40.5 63.5t-51 56.5t-63 35t-81.5 14.5h-224q-14 0 -23 9t-9 23zM743 353q59 93 136 273q22 -45 37 -72.5t40.5 -63.5t51 -56.5t63 -35t81.5 -14.5h256v192q0 14 9 23t23 9q12 0 24 -10l319 -319q9 -9 9 -23t-9 -23l-320 -320q-9 -9 -23 -9q-13 0 -22.5 9.5t-9.5 22.5v192 q-32 0 -85 -0.5t-81 -1t-73 1t-71 5t-64 10.5t-63 18.5t-58 28.5t-59 40t-55 53.5t-56 69.5z" />
+<glyph unicode="&#xf075;" horiz-adv-x="1792" d="M0 640q0 130 71 248.5t191 204.5t286 136.5t348 50.5q244 0 450 -85.5t326 -233t120 -321.5t-120 -321.5t-326 -233t-450 -85.5q-70 0 -145 8q-198 -175 -460 -242q-49 -14 -114 -22q-17 -2 -30.5 9t-17.5 29v1q-3 4 -0.5 12t2 10t4.5 9.5l6 9t7 8.5t8 9q7 8 31 34.5 t34.5 38t31 39.5t32.5 51t27 59t26 76q-157 89 -247.5 220t-90.5 281z" />
+<glyph unicode="&#xf076;" d="M0 576v128q0 26 19 45t45 19h384q26 0 45 -19t19 -45v-128q0 -52 23.5 -90t53.5 -57t71 -30t64 -13t44 -2t44 2t64 13t71 30t53.5 57t23.5 90v128q0 26 19 45t45 19h384q26 0 45 -19t19 -45v-128q0 -201 -98.5 -362t-274 -251.5t-395.5 -90.5t-395.5 90.5t-274 251.5 t-98.5 362zM0 960v384q0 26 19 45t45 19h384q26 0 45 -19t19 -45v-384q0 -26 -19 -45t-45 -19h-384q-26 0 -45 19t-19 45zM1024 960v384q0 26 19 45t45 19h384q26 0 45 -19t19 -45v-384q0 -26 -19 -45t-45 -19h-384q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf077;" horiz-adv-x="1792" d="M90 250.5q0 26.5 19 45.5l742 741q19 19 45 19t45 -19l742 -741q19 -19 19 -45.5t-19 -45.5l-166 -165q-19 -19 -45 -19t-45 19l-531 531l-531 -531q-19 -19 -45 -19t-45 19l-166 165q-19 19 -19 45.5z" />
+<glyph unicode="&#xf078;" horiz-adv-x="1792" d="M90 773.5q0 26.5 19 45.5l166 165q19 19 45 19t45 -19l531 -531l531 531q19 19 45 19t45 -19l166 -165q19 -19 19 -45.5t-19 -45.5l-742 -741q-19 -19 -45 -19t-45 19l-742 741q-19 19 -19 45.5z" />
+<glyph unicode="&#xf079;" horiz-adv-x="1920" d="M0 704q0 24 15 41l320 384q19 22 49 22t49 -22l320 -384q15 -17 15 -41q0 -26 -19 -45t-45 -19h-192v-384h576q16 0 25 -11l160 -192q7 -11 7 -21q0 -13 -9.5 -22.5t-22.5 -9.5h-960q-8 0 -13.5 2t-9 7t-5.5 8t-3 11.5t-1 11.5v13v11v160v416h-192q-26 0 -45 19t-19 45z M640 1120q0 13 9.5 22.5t22.5 9.5h960q8 0 13.5 -2t9 -7t5.5 -8t3 -11.5t1 -11.5v-13v-11v-160v-416h192q26 0 45 -19t19 -45q0 -24 -15 -41l-320 -384q-20 -23 -49 -23t-49 23l-320 384q-15 17 -15 41q0 26 19 45t45 19h192v384h-576q-16 0 -25 12l-160 192q-7 9 -7 20z " />
+<glyph unicode="&#xf07a;" horiz-adv-x="1664" d="M0 1216q0 26 19 45t45 19h256q16 0 28.5 -6.5t20 -15.5t13 -24.5t7.5 -26.5t5.5 -29.5t4.5 -25.5h1201q26 0 45 -19t19 -45v-512q0 -24 -16 -42.5t-41 -21.5l-1044 -122q1 -7 4.5 -21.5t6 -26.5t2.5 -22q0 -16 -24 -64h920q26 0 45 -19t19 -45t-19 -45t-45 -19h-1024 q-26 0 -45 19t-19 45q0 14 11 39.5t29.5 59.5t20.5 38l-177 823h-204q-26 0 -45 19t-19 45zM384 0q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5zM1280 0q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5t-37.5 -90.5 t-90.5 -37.5t-90.5 37.5t-37.5 90.5z" />
+<glyph unicode="&#xf07b;" horiz-adv-x="1664" d="M0 224v960q0 92 66 158t158 66h320q92 0 158 -66t66 -158v-32h672q92 0 158 -66t66 -158v-704q0 -92 -66 -158t-158 -66h-1216q-92 0 -158 66t-66 158z" />
+<glyph unicode="&#xf07c;" horiz-adv-x="1920" d="M0 224v960q0 92 66 158t158 66h320q92 0 158 -66t66 -158v-32h544q92 0 158 -66t66 -158v-160h-832q-94 0 -197 -47.5t-164 -119.5l-337 -396l-5 -6q0 4 -0.5 12.5t-0.5 12.5zM73 56q0 31 31 66l336 396q43 51 120.5 86.5t143.5 35.5h1088q34 0 60.5 -13t26.5 -43 q0 -31 -31 -66l-336 -396q-43 -51 -120.5 -86.5t-143.5 -35.5h-1088q-34 0 -60.5 13t-26.5 43z" />
+<glyph unicode="&#xf07d;" horiz-adv-x="768" d="M64 64q0 26 19 45t45 19h128v1024h-128q-26 0 -45 19t-19 45t19 45l256 256q19 19 45 19t45 -19l256 -256q19 -19 19 -45t-19 -45t-45 -19h-128v-1024h128q26 0 45 -19t19 -45t-19 -45l-256 -256q-19 -19 -45 -19t-45 19l-256 256q-19 19 -19 45z" />
+<glyph unicode="&#xf07e;" horiz-adv-x="1792" d="M0 640q0 26 19 45l256 256q19 19 45 19t45 -19t19 -45v-128h1024v128q0 26 19 45t45 19t45 -19l256 -256q19 -19 19 -45t-19 -45l-256 -256q-19 -19 -45 -19t-45 19t-19 45v128h-1024v-128q0 -26 -19 -45t-45 -19t-45 19l-256 256q-19 19 -19 45z" />
+<glyph unicode="&#xf080;" horiz-adv-x="1920" d="M0 32v1216q0 66 47 113t113 47h1600q66 0 113 -47t47 -113v-1216q0 -66 -47 -113t-113 -47h-1600q-66 0 -113 47t-47 113zM128 32q0 -13 9.5 -22.5t22.5 -9.5h1600q13 0 22.5 9.5t9.5 22.5v1216q0 13 -9.5 22.5t-22.5 9.5h-1600q-13 0 -22.5 -9.5t-9.5 -22.5v-1216z M256 128v384h256v-384h-256zM640 128v896h256v-896h-256zM1024 128v640h256v-640h-256zM1408 128v1024h256v-1024h-256z" />
+<glyph unicode="&#xf081;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 286q148 -94 322 -94q112 0 210 35.5t168 95t120.5 137t75 162t24.5 168.5q0 18 -1 27q63 45 105 109 q-56 -25 -121 -34q68 40 93 117q-65 -38 -134 -51q-61 66 -153 66q-87 0 -148.5 -61.5t-61.5 -148.5q0 -29 5 -48q-129 7 -242 65t-192 155q-29 -50 -29 -106q0 -114 91 -175q-47 1 -100 26v-2q0 -75 50 -133.5t123 -72.5q-29 -8 -51 -8q-13 0 -39 4q21 -63 74.5 -104 t121.5 -42q-116 -90 -261 -90q-26 0 -50 3z" />
+<glyph unicode="&#xf082;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-192v608h203l30 224h-233v143q0 54 28 83t96 29l132 1v207q-96 9 -180 9q-136 0 -218 -80.5t-82 -225.5v-166h-224v-224h224v-608h-544 q-119 0 -203.5 84.5t-84.5 203.5z" />
+<glyph unicode="&#xf083;" horiz-adv-x="1792" d="M0 0v1280q0 53 37.5 90.5t90.5 37.5h1536q53 0 90.5 -37.5t37.5 -90.5v-1280q0 -53 -37.5 -90.5t-90.5 -37.5h-1536q-53 0 -90.5 37.5t-37.5 90.5zM128 0h1536v128h-1536v-128zM128 1024h1536v118v138h-828l-64 -128h-644v-128zM256 1216h384v128h-384v-128zM512 574 q0 -159 112.5 -271.5t271.5 -112.5t271.5 112.5t112.5 271.5t-112.5 271.5t-271.5 112.5t-271.5 -112.5t-112.5 -271.5zM640 574q0 106 75 181t181 75t181 -75t75 -181t-75 -181t-181 -75t-181 75t-75 181zM736 576q0 -14 9 -23t23 -9t23 9t9 23q0 40 28 68t68 28q14 0 23 9 t9 23t-9 23t-23 9q-66 0 -113 -47t-47 -113z" />
+<glyph unicode="&#xf084;" horiz-adv-x="1792" d="M0 752q0 160 95 313t248 248t313 95q163 0 265.5 -102.5t102.5 -265.5q0 -189 -131 -365l355 -355l96 96q-3 3 -26 24.5t-40 38.5t-33 36.5t-16 28.5q0 17 49 66t66 49q13 0 23 -10q6 -6 46 -44.5t82 -79.5t86.5 -86t73 -78t28.5 -41q0 -17 -49 -66t-66 -49 q-9 0 -28.5 16t-36.5 33t-38.5 40t-24.5 26l-96 -96l220 -220q28 -28 28 -68q0 -42 -39 -81t-81 -39q-40 0 -68 28l-671 671q-176 -131 -365 -131q-163 0 -265.5 102.5t-102.5 265.5zM192 768q0 -80 56 -136t136 -56t136 56t56 136q0 42 -19 83q41 -19 83 -19q80 0 136 56 t56 136t-56 136t-136 56t-136 -56t-56 -136q0 -42 19 -83q-41 19 -83 19q-80 0 -136 -56t-56 -136z" />
+<glyph unicode="&#xf085;" horiz-adv-x="1920" d="M0 549v185q0 10 7 19.5t16 10.5l155 24q11 35 32 76q-34 48 -90 115q-7 11 -7 20q0 12 7 20q22 30 82 89t79 59q11 0 21 -7l115 -90q34 18 77 32q11 108 23 154q7 24 30 24h186q11 0 20 -7.5t10 -17.5l23 -153q34 -10 75 -31l118 89q8 7 20 7q11 0 21 -8 q144 -133 144 -160q0 -9 -7 -19q-12 -16 -42 -54t-45 -60q23 -48 34 -82l152 -23q10 -2 17 -10.5t7 -19.5v-185q0 -10 -7 -19.5t-16 -10.5l-155 -24q-11 -35 -32 -76q34 -48 90 -115q7 -10 7 -20q0 -12 -7 -19q-23 -30 -82.5 -89.5t-78.5 -59.5q-11 0 -21 7l-115 90 q-37 -19 -77 -31q-11 -108 -23 -155q-7 -24 -30 -24h-186q-11 0 -20 7.5t-10 17.5l-23 153q-34 10 -75 31l-118 -89q-7 -7 -20 -7q-11 0 -21 8q-144 133 -144 160q0 9 7 19q10 14 41 53t47 61q-23 44 -35 82l-152 24q-10 1 -17 9.5t-7 19.5zM384 640q0 -106 75 -181t181 -75 t181 75t75 181t-75 181t-181 75t-181 -75t-75 -181zM1152 58v140q0 16 149 31q13 29 30 52q-51 113 -51 138q0 4 4 7q4 2 35 20t59 34t30 16q8 0 46 -46.5t52 -67.5q20 2 30 2t30 -2q51 71 92 112l6 2q4 0 124 -70q4 -3 4 -7q0 -25 -51 -138q17 -23 30 -52q149 -15 149 -31 v-140q0 -16 -149 -31q-12 -27 -30 -52q51 -113 51 -138q0 -4 -4 -7q-122 -71 -124 -71q-8 0 -46 47t-52 68q-20 -2 -30 -2t-30 2q-14 -21 -52 -68t-46 -47q-2 0 -124 71q-4 3 -4 7q0 25 51 138q-18 25 -30 52q-149 15 -149 31zM1152 1082v140q0 16 149 31q13 29 30 52 q-51 113 -51 138q0 4 4 7q4 2 35 20t59 34t30 16q8 0 46 -46.5t52 -67.5q20 2 30 2t30 -2q51 71 92 112l6 2q4 0 124 -70q4 -3 4 -7q0 -25 -51 -138q17 -23 30 -52q149 -15 149 -31v-140q0 -16 -149 -31q-12 -27 -30 -52q51 -113 51 -138q0 -4 -4 -7q-122 -71 -124 -71 q-8 0 -46 47t-52 68q-20 -2 -30 -2t-30 2q-14 -21 -52 -68t-46 -47q-2 0 -124 71q-4 3 -4 7q0 25 51 138q-18 25 -30 52q-149 15 -149 31zM1408 128q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5q0 52 -38 90t-90 38t-90 -38t-38 -90zM1408 1152q0 -53 37.5 -90.5 t90.5 -37.5t90.5 37.5t37.5 90.5q0 52 -38 90t-90 38t-90 -38t-38 -90z" />
+<glyph unicode="&#xf086;" horiz-adv-x="1792" d="M0 768q0 139 94 257t256.5 186.5t353.5 68.5t353.5 -68.5t256.5 -186.5t94 -257t-94 -257t-256.5 -186.5t-353.5 -68.5q-86 0 -176 16q-124 -88 -278 -128q-36 -9 -86 -16h-3q-11 0 -20.5 8t-11.5 21q-1 3 -1 6.5t0.5 6.5t2 6l2.5 5t3.5 5.5t4 5t4.5 5t4 4.5q5 6 23 25 t26 29.5t22.5 29t25 38.5t20.5 44q-124 72 -195 177t-71 224zM616 132q58 -4 88 -4q161 0 309 45t264 129q125 92 192 212t67 254q0 77 -23 152q129 -71 204 -178t75 -230q0 -120 -71 -224.5t-195 -176.5q10 -24 20.5 -44t25 -38.5t22.5 -29t26 -29.5t23 -25q1 -1 4 -4.5 t4.5 -5t4 -5t3.5 -5.5l2.5 -5t2 -6t0.5 -6.5t-1 -6.5q-3 -14 -13 -22t-22 -7q-50 7 -86 16q-154 40 -278 128q-90 -16 -176 -16q-271 0 -472 132z" />
+<glyph unicode="&#xf087;" d="M0 128v640q0 53 37.5 90.5t90.5 37.5h274q36 24 137 155q58 75 107 128q24 25 35.5 85.5t30.5 126.5t62 108q39 37 90 37q84 0 151 -32.5t102 -101.5t35 -186q0 -93 -48 -192h176q104 0 180 -76t76 -179q0 -89 -49 -163q9 -33 9 -69q0 -77 -38 -144q3 -21 3 -43 q0 -101 -60 -178q1 -139 -85 -219.5t-227 -80.5h-36h-93q-96 0 -189.5 22.5t-216.5 65.5q-116 40 -138 40h-288q-53 0 -90.5 37.5t-37.5 90.5zM128 192q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45zM384 128h32q13 0 31.5 -3t33 -6.5t38 -11t35 -11.5 t35.5 -12.5t29 -10.5q211 -73 342 -73h121q192 0 192 167q0 26 -5 56q30 16 47.5 52.5t17.5 73.5t-18 69q53 50 53 119q0 25 -10 55.5t-25 47.5q32 1 53.5 47t21.5 81q0 51 -39 89.5t-89 38.5h-352q0 58 48 159.5t48 160.5q0 98 -32 145t-128 47q-26 -26 -38 -85 t-30.5 -125.5t-59.5 -109.5q-22 -23 -77 -91q-4 -5 -23 -30t-31.5 -41t-34.5 -42.5t-40 -44t-38.5 -35.5t-40 -27t-35.5 -9h-32v-640z" />
+<glyph unicode="&#xf088;" d="M0 512v640q0 53 37.5 90.5t90.5 37.5h288q22 0 138 40q128 44 223 66t200 22h112q140 0 226.5 -79t85.5 -216v-5q60 -77 60 -178q0 -22 -3 -43q38 -67 38 -144q0 -36 -9 -69q49 -74 49 -163q0 -103 -76 -179t-180 -76h-176q48 -99 48 -192q0 -118 -35 -186 q-35 -69 -102 -101.5t-151 -32.5q-51 0 -90 37q-34 33 -54 82t-25.5 90.5t-17.5 84.5t-31 64q-48 50 -107 127q-101 131 -137 155h-274q-53 0 -90.5 37.5t-37.5 90.5zM128 1088q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45zM384 512h32q16 0 35.5 -9 t40 -27t38.5 -35.5t40 -44t34.5 -42.5t31.5 -41t23 -30q55 -68 77 -91q41 -43 59.5 -109.5t30.5 -125.5t38 -85q96 0 128 47t32 145q0 59 -48 160.5t-48 159.5h352q50 0 89 38.5t39 89.5q0 35 -21.5 81t-53.5 47q15 17 25 47.5t10 55.5q0 69 -53 119q18 32 18 69t-17.5 73.5 t-47.5 52.5q5 30 5 56q0 85 -49 126t-136 41h-128q-131 0 -342 -73q-5 -2 -29 -10.5t-35.5 -12.5t-35 -11.5t-38 -11t-33 -6.5t-31.5 -3h-32v-640z" />
+<glyph unicode="&#xf089;" horiz-adv-x="896" d="M0 889q0 37 56 46l502 73l225 455q19 41 49 41v-1339l-449 -236q-22 -12 -40 -12q-21 0 -31.5 14.5t-10.5 35.5q0 6 2 20l86 500l-364 354q-25 27 -25 48z" />
+<glyph unicode="&#xf08a;" horiz-adv-x="1792" d="M0 940q0 220 127 344t351 124q62 0 126.5 -21.5t120 -58t95.5 -68.5t76 -68q36 36 76 68t95.5 68.5t120 58t126.5 21.5q224 0 351 -124t127 -344q0 -221 -229 -450l-623 -600q-18 -18 -44 -18t-44 18l-624 602q-10 8 -27.5 26t-55.5 65.5t-68 97.5t-53.5 121t-23.5 138z M128 940q0 -168 187 -355l581 -560l580 559q188 188 188 356q0 81 -21.5 143t-55 98.5t-81.5 59.5t-94 31t-98 8t-112 -25.5t-110.5 -64t-86.5 -72t-60 -61.5q-18 -22 -49 -22t-49 22q-24 28 -60 61.5t-86.5 72t-110.5 64t-112 25.5t-98 -8t-94 -31t-81.5 -59.5t-55 -98.5 t-21.5 -143z" />
+<glyph unicode="&#xf08b;" horiz-adv-x="1664" d="M0 288v704q0 119 84.5 203.5t203.5 84.5h320q13 0 22.5 -9.5t9.5 -22.5q0 -4 1 -20t0.5 -26.5t-3 -23.5t-10 -19.5t-20.5 -6.5h-320q-66 0 -113 -47t-47 -113v-704q0 -66 47 -113t113 -47h288h11h13t11.5 -1t11.5 -3t8 -5.5t7 -9t2 -13.5q0 -4 1 -20t0.5 -26.5t-3 -23.5 t-10 -19.5t-20.5 -6.5h-320q-119 0 -203.5 84.5t-84.5 203.5zM384 448v384q0 26 19 45t45 19h448v288q0 26 19 45t45 19t45 -19l544 -544q19 -19 19 -45t-19 -45l-544 -544q-19 -19 -45 -19t-45 19t-19 45v288h-448q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf08c;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM223 1030q0 -51 35.5 -85.5t92.5 -34.5h1q59 0 95 34.5t36 85.5q-1 52 -36 86t-93 34t-94.5 -34t-36.5 -86z M237 122h231v694h-231v-694zM595 122h231v388q0 38 7 56q15 35 45 59.5t74 24.5q116 0 116 -157v-371h231v398q0 154 -73 233t-193 79q-136 0 -209 -117h2v101h-231q3 -66 0 -694z" />
+<glyph unicode="&#xf08d;" horiz-adv-x="1152" d="M0 320q0 123 78.5 221.5t177.5 98.5v512q-52 0 -90 38t-38 90t38 90t90 38h640q52 0 90 -38t38 -90t-38 -90t-90 -38v-512q99 0 177.5 -98.5t78.5 -221.5q0 -26 -19 -45t-45 -19h-429l-51 -483q-2 -12 -10.5 -20.5t-20.5 -8.5h-1q-27 0 -32 27l-76 485h-404q-26 0 -45 19 t-19 45zM416 672q0 -14 9 -23t23 -9t23 9t9 23v448q0 14 -9 23t-23 9t-23 -9t-9 -23v-448z" />
+<glyph unicode="&#xf08e;" horiz-adv-x="1792" d="M0 288v832q0 119 84.5 203.5t203.5 84.5h704q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-704q-66 0 -113 -47t-47 -113v-832q0 -66 47 -113t113 -47h832q66 0 113 47t47 113v320q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-320q0 -119 -84.5 -203.5t-203.5 -84.5h-832 q-119 0 -203.5 84.5t-84.5 203.5zM685 576q0 13 10 23l652 652l-176 176q-19 19 -19 45t19 45t45 19h512q26 0 45 -19t19 -45v-512q0 -26 -19 -45t-45 -19t-45 19l-176 176l-652 -652q-10 -10 -23 -10t-23 10l-114 114q-10 10 -10 23z" />
+<glyph unicode="&#xf090;" d="M0 448v384q0 26 19 45t45 19h448v288q0 26 19 45t45 19t45 -19l544 -544q19 -19 19 -45t-19 -45l-544 -544q-19 -19 -45 -19t-45 19t-19 45v288h-448q-26 0 -45 19t-19 45zM894.5 78.5q0.5 10.5 3 23.5t10 19.5t20.5 6.5h320q66 0 113 47t47 113v704q0 66 -47 113 t-113 47h-288h-11h-13t-11.5 1t-11.5 3t-8 5.5t-7 9t-2 13.5q0 4 -1 20t-0.5 26.5t3 23.5t10 19.5t20.5 6.5h320q119 0 203.5 -84.5t84.5 -203.5v-704q0 -119 -84.5 -203.5t-203.5 -84.5h-320q-13 0 -22.5 9.5t-9.5 22.5q0 4 -1 20t-0.5 26.5z" />
+<glyph unicode="&#xf091;" horiz-adv-x="1664" d="M0 928v128q0 40 28 68t68 28h288v96q0 66 47 113t113 47h576q66 0 113 -47t47 -113v-96h288q40 0 68 -28t28 -68v-128q0 -71 -41.5 -143t-112 -130t-173 -97.5t-215.5 -44.5q-42 -54 -95 -95q-38 -34 -52.5 -72.5t-14.5 -89.5q0 -54 30.5 -91t97.5 -37q75 0 133.5 -45.5 t58.5 -114.5v-64q0 -14 -9 -23t-23 -9h-832q-14 0 -23 9t-9 23v64q0 69 58.5 114.5t133.5 45.5q67 0 97.5 37t30.5 91q0 51 -14.5 89.5t-52.5 72.5q-53 41 -95 95q-113 5 -215.5 44.5t-173 97.5t-112 130t-41.5 143zM128 928q0 -78 94.5 -162t235.5 -113q-74 162 -74 371 h-256v-96zM1206 653q141 29 235.5 113t94.5 162v96h-256q0 -209 -74 -371z" />
+<glyph unicode="&#xf092;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-224q-16 0 -24.5 1t-19.5 5t-16 14.5t-5 27.5v239q0 97 -52 142q57 6 102.5 18t94 39t81 66.5t53 105t20.5 150.5q0 121 -79 206q37 91 -8 204 q-28 9 -81 -11t-92 -44l-38 -24q-93 26 -192 26t-192 -26q-16 11 -42.5 27t-83.5 38.5t-86 13.5q-44 -113 -7 -204q-79 -85 -79 -206q0 -85 20.5 -150t52.5 -105t80.5 -67t94 -39t102.5 -18q-40 -36 -49 -103q-21 -10 -45 -15t-57 -5t-65.5 21.5t-55.5 62.5q-19 32 -48.5 52 t-49.5 24l-20 3q-21 0 -29 -4.5t-5 -11.5t9 -14t13 -12l7 -5q22 -10 43.5 -38t31.5 -51l10 -23q13 -38 44 -61.5t67 -30t69.5 -7t55.5 3.5l23 4q0 -38 0.5 -103t0.5 -68q0 -22 -11 -33.5t-22 -13t-33 -1.5h-224q-119 0 -203.5 84.5t-84.5 203.5zM271 315q3 5 13 2 q10 -5 7 -12q-5 -7 -13 -2q-10 5 -7 12zM304 290q6 6 16 -3q9 -11 2 -16q-6 -7 -16 3q-9 11 -2 16zM335 233q-9 13 0 18q9 7 17 -6q9 -12 0 -19q-8 -6 -17 7zM370 206q8 9 20 -3q12 -11 4 -19q-8 -9 -20 3q-13 11 -4 19zM419 168q4 11 19 7q16 -5 13 -16q-4 -12 -19 -6 q-17 4 -13 15zM481 154q0 11 16 11q17 2 17 -11q0 -11 -16 -11q-17 -2 -17 11zM540 158q-2 12 14 15q16 2 18 -9q2 -10 -14 -14t-18 8z" />
+<glyph unicode="&#xf093;" horiz-adv-x="1664" d="M0 -32v320q0 40 28 68t68 28h427q21 -56 70.5 -92t110.5 -36h256q61 0 110.5 36t70.5 92h427q40 0 68 -28t28 -68v-320q0 -40 -28 -68t-68 -28h-1472q-40 0 -68 28t-28 68zM325 936q-17 39 14 69l448 448q18 19 45 19t45 -19l448 -448q31 -30 14 -69q-17 -40 -59 -40 h-256v-448q0 -26 -19 -45t-45 -19h-256q-26 0 -45 19t-19 45v448h-256q-42 0 -59 40zM1152 64q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45zM1408 64q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf094;" d="M0 433q0 111 18 217.5t54.5 209.5t100.5 194t150 156q78 59 232 120q194 78 316 78q60 0 175.5 -24t173.5 -24q19 0 57 5t58 5q81 0 118 -50.5t37 -134.5q0 -23 -5 -68t-5 -68q0 -10 1 -18.5t3 -17t4 -13.5t6.5 -16t6.5 -17q16 -40 25 -118.5t9 -136.5q0 -165 -70 -327.5 t-196 -288t-281 -180.5q-124 -44 -326 -44q-57 0 -170 14.5t-169 14.5q-24 0 -72.5 -14.5t-73.5 -14.5q-73 0 -123.5 55.5t-50.5 128.5q0 24 11 68t11 67q0 40 -12.5 120.5t-12.5 121.5zM128 434q0 -40 12.5 -120t12.5 -121q0 -23 -11 -66.5t-11 -65.5t12 -36.5t34 -14.5 q24 0 72.5 11t73.5 11q57 0 169.5 -15.5t169.5 -15.5q181 0 284 36q129 45 235.5 152.5t166 245.5t59.5 275q0 44 -7 113.5t-18 96.5q-12 30 -17 44t-9 36.5t-4 48.5q0 23 5 68.5t5 67.5q0 37 -10 55q-4 1 -13 1q-19 0 -58 -4.5t-59 -4.5q-60 0 -176 24t-175 24 q-43 0 -94.5 -11.5t-85 -23.5t-89.5 -34q-137 -54 -202 -103q-96 -73 -159.5 -189.5t-88 -236t-24.5 -248.5z" />
+<glyph unicode="&#xf095;" horiz-adv-x="1408" d="M0 1069q0 92 51 186q56 101 106 122q25 11 68.5 21t70.5 10q14 0 21 -3q18 -6 53 -76q11 -19 30 -54t35 -63.5t31 -53.5q3 -4 17.5 -25t21.5 -35.5t7 -28.5q0 -20 -28.5 -50t-62 -55t-62 -53t-28.5 -46q0 -9 5 -22.5t8.5 -20.5t14 -24t11.5 -19q76 -137 174 -235 t235 -174q2 -1 19 -11.5t24 -14t20.5 -8.5t22.5 -5q18 0 46 28.5t53 62t55 62t50 28.5q14 0 28.5 -7t35.5 -21.5t25 -17.5q25 -15 53.5 -31t63.5 -35t54 -30q70 -35 76 -53q3 -7 3 -21q0 -27 -10 -70.5t-21 -68.5q-21 -50 -122 -106q-94 -51 -186 -51q-27 0 -52.5 3.5 t-57.5 12.5t-47.5 14.5t-55.5 20.5t-49 18q-98 35 -175 83q-128 79 -264.5 215.5t-215.5 264.5q-48 77 -83 175q-3 9 -18 49t-20.5 55.5t-14.5 47.5t-12.5 57.5t-3.5 52.5z" />
+<glyph unicode="&#xf096;" horiz-adv-x="1408" d="M0 288v832q0 119 84.5 203.5t203.5 84.5h832q119 0 203.5 -84.5t84.5 -203.5v-832q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5zM128 288q0 -66 47 -113t113 -47h832q66 0 113 47t47 113v832q0 66 -47 113t-113 47h-832q-66 0 -113 -47 t-47 -113v-832z" />
+<glyph unicode="&#xf097;" horiz-adv-x="1280" d="M0 7v1289q0 34 19.5 62t52.5 41q21 9 44 9h1048q23 0 44 -9q33 -13 52.5 -41t19.5 -62v-1289q0 -34 -19.5 -62t-52.5 -41q-19 -8 -44 -8q-48 0 -83 32l-441 424l-441 -424q-36 -33 -83 -33q-23 0 -44 9q-33 13 -52.5 41t-19.5 62zM128 38l423 406l89 85l89 -85l423 -406 v1242h-1024v-1242z" />
+<glyph unicode="&#xf098;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 905q0 -16 2.5 -34t5 -30.5t9 -33t10 -29.5t12.5 -33t11 -30q60 -164 216.5 -320.5t320.5 -216.5 q6 -2 30 -11t33 -12.5t29.5 -10t33 -9t30.5 -5t34 -2.5q57 0 130.5 34t94.5 80q22 53 22 101q0 11 -2 16q-3 8 -38.5 29.5t-88.5 49.5l-53 29q-5 3 -19 13t-25 15t-21 5q-18 0 -47 -32.5t-57 -65.5t-44 -33q-7 0 -16.5 3.5t-15.5 6.5t-17 9.5t-14 8.5q-99 55 -170.5 126.5 t-126.5 170.5q-2 3 -8.5 14t-9.5 17t-6.5 15.5t-3.5 16.5q0 13 20.5 33.5t45 38.5t45 39.5t20.5 36.5q0 10 -5 21t-15 25t-13 19q-3 6 -15 28.5t-25 45.5t-26.5 47.5t-25 40.5t-16.5 18t-16 2q-48 0 -101 -22q-46 -21 -80 -94.5t-34 -130.5z" />
+<glyph unicode="&#xf099;" horiz-adv-x="1664" d="M44 145q35 -4 78 -4q225 0 401 138q-105 2 -188 64.5t-114 159.5q33 -5 61 -5q43 0 85 11q-112 23 -185.5 111.5t-73.5 205.5v4q68 -38 146 -41q-66 44 -105 115t-39 154q0 88 44 163q121 -149 294.5 -238.5t371.5 -99.5q-8 38 -8 74q0 134 94.5 228.5t228.5 94.5 q140 0 236 -102q109 21 205 78q-37 -115 -142 -178q93 10 186 50q-67 -98 -162 -167q1 -14 1 -42q0 -130 -38 -259.5t-115.5 -248.5t-184.5 -210.5t-258 -146t-323 -54.5q-271 0 -496 145z" />
+<glyph unicode="&#xf09a;" horiz-adv-x="1024" d="M95 631v296h255v218q0 186 104 288.5t277 102.5q147 0 228 -12v-264h-157q-86 0 -116 -36t-30 -108v-189h293l-39 -296h-254v-759h-306v759h-255z" />
+<glyph unicode="&#xf09b;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5q0 -251 -146.5 -451.5t-378.5 -277.5q-27 -5 -39.5 7t-12.5 30v211q0 97 -52 142q57 6 102.5 18t94 39t81 66.5t53 105t20.5 150.5q0 121 -79 206q37 91 -8 204q-28 9 -81 -11t-92 -44 l-38 -24q-93 26 -192 26t-192 -26q-16 11 -42.5 27t-83.5 38.5t-86 13.5q-44 -113 -7 -204q-79 -85 -79 -206q0 -85 20.5 -150t52.5 -105t80.5 -67t94 -39t102.5 -18q-40 -36 -49 -103q-21 -10 -45 -15t-57 -5t-65.5 21.5t-55.5 62.5q-19 32 -48.5 52t-49.5 24l-20 3 q-21 0 -29 -4.5t-5 -11.5t9 -14t13 -12l7 -5q22 -10 43.5 -38t31.5 -51l10 -23q13 -38 44 -61.5t67 -30t69.5 -7t55.5 3.5l23 4q0 -38 0.5 -89t0.5 -54q0 -18 -13 -30t-40 -7q-232 77 -378.5 277.5t-146.5 451.5z" />
+<glyph unicode="&#xf09c;" horiz-adv-x="1664" d="M0 96v576q0 40 28 68t68 28h672v192q0 185 131.5 316.5t316.5 131.5t316.5 -131.5t131.5 -316.5v-256q0 -26 -19 -45t-45 -19h-64q-26 0 -45 19t-19 45v256q0 106 -75 181t-181 75t-181 -75t-75 -181v-192h96q40 0 68 -28t28 -68v-576q0 -40 -28 -68t-68 -28h-960 q-40 0 -68 28t-28 68z" />
+<glyph unicode="&#xf09d;" horiz-adv-x="1920" d="M0 32v1216q0 66 47 113t113 47h1600q66 0 113 -47t47 -113v-1216q0 -66 -47 -113t-113 -47h-1600q-66 0 -113 47t-47 113zM128 32q0 -13 9.5 -22.5t22.5 -9.5h1600q13 0 22.5 9.5t9.5 22.5v608h-1664v-608zM128 1024h1664v224q0 13 -9.5 22.5t-22.5 9.5h-1600 q-13 0 -22.5 -9.5t-9.5 -22.5v-224zM256 128v128h256v-128h-256zM640 128v128h384v-128h-384z" />
+<glyph unicode="&#xf09e;" horiz-adv-x="1408" d="M0 192q0 80 56 136t136 56t136 -56t56 -136t-56 -136t-136 -56t-136 56t-56 136zM0 697v135q0 29 21 47q17 17 43 17h5q160 -13 306 -80.5t259 -181.5q114 -113 181.5 -259t80.5 -306q2 -28 -17 -48q-18 -21 -47 -21h-135q-25 0 -43 16.5t-20 41.5q-22 229 -184.5 391.5 t-391.5 184.5q-25 2 -41.5 20t-16.5 43zM0 1201v143q0 28 20 46q18 18 44 18h3q262 -13 501.5 -120t425.5 -294q187 -186 294 -425.5t120 -501.5q2 -27 -18 -47q-18 -20 -46 -20h-143q-26 0 -44.5 17.5t-19.5 42.5q-12 215 -101 408.5t-231.5 336t-336 231.5t-408.5 102 q-25 1 -42.5 19.5t-17.5 43.5z" />
+<glyph unicode="&#xf0a0;" d="M0 160v320q0 25 16 75l197 606q17 53 63 86t101 33h782q55 0 101 -33t63 -86l197 -606q16 -50 16 -75v-320q0 -66 -47 -113t-113 -47h-1216q-66 0 -113 47t-47 113zM128 160q0 -13 9.5 -22.5t22.5 -9.5h1216q13 0 22.5 9.5t9.5 22.5v320q0 13 -9.5 22.5t-22.5 9.5h-1216 q-13 0 -22.5 -9.5t-9.5 -22.5v-320zM178 640h1180l-157 482q-4 13 -16 21.5t-26 8.5h-782q-14 0 -26 -8.5t-16 -21.5zM880 320q0 33 23.5 56.5t56.5 23.5t56.5 -23.5t23.5 -56.5t-23.5 -56.5t-56.5 -23.5t-56.5 23.5t-23.5 56.5zM1136 320q0 33 23.5 56.5t56.5 23.5 t56.5 -23.5t23.5 -56.5t-23.5 -56.5t-56.5 -23.5t-56.5 23.5t-23.5 56.5z" />
+<glyph unicode="&#xf0a1;" horiz-adv-x="1792" d="M0 672v192q0 66 47 113t113 47h480q435 0 896 384q52 0 90 -38t38 -90v-384q53 0 90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5v-384q0 -52 -38 -90t-90 -38q-417 347 -812 380q-58 -19 -91 -66t-31 -100.5t40 -92.5q-20 -33 -23 -65.5t6 -58t33.5 -55t48 -50 t61.5 -50.5q-29 -58 -111.5 -83t-168.5 -11.5t-132 55.5q-7 23 -29.5 87.5t-32 94.5t-23 89t-15 101t3.5 98.5t22 110.5h-122q-66 0 -113 47t-47 113zM768 633q377 -42 768 -341v954q-394 -302 -768 -343v-270z" />
+<glyph unicode="&#xf0a2;" horiz-adv-x="1664" d="M0 128q190 161 287 397.5t97 498.5q0 165 96 262t264 117q-8 18 -8 37q0 40 28 68t68 28t68 -28t28 -68q0 -19 -8 -37q168 -20 264 -117t96 -262q0 -262 97 -498.5t287 -397.5q0 -52 -38 -90t-90 -38h-448q0 -106 -75 -181t-181 -75t-181 75t-75 181h-448q-52 0 -90 38 t-38 90zM183 128h1298q-164 181 -246.5 411.5t-82.5 484.5q0 256 -320 256t-320 -256q0 -254 -82.5 -484.5t-246.5 -411.5zM656 0q0 -73 51.5 -124.5t124.5 -51.5q16 0 16 16t-16 16q-59 0 -101.5 42.5t-42.5 101.5q0 16 -16 16t-16 -16z" />
+<glyph unicode="&#xf0a3;" d="M2 435q-10 42 20 70l138 135l-138 135q-30 28 -20 70q12 41 52 51l188 48l-53 186q-12 41 19 70q29 31 70 19l186 -53l48 188q10 41 51 51q41 12 70 -19l135 -139l135 139q29 30 70 19q41 -10 51 -51l48 -188l186 53q41 12 70 -19q31 -29 19 -70l-53 -186l188 -48 q40 -10 52 -51q10 -42 -20 -70l-138 -135l138 -135q30 -28 20 -70q-12 -41 -52 -51l-188 -48l53 -186q12 -41 -19 -70q-29 -31 -70 -19l-186 53l-48 -188q-10 -40 -51 -52q-12 -2 -19 -2q-31 0 -51 22l-135 138l-135 -138q-28 -30 -70 -20q-41 11 -51 52l-48 188l-186 -53 q-41 -12 -70 19q-31 29 -19 70l53 186l-188 48q-40 10 -52 51z" />
+<glyph unicode="&#xf0a4;" horiz-adv-x="1792" d="M0 128v640q0 53 37.5 90.5t90.5 37.5h288q10 0 21.5 4.5t23.5 14t22.5 18t24 22.5t20.5 21.5t19 21.5t14 17q65 74 100 129q13 21 33 62t37 72t40.5 63t55 49.5t69.5 17.5q125 0 206.5 -67t81.5 -189q0 -68 -22 -128h374q104 0 180 -76t76 -179q0 -105 -75.5 -181 t-180.5 -76h-169q-4 -62 -37 -119q3 -21 3 -43q0 -101 -60 -178q1 -139 -85 -219.5t-227 -80.5q-133 0 -322 69q-164 59 -223 59h-288q-53 0 -90.5 37.5t-37.5 90.5zM128 192q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45zM384 128h32q72 0 167 -32 t193.5 -64t179.5 -32q189 0 189 167q0 26 -5 56q30 16 47.5 52.5t17.5 73.5t-18 69q53 50 53 119q0 25 -10 55.5t-25 47.5h331q52 0 90 38t38 90q0 51 -39 89.5t-89 38.5h-576q0 20 15 48.5t33 55t33 68t15 84.5q0 67 -44.5 97.5t-115.5 30.5q-24 0 -90 -139 q-24 -44 -37 -65q-40 -64 -112 -145q-71 -81 -101 -106q-69 -57 -140 -57h-32v-640z" />
+<glyph unicode="&#xf0a5;" horiz-adv-x="1792" d="M0 769q0 103 76 179t180 76h374q-22 60 -22 128q0 122 81.5 189t206.5 67q38 0 69.5 -17.5t55 -49.5t40.5 -63t37 -72t33 -62q35 -55 100 -129q2 -3 14 -17t19 -21.5t20.5 -21.5t24 -22.5t22.5 -18t23.5 -14t21.5 -4.5h288q53 0 90.5 -37.5t37.5 -90.5v-640 q0 -53 -37.5 -90.5t-90.5 -37.5h-288q-59 0 -223 -59q-190 -69 -317 -69q-142 0 -230 77.5t-87 217.5l1 5q-61 76 -61 178q0 22 3 43q-33 57 -37 119h-169q-105 0 -180.5 76t-75.5 181zM128 768q0 -52 38 -90t90 -38h331q-15 -17 -25 -47.5t-10 -55.5q0 -69 53 -119 q-18 -32 -18 -69t17.5 -73.5t47.5 -52.5q-4 -24 -4 -56q0 -85 48.5 -126t135.5 -41q84 0 183 32t194 64t167 32h32v640h-32q-35 0 -67.5 12t-62.5 37t-50 46t-49 54q-2 3 -3.5 4.5t-4 4.5t-4.5 5q-72 81 -112 145q-14 22 -38 68q-1 3 -10.5 22.5t-18.5 36t-20 35.5 t-21.5 30.5t-18.5 11.5q-71 0 -115.5 -30.5t-44.5 -97.5q0 -43 15 -84.5t33 -68t33 -55t15 -48.5h-576q-50 0 -89 -38.5t-39 -89.5zM1536 192q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf0a6;" d="M0 640q0 125 67 206.5t189 81.5q68 0 128 -22v374q0 104 76 180t179 76q105 0 181 -75.5t76 -180.5v-169q62 -4 119 -37q21 3 43 3q101 0 178 -60q139 1 219.5 -85t80.5 -227q0 -133 -69 -322q-59 -164 -59 -223v-288q0 -53 -37.5 -90.5t-90.5 -37.5h-640 q-53 0 -90.5 37.5t-37.5 90.5v288q0 10 -4.5 21.5t-14 23.5t-18 22.5t-22.5 24t-21.5 20.5t-21.5 19t-17 14q-74 65 -129 100q-21 13 -62 33t-72 37t-63 40.5t-49.5 55t-17.5 69.5zM128 640q0 -24 139 -90q44 -24 65 -37q64 -40 145 -112q81 -71 106 -101q57 -69 57 -140 v-32h640v32q0 72 32 167t64 193.5t32 179.5q0 189 -167 189q-26 0 -56 -5q-16 30 -52.5 47.5t-73.5 17.5t-69 -18q-50 53 -119 53q-25 0 -55.5 -10t-47.5 -25v331q0 52 -38 90t-90 38q-51 0 -89.5 -39t-38.5 -89v-576q-20 0 -48.5 15t-55 33t-68 33t-84.5 15 q-67 0 -97.5 -44.5t-30.5 -115.5zM1152 -64q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf0a7;" d="M0 640q0 38 17.5 69.5t49.5 55t63 40.5t72 37t62 33q55 35 129 100q3 2 17 14t21.5 19t21.5 20.5t22.5 24t18 22.5t14 23.5t4.5 21.5v288q0 53 37.5 90.5t90.5 37.5h640q53 0 90.5 -37.5t37.5 -90.5v-288q0 -59 59 -223q69 -190 69 -317q0 -142 -77.5 -230t-217.5 -87 l-5 1q-76 -61 -178 -61q-22 0 -43 3q-54 -30 -119 -37v-169q0 -105 -76 -180.5t-181 -75.5q-103 0 -179 76t-76 180v374q-54 -22 -128 -22q-121 0 -188.5 81.5t-67.5 206.5zM128 640q0 -71 30.5 -115.5t97.5 -44.5q43 0 84.5 15t68 33t55 33t48.5 15v-576q0 -50 38.5 -89 t89.5 -39q52 0 90 38t38 90v331q46 -35 103 -35q69 0 119 53q32 -18 69 -18t73.5 17.5t52.5 47.5q24 -4 56 -4q85 0 126 48.5t41 135.5q0 84 -32 183t-64 194t-32 167v32h-640v-32q0 -35 -12 -67.5t-37 -62.5t-46 -50t-54 -49q-9 -8 -14 -12q-81 -72 -145 -112 q-22 -14 -68 -38q-3 -1 -22.5 -10.5t-36 -18.5t-35.5 -20t-30.5 -21.5t-11.5 -18.5zM1152 1344q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf0a8;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM251 640q0 -27 18 -45l91 -91l362 -362q18 -18 45 -18t45 18l91 91q18 18 18 45t-18 45l-189 189h502 q26 0 45 19t19 45v128q0 26 -19 45t-45 19h-502l189 189q19 19 19 45t-19 45l-91 91q-18 18 -45 18t-45 -18l-362 -362l-91 -91q-18 -18 -18 -45z" />
+<glyph unicode="&#xf0a9;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM256 576q0 -26 19 -45t45 -19h502l-189 -189q-19 -19 -19 -45t19 -45l91 -91q18 -18 45 -18t45 18 l362 362l91 91q18 18 18 45t-18 45l-91 91l-362 362q-18 18 -45 18t-45 -18l-91 -91q-18 -18 -18 -45t18 -45l189 -189h-502q-26 0 -45 -19t-19 -45v-128z" />
+<glyph unicode="&#xf0aa;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM252 641q0 -27 18 -45l91 -91q18 -18 45 -18t45 18l189 189v-502q0 -26 19 -45t45 -19h128q26 0 45 19 t19 45v502l189 -189q19 -19 45 -19t45 19l91 91q18 18 18 45t-18 45l-362 362l-91 91q-18 18 -45 18t-45 -18l-91 -91l-362 -362q-18 -18 -18 -45z" />
+<glyph unicode="&#xf0ab;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM252 639q0 -27 18 -45l362 -362l91 -91q18 -18 45 -18t45 18l91 91l362 362q18 18 18 45t-18 45l-91 91 q-18 18 -45 18t-45 -18l-189 -189v502q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-502l-189 189q-19 19 -45 19t-45 -19l-91 -91q-18 -18 -18 -45z" />
+<glyph unicode="&#xf0ac;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM226 979q7 -7 12 -8q4 -1 5 -9t2.5 -11t11.5 3q9 -8 3 -19q1 1 44 -27q19 -17 21 -21q3 -11 -10 -18 q-1 2 -9 9t-9 4q-3 -5 0.5 -18.5t10.5 -12.5q-7 0 -9.5 -16t-2.5 -35.5t-1 -23.5l2 -1q-3 -12 5.5 -34.5t21.5 -19.5q-13 -3 20 -43q6 -8 8 -9q3 -2 12 -7.5t15 -10t10 -10.5q4 -5 10 -22.5t14 -23.5q-2 -6 9.5 -20t10.5 -23q-1 0 -2.5 -1t-2.5 -1q3 -7 15.5 -14t15.5 -13 q1 -3 2 -10t3 -11t8 -2q2 20 -24 62q-15 25 -17 29q-3 5 -5.5 15.5t-4.5 14.5q2 0 6 -1.5t8.5 -3.5t7.5 -4t2 -3q-3 -7 2 -17.5t12 -18.5t17 -19t12 -13q6 -6 14 -19.5t0 -13.5q9 0 20 -10t17 -20q5 -8 8 -26t5 -24q2 -7 8.5 -13.5t12.5 -9.5l16 -8t13 -7q5 -2 18.5 -10.5 t21.5 -11.5q10 -4 16 -4t14.5 2.5t13.5 3.5q15 2 29 -15t21 -21q36 -19 55 -11q-2 -1 0.5 -7.5t8 -15.5t9 -14.5t5.5 -8.5q5 -6 18 -15t18 -15q6 4 7 9q-3 -8 7 -20t18 -10q14 3 14 32q-31 -15 -49 18q0 1 -2.5 5.5t-4 8.5t-2.5 8.5t0 7.5t5 3q9 0 10 3.5t-2 12.5t-4 13 q-1 8 -11 20t-12 15q-5 -9 -16 -8t-16 9q0 -1 -1.5 -5.5t-1.5 -6.5q-13 0 -15 1q1 3 2.5 17.5t3.5 22.5q1 4 5.5 12t7.5 14.5t4 12.5t-4.5 9.5t-17.5 2.5q-19 -1 -26 -20q-1 -3 -3 -10.5t-5 -11.5t-9 -7q-7 -3 -24 -2t-24 5q-13 8 -22.5 29t-9.5 37q0 10 2.5 26.5t3 25 t-5.5 24.5q3 2 9 9.5t10 10.5q2 1 4.5 1.5t4.5 0t4 1.5t3 6q-1 1 -4 3q-3 3 -4 3q7 -3 28.5 1.5t27.5 -1.5q15 -11 22 2q0 1 -2.5 9.5t-0.5 13.5q5 -27 29 -9q3 -3 15.5 -5t17.5 -5q3 -2 7 -5.5t5.5 -4.5t5 0.5t8.5 6.5q10 -14 12 -24q11 -40 19 -44q7 -3 11 -2t4.5 9.5 t0 14t-1.5 12.5l-1 8v18l-1 8q-15 3 -18.5 12t1.5 18.5t15 18.5q1 1 8 3.5t15.5 6.5t12.5 8q21 19 15 35q7 0 11 9q-1 0 -5 3t-7.5 5t-4.5 2q9 5 2 16q5 3 7.5 11t7.5 10q9 -12 21 -2q7 8 1 16q5 7 20.5 10.5t18.5 9.5q7 -2 8 2t1 12t3 12q4 5 15 9t13 5l17 11q3 4 0 4 q18 -2 31 11q10 11 -6 20q3 6 -3 9.5t-15 5.5q3 1 11.5 0.5t10.5 1.5q15 10 -7 16q-17 5 -43 -12q-2 -1 -9.5 -9.5t-13.5 -9.5q2 0 4.5 5t5 11t3.5 7q6 7 22 15q14 6 52 12q34 8 51 -11q-2 2 9.5 13t14.5 12q3 2 15 4.5t15 7.5l2 
22q-12 -1 -17.5 7t-6.5 21q0 -2 -6 -8 q0 7 -4.5 8t-11.5 -1t-9 -1q-10 3 -15 7.5t-8 16.5t-4 15q-2 5 -9.5 10.5t-9.5 10.5q-1 2 -2.5 5.5t-3 6.5t-4 5.5t-5.5 2.5t-7 -5t-7.5 -10t-4.5 -5q-3 2 -6 1.5t-4.5 -1t-4.5 -3t-5 -3.5q-3 -2 -8.5 -3t-8.5 -2q15 5 -1 11q-10 4 -16 3q9 4 7.5 12t-8.5 14h5 q-1 4 -8.5 8.5t-17.5 8.5t-13 6q-8 5 -34 9.5t-33 0.5q-5 -6 -4.5 -10.5t4 -14t3.5 -12.5q1 -6 -5.5 -13t-6.5 -12q0 -7 14 -15.5t10 -21.5q-3 -8 -16 -16t-16 -12q-5 -8 -1.5 -18.5t10.5 -16.5q2 -2 1.5 -4t-3.5 -4.5t-5.5 -4t-6.5 -3.5l-3 -2q-11 -5 -20.5 6t-13.5 26 q-7 25 -16 30q-23 8 -29 -1q-5 13 -41 26q-25 9 -58 4q6 1 0 15q-7 15 -19 12q3 6 4 17.5t1 13.5q3 13 12 23q1 1 7 8.5t9.5 13.5t0.5 6q35 -4 50 11q5 5 11.5 17t10.5 17q9 6 14 5.5t14.5 -5.5t14.5 -5q14 -1 15.5 11t-7.5 20q12 -1 3 17q-5 7 -8 9q-12 4 -27 -5 q-8 -4 2 -8q-1 1 -9.5 -10.5t-16.5 -17.5t-16 5q-1 1 -5.5 13.5t-9.5 13.5q-8 0 -16 -15q3 8 -11 15t-24 8q19 12 -8 27q-7 4 -20.5 5t-19.5 -4q-5 -7 -5.5 -11.5t5 -8t10.5 -5.5t11.5 -4t8.5 -3q14 -10 8 -14q-2 -1 -8.5 -3.5t-11.5 -4.5t-6 -4q-3 -4 0 -14t-2 -14 q-5 5 -9 17.5t-7 16.5q7 -9 -25 -6l-10 1q-4 0 -16 -2t-20.5 -1t-13.5 8q-4 8 0 20q1 4 4 2q-4 3 -11 9.5t-10 8.5q-46 -15 -94 -41q6 -1 12 1q5 2 13 6.5t10 5.5q34 14 42 7l5 5q14 -16 20 -25q-7 4 -30 1q-20 -6 -22 -12q7 -12 5 -18q-4 3 -11.5 10t-14.5 11t-15 5 q-16 0 -22 -1q-146 -80 -235 -222zM877 26q0 -6 2 -16q206 36 351 189q-3 3 -12.5 4.5t-12.5 3.5q-18 7 -24 8q1 7 -2.5 13t-8 9t-12.5 8t-11 7q-2 2 -7 6t-7 5.5t-7.5 4.5t-8.5 2t-10 -1l-3 -1q-3 -1 -5.5 -2.5t-5.5 -3t-4 -3t0 -2.5q-21 17 -36 22q-5 1 -11 5.5t-10.5 7 t-10 1.5t-11.5 -7q-5 -5 -6 -15t-2 -13q-7 5 0 17.5t2 18.5q-3 6 -10.5 4.5t-12 -4.5t-11.5 -8.5t-9 -6.5t-8.5 -5.5t-8.5 -7.5q-3 -4 -6 -12t-5 -11q-2 4 -11.5 6.5t-9.5 5.5q2 -10 4 -35t5 -38q7 -31 -12 -48q-27 -25 -29 -40q-4 -22 12 -26q0 -7 -8 -20.5t-7 -21.5z" />
+<glyph unicode="&#xf0ad;" horiz-adv-x="1664" d="M21 0q0 53 38 91l681 681q39 -98 114.5 -173.5t173.5 -114.5l-682 -682q-37 -37 -90 -37q-52 0 -91 37l-106 108q-38 36 -38 90zM256 64q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45zM768 960q0 185 131.5 316.5t316.5 131.5q58 0 121.5 -16.5 t107.5 -46.5q16 -11 16 -28t-16 -28l-293 -169v-224l193 -107q5 3 79 48.5t135.5 81t70.5 35.5q15 0 23.5 -10t8.5 -25q0 -39 -23 -106q-47 -134 -164.5 -217.5t-258.5 -83.5q-185 0 -316.5 131.5t-131.5 316.5z" />
+<glyph unicode="&#xf0ae;" horiz-adv-x="1792" d="M0 64v256q0 26 19 45t45 19h1664q26 0 45 -19t19 -45v-256q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45zM0 576v256q0 26 19 45t45 19h1664q26 0 45 -19t19 -45v-256q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45zM0 1088v256q0 26 19 45t45 19h1664 q26 0 45 -19t19 -45v-256q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45zM640 640h1024v128h-1024v-128zM1024 128h640v128h-640v-128zM1280 1152h384v128h-384v-128z" />
+<glyph unicode="&#xf0b0;" horiz-adv-x="1408" d="M5 1241q17 39 59 39h1280q42 0 59 -39q17 -41 -14 -70l-493 -493v-742q0 -42 -39 -59q-13 -5 -25 -5q-27 0 -45 19l-256 256q-19 19 -19 45v486l-493 493q-31 29 -14 70z" />
+<glyph unicode="&#xf0b1;" horiz-adv-x="1792" d="M0 160v480h672v-160q0 -26 19 -45t45 -19h320q26 0 45 19t19 45v160h672v-480q0 -66 -47 -113t-113 -47h-1472q-66 0 -113 47t-47 113zM0 736v384q0 66 47 113t113 47h352v160q0 40 28 68t68 28h576q40 0 68 -28t28 -68v-160h352q66 0 113 -47t47 -113v-384h-1792z M640 1280h512v128h-512v-128zM768 512v128h256v-128h-256z" />
+<glyph unicode="&#xf0b2;" d="M0 -64v448q0 42 40 59q39 17 69 -14l144 -144l355 355l-355 355l-144 -144q-19 -19 -45 -19q-12 0 -24 5q-40 17 -40 59v448q0 26 19 45t45 19h448q42 0 59 -40q17 -39 -14 -69l-144 -144l355 -355l355 355l-144 144q-31 30 -14 69q17 40 59 40h448q26 0 45 -19t19 -45 v-448q0 -42 -39 -59q-13 -5 -25 -5q-26 0 -45 19l-144 144l-355 -355l355 -355l144 144q29 31 70 14q39 -17 39 -59v-448q0 -26 -19 -45t-45 -19h-448q-42 0 -59 40q-17 39 14 69l144 144l-355 355l-355 -355l144 -144q31 -30 14 -69q-17 -40 -59 -40h-448q-26 0 -45 19 t-19 45z" />
+<glyph unicode="&#xf0c0;" horiz-adv-x="1920" d="M0 671q0 353 124 353q6 0 43.5 -21t97.5 -42.5t119 -21.5q67 0 133 23q-5 -37 -5 -66q0 -139 81 -256q-162 -5 -265 -128h-134q-82 0 -138 40.5t-56 118.5zM128 1280q0 106 75 181t181 75t181 -75t75 -181t-75 -181t-181 -75t-181 75t-75 181zM256 3q0 53 3.5 103.5 t14 109t26.5 108.5t43 97.5t62 81t85.5 53.5t111.5 20q10 0 43 -21.5t73 -48t107 -48t135 -21.5t135 21.5t107 48t73 48t43 21.5q61 0 111.5 -20t85.5 -53.5t62 -81t43 -97.5t26.5 -108.5t14 -109t3.5 -103.5q0 -120 -73 -189.5t-194 -69.5h-874q-121 0 -194 69.5t-73 189.5 zM576 896q0 159 112.5 271.5t271.5 112.5t271.5 -112.5t112.5 -271.5t-112.5 -271.5t-271.5 -112.5t-271.5 112.5t-112.5 271.5zM1280 1280q0 106 75 181t181 75t181 -75t75 -181t-75 -181t-181 -75t-181 75t-75 181zM1327 640q81 117 81 256q0 29 -5 66q66 -23 133 -23 q59 0 119 21.5t97.5 42.5t43.5 21q124 0 124 -353q0 -78 -56 -118.5t-138 -40.5h-134q-103 123 -265 128z" />
+<glyph unicode="&#xf0c1;" horiz-adv-x="1664" d="M16 1088q0 120 85 203l147 146q83 83 203 83q121 0 204 -85l206 -207q83 -83 83 -203q0 -123 -88 -209l88 -88q86 88 208 88q120 0 204 -84l208 -208q84 -84 84 -204t-85 -203l-147 -146q-83 -83 -203 -83q-121 0 -204 85l-206 207q-83 83 -83 203q0 123 88 209l-88 88 q-86 -88 -208 -88q-120 0 -204 84l-208 208q-84 84 -84 204zM208 1088q0 -40 28 -68l208 -208q27 -27 68 -27q42 0 72 31q-3 3 -19 18.5t-21.5 21.5t-15 19t-13 25.5t-3.5 27.5q0 40 28 68t68 28q15 0 27.5 -3.5t25.5 -13t19 -15t21.5 -21.5t18.5 -19q33 31 33 73 q0 40 -28 68l-206 207q-28 28 -68 28q-39 0 -68 -27l-147 -146q-28 -28 -28 -67zM911 383q0 -40 28 -68l206 -207q27 -27 68 -27q40 0 68 26l147 146q28 28 28 67q0 40 -28 68l-208 208q-28 28 -68 28q-42 0 -72 -32q3 -3 19 -18.5t21.5 -21.5t15 -19t13 -25.5t3.5 -27.5 q0 -40 -28 -68t-68 -28q-15 0 -27.5 3.5t-25.5 13t-19 15t-21.5 21.5t-18.5 19q-33 -31 -33 -73z" />
+<glyph unicode="&#xf0c2;" horiz-adv-x="1920" d="M0 448q0 132 71 241.5t187 163.5q-2 28 -2 43q0 212 150 362t362 150q158 0 286.5 -88t187.5 -230q70 62 166 62q106 0 181 -75t75 -181q0 -75 -41 -138q129 -30 213 -134.5t84 -239.5q0 -159 -112.5 -271.5t-271.5 -112.5h-1088q-185 0 -316.5 131.5t-131.5 316.5z" />
+<glyph unicode="&#xf0c3;" horiz-adv-x="1664" d="M115.5 -64.5q-34.5 63.5 21.5 152.5l503 793v399h-64q-26 0 -45 19t-19 45t19 45t45 19h512q26 0 45 -19t19 -45t-19 -45t-45 -19h-64v-399l503 -793q56 -89 21.5 -152.5t-140.5 -63.5h-1152q-106 0 -140.5 63.5zM476 384h712l-272 429l-20 31v37v399h-128v-399v-37 l-20 -31z" />
+<glyph unicode="&#xf0c4;" horiz-adv-x="1792" d="M1 157q7 76 56 147t131 124q132 84 278 84q83 0 151 -31q9 13 22 22l122 73l-122 73q-13 9 -22 22q-68 -31 -151 -31q-146 0 -278 84q-82 53 -131 124t-56 147q-5 59 15.5 113t63.5 93q85 79 222 79q145 0 277 -84q83 -52 132 -123t56 -148q4 -48 -10 -97q4 -1 12 -5 l110 -66l690 387q14 8 31 8q16 0 29 -7l128 -64q30 -16 35 -51q3 -36 -25 -56l-507 -398l507 -398q28 -20 25 -56q-5 -35 -35 -51l-128 -64q-13 -7 -29 -7q-17 0 -31 8l-690 387l-110 -66q-8 -4 -12 -5q14 -49 10 -97q-7 -77 -56 -147.5t-132 -123.5q-132 -84 -277 -84 q-136 0 -222 78q-90 84 -79 207zM168 176q-25 -66 21 -108q39 -36 113 -36q100 0 192 59q81 51 106 117t-21 108q-39 36 -113 36q-100 0 -192 -59q-81 -51 -106 -117zM168 976q25 -66 106 -117q92 -59 192 -59q74 0 113 36q46 42 21 108t-106 117q-92 59 -192 59 q-74 0 -113 -36q-46 -42 -21 -108zM672 448l9 -8q2 -2 7 -6q4 -4 11 -12t11 -12l26 -26l160 96l96 -32l736 576l-128 64l-768 -431v-113zM672 704l96 -58v11q0 36 33 56l14 8l-79 47l-26 -26q-3 -3 -10 -11t-12 -12q-2 -2 -4 -3.5t-3 -2.5zM896 576q0 26 19 45t45 19t45 -19 t19 -45t-19 -45t-45 -19t-45 19t-19 45zM1018 391l582 -327l128 64l-520 408l-177 -138q-2 -3 -13 -7z" />
+<glyph unicode="&#xf0c5;" horiz-adv-x="1792" d="M0 224v672q0 40 20 88t48 76l408 408q28 28 76 48t88 20h416q40 0 68 -28t28 -68v-328q68 40 128 40h416q40 0 68 -28t28 -68v-1216q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68v288h-544q-40 0 -68 28t-28 68zM128 256h512v256q0 40 20 88t48 76l316 316v416h-384 v-416q0 -40 -28 -68t-68 -28h-416v-640zM213 1024h299v299zM768 -128h896v1152h-384v-416q0 -40 -28 -68t-68 -28h-416v-640zM853 640h299v299z" />
+<glyph unicode="&#xf0c6;" horiz-adv-x="1408" d="M4 1023q0 159 110 270t269 111q158 0 273 -113l605 -606q10 -10 10 -22q0 -16 -30.5 -46.5t-46.5 -30.5q-13 0 -23 10l-606 607q-79 77 -181 77q-106 0 -179 -75t-73 -181q0 -105 76 -181l776 -777q63 -63 145 -63q64 0 106 42t42 106q0 82 -63 145l-581 581 q-26 24 -60 24q-29 0 -48 -19t-19 -48q0 -32 25 -59l410 -410q10 -10 10 -22q0 -16 -31 -47t-47 -31q-12 0 -22 10l-410 410q-63 61 -63 149q0 82 57 139t139 57q88 0 149 -63l581 -581q100 -98 100 -235q0 -117 -79 -196t-196 -79q-135 0 -235 100l-777 776 q-113 115 -113 271z" />
+<glyph unicode="&#xf0c7;" d="M0 -32v1344q0 40 28 68t68 28h928q40 0 88 -20t76 -48l280 -280q28 -28 48 -76t20 -88v-928q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 0h128v416q0 40 28 68t68 28h832q40 0 68 -28t28 -68v-416h128v896q0 14 -10 38.5t-20 34.5l-281 281q-10 10 -34 20 t-39 10v-416q0 -40 -28 -68t-68 -28h-576q-40 0 -68 28t-28 68v416h-128v-1280zM384 0h768v384h-768v-384zM640 928q0 -13 9.5 -22.5t22.5 -9.5h192q13 0 22.5 9.5t9.5 22.5v320q0 13 -9.5 22.5t-22.5 9.5h-192q-13 0 -22.5 -9.5t-9.5 -22.5v-320z" />
+<glyph unicode="&#xf0c8;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5z" />
+<glyph unicode="&#xf0c9;" d="M0 64v128q0 26 19 45t45 19h1408q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45zM0 576v128q0 26 19 45t45 19h1408q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45zM0 1088v128q0 26 19 45t45 19h1408 q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf0ca;" horiz-adv-x="1792" d="M0 128q0 80 56 136t136 56t136 -56t56 -136t-56 -136t-136 -56t-136 56t-56 136zM0 640q0 80 56 136t136 56t136 -56t56 -136t-56 -136t-136 -56t-136 56t-56 136zM0 1152q0 80 56 136t136 56t136 -56t56 -136t-56 -136t-136 -56t-136 56t-56 136zM512 32v192 q0 13 9.5 22.5t22.5 9.5h1216q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1216q-13 0 -22.5 9.5t-9.5 22.5zM512 544v192q0 13 9.5 22.5t22.5 9.5h1216q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1216q-13 0 -22.5 9.5t-9.5 22.5z M512 1056v192q0 13 9.5 22.5t22.5 9.5h1216q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1216q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf0cb;" horiz-adv-x="1792" d="M15 438q0 51 23.5 93t56.5 68t66 47.5t56.5 43.5t23.5 45q0 25 -14.5 38.5t-39.5 13.5q-46 0 -81 -58l-85 59q24 51 71.5 79.5t105.5 28.5q73 0 123 -41.5t50 -112.5q0 -50 -34 -91.5t-75 -64.5t-75.5 -50.5t-35.5 -52.5h127v60h105v-159h-362q-6 36 -6 54zM19 -190 l57 88q49 -45 106 -45q29 0 50.5 14.5t21.5 42.5q0 64 -105 56l-26 56q8 10 32.5 43.5t42.5 54t37 38.5v1q-16 0 -48.5 -1t-48.5 -1v-53h-106v152h333v-88l-95 -115q51 -12 81 -49t30 -88q0 -80 -54.5 -126t-135.5 -46q-106 0 -172 66zM34 1400l136 127h106v-404h108v-99 h-335v99h107q0 41 0.5 122t0.5 121v12h-2q-8 -17 -50 -54zM512 32v192q0 14 9 23t23 9h1216q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1216q-13 0 -22.5 9.5t-9.5 22.5zM512 544v192q0 14 9 23t23 9h1216q13 0 22.5 -9.5t9.5 -22.5v-192 q0 -13 -9.5 -22.5t-22.5 -9.5h-1216q-13 0 -22.5 9.5t-9.5 22.5zM512 1056v192q0 13 9.5 22.5t22.5 9.5h1216q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1216q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf0cc;" horiz-adv-x="1792" d="M0 544v64q0 14 9 23t23 9h1728q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-1728q-14 0 -23 9t-9 23zM384 972q0 181 134 309q133 127 393 127q50 0 167 -19q66 -12 177 -48q10 -38 21 -118q14 -123 14 -183q0 -18 -5 -45l-12 -3l-84 6l-14 2q-50 149 -103 205 q-88 91 -210 91q-114 0 -182 -59q-67 -58 -67 -146q0 -73 66 -140t279 -129q69 -20 173 -66q58 -28 95 -52h-743q-28 35 -51 80q-48 97 -48 188zM414 154q-1 30 0 68l2 37v44l102 2q15 -34 30 -71t22.5 -56t12.5 -27q35 -57 80 -94q43 -36 105 -57q59 -22 132 -22 q64 0 139 27q77 26 122 86q47 61 47 129q0 84 -81 157q-34 29 -137 71h411q7 -39 7 -92q0 -111 -41 -212q-23 -55 -71 -104q-37 -35 -109 -81q-80 -48 -153 -66q-80 -21 -203 -21q-114 0 -195 23l-140 40q-57 16 -72 28q-8 8 -8 22v13q0 108 -2 156z" />
+<glyph unicode="&#xf0cd;" d="M0 -32v-64q0 -14 9 -23t23 -9h1472q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-1472q-14 0 -23 -9t-9 -23zM0 1405q13 1 40 1q60 0 112 -4q132 -7 166 -7q86 0 168 3q116 4 146 5q56 0 86 2l-1 -14l2 -64v-9q-60 -9 -124 -9q-60 0 -79 -25q-13 -14 -13 -132q0 -13 0.5 -32.5 t0.5 -25.5l1 -229l14 -280q6 -124 51 -202q35 -59 96 -92q88 -47 177 -47q104 0 191 28q56 18 99 51q48 36 65 64q36 56 53 114q21 73 21 229q0 79 -3.5 128t-11 122.5t-13.5 159.5l-4 59q-5 67 -24 88q-34 35 -77 34l-100 -2l-14 3l2 86h84l205 -10q76 -3 196 10l18 -2 q6 -38 6 -51q0 -7 -4 -31q-45 -12 -84 -13q-73 -11 -79 -17q-15 -15 -15 -41q0 -7 1.5 -27t1.5 -31q8 -19 22 -396q6 -195 -15 -304q-15 -76 -41 -122q-38 -65 -112 -123q-75 -57 -182 -89q-109 -33 -255 -33q-167 0 -284 46q-119 47 -179 122q-61 76 -83 195 q-16 80 -16 237v333q0 188 -17 213q-25 36 -147 39q-37 2 -45 4z" />
+<glyph unicode="&#xf0ce;" horiz-adv-x="1664" d="M0 160v1088q0 66 47 113t113 47h1344q66 0 113 -47t47 -113v-1088q0 -66 -47 -113t-113 -47h-1344q-66 0 -113 47t-47 113zM128 160q0 -14 9 -23t23 -9h320q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192zM128 544q0 -14 9 -23t23 -9h320 q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192zM128 928q0 -14 9 -23t23 -9h320q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192zM640 160q0 -14 9 -23t23 -9h320q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9 t-9 -23v-192zM640 544q0 -14 9 -23t23 -9h320q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192zM640 928q0 -14 9 -23t23 -9h320q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192zM1152 160q0 -14 9 -23t23 -9h320q14 0 23 9t9 23 v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192zM1152 544q0 -14 9 -23t23 -9h320q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192zM1152 928q0 -14 9 -23t23 -9h320q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192z" />
+<glyph unicode="&#xf0d0;" horiz-adv-x="1664" d="M27 160q0 27 18 45l1286 1286q18 18 45 18t45 -18l198 -198q18 -18 18 -45t-18 -45l-1286 -1286q-18 -18 -45 -18t-45 18l-198 198q-18 18 -18 45zM128 1408l98 30l30 98l30 -98l98 -30l-98 -30l-30 -98l-30 98zM320 1216l196 60l60 196l60 -196l196 -60l-196 -60 l-60 -196l-60 196zM768 1408l98 30l30 98l30 -98l98 -30l-98 -30l-30 -98l-30 98zM1083 1062l107 -107l293 293l-107 107zM1408 768l98 30l30 98l30 -98l98 -30l-98 -30l-30 -98l-30 98z" />
+<glyph unicode="&#xf0d1;" horiz-adv-x="1792" d="M64 192q0 26 19 45t45 19v320q0 8 -0.5 35t0 38t2.5 34.5t6.5 37t14 30.5t22.5 30l198 198q19 19 50.5 32t58.5 13h160v192q0 26 19 45t45 19h1024q26 0 45 -19t19 -45v-1024q0 -15 -4 -26.5t-13.5 -18.5t-16.5 -11.5t-23.5 -6t-22.5 -2t-25.5 0t-22.5 0.5 q0 -106 -75 -181t-181 -75t-181 75t-75 181h-384q0 -106 -75 -181t-181 -75t-181 75t-75 181h-64q-3 0 -22.5 -0.5t-25.5 0t-22.5 2t-23.5 6t-16.5 11.5t-13.5 18.5t-4 26.5zM256 640h384v256h-158q-13 0 -22 -9l-195 -195q-9 -9 -9 -22v-30zM384 128q0 -52 38 -90t90 -38 t90 38t38 90t-38 90t-90 38t-90 -38t-38 -90zM1280 128q0 -52 38 -90t90 -38t90 38t38 90t-38 90t-90 38t-90 -38t-38 -90z" />
+<glyph unicode="&#xf0d2;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103q-111 0 -218 32q59 93 78 164q9 34 54 211q20 -39 73 -67.5t114 -28.5q121 0 216 68.5t147 188.5t52 270q0 114 -59.5 214t-172.5 163t-255 63 q-105 0 -196 -29t-154.5 -77t-109 -110.5t-67 -129.5t-21.5 -134q0 -104 40 -183t117 -111q30 -12 38 20q2 7 8 31t8 30q6 23 -11 43q-51 61 -51 151q0 151 104.5 259.5t273.5 108.5q151 0 235.5 -82t84.5 -213q0 -170 -68.5 -289t-175.5 -119q-61 0 -98 43.5t-23 104.5 q8 35 26.5 93.5t30 103t11.5 75.5q0 50 -27 83t-77 33q-62 0 -105 -57t-43 -142q0 -73 25 -122l-99 -418q-17 -70 -13 -177q-206 91 -333 281t-127 423z" />
+<glyph unicode="&#xf0d3;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-725q85 122 108 210q9 34 53 209q21 -39 73.5 -67t112.5 -28q181 0 295.5 147.5t114.5 373.5q0 84 -35 162.5t-96.5 139t-152.5 97t-197 36.5 q-104 0 -194.5 -28.5t-153 -76.5t-107.5 -109.5t-66.5 -128t-21.5 -132.5q0 -102 39.5 -180t116.5 -110q13 -5 23.5 0t14.5 19q10 44 15 61q6 23 -11 42q-50 62 -50 150q0 150 103.5 256.5t270.5 106.5q149 0 232.5 -81t83.5 -210q0 -168 -67.5 -286t-173.5 -118 q-60 0 -97 43.5t-23 103.5q8 34 26.5 92.5t29.5 102t11 74.5q0 49 -26.5 81.5t-75.5 32.5q-61 0 -103.5 -56.5t-42.5 -139.5q0 -72 24 -121l-98 -414q-24 -100 -7 -254h-183q-119 0 -203.5 84.5t-84.5 203.5z" />
+<glyph unicode="&#xf0d4;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM276 309q0 -43 18.5 -77.5t48.5 -56.5t69 -37t77.5 -21t76.5 -6q60 0 120.5 15.5t113.5 46t86 82.5t33 117 q0 49 -20 89.5t-49 66.5t-58 47.5t-49 44t-20 44.5t15.5 42.5t37.5 39.5t44 42t37.5 59.5t15.5 82.5q0 60 -22.5 99.5t-72.5 90.5h83l88 64h-265q-85 0 -161 -32t-127.5 -98t-51.5 -153q0 -93 64.5 -154.5t158.5 -61.5q22 0 43 3q-13 -29 -13 -54q0 -44 40 -94 q-175 -12 -257 -63q-47 -29 -75.5 -73t-28.5 -95zM395 338q0 46 25 80t65.5 51.5t82 25t84.5 7.5q20 0 31 -2q2 -1 23 -16.5t26 -19t23 -18t24.5 -22t19 -22.5t17 -26t9 -26.5t4.5 -31.5q0 -76 -58.5 -112.5t-139.5 -36.5q-41 0 -80.5 9.5t-75.5 28.5t-58 53t-22 78z M462 969q0 61 32 104t92 43q53 0 93.5 -45t58 -101t17.5 -107q0 -60 -33 -99.5t-92 -39.5q-53 0 -93 42.5t-57.5 96.5t-17.5 106zM960 672h128v-160h64v160h128v64h-128v128h-64v-128h-128v-64z" />
+<glyph unicode="&#xf0d5;" horiz-adv-x="1664" d="M32 182q0 81 44.5 150t118.5 115q131 82 404 100q-32 42 -47.5 74t-15.5 73q0 36 21 85q-46 -4 -68 -4q-148 0 -249.5 96.5t-101.5 244.5q0 82 36 159t99 131q77 66 182.5 98t217.5 32h418l-138 -88h-131q74 -63 112 -133t38 -160q0 -72 -24.5 -129.5t-59 -93t-69.5 -65 t-59.5 -61.5t-24.5 -66q0 -36 32 -70.5t77.5 -68t90.5 -73.5t77 -104t32 -142q0 -90 -48 -173q-72 -122 -211 -179.5t-298 -57.5q-132 0 -246.5 41.5t-171.5 137.5q-37 60 -37 131zM218 228q0 -70 35 -123.5t91.5 -83t119 -44t127.5 -14.5q58 0 111.5 13t99 39t73 73 t27.5 109q0 25 -7 49t-14.5 42t-27 41.5t-29.5 35t-38.5 34.5t-36.5 29t-41.5 30t-36.5 26q-16 2 -48 2q-53 0 -105 -7t-107.5 -25t-97 -46t-68.5 -74.5t-27 -105.5zM324 1222q0 -46 10 -97.5t31.5 -103t52 -92.5t75 -67t96.5 -26q38 0 78 16.5t66 43.5q53 57 53 159 q0 58 -17 125t-48.5 129.5t-84.5 103.5t-117 41q-42 0 -82.5 -19.5t-65.5 -52.5q-47 -59 -47 -160zM1084 731v108h212v217h105v-217h213v-108h-213v-219h-105v219h-212z" />
+<glyph unicode="&#xf0d6;" horiz-adv-x="1920" d="M0 64v1152q0 26 19 45t45 19h1792q26 0 45 -19t19 -45v-1152q0 -26 -19 -45t-45 -19h-1792q-26 0 -45 19t-19 45zM128 384q106 0 181 -75t75 -181h1152q0 106 75 181t181 75v512q-106 0 -181 75t-75 181h-1152q0 -106 -75 -181t-181 -75v-512zM640 640q0 70 21 142 t59.5 134t101.5 101t138 39t138 -39t101.5 -101t59.5 -134t21 -142t-21 -142t-59.5 -134t-101.5 -101t-138 -39t-138 39t-101.5 101t-59.5 134t-21 142zM762 791l77 -80q42 37 55 57h2v-288h-128v-96h384v96h-128v448h-114z" />
+<glyph unicode="&#xf0d7;" horiz-adv-x="1024" d="M0 832q0 26 19 45t45 19h896q26 0 45 -19t19 -45t-19 -45l-448 -448q-19 -19 -45 -19t-45 19l-448 448q-19 19 -19 45z" />
+<glyph unicode="&#xf0d8;" horiz-adv-x="1024" d="M0 320q0 26 19 45l448 448q19 19 45 19t45 -19l448 -448q19 -19 19 -45t-19 -45t-45 -19h-896q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf0d9;" horiz-adv-x="640" d="M64 640q0 26 19 45l448 448q19 19 45 19t45 -19t19 -45v-896q0 -26 -19 -45t-45 -19t-45 19l-448 448q-19 19 -19 45z" />
+<glyph unicode="&#xf0da;" horiz-adv-x="640" d="M0 192v896q0 26 19 45t45 19t45 -19l448 -448q19 -19 19 -45t-19 -45l-448 -448q-19 -19 -45 -19t-45 19t-19 45z" />
+<glyph unicode="&#xf0db;" horiz-adv-x="1664" d="M0 32v1216q0 66 47 113t113 47h1344q66 0 113 -47t47 -113v-1216q0 -66 -47 -113t-113 -47h-1344q-66 0 -113 47t-47 113zM128 32q0 -13 9.5 -22.5t22.5 -9.5h608v1152h-640v-1120zM896 0h608q13 0 22.5 9.5t9.5 22.5v1120h-640v-1152z" />
+<glyph unicode="&#xf0dc;" horiz-adv-x="1024" d="M0 448q0 26 19 45t45 19h896q26 0 45 -19t19 -45t-19 -45l-448 -448q-19 -19 -45 -19t-45 19l-448 448q-19 19 -19 45zM0 832q0 26 19 45l448 448q19 19 45 19t45 -19l448 -448q19 -19 19 -45t-19 -45t-45 -19h-896q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf0dd;" horiz-adv-x="1024" d="M0 448q0 26 19 45t45 19h896q26 0 45 -19t19 -45t-19 -45l-448 -448q-19 -19 -45 -19t-45 19l-448 448q-19 19 -19 45z" />
+<glyph unicode="&#xf0de;" horiz-adv-x="1024" d="M0 832q0 26 19 45l448 448q19 19 45 19t45 -19l448 -448q19 -19 19 -45t-19 -45t-45 -19h-896q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf0e0;" horiz-adv-x="1792" d="M0 32v794q44 -49 101 -87q362 -246 497 -345q57 -42 92.5 -65.5t94.5 -48t110 -24.5h1h1q51 0 110 24.5t94.5 48t92.5 65.5q170 123 498 345q57 39 100 87v-794q0 -66 -47 -113t-113 -47h-1472q-66 0 -113 47t-47 113zM0 1098q0 78 41.5 130t118.5 52h1472 q65 0 112.5 -47t47.5 -113q0 -79 -49 -151t-122 -123q-376 -261 -468 -325q-10 -7 -42.5 -30.5t-54 -38t-52 -32.5t-57.5 -27t-50 -9h-1h-1q-23 0 -50 9t-57.5 27t-52 32.5t-54 38t-42.5 30.5q-91 64 -262 182.5t-205 142.5q-62 42 -117 115.5t-55 136.5z" />
+<glyph unicode="&#xf0e1;" d="M0 1217q0 74 51.5 122.5t134.5 48.5t133 -48.5t51 -122.5q1 -73 -50.5 -122t-135.5 -49h-2q-82 0 -132 49t-50 122zM19 -80v991h330v-991h-330zM531 -80q2 399 2 647t-1 296l-1 48h329v-144h-2q20 32 41 56t56.5 52t87 43.5t114.5 15.5q171 0 275 -113.5t104 -332.5v-568 h-329v530q0 105 -40.5 164.5t-126.5 59.5q-63 0 -105.5 -34.5t-63.5 -85.5q-11 -30 -11 -81v-553h-329z" />
+<glyph unicode="&#xf0e2;" d="M0 832v448q0 42 40 59q39 17 69 -14l130 -129q107 101 244.5 156.5t284.5 55.5q156 0 298 -61t245 -164t164 -245t61 -298t-61 -298t-164 -245t-245 -164t-298 -61q-172 0 -327 72.5t-264 204.5q-7 10 -6.5 22.5t8.5 20.5l137 138q10 9 25 9q16 -2 23 -12 q73 -95 179 -147t225 -52q104 0 198.5 40.5t163.5 109.5t109.5 163.5t40.5 198.5t-40.5 198.5t-109.5 163.5t-163.5 109.5t-198.5 40.5q-98 0 -188 -35.5t-160 -101.5l137 -138q31 -30 14 -69q-17 -40 -59 -40h-448q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf0e3;" horiz-adv-x="1792" d="M40 736q0 13 4.5 26t9 22t15.5 22t16.5 18.5t20.5 19t18 16.5q30 28 68 28q10 0 18 -1.5t16.5 -5.5t13.5 -6t13.5 -10t11.5 -10t13 -12.5t12 -12.5q-14 14 -14 34t14 34l348 348q14 14 34 14t34 -14q-2 2 -12.5 12t-12.5 13t-10 11.5t-10 13.5t-6 13.5t-5.5 16.5t-1.5 18 q0 38 28 68q3 3 16.5 18t19 20.5t18.5 16.5t22 15.5t22 9t26 4.5q40 0 68 -28l408 -408q28 -28 28 -68q0 -13 -4.5 -26t-9 -22t-15.5 -22t-16.5 -18.5t-20.5 -19t-18 -16.5q-30 -28 -68 -28q-10 0 -18 1.5t-16.5 5.5t-13.5 6t-13.5 10t-11.5 10t-13 12.5t-12 12.5 q14 -14 14 -34t-14 -34l-126 -126l256 -256q43 43 96 43q52 0 91 -37l363 -363q37 -39 37 -91q0 -53 -37 -90l-107 -108q-39 -37 -91 -37q-53 0 -90 37l-363 364q-38 36 -38 90q0 53 43 96l-256 256l-126 -126q-14 -14 -34 -14t-34 14q2 -2 12.5 -12t12.5 -13t10 -11.5 t10 -13.5t6 -13.5t5.5 -16.5t1.5 -18q0 -38 -28 -68q-3 -3 -16.5 -18t-19 -20.5t-18.5 -16.5t-22 -15.5t-22 -9t-26 -4.5q-40 0 -68 28l-408 408q-28 28 -28 68z" />
+<glyph unicode="&#xf0e4;" horiz-adv-x="1792" d="M0 384q0 182 71 348t191 286t286 191t348 71t348 -71t286 -191t191 -286t71 -348q0 -261 -141 -483q-19 -29 -54 -29h-1402q-35 0 -54 29q-141 221 -141 483zM128 384q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5z M320 832q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5zM710 241q-20 -77 20 -146t117 -89t146 20t89 117q16 60 -6 117t-72 91l101 382q6 26 -7.5 48.5t-38.5 29.5t-48 -6.5t-30 -39.5l-101 -382q-60 -5 -107 -43.5 t-63 -98.5zM768 1024q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5zM1216 832q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5zM1408 384q0 -53 37.5 -90.5 t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5z" />
+<glyph unicode="&#xf0e5;" horiz-adv-x="1792" d="M0 640q0 174 120 321.5t326 233t450 85.5t450 -85.5t326 -233t120 -321.5t-120 -321.5t-326 -233t-450 -85.5q-70 0 -145 8q-198 -175 -460 -242q-49 -14 -114 -22h-5q-15 0 -27 10.5t-16 27.5v1q-3 4 -0.5 12t2 10t4.5 9.5l6 9t7 8.5t8 9q7 8 31 34.5t34.5 38t31 39.5 t32.5 51t27 59t26 76q-157 89 -247.5 220t-90.5 281zM128 640q0 -112 71.5 -213.5t201.5 -175.5l87 -50l-27 -96q-24 -91 -70 -172q152 63 275 171l43 38l57 -6q69 -8 130 -8q204 0 381.5 69.5t282 187.5t104.5 255t-104.5 255t-282 187.5t-381.5 69.5t-381.5 -69.5 t-282 -187.5t-104.5 -255z" />
+<glyph unicode="&#xf0e6;" horiz-adv-x="1792" d="M0 768q0 139 94 257t256.5 186.5t353.5 68.5t353.5 -68.5t256.5 -186.5t94 -257t-94 -257t-256.5 -186.5t-353.5 -68.5q-86 0 -176 16q-124 -88 -278 -128q-36 -9 -86 -16h-3q-11 0 -20.5 8t-11.5 21q-1 3 -1 6.5t0.5 6.5t2 6l2.5 5t3.5 5.5t4 5t4.5 5t4 4.5q5 6 23 25 t26 29.5t22.5 29t25 38.5t20.5 44q-124 72 -195 177t-71 224zM128 768q0 -82 53 -158t149 -132l97 -56l-35 -84q34 20 62 39l44 31l53 -10q78 -14 153 -14q153 0 286 52t211.5 141t78.5 191t-78.5 191t-211.5 141t-286 52t-286 -52t-211.5 -141t-78.5 -191zM616 132 q58 -4 88 -4q161 0 309 45t264 129q125 92 192 212t67 254q0 77 -23 152q129 -71 204 -178t75 -230q0 -120 -71 -224.5t-195 -176.5q10 -24 20.5 -44t25 -38.5t22.5 -29t26 -29.5t23 -25q1 -1 4 -4.5t4.5 -5t4 -5t3.5 -5.5l2.5 -5t2 -6t0.5 -6.5t-1 -6.5q-3 -14 -13 -22 t-22 -7q-50 7 -86 16q-154 40 -278 128q-90 -16 -176 -16q-271 0 -472 132z" />
+<glyph unicode="&#xf0e7;" horiz-adv-x="896" d="M1 551l201 825q4 14 16 23t28 9h328q19 0 32 -12.5t13 -29.5q0 -8 -5 -18l-171 -463l396 98q8 2 12 2q19 0 34 -15q18 -20 7 -44l-540 -1157q-13 -25 -42 -25q-4 0 -14 2q-17 5 -25.5 19t-4.5 30l197 808l-406 -101q-4 -1 -12 -1q-18 0 -31 11q-18 15 -13 39z" />
+<glyph unicode="&#xf0e8;" horiz-adv-x="1792" d="M0 -32v320q0 40 28 68t68 28h96v192q0 52 38 90t90 38h512v192h-96q-40 0 -68 28t-28 68v320q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-320q0 -40 -28 -68t-68 -28h-96v-192h512q52 0 90 -38t38 -90v-192h96q40 0 68 -28t28 -68v-320q0 -40 -28 -68t-68 -28h-320 q-40 0 -68 28t-28 68v320q0 40 28 68t68 28h96v192h-512v-192h96q40 0 68 -28t28 -68v-320q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v320q0 40 28 68t68 28h96v192h-512v-192h96q40 0 68 -28t28 -68v-320q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68z" />
+<glyph unicode="&#xf0e9;" horiz-adv-x="1664" d="M0 681q0 5 1 7q45 183 172.5 319.5t298 204.5t360.5 68q140 0 274.5 -40t246.5 -113.5t194.5 -187t115.5 -251.5q1 -2 1 -7q0 -13 -9.5 -22.5t-22.5 -9.5q-11 0 -23 10q-49 46 -93 69t-102 23q-68 0 -128 -37t-103 -97q-7 -10 -17.5 -28t-14.5 -24q-11 -17 -28 -17 q-18 0 -29 17q-4 6 -14.5 24t-17.5 28q-43 60 -102.5 97t-127.5 37t-127.5 -37t-102.5 -97q-7 -10 -17.5 -28t-14.5 -24q-11 -17 -29 -17q-17 0 -28 17q-4 6 -14.5 24t-17.5 28q-43 60 -103 97t-128 37q-58 0 -102 -23t-93 -69q-12 -10 -23 -10q-13 0 -22.5 9.5t-9.5 22.5z M384 128q0 26 19 45t45 19t45 -19t19 -45q0 -50 39 -89t89 -39t89 39t39 89v580q33 11 64 11t64 -11v-580q0 -104 -76 -180t-180 -76t-180 76t-76 180zM768 1310v98q0 26 19 45t45 19t45 -19t19 -45v-98q-42 2 -64 2t-64 -2z" />
+<glyph unicode="&#xf0ea;" horiz-adv-x="1792" d="M0 96v1344q0 40 28 68t68 28h1088q40 0 68 -28t28 -68v-328q21 -13 36 -28l408 -408q28 -28 48 -76t20 -88v-672q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68v160h-544q-40 0 -68 28t-28 68zM256 1312q0 -13 9.5 -22.5t22.5 -9.5h704q13 0 22.5 9.5t9.5 22.5v64 q0 13 -9.5 22.5t-22.5 9.5h-704q-13 0 -22.5 -9.5t-9.5 -22.5v-64zM768 -128h896v640h-416q-40 0 -68 28t-28 68v416h-384v-1152zM1280 640h299l-299 299v-299z" />
+<glyph unicode="&#xf0eb;" horiz-adv-x="1024" d="M0 960q0 99 44.5 184.5t117 142t164 89t186.5 32.5t186.5 -32.5t164 -89t117 -142t44.5 -184.5q0 -155 -103 -268q-45 -49 -74.5 -87t-59.5 -95.5t-34 -107.5q47 -28 47 -82q0 -37 -25 -64q25 -27 25 -64q0 -52 -45 -81q13 -23 13 -47q0 -46 -31.5 -71t-77.5 -25 q-20 -44 -60 -70t-87 -26t-87 26t-60 70q-46 0 -77.5 25t-31.5 71q0 24 13 47q-45 29 -45 81q0 37 25 64q-25 27 -25 64q0 54 47 82q-4 50 -34 107.5t-59.5 95.5t-74.5 87q-103 113 -103 268zM128 960q0 -101 68 -180q10 -11 30.5 -33t30.5 -33q128 -153 141 -298h228 q13 145 141 298q10 11 30.5 33t30.5 33q68 79 68 180q0 72 -34.5 134t-90 101.5t-123 62t-136.5 22.5t-136.5 -22.5t-123 -62t-90 -101.5t-34.5 -134zM480 1088q0 13 9.5 22.5t22.5 9.5q50 0 99.5 -16t87 -54t37.5 -90q0 -13 -9.5 -22.5t-22.5 -9.5t-22.5 9.5t-9.5 22.5 q0 46 -54 71t-106 25q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf0ec;" horiz-adv-x="1792" d="M0 256q0 14 9 23l320 320q9 9 23 9q13 0 22.5 -9.5t9.5 -22.5v-192h1376q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1376v-192q0 -13 -9.5 -22.5t-22.5 -9.5q-12 0 -24 10l-319 320q-9 9 -9 22zM0 800v192q0 13 9.5 22.5t22.5 9.5h1376v192q0 14 9 23 t23 9q12 0 24 -10l319 -319q9 -9 9 -23t-9 -23l-320 -320q-9 -9 -23 -9q-13 0 -22.5 9.5t-9.5 22.5v192h-1376q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf0ed;" horiz-adv-x="1920" d="M0 448q0 130 70 240t188 165q-2 30 -2 43q0 212 150 362t362 150q156 0 285.5 -87t188.5 -231q71 62 166 62q106 0 181 -75t75 -181q0 -76 -41 -138q130 -31 213.5 -135.5t83.5 -238.5q0 -159 -112.5 -271.5t-271.5 -112.5h-1088q-185 0 -316.5 131.5t-131.5 316.5z M512 608q0 -14 9 -23l352 -352q9 -9 23 -9t23 9l351 351q10 12 10 24q0 14 -9 23t-23 9h-224v352q0 13 -9.5 22.5t-22.5 9.5h-192q-13 0 -22.5 -9.5t-9.5 -22.5v-352h-224q-13 0 -22.5 -9.5t-9.5 -22.5z" />
+<glyph unicode="&#xf0ee;" horiz-adv-x="1920" d="M0 448q0 130 70 240t188 165q-2 30 -2 43q0 212 150 362t362 150q156 0 285.5 -87t188.5 -231q71 62 166 62q106 0 181 -75t75 -181q0 -76 -41 -138q130 -31 213.5 -135.5t83.5 -238.5q0 -159 -112.5 -271.5t-271.5 -112.5h-1088q-185 0 -316.5 131.5t-131.5 316.5z M512 672q0 -14 9 -23t23 -9h224v-352q0 -13 9.5 -22.5t22.5 -9.5h192q13 0 22.5 9.5t9.5 22.5v352h224q13 0 22.5 9.5t9.5 22.5q0 14 -9 23l-352 352q-9 9 -23 9t-23 -9l-351 -351q-10 -12 -10 -24z" />
+<glyph unicode="&#xf0f0;" horiz-adv-x="1408" d="M0 131q0 68 5.5 131t24 138t47.5 132.5t81 103t120 60.5q-22 -52 -22 -120v-203q-58 -20 -93 -70t-35 -111q0 -80 56 -136t136 -56t136 56t56 136q0 61 -35.5 111t-92.5 70v203q0 62 25 93q132 -104 295 -104t295 104q25 -31 25 -93v-64q-106 0 -181 -75t-75 -181v-89 q-32 -29 -32 -71q0 -40 28 -68t68 -28t68 28t28 68q0 42 -32 71v89q0 52 38 90t90 38t90 -38t38 -90v-89q-32 -29 -32 -71q0 -40 28 -68t68 -28t68 28t28 68q0 42 -32 71v89q0 68 -34.5 127.5t-93.5 93.5q0 10 0.5 42.5t0 48t-2.5 41.5t-7 47t-13 40q68 -15 120 -60.5 t81 -103t47.5 -132.5t24 -138t5.5 -131q0 -121 -73 -190t-194 -69h-874q-121 0 -194 69t-73 190zM256 192q0 26 19 45t45 19t45 -19t19 -45t-19 -45t-45 -19t-45 19t-19 45zM320 1024q0 159 112.5 271.5t271.5 112.5t271.5 -112.5t112.5 -271.5t-112.5 -271.5t-271.5 -112.5 t-271.5 112.5t-112.5 271.5z" />
+<glyph unicode="&#xf0f1;" horiz-adv-x="1408" d="M0 768v512q0 26 19 45t45 19q6 0 16 -2q17 30 47 48t65 18q53 0 90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5q-33 0 -64 18v-402q0 -106 94 -181t226 -75t226 75t94 181v402q-31 -18 -64 -18q-53 0 -90.5 37.5t-37.5 90.5t37.5 90.5t90.5 37.5q35 0 65 -18t47 -48 q10 2 16 2q26 0 45 -19t19 -45v-512q0 -144 -110 -252t-274 -128v-132q0 -106 94 -181t226 -75t226 75t94 181v395q-57 21 -92.5 70t-35.5 111q0 80 56 136t136 56t136 -56t56 -136q0 -62 -35.5 -111t-92.5 -70v-395q0 -159 -131.5 -271.5t-316.5 -112.5t-316.5 112.5 t-131.5 271.5v132q-164 20 -274 128t-110 252zM1152 832q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf0f2;" horiz-adv-x="1792" d="M0 96v832q0 92 66 158t158 66h64v-1280h-64q-92 0 -158 66t-66 158zM384 -128v1280h128v160q0 40 28 68t68 28h576q40 0 68 -28t28 -68v-160h128v-1280h-1024zM640 1152h512v128h-512v-128zM1504 -128v1280h64q92 0 158 -66t66 -158v-832q0 -92 -66 -158t-158 -66h-64z " />
+<glyph unicode="&#xf0f3;" horiz-adv-x="1664" d="M0 128q190 161 287 397.5t97 498.5q0 165 96 262t264 117q-8 18 -8 37q0 40 28 68t68 28t68 -28t28 -68q0 -19 -8 -37q168 -20 264 -117t96 -262q0 -262 97 -498.5t287 -397.5q0 -52 -38 -90t-90 -38h-448q0 -106 -75 -181t-181 -75t-181 75t-75 181h-448q-52 0 -90 38 t-38 90zM656 0q0 -73 51.5 -124.5t124.5 -51.5q16 0 16 16t-16 16q-59 0 -101.5 42.5t-42.5 101.5q0 16 -16 16t-16 -16z" />
+<glyph unicode="&#xf0f4;" horiz-adv-x="1920" d="M0 128h1792q0 -106 -75 -181t-181 -75h-1280q-106 0 -181 75t-75 181zM256 480v736q0 26 19 45t45 19h1152q159 0 271.5 -112.5t112.5 -271.5t-112.5 -271.5t-271.5 -112.5h-64v-32q0 -92 -66 -158t-158 -66h-704q-92 0 -158 66t-66 158zM1408 704h64q80 0 136 56t56 136 t-56 136t-136 56h-64v-384z" />
+<glyph unicode="&#xf0f5;" horiz-adv-x="1408" d="M0 832v640q0 26 19 45t45 19t45 -19t19 -45v-416q0 -26 19 -45t45 -19t45 19t19 45v416q0 26 19 45t45 19t45 -19t19 -45v-416q0 -26 19 -45t45 -19t45 19t19 45v416q0 26 19 45t45 19t45 -19t19 -45v-640q0 -61 -35.5 -111t-92.5 -70v-779q0 -52 -38 -90t-90 -38h-128 q-52 0 -90 38t-38 90v779q-57 20 -92.5 70t-35.5 111zM768 416v800q0 132 94 226t226 94h256q26 0 45 -19t19 -45v-1600q0 -52 -38 -90t-90 -38h-128q-52 0 -90 38t-38 90v512h-224q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf0f6;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM384 160v64q0 14 9 23t23 9h704q14 0 23 -9t9 -23v-64 q0 -14 -9 -23t-23 -9h-704q-14 0 -23 9t-9 23zM384 416v64q0 14 9 23t23 9h704q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-704q-14 0 -23 9t-9 23zM384 672v64q0 14 9 23t23 9h704q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-704q-14 0 -23 9t-9 23zM1024 1024h376 q-10 29 -22 41l-313 313q-12 12 -41 22v-376z" />
+<glyph unicode="&#xf0f7;" horiz-adv-x="1408" d="M0 -192v1664q0 26 19 45t45 19h1280q26 0 45 -19t19 -45v-1664q0 -26 -19 -45t-45 -19h-1280q-26 0 -45 19t-19 45zM128 -128h384v224q0 13 9.5 22.5t22.5 9.5h320q13 0 22.5 -9.5t9.5 -22.5v-224h384v1536h-1152v-1536zM256 160v64q0 13 9.5 22.5t22.5 9.5h64 q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM256 416v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM256 672v64q0 13 9.5 22.5t22.5 9.5h64 q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM256 928v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM256 1184v64q0 13 9.5 22.5t22.5 9.5h64 q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM512 416v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM512 672v64q0 13 9.5 22.5t22.5 9.5h64 q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM512 928v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM512 1184v64q0 13 9.5 22.5t22.5 9.5h64 q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM768 416v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM768 672v64q0 13 9.5 22.5t22.5 9.5h64 q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM768 928v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM768 1184v64q0 13 9.5 22.5t22.5 9.5h64 q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM1024 160v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 
9.5t-9.5 22.5zM1024 416v64q0 13 9.5 22.5t22.5 9.5h64 q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM1024 672v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM1024 928v64q0 13 9.5 22.5t22.5 9.5h64 q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM1024 1184v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf0f8;" horiz-adv-x="1408" d="M0 -192v1280q0 26 19 45t45 19h320v288q0 40 28 68t68 28h448q40 0 68 -28t28 -68v-288h320q26 0 45 -19t19 -45v-1280q0 -26 -19 -45t-45 -19h-1280q-26 0 -45 19t-19 45zM128 -128h384v224q0 13 9.5 22.5t22.5 9.5h320q13 0 22.5 -9.5t9.5 -22.5v-224h384v1152h-256 v-32q0 -40 -28 -68t-68 -28h-448q-40 0 -68 28t-28 68v32h-256v-1152zM256 160v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM256 416v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5 v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM256 672v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM512 416v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64 q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM512 672v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM512 1056q0 -13 9.5 -22.5t22.5 -9.5h64q13 0 22.5 9.5t9.5 22.5v96h128 v-96q0 -13 9.5 -22.5t22.5 -9.5h64q13 0 22.5 9.5t9.5 22.5v320q0 13 -9.5 22.5t-22.5 9.5h-64q-13 0 -22.5 -9.5t-9.5 -22.5v-96h-128v96q0 13 -9.5 22.5t-22.5 9.5h-64q-13 0 -22.5 -9.5t-9.5 -22.5v-320zM768 416v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5 v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM768 672v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM1024 160v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5 v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM1024 416v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM1024 672v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5 v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf0f9;" horiz-adv-x="1920" d="M64 192q0 26 19 45t45 19v416q0 26 13 58t32 51l198 198q19 19 51 32t58 13h160v320q0 26 19 45t45 19h1152q26 0 45 -19t19 -45v-1152q0 -26 -19 -45t-45 -19h-192q0 -106 -75 -181t-181 -75t-181 75t-75 181h-384q0 -106 -75 -181t-181 -75t-181 75t-75 181h-128 q-26 0 -45 19t-19 45zM256 640h384v256h-158q-14 -2 -22 -9l-195 -195q-7 -12 -9 -22v-30zM384 128q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5zM896 800q0 -14 9 -23t23 -9h224v-224q0 -14 9 -23t23 -9h192 q14 0 23 9t9 23v224h224q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-224v224q0 14 -9 23t-23 9h-192q-14 0 -23 -9t-9 -23v-224h-224q-14 0 -23 -9t-9 -23v-192zM1280 128q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5z" />
+<glyph unicode="&#xf0fa;" horiz-adv-x="1792" d="M0 96v832q0 92 66 158t158 66h32v-1280h-32q-92 0 -158 66t-66 158zM352 -128v1280h160v160q0 40 28 68t68 28h576q40 0 68 -28t28 -68v-160h160v-1280h-1088zM512 416q0 -14 9 -23t23 -9h224v-224q0 -14 9 -23t23 -9h192q14 0 23 9t9 23v224h224q14 0 23 9t9 23v192 q0 14 -9 23t-23 9h-224v224q0 14 -9 23t-23 9h-192q-14 0 -23 -9t-9 -23v-224h-224q-14 0 -23 -9t-9 -23v-192zM640 1152h512v128h-512v-128zM1536 -128v1280h32q92 0 158 -66t66 -158v-832q0 -92 -66 -158t-158 -66h-32z" />
+<glyph unicode="&#xf0fb;" horiz-adv-x="1920" d="M0 512v128l192 24v8h-128v32h-32v192l32 32h96l192 -224h160v416h-64v32h64h160h96q26 0 45 -4.5t19 -11.5t-19 -11.5t-45 -4.5h-69l293 -352h64l224 -64l352 -32q261 -58 287 -93l1 -3q-1 -32 -288 -96l-352 -32l-224 -64h-64l-293 -352h69q26 0 45 -4.5t19 -11.5 t-19 -11.5t-45 -4.5h-96h-160h-64v32h64v416h-160l-192 -224h-96l-32 32v192h32v32h128v8z" />
+<glyph unicode="&#xf0fc;" horiz-adv-x="1664" d="M64 1152l32 128h480l32 128h960l32 -192l-64 -32v-800l128 -192v-192h-1152v192l128 192h-128q-159 0 -271.5 112.5t-112.5 271.5v320zM384 768q0 -53 37.5 -90.5t90.5 -37.5h128v384h-256v-256z" />
+<glyph unicode="&#xf0fd;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 192q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v320h512v-320q0 -26 19 -45t45 -19h128q26 0 45 19t19 45 v896q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-320h-512v320q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-896z" />
+<glyph unicode="&#xf0fe;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 576q0 -26 19 -45t45 -19h320v-320q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v320h320q26 0 45 19t19 45 v128q0 26 -19 45t-45 19h-320v320q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-320h-320q-26 0 -45 -19t-19 -45v-128z" />
+<glyph unicode="&#xf100;" horiz-adv-x="1024" d="M45 576q0 13 10 23l466 466q10 10 23 10t23 -10l50 -50q10 -10 10 -23t-10 -23l-393 -393l393 -393q10 -10 10 -23t-10 -23l-50 -50q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23zM429 576q0 13 10 23l466 466q10 10 23 10t23 -10l50 -50q10 -10 10 -23t-10 -23 l-393 -393l393 -393q10 -10 10 -23t-10 -23l-50 -50q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23z" />
+<glyph unicode="&#xf101;" horiz-adv-x="1024" d="M13 160q0 13 10 23l393 393l-393 393q-10 10 -10 23t10 23l50 50q10 10 23 10t23 -10l466 -466q10 -10 10 -23t-10 -23l-466 -466q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23zM397 160q0 13 10 23l393 393l-393 393q-10 10 -10 23t10 23l50 50q10 10 23 10t23 -10 l466 -466q10 -10 10 -23t-10 -23l-466 -466q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23z" />
+<glyph unicode="&#xf102;" horiz-adv-x="1152" d="M77 224q0 13 10 23l466 466q10 10 23 10t23 -10l466 -466q10 -10 10 -23t-10 -23l-50 -50q-10 -10 -23 -10t-23 10l-393 393l-393 -393q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23zM77 608q0 13 10 23l466 466q10 10 23 10t23 -10l466 -466q10 -10 10 -23t-10 -23 l-50 -50q-10 -10 -23 -10t-23 10l-393 393l-393 -393q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23z" />
+<glyph unicode="&#xf103;" horiz-adv-x="1152" d="M77 672q0 13 10 23l50 50q10 10 23 10t23 -10l393 -393l393 393q10 10 23 10t23 -10l50 -50q10 -10 10 -23t-10 -23l-466 -466q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23zM77 1056q0 13 10 23l50 50q10 10 23 10t23 -10l393 -393l393 393q10 10 23 10t23 -10 l50 -50q10 -10 10 -23t-10 -23l-466 -466q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23z" />
+<glyph unicode="&#xf104;" horiz-adv-x="640" d="M45 576q0 13 10 23l466 466q10 10 23 10t23 -10l50 -50q10 -10 10 -23t-10 -23l-393 -393l393 -393q10 -10 10 -23t-10 -23l-50 -50q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23z" />
+<glyph unicode="&#xf105;" horiz-adv-x="640" d="M13 160q0 13 10 23l393 393l-393 393q-10 10 -10 23t10 23l50 50q10 10 23 10t23 -10l466 -466q10 -10 10 -23t-10 -23l-466 -466q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23z" />
+<glyph unicode="&#xf106;" horiz-adv-x="1152" d="M77 352q0 13 10 23l466 466q10 10 23 10t23 -10l466 -466q10 -10 10 -23t-10 -23l-50 -50q-10 -10 -23 -10t-23 10l-393 393l-393 -393q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23z" />
+<glyph unicode="&#xf107;" horiz-adv-x="1152" d="M77 800q0 13 10 23l50 50q10 10 23 10t23 -10l393 -393l393 393q10 10 23 10t23 -10l50 -50q10 -10 10 -23t-10 -23l-466 -466q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23z" />
+<glyph unicode="&#xf108;" horiz-adv-x="1920" d="M0 288v1088q0 66 47 113t113 47h1600q66 0 113 -47t47 -113v-1088q0 -66 -47 -113t-113 -47h-544q0 -37 16 -77.5t32 -71t16 -43.5q0 -26 -19 -45t-45 -19h-512q-26 0 -45 19t-19 45q0 14 16 44t32 70t16 78h-544q-66 0 -113 47t-47 113zM128 544q0 -13 9.5 -22.5 t22.5 -9.5h1600q13 0 22.5 9.5t9.5 22.5v832q0 13 -9.5 22.5t-22.5 9.5h-1600q-13 0 -22.5 -9.5t-9.5 -22.5v-832z" />
+<glyph unicode="&#xf109;" horiz-adv-x="1920" d="M0 96v96h160h1600h160v-96q0 -40 -47 -68t-113 -28h-1600q-66 0 -113 28t-47 68zM256 416v704q0 66 47 113t113 47h1088q66 0 113 -47t47 -113v-704q0 -66 -47 -113t-113 -47h-1088q-66 0 -113 47t-47 113zM384 416q0 -13 9.5 -22.5t22.5 -9.5h1088q13 0 22.5 9.5 t9.5 22.5v704q0 13 -9.5 22.5t-22.5 9.5h-1088q-13 0 -22.5 -9.5t-9.5 -22.5v-704zM864 112q0 -16 16 -16h160q16 0 16 16t-16 16h-160q-16 0 -16 -16z" />
+<glyph unicode="&#xf10a;" horiz-adv-x="1152" d="M0 160v1088q0 66 47 113t113 47h832q66 0 113 -47t47 -113v-1088q0 -66 -47 -113t-113 -47h-832q-66 0 -113 47t-47 113zM128 288q0 -13 9.5 -22.5t22.5 -9.5h832q13 0 22.5 9.5t9.5 22.5v960q0 13 -9.5 22.5t-22.5 9.5h-832q-13 0 -22.5 -9.5t-9.5 -22.5v-960zM512 128 q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf10b;" horiz-adv-x="768" d="M0 128v1024q0 52 38 90t90 38h512q52 0 90 -38t38 -90v-1024q0 -52 -38 -90t-90 -38h-512q-52 0 -90 38t-38 90zM96 288q0 -13 9.5 -22.5t22.5 -9.5h512q13 0 22.5 9.5t9.5 22.5v704q0 13 -9.5 22.5t-22.5 9.5h-512q-13 0 -22.5 -9.5t-9.5 -22.5v-704zM288 1136 q0 -16 16 -16h160q16 0 16 16t-16 16h-160q-16 0 -16 -16zM304 128q0 -33 23.5 -56.5t56.5 -23.5t56.5 23.5t23.5 56.5t-23.5 56.5t-56.5 23.5t-56.5 -23.5t-23.5 -56.5z" />
+<glyph unicode="&#xf10c;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273z" />
+<glyph unicode="&#xf10d;" horiz-adv-x="1664" d="M0 192v704q0 104 40.5 198.5t109.5 163.5t163.5 109.5t198.5 40.5h64q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-64q-106 0 -181 -75t-75 -181v-32q0 -40 28 -68t68 -28h224q80 0 136 -56t56 -136v-384q0 -80 -56 -136t-136 -56h-384q-80 0 -136 56t-56 136z M896 192v704q0 104 40.5 198.5t109.5 163.5t163.5 109.5t198.5 40.5h64q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-64q-106 0 -181 -75t-75 -181v-32q0 -40 28 -68t68 -28h224q80 0 136 -56t56 -136v-384q0 -80 -56 -136t-136 -56h-384q-80 0 -136 56t-56 136z" />
+<glyph unicode="&#xf10e;" horiz-adv-x="1664" d="M0 832v384q0 80 56 136t136 56h384q80 0 136 -56t56 -136v-704q0 -104 -40.5 -198.5t-109.5 -163.5t-163.5 -109.5t-198.5 -40.5h-64q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h64q106 0 181 75t75 181v32q0 40 -28 68t-68 28h-224q-80 0 -136 56t-56 136zM896 832v384 q0 80 56 136t136 56h384q80 0 136 -56t56 -136v-704q0 -104 -40.5 -198.5t-109.5 -163.5t-163.5 -109.5t-198.5 -40.5h-64q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h64q106 0 181 75t75 181v32q0 40 -28 68t-68 28h-224q-80 0 -136 56t-56 136z" />
+<glyph unicode="&#xf110;" horiz-adv-x="1568" d="M0 640q0 66 47 113t113 47t113 -47t47 -113t-47 -113t-113 -47t-113 47t-47 113zM176 1088q0 73 51.5 124.5t124.5 51.5t124.5 -51.5t51.5 -124.5t-51.5 -124.5t-124.5 -51.5t-124.5 51.5t-51.5 124.5zM208 192q0 60 42 102t102 42q59 0 101.5 -42t42.5 -102t-42.5 -102 t-101.5 -42q-60 0 -102 42t-42 102zM608 1280q0 80 56 136t136 56t136 -56t56 -136t-56 -136t-136 -56t-136 56t-56 136zM672 0q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5zM1136 192q0 46 33 79t79 33t79 -33t33 -79 t-33 -79t-79 -33t-79 33t-33 79zM1168 1088q0 33 23.5 56.5t56.5 23.5t56.5 -23.5t23.5 -56.5t-23.5 -56.5t-56.5 -23.5t-56.5 23.5t-23.5 56.5zM1344 640q0 40 28 68t68 28t68 -28t28 -68t-28 -68t-68 -28t-68 28t-28 68z" />
+<glyph unicode="&#xf111;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5z" />
+<glyph unicode="&#xf112;" horiz-adv-x="1792" d="M0 896q0 26 19 45l512 512q19 19 45 19t45 -19t19 -45v-256h224q713 0 875 -403q53 -134 53 -333q0 -166 -127 -451q-3 -7 -10.5 -24t-13.5 -30t-13 -22q-12 -17 -28 -17q-15 0 -23.5 10t-8.5 25q0 9 2.5 26.5t2.5 23.5q5 68 5 123q0 101 -17.5 181t-48.5 138.5t-80 101 t-105.5 69.5t-133 42.5t-154 21.5t-175.5 6h-224v-256q0 -26 -19 -45t-45 -19t-45 19l-512 512q-19 19 -19 45z" />
+<glyph unicode="&#xf113;" horiz-adv-x="1664" d="M0 496q0 237 136 396q-27 82 -27 170q0 116 51 218q108 0 190 -39.5t189 -123.5q147 35 309 35q148 0 280 -32q105 82 187 121t189 39q51 -102 51 -218q0 -87 -27 -168q136 -160 136 -398q0 -207 -61 -331q-38 -77 -105.5 -133t-141 -86t-170 -47.5t-171.5 -22t-167 -4.5 q-78 0 -142 3t-147.5 12.5t-152.5 30t-137 51.5t-121 81t-86 115q-62 123 -62 331zM224 320q0 -88 32 -153.5t81 -103t122 -60t140 -29.5t149 -7h168q82 0 149 7t140 29.5t122 60t81 103t32 153.5q0 120 -69 204t-187 84q-41 0 -195 -21q-71 -11 -157 -11t-157 11 q-152 21 -195 21q-118 0 -187 -84t-69 -204zM384 320q0 40 12.5 82t43 76t72.5 34t72.5 -34t43 -76t12.5 -82t-12.5 -82t-43 -76t-72.5 -34t-72.5 34t-43 76t-12.5 82zM1024 320q0 40 12.5 82t43 76t72.5 34t72.5 -34t43 -76t12.5 -82t-12.5 -82t-43 -76t-72.5 -34t-72.5 34 t-43 76t-12.5 82z" />
+<glyph unicode="&#xf114;" horiz-adv-x="1664" d="M0 224v960q0 92 66 158t158 66h320q92 0 158 -66t66 -158v-32h672q92 0 158 -66t66 -158v-704q0 -92 -66 -158t-158 -66h-1216q-92 0 -158 66t-66 158zM128 224q0 -40 28 -68t68 -28h1216q40 0 68 28t28 68v704q0 40 -28 68t-68 28h-704q-40 0 -68 28t-28 68v64 q0 40 -28 68t-68 28h-320q-40 0 -68 -28t-28 -68v-960z" />
+<glyph unicode="&#xf115;" horiz-adv-x="1920" d="M0 224v960q0 92 66 158t158 66h320q92 0 158 -66t66 -158v-32h544q92 0 158 -66t66 -158v-160h192q54 0 99 -24.5t67 -70.5q15 -32 15 -68q0 -62 -46 -120l-295 -363q-43 -53 -116 -87.5t-140 -34.5h-1088q-92 0 -158 66t-66 158zM128 331l256 315q44 53 116 87.5 t140 34.5h768v160q0 40 -28 68t-68 28h-576q-40 0 -68 28t-28 68v64q0 40 -28 68t-68 28h-320q-40 0 -68 -28t-28 -68v-853zM171 163q0 -35 53 -35h1088q40 0 86 22t71 53l294 363q18 22 18 39q0 35 -53 35h-1088q-40 0 -85.5 -21.5t-71.5 -52.5l-294 -363q-18 -24 -18 -40z " />
+<glyph unicode="&#xf116;" horiz-adv-x="1792" />
+<glyph unicode="&#xf117;" horiz-adv-x="1792" />
+<glyph unicode="&#xf118;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM128 640q0 -130 51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5t136.5 204t51 248.5 t-51 248.5t-136.5 204t-204 136.5t-248.5 51t-248.5 -51t-204 -136.5t-136.5 -204t-51 -248.5zM384 896q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5zM402 461q-8 25 4 48.5t38 31.5q25 8 48.5 -4t31.5 -38 q25 -80 92.5 -129.5t151.5 -49.5t151.5 49.5t92.5 129.5q8 26 32 38t49 4t37 -31.5t4 -48.5q-37 -121 -138 -195t-228 -74t-228 74t-138 195zM896 896q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5z" />
+<glyph unicode="&#xf119;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM128 640q0 -130 51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5t136.5 204t51 248.5 t-51 248.5t-136.5 204t-204 136.5t-248.5 51t-248.5 -51t-204 -136.5t-136.5 -204t-51 -248.5zM384 896q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5zM402 307q37 121 138 195t228 74t228 -74t138 -195q8 -25 -4 -48.5 t-37 -31.5t-49 4t-32 38q-25 80 -92.5 129.5t-151.5 49.5t-151.5 -49.5t-92.5 -129.5q-8 -26 -31.5 -38t-48.5 -4q-26 8 -38 31.5t-4 48.5zM896 896q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5z" />
+<glyph unicode="&#xf11a;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM128 640q0 -130 51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5t136.5 204t51 248.5 t-51 248.5t-136.5 204t-204 136.5t-248.5 51t-248.5 -51t-204 -136.5t-136.5 -204t-51 -248.5zM384 448q0 26 19 45t45 19h640q26 0 45 -19t19 -45t-19 -45t-45 -19h-640q-26 0 -45 19t-19 45zM384 896q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5t-37.5 -90.5 t-90.5 -37.5t-90.5 37.5t-37.5 90.5zM896 896q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5z" />
+<glyph unicode="&#xf11b;" horiz-adv-x="1920" d="M0 512q0 212 150 362t362 150h896q212 0 362 -150t150 -362t-150 -362t-362 -150q-192 0 -338 128h-220q-146 -128 -338 -128q-212 0 -362 150t-150 362zM192 448q0 -14 9 -23t23 -9h192v-192q0 -14 9 -23t23 -9h128q14 0 23 9t9 23v192h192q14 0 23 9t9 23v128 q0 14 -9 23t-23 9h-192v192q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-192h-192q-14 0 -23 -9t-9 -23v-128zM1152 384q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5zM1408 640q0 -53 37.5 -90.5t90.5 -37.5 t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5z" />
+<glyph unicode="&#xf11c;" horiz-adv-x="1920" d="M0 128v896q0 53 37.5 90.5t90.5 37.5h1664q53 0 90.5 -37.5t37.5 -90.5v-896q0 -53 -37.5 -90.5t-90.5 -37.5h-1664q-53 0 -90.5 37.5t-37.5 90.5zM128 128h1664v896h-1664v-896zM256 272v96q0 16 16 16h96q16 0 16 -16v-96q0 -16 -16 -16h-96q-16 0 -16 16zM256 528v96 q0 16 16 16h224q16 0 16 -16v-96q0 -16 -16 -16h-224q-16 0 -16 16zM256 784v96q0 16 16 16h96q16 0 16 -16v-96q0 -16 -16 -16h-96q-16 0 -16 16zM512 272v96q0 16 16 16h864q16 0 16 -16v-96q0 -16 -16 -16h-864q-16 0 -16 16zM512 784v96q0 16 16 16h96q16 0 16 -16v-96 q0 -16 -16 -16h-96q-16 0 -16 16zM640 528v96q0 16 16 16h96q16 0 16 -16v-96q0 -16 -16 -16h-96q-16 0 -16 16zM768 784v96q0 16 16 16h96q16 0 16 -16v-96q0 -16 -16 -16h-96q-16 0 -16 16zM896 528v96q0 16 16 16h96q16 0 16 -16v-96q0 -16 -16 -16h-96q-16 0 -16 16z M1024 784v96q0 16 16 16h96q16 0 16 -16v-96q0 -16 -16 -16h-96q-16 0 -16 16zM1152 528v96q0 16 16 16h96q16 0 16 -16v-96q0 -16 -16 -16h-96q-16 0 -16 16zM1280 784v96q0 16 16 16h96q16 0 16 -16v-96q0 -16 -16 -16h-96q-16 0 -16 16zM1408 528v96q0 16 16 16h112v240 q0 16 16 16h96q16 0 16 -16v-352q0 -16 -16 -16h-224q-16 0 -16 16zM1536 272v96q0 16 16 16h96q16 0 16 -16v-96q0 -16 -16 -16h-96q-16 0 -16 16z" />
+<glyph unicode="&#xf11d;" horiz-adv-x="1792" d="M64 1280q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5q0 -35 -17.5 -64t-46.5 -46v-1266q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v1266q-29 17 -46.5 46t-17.5 64zM320 320v742q0 35 31 55q35 21 78.5 42.5t114 52t152.5 49.5t155 19q112 0 209 -31t209 -86 q38 -19 89 -19q122 0 310 112q22 12 31 17q31 16 62 -2q31 -20 31 -55v-763q0 -39 -35 -57q-10 -5 -17 -9q-218 -116 -369 -116q-88 0 -158 35l-28 14q-64 33 -99 48t-91 29t-114 14q-102 0 -235.5 -44t-228.5 -102q-15 -9 -33 -9q-16 0 -32 8q-32 19 -32 56zM448 426 q245 113 433 113q55 0 103.5 -7.5t98 -26t77 -31t82.5 -39.5l28 -14q44 -22 101 -22q120 0 293 92v616q-169 -91 -306 -91q-82 0 -145 32q-100 49 -184 76.5t-178 27.5q-173 0 -403 -127v-599z" />
+<glyph unicode="&#xf11e;" horiz-adv-x="1792" d="M64 1280q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5q0 -35 -17.5 -64t-46.5 -46v-1266q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v1266q-29 17 -46.5 46t-17.5 64zM320 320v742q0 35 31 55q35 21 78.5 42.5t114 52t152.5 49.5t155 19q112 0 209 -31t209 -86 q38 -19 89 -19q122 0 310 112q22 12 31 17q31 16 62 -2q31 -20 31 -55v-763q0 -39 -35 -57q-10 -5 -17 -9q-218 -116 -369 -116q-88 0 -158 35l-28 14q-64 33 -99 48t-91 29t-114 14q-102 0 -235.5 -44t-228.5 -102q-15 -9 -33 -9q-16 0 -32 8q-32 19 -32 56zM448 426 q205 96 384 110v192q-181 -16 -384 -117v-185zM448 836q215 111 384 118v197q-172 -8 -384 -126v-189zM832 730h19q102 0 192.5 -29t197.5 -82q19 -9 39 -15v-188q42 -17 91 -17q120 0 293 92v184q-235 -116 -384 -71v224q-20 6 -39 15q-5 3 -33 17t-34.5 17t-31.5 15 t-34.5 15.5t-32.5 13t-36 12.5t-35 8.5t-39.5 7.5t-39.5 4t-44 2q-23 0 -49 -3v-222zM1280 828q148 -42 384 90v189q-169 -91 -306 -91q-45 0 -78 8v-196z" />
+<glyph unicode="&#xf120;" horiz-adv-x="1664" d="M13 160q0 13 10 23l393 393l-393 393q-10 10 -10 23t10 23l50 50q10 10 23 10t23 -10l466 -466q10 -10 10 -23t-10 -23l-466 -466q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23zM640 32v64q0 14 9 23t23 9h960q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-960 q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf121;" horiz-adv-x="1920" d="M45 576q0 13 10 23l466 466q10 10 23 10t23 -10l50 -50q10 -10 10 -23t-10 -23l-393 -393l393 -393q10 -10 10 -23t-10 -23l-50 -50q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23zM712 -52l373 1291q4 13 15.5 19.5t23.5 2.5l62 -17q13 -4 19.5 -15.5t2.5 -24.5 l-373 -1291q-4 -13 -15.5 -19.5t-23.5 -2.5l-62 17q-13 4 -19.5 15.5t-2.5 24.5zM1293 160q0 13 10 23l393 393l-393 393q-10 10 -10 23t10 23l50 50q10 10 23 10t23 -10l466 -466q10 -10 10 -23t-10 -23l-466 -466q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23z" />
+<glyph unicode="&#xf122;" horiz-adv-x="1792" d="M0 896q0 26 19 45l512 512q29 31 70 14q39 -17 39 -59v-69l-397 -398q-19 -19 -19 -45t19 -45l397 -397v-70q0 -42 -39 -59q-13 -5 -25 -5q-27 0 -45 19l-512 512q-19 19 -19 45zM384 896q0 26 19 45l512 512q29 31 70 14q39 -17 39 -59v-262q411 -28 599 -221 q169 -173 169 -509q0 -58 -17 -133.5t-38.5 -138t-48 -125t-40.5 -90.5l-20 -40q-8 -17 -28 -17q-6 0 -9 1q-25 8 -23 34q43 400 -106 565q-64 71 -170.5 110.5t-267.5 52.5v-251q0 -42 -39 -59q-13 -5 -25 -5q-27 0 -45 19l-512 512q-19 19 -19 45z" />
+<glyph unicode="&#xf123;" horiz-adv-x="1664" d="M2 900.5q9 27.5 54 34.5l502 73l225 455q20 41 49 41q28 0 49 -41l225 -455l502 -73q45 -7 54 -34.5t-24 -59.5l-363 -354l86 -500q5 -33 -6 -51.5t-34 -18.5q-17 0 -40 12l-449 236l-449 -236q-23 -12 -40 -12q-23 0 -34 18.5t-6 51.5l86 500l-364 354q-32 32 -23 59.5z M832 310l59 -31l318 -168l-60 355l-12 66l49 47l257 250l-356 52l-66 10l-30 60l-159 322v-963z" />
+<glyph unicode="&#xf124;" horiz-adv-x="1408" d="M2 561q-5 22 4 42t29 30l1280 640q13 7 29 7q27 0 45 -19q15 -14 18.5 -34.5t-6.5 -39.5l-640 -1280q-17 -35 -57 -35q-5 0 -15 2q-22 5 -35.5 22.5t-13.5 39.5v576h-576q-22 0 -39.5 13.5t-22.5 35.5z" />
+<glyph unicode="&#xf125;" horiz-adv-x="1664" d="M0 928v192q0 14 9 23t23 9h224v224q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-224h851l246 247q10 9 23 9t23 -9q9 -10 9 -23t-9 -23l-247 -246v-851h224q14 0 23 -9t9 -23v-192q0 -14 -9 -23t-23 -9h-224v-224q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v224h-864 q-14 0 -23 9t-9 23v864h-224q-14 0 -23 9t-9 23zM512 301l595 595h-595v-595zM557 256h595v595z" />
+<glyph unicode="&#xf126;" horiz-adv-x="1024" d="M0 64q0 52 26 96.5t70 69.5v820q-44 25 -70 69.5t-26 96.5q0 80 56 136t136 56t136 -56t56 -136q0 -52 -26 -96.5t-70 -69.5v-497q54 26 154 57q55 17 87.5 29.5t70.5 31t59 39.5t40.5 51t28 69.5t8.5 91.5q-44 25 -70 69.5t-26 96.5q0 80 56 136t136 56t136 -56t56 -136 q0 -52 -26 -96.5t-70 -69.5q-2 -287 -226 -414q-68 -38 -203 -81q-128 -40 -169.5 -71t-41.5 -100v-26q44 -25 70 -69.5t26 -96.5q0 -80 -56 -136t-136 -56t-136 56t-56 136zM96 64q0 -40 28 -68t68 -28t68 28t28 68t-28 68t-68 28t-68 -28t-28 -68zM96 1216q0 -40 28 -68 t68 -28t68 28t28 68t-28 68t-68 28t-68 -28t-28 -68zM736 1088q0 -40 28 -68t68 -28t68 28t28 68t-28 68t-68 28t-68 -28t-28 -68z" />
+<glyph unicode="&#xf127;" horiz-adv-x="1664" d="M0 448q0 14 9 23t23 9h320q14 0 23 -9t9 -23t-9 -23t-23 -9h-320q-14 0 -23 9t-9 23zM16 1088q0 120 85 203l147 146q83 83 203 83q121 0 204 -85l334 -335q21 -21 42 -56l-239 -18l-273 274q-28 28 -68 28q-39 0 -68 -27l-147 -146q-28 -28 -28 -67q0 -40 28 -68 l274 -274l-18 -240q-35 21 -56 42l-336 336q-84 86 -84 204zM128 32q0 13 9 23l256 256q10 9 23 9t23 -9q9 -10 9 -23t-9 -23l-256 -256q-10 -9 -23 -9q-12 0 -23 9q-9 10 -9 23zM544 -96v320q0 14 9 23t23 9t23 -9t9 -23v-320q0 -14 -9 -23t-23 -9t-23 9t-9 23zM633 364 l239 18l273 -274q27 -27 68 -27.5t68 26.5l147 146q28 28 28 67q0 40 -28 68l-274 275l18 239q35 -21 56 -42l336 -336q84 -86 84 -204q0 -120 -85 -203l-147 -146q-83 -83 -203 -83q-121 0 -204 85l-334 335q-21 21 -42 56zM1056 1184v320q0 14 9 23t23 9t23 -9t9 -23v-320 q0 -14 -9 -23t-23 -9t-23 9t-9 23zM1216 1120q0 13 9 23l256 256q10 9 23 9t23 -9q9 -10 9 -23t-9 -23l-256 -256q-11 -9 -23 -9t-23 9q-9 10 -9 23zM1280 960q0 14 9 23t23 9h320q14 0 23 -9t9 -23t-9 -23t-23 -9h-320q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf128;" horiz-adv-x="1024" d="M96.5 986q-2.5 15 5.5 28q160 266 464 266q80 0 161 -31t146 -83t106 -127.5t41 -158.5q0 -54 -15.5 -101t-35 -76.5t-55 -59.5t-57.5 -43.5t-61 -35.5q-41 -23 -68.5 -65t-27.5 -67q0 -17 -12 -32.5t-28 -15.5h-240q-15 0 -25.5 18.5t-10.5 37.5v45q0 83 65 156.5 t143 108.5q59 27 84 56t25 76q0 42 -46.5 74t-107.5 32q-65 0 -108 -29q-35 -25 -107 -115q-13 -16 -31 -16q-12 0 -25 8l-164 125q-13 10 -15.5 25zM384 40v240q0 16 12 28t28 12h240q16 0 28 -12t12 -28v-240q0 -16 -12 -28t-28 -12h-240q-16 0 -28 12t-12 28z" />
+<glyph unicode="&#xf129;" horiz-adv-x="640" d="M0 64v128q0 26 19 45t45 19h64v384h-64q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h384q26 0 45 -19t19 -45v-576h64q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-512q-26 0 -45 19t-19 45zM128 1152v192q0 26 19 45t45 19h256q26 0 45 -19t19 -45v-192 q0 -26 -19 -45t-45 -19h-256q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf12a;" horiz-adv-x="640" d="M98 1344q-1 26 17.5 45t44.5 19h320q26 0 44.5 -19t17.5 -45l-28 -768q-1 -26 -20.5 -45t-45.5 -19h-256q-26 0 -45.5 19t-20.5 45zM128 64v224q0 26 19 45t45 19h256q26 0 45 -19t19 -45v-224q0 -26 -19 -45t-45 -19h-256q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf12b;" d="M5 0v167h128l197 291l-185 272h-137v168h276l139 -228q2 -4 23 -42q8 -9 11 -21h3q3 9 11 21l25 42l140 228h257v-168h-125l-184 -267l204 -296h109v-167h-248l-159 252l-24 42q-8 9 -11 21h-3l-9 -21q-10 -20 -25 -44l-155 -250h-258zM1013 713q0 64 26 117t65 86.5 t84 65t84 54.5t65 54t26 64q0 38 -29.5 62.5t-70.5 24.5q-51 0 -97 -39q-14 -11 -36 -38l-105 92q26 37 63 66q83 65 188 65q110 0 178 -59.5t68 -158.5q0 -56 -24.5 -103t-62 -76.5t-81.5 -58.5t-82 -50.5t-65.5 -51.5t-30.5 -63h232v80h126v-206h-514l-3 27q-4 28 -4 46z " />
+<glyph unicode="&#xf12c;" d="M5 0v167h128l197 291l-185 272h-137v168h276l139 -228q2 -4 23 -42q8 -9 11 -21h3q3 9 11 21l25 42l140 228h257v-168h-125l-184 -267l204 -296h109v-167h-248l-159 252l-24 42q-8 9 -11 21h-3l-9 -21q-10 -20 -25 -44l-155 -250h-258zM1015 -183q0 64 26 117t65 86.5 t84 65t84 54.5t65 54t26 64q0 38 -29.5 62.5t-70.5 24.5q-51 0 -97 -39q-14 -11 -36 -38l-105 92q26 37 63 66q80 65 188 65q110 0 178 -59.5t68 -158.5q0 -66 -34.5 -118.5t-84 -86t-99.5 -62.5t-87 -63t-41 -73h232v80h126v-206h-514l-4 27q-3 45 -3 46z" />
+<glyph unicode="&#xf12d;" horiz-adv-x="1920" d="M1.5 146.5q5.5 37.5 30.5 65.5l896 1024q38 44 96 44h768q38 0 69.5 -20.5t47.5 -54.5q15 -34 9.5 -71.5t-30.5 -65.5l-896 -1024q-38 -44 -96 -44h-768q-38 0 -69.5 20.5t-47.5 54.5q-15 34 -9.5 71.5zM128 128h768l336 384h-768z" />
+<glyph unicode="&#xf12e;" horiz-adv-x="1664" d="M0 0v1024q2 -1 17.5 -3.5t34 -5t21.5 -3.5q150 -24 245 -24q80 0 117 35q46 44 46 89q0 22 -15 50.5t-33.5 53t-33.5 64.5t-15 83q0 82 59 127.5t144 45.5q80 0 134 -44.5t54 -123.5q0 -41 -17.5 -77.5t-38 -59t-38 -56.5t-17.5 -71q0 -57 42 -83.5t103 -26.5 q64 0 180 15t163 17v-2q-1 -2 -3.5 -17.5t-5 -34t-3.5 -21.5q-24 -150 -24 -245q0 -80 35 -117q44 -46 89 -46q22 0 50.5 15t53 33.5t64.5 33.5t83 15q82 0 127.5 -59t45.5 -143q0 -81 -44.5 -135t-123.5 -54q-41 0 -77.5 17.5t-59 38t-56.5 38t-71 17.5q-110 0 -110 -124 q0 -39 16 -115t15 -115v-5q-22 0 -33 -1q-34 -3 -97.5 -11.5t-115.5 -13.5t-98 -5q-61 0 -103 26.5t-42 83.5q0 37 17.5 71t38 56.5t38 59t17.5 77.5q0 79 -54 123.5t-135 44.5q-84 0 -143 -45.5t-59 -127.5q0 -43 15 -83t33.5 -64.5t33.5 -53t15 -50.5q0 -45 -46 -89 q-37 -35 -117 -35q-95 0 -245 24q-9 2 -27.5 4t-27.5 4l-13 2q-1 0 -3 1q-2 0 -2 1z" />
+<glyph unicode="&#xf130;" horiz-adv-x="1152" d="M0 704v128q0 26 19 45t45 19t45 -19t19 -45v-128q0 -185 131.5 -316.5t316.5 -131.5t316.5 131.5t131.5 316.5v128q0 26 19 45t45 19t45 -19t19 -45v-128q0 -221 -147.5 -384.5t-364.5 -187.5v-132h256q26 0 45 -19t19 -45t-19 -45t-45 -19h-640q-26 0 -45 19t-19 45 t19 45t45 19h256v132q-217 24 -364.5 187.5t-147.5 384.5zM256 704v512q0 132 94 226t226 94t226 -94t94 -226v-512q0 -132 -94 -226t-226 -94t-226 94t-94 226z" />
+<glyph unicode="&#xf131;" horiz-adv-x="1408" d="M13 64q0 13 10 23l1234 1234q10 10 23 10t23 -10l82 -82q10 -10 10 -23t-10 -23l-361 -361v-128q0 -132 -94 -226t-226 -94q-55 0 -109 19l-96 -96q97 -51 205 -51q185 0 316.5 131.5t131.5 316.5v128q0 26 19 45t45 19t45 -19t19 -45v-128q0 -221 -147.5 -384.5 t-364.5 -187.5v-132h256q26 0 45 -19t19 -45t-19 -45t-45 -19h-640q-26 0 -45 19t-19 45t19 45t45 19h256v132q-125 13 -235 81l-254 -254q-10 -10 -23 -10t-23 10l-82 82q-10 10 -10 23zM128 704v128q0 26 19 45t45 19t45 -19t19 -45v-128q0 -53 15 -113l-101 -101 q-42 103 -42 214zM384 704v512q0 132 94 226t226 94q102 0 184.5 -59t116.5 -152z" />
+<glyph unicode="&#xf132;" horiz-adv-x="1280" d="M0 576v768q0 26 19 45t45 19h1152q26 0 45 -19t19 -45v-768q0 -86 -33.5 -170.5t-83 -150t-118 -127.5t-126.5 -103t-121 -77.5t-89.5 -49.5t-42.5 -20q-12 -6 -26 -6t-26 6q-16 7 -42.5 20t-89.5 49.5t-121 77.5t-126.5 103t-118 127.5t-83 150t-33.5 170.5zM640 79 q119 63 213 137q235 184 235 360v640h-448v-1137z" />
+<glyph unicode="&#xf133;" horiz-adv-x="1664" d="M0 -128v1280q0 52 38 90t90 38h128v96q0 66 47 113t113 47h64q66 0 113 -47t47 -113v-96h384v96q0 66 47 113t113 47h64q66 0 113 -47t47 -113v-96h128q52 0 90 -38t38 -90v-1280q0 -52 -38 -90t-90 -38h-1408q-52 0 -90 38t-38 90zM128 -128h1408v1024h-1408v-1024z M384 1088q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v288q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-288zM1152 1088q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v288q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-288z" />
+<glyph unicode="&#xf134;" horiz-adv-x="1408" d="M3.5 940q-8.5 25 3.5 49q5 10 14.5 26t37.5 53.5t60.5 70t85 67t108.5 52.5q-25 42 -25 86q0 66 47 113t113 47t113 -47t47 -113q0 -33 -14 -64h302q0 11 7 20t18 11l448 96q3 1 7 1q12 0 20 -7q12 -9 12 -25v-320q0 -16 -12 -25q-8 -7 -20 -7q-4 0 -7 1l-448 96 q-11 2 -18 11t-7 20h-256v-102q111 -23 183.5 -111t72.5 -203v-800q0 -26 -19 -45t-45 -19h-512q-26 0 -45 19t-19 45v800q0 106 62.5 190.5t161.5 114.5v111h-32q-59 0 -115 -23.5t-91.5 -53t-66 -66.5t-40.5 -53.5t-14 -24.5q-17 -35 -57 -35q-16 0 -29 7q-23 12 -31.5 37 zM384 1344q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf135;" horiz-adv-x="1664" d="M36 464l224 384q10 14 26 16l379 20q96 114 176 195q188 187 358 258t431 71q14 0 24 -9.5t10 -22.5q0 -249 -75.5 -430.5t-253.5 -360.5q-81 -80 -195 -176l-20 -379q-2 -16 -16 -26l-384 -224q-7 -4 -16 -4q-12 0 -23 9l-64 64q-13 14 -8 32l85 276l-281 281l-276 -85 q-3 -1 -9 -1q-14 0 -23 9l-64 64q-17 19 -5 39zM1248 1088q0 -40 28 -68t68 -28t68 28t28 68t-28 68t-68 28t-68 -28t-28 -68z" />
+<glyph unicode="&#xf136;" horiz-adv-x="1792" d="M0 0l204 953l-153 327h1276q101 0 189.5 -40.5t147.5 -113.5q60 -73 81 -168.5t0 -194.5l-164 -763h-334l178 832q13 56 -15 88q-27 33 -83 33h-169l-204 -953h-334l204 953h-286l-204 -953h-334z" />
+<glyph unicode="&#xf137;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM346 640q0 -26 19 -45l454 -454q19 -19 45 -19t45 19l102 102q19 19 19 45t-19 45l-307 307l307 307 q19 19 19 45t-19 45l-102 102q-19 19 -45 19t-45 -19l-454 -454q-19 -19 -19 -45z" />
+<glyph unicode="&#xf138;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM506 288q0 -26 19 -45l102 -102q19 -19 45 -19t45 19l454 454q19 19 19 45t-19 45l-454 454 q-19 19 -45 19t-45 -19l-102 -102q-19 -19 -19 -45t19 -45l307 -307l-307 -307q-19 -19 -19 -45z" />
+<glyph unicode="&#xf139;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM250 544q0 -26 19 -45l102 -102q19 -19 45 -19t45 19l307 307l307 -307q19 -19 45 -19t45 19l102 102 q19 19 19 45t-19 45l-454 454q-19 19 -45 19t-45 -19l-454 -454q-19 -19 -19 -45z" />
+<glyph unicode="&#xf13a;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM250 736q0 -26 19 -45l454 -454q19 -19 45 -19t45 19l454 454q19 19 19 45t-19 45l-102 102 q-19 19 -45 19t-45 -19l-307 -307l-307 307q-19 19 -45 19t-45 -19l-102 -102q-19 -19 -19 -45z" />
+<glyph unicode="&#xf13b;" horiz-adv-x="1408" d="M0 1408h1408l-128 -1438l-578 -162l-574 162zM262 1114l47 -534h612l-22 -228l-197 -53l-196 53l-13 140h-175l22 -278l362 -100h4v1l359 99l50 544h-644l-15 181h674l16 175h-884z" />
+<glyph unicode="&#xf13c;" horiz-adv-x="1792" d="M12 75l71 356h297l-29 -147l422 -161l486 161l68 339h-1208l58 297h1209l38 191h-1208l59 297h1505l-266 -1333l-804 -267z" />
+<glyph unicode="&#xf13d;" horiz-adv-x="1792" d="M0 0v352q0 14 9 23t23 9h352q22 0 30 -20q8 -19 -7 -35l-100 -100q67 -91 189.5 -153.5t271.5 -82.5v647h-192q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h192v163q-58 34 -93 92.5t-35 128.5q0 106 75 181t181 75t181 -75t75 -181q0 -70 -35 -128.5t-93 -92.5v-163h192 q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-192v-647q149 20 271.5 82.5t189.5 153.5l-100 100q-15 16 -7 35q8 20 30 20h352q14 0 23 -9t9 -23v-352q0 -22 -20 -30q-8 -2 -12 -2q-13 0 -23 9l-93 93q-119 -143 -318.5 -226.5t-429.5 -83.5t-429.5 83.5t-318.5 226.5 l-93 -93q-9 -9 -23 -9q-4 0 -12 2q-20 8 -20 30zM832 1280q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf13e;" horiz-adv-x="1152" d="M0 96v576q0 40 28 68t68 28h32v320q0 185 131.5 316.5t316.5 131.5t316.5 -131.5t131.5 -316.5q0 -26 -19 -45t-45 -19h-64q-26 0 -45 19t-19 45q0 106 -75 181t-181 75t-181 -75t-75 -181v-320h736q40 0 68 -28t28 -68v-576q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28 t-28 68z" />
+<glyph unicode="&#xf140;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM128 640q0 -130 51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5t136.5 204t51 248.5 t-51 248.5t-136.5 204t-204 136.5t-248.5 51t-248.5 -51t-204 -136.5t-136.5 -204t-51 -248.5zM256 640q0 212 150 362t362 150t362 -150t150 -362t-150 -362t-362 -150t-362 150t-150 362zM384 640q0 -159 112.5 -271.5t271.5 -112.5t271.5 112.5t112.5 271.5t-112.5 271.5 t-271.5 112.5t-271.5 -112.5t-112.5 -271.5zM512 640q0 106 75 181t181 75t181 -75t75 -181t-75 -181t-181 -75t-181 75t-75 181z" />
+<glyph unicode="&#xf141;" horiz-adv-x="1408" d="M0 608v192q0 40 28 68t68 28h192q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68zM512 608v192q0 40 28 68t68 28h192q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68zM1024 608v192q0 40 28 68t68 28h192 q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68z" />
+<glyph unicode="&#xf142;" horiz-adv-x="384" d="M0 96v192q0 40 28 68t68 28h192q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68zM0 608v192q0 40 28 68t68 28h192q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68zM0 1120v192q0 40 28 68t68 28h192q40 0 68 -28 t28 -68v-192q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68z" />
+<glyph unicode="&#xf143;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 256q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5z M256 575q0 -13 8.5 -22t21.5 -10q154 -11 264 -121t121 -264q1 -13 10 -21.5t22 -8.5h128q13 0 23 10t9 24q-13 232 -177 396t-396 177q-14 1 -24 -9t-10 -23v-128zM256 959q0 -13 9 -22t22 -10q204 -7 378 -111.5t278.5 -278.5t111.5 -378q1 -13 10 -22t22 -9h128 q13 0 23 10q11 9 9 23q-5 154 -56 297.5t-139.5 260t-205 205t-260 139.5t-297.5 56q-14 1 -23 -9q-10 -10 -10 -23v-128z" />
+<glyph unicode="&#xf144;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM512 320q0 -37 32 -56q16 -8 32 -8q17 0 32 9l544 320q32 18 32 55t-32 55l-544 320q-31 19 -64 1 q-32 -19 -32 -56v-640z" />
+<glyph unicode="&#xf145;" horiz-adv-x="1792" d="M54 448.5q0 53.5 37 90.5l907 906q37 37 90.5 37t90.5 -37l125 -125q-56 -56 -56 -136t56 -136t136 -56t136 56l126 -125q37 -37 37 -90.5t-37 -90.5l-907 -908q-37 -37 -90.5 -37t-90.5 37l-126 126q56 56 56 136t-56 136t-136 56t-136 -56l-125 126q-37 37 -37 90.5z M342 512q0 -26 19 -45l362 -362q18 -18 45 -18t45 18l618 618q19 19 19 45t-19 45l-362 362q-18 18 -45 18t-45 -18l-618 -618q-19 -19 -19 -45zM452 512l572 572l316 -316l-572 -572z" />
+<glyph unicode="&#xf146;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 576q0 -26 19 -45t45 -19h896q26 0 45 19t19 45v128q0 26 -19 45t-45 19h-896q-26 0 -45 -19t-19 -45v-128 z" />
+<glyph unicode="&#xf147;" horiz-adv-x="1408" d="M0 288v832q0 119 84.5 203.5t203.5 84.5h832q119 0 203.5 -84.5t84.5 -203.5v-832q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5zM128 288q0 -66 47 -113t113 -47h832q66 0 113 47t47 113v832q0 66 -47 113t-113 47h-832q-66 0 -113 -47 t-47 -113v-832zM256 672v64q0 14 9 23t23 9h832q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-832q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf148;" horiz-adv-x="1024" d="M3 18q-8 20 4 35l160 192q9 11 25 11h320v640h-192q-40 0 -58 37q-17 37 9 68l320 384q18 22 49 22t49 -22l320 -384q27 -32 9 -68q-18 -37 -58 -37h-192v-864q0 -14 -9 -23t-23 -9h-704q-21 0 -29 18z" />
+<glyph unicode="&#xf149;" horiz-adv-x="1024" d="M3 1261q9 19 29 19h704q13 0 22.5 -9.5t9.5 -23.5v-863h192q40 0 58 -37t-9 -69l-320 -384q-18 -22 -49 -22t-49 22l-320 384q-26 31 -9 69q18 37 58 37h192v640h-320q-14 0 -25 11l-160 192q-13 14 -4 34z" />
+<glyph unicode="&#xf14a;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM218 640q0 -26 19 -45l358 -358q19 -19 45 -19t45 19l614 614q19 19 19 45t-19 45l-102 102q-19 19 -45 19 t-45 -19l-467 -467l-211 211q-19 19 -45 19t-45 -19l-102 -102q-19 -19 -19 -45z" />
+<glyph unicode="&#xf14b;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 128h288l544 544l-288 288l-544 -544v-288zM352 320v56l52 52l152 -152l-52 -52h-56v96h-96zM494 494 q-14 13 3 30l291 291q17 17 30 3q14 -13 -3 -30l-291 -291q-17 -17 -30 -3zM864 1024l288 -288l92 92q28 28 28 68t-28 68l-152 152q-28 28 -68 28t-68 -28z" />
+<glyph unicode="&#xf14c;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM282 320q0 -26 19 -45l102 -102q19 -19 45 -19t45 19l534 534l144 -144q18 -19 45 -19q12 0 25 5q39 17 39 59 v480q0 26 -19 45t-45 19h-480q-42 0 -59 -39q-17 -41 14 -70l144 -144l-534 -534q-19 -19 -19 -45z" />
+<glyph unicode="&#xf14d;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 448q0 -181 167 -404q10 -12 25 -12q7 0 13 3q22 9 19 33q-44 354 62 473q46 52 130 75.5t224 23.5v-160 q0 -42 40 -59q12 -5 24 -5q26 0 45 19l352 352q19 19 19 45t-19 45l-352 352q-30 31 -69 14q-40 -17 -40 -59v-160q-119 0 -216 -19.5t-162.5 -51t-114 -79t-76.5 -95.5t-44.5 -109t-21.5 -111.5t-5 -110.5z" />
+<glyph unicode="&#xf14e;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM512 241v542l512 256v-542zM640 448l256 128l-256 128v-256z" />
+<glyph unicode="&#xf150;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 160q0 -13 9.5 -22.5t22.5 -9.5h960q13 0 22.5 9.5t9.5 22.5v960q0 13 -9.5 22.5t-22.5 9.5h-960 q-13 0 -22.5 -9.5t-9.5 -22.5v-960zM391 861q17 35 57 35h640q40 0 57 -35q18 -35 -5 -66l-320 -448q-19 -27 -52 -27t-52 27l-320 448q-23 31 -5 66z" />
+<glyph unicode="&#xf151;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 160q0 -13 9.5 -22.5t22.5 -9.5h960q13 0 22.5 9.5t9.5 22.5v960q0 13 -9.5 22.5t-22.5 9.5h-960 q-13 0 -22.5 -9.5t-9.5 -22.5v-960zM391 419q-18 35 5 66l320 448q19 27 52 27t52 -27l320 -448q23 -31 5 -66q-17 -35 -57 -35h-640q-40 0 -57 35z" />
+<glyph unicode="&#xf152;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 160q0 -14 9 -23t23 -9h960q14 0 23 9t9 23v960q0 14 -9 23t-23 9h-960q-14 0 -23 -9t-9 -23v-960z M512 320v640q0 40 35 57q35 18 66 -5l448 -320q27 -19 27 -52t-27 -52l-448 -320q-31 -23 -66 -5q-35 17 -35 57z" />
+<glyph unicode="&#xf153;" horiz-adv-x="1024" d="M0 514v113q0 13 9.5 22.5t22.5 9.5h66q-2 57 1 105h-67q-14 0 -23 9t-9 23v114q0 14 9 23t23 9h98q67 210 243.5 338t400.5 128q102 0 194 -23q11 -3 20 -15q6 -11 3 -24l-43 -159q-3 -13 -14 -19.5t-24 -2.5l-4 1q-4 1 -11.5 2.5l-17.5 3.5t-22.5 3.5t-26 3t-29 2.5 t-29.5 1q-126 0 -226 -64t-150 -176h468q16 0 25 -12q10 -12 7 -26l-24 -114q-5 -26 -32 -26h-488q-3 -37 0 -105h459q15 0 25 -12q9 -12 6 -27l-24 -112q-2 -11 -11 -18.5t-20 -7.5h-387q48 -117 149.5 -185.5t228.5 -68.5q18 0 36 1.5t33.5 3.5t29.5 4.5t24.5 5t18.5 4.5 l12 3l5 2q13 5 26 -2q12 -7 15 -21l35 -159q3 -12 -3 -22.5t-17 -14.5l-5 -1q-4 -2 -10.5 -3.5t-16 -4.5t-21.5 -5.5t-25.5 -5t-30 -5t-33.5 -4.5t-36.5 -3t-38.5 -1q-234 0 -409 130.5t-238 351.5h-95q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf154;" horiz-adv-x="1024" d="M0 32v150q0 13 9.5 22.5t22.5 9.5h97v383h-95q-14 0 -23 9.5t-9 22.5v131q0 14 9 23t23 9h95v223q0 171 123.5 282t314.5 111q185 0 335 -125q9 -8 10 -20.5t-7 -22.5l-103 -127q-9 -11 -22 -12q-13 -2 -23 7q-5 5 -26 19t-69 32t-93 18q-85 0 -137 -47t-52 -123v-215 h305q13 0 22.5 -9t9.5 -23v-131q0 -13 -9.5 -22.5t-22.5 -9.5h-305v-379h414v181q0 13 9 22.5t23 9.5h162q14 0 23 -9.5t9 -22.5v-367q0 -14 -9 -23t-23 -9h-956q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf155;" horiz-adv-x="1024" d="M52 171l103 135q7 10 23 12q15 2 24 -9l2 -2q113 -99 243 -125q37 -8 74 -8q81 0 142.5 43t61.5 122q0 28 -15 53t-33.5 42t-58.5 37.5t-66 32t-80 32.5q-39 16 -61.5 25t-61.5 26.5t-62.5 31t-56.5 35.5t-53.5 42.5t-43.5 49t-35.5 58t-21 66.5t-8.5 78q0 138 98 242 t255 134v180q0 13 9.5 22.5t22.5 9.5h135q14 0 23 -9t9 -23v-176q57 -6 110.5 -23t87 -33.5t63.5 -37.5t39 -29t15 -14q17 -18 5 -38l-81 -146q-8 -15 -23 -16q-14 -3 -27 7q-3 3 -14.5 12t-39 26.5t-58.5 32t-74.5 26t-85.5 11.5q-95 0 -155 -43t-60 -111q0 -26 8.5 -48 t29.5 -41.5t39.5 -33t56 -31t60.5 -27t70 -27.5q53 -20 81 -31.5t76 -35t75.5 -42.5t62 -50t53 -63.5t31.5 -76.5t13 -94q0 -153 -99.5 -263.5t-258.5 -136.5v-175q0 -14 -9 -23t-23 -9h-135q-13 0 -22.5 9.5t-9.5 22.5v175q-66 9 -127.5 31t-101.5 44.5t-74 48t-46.5 37.5 t-17.5 18q-17 21 -2 41z" />
+<glyph unicode="&#xf156;" horiz-adv-x="898" d="M0 605v127q0 13 9.5 22.5t22.5 9.5h112q132 0 212.5 43t102.5 125h-427q-14 0 -23 9t-9 23v102q0 14 9 23t23 9h413q-57 113 -268 113h-145q-13 0 -22.5 9.5t-9.5 22.5v133q0 14 9 23t23 9h832q14 0 23 -9t9 -23v-102q0 -14 -9 -23t-23 -9h-233q47 -61 64 -144h171 q14 0 23 -9t9 -23v-102q0 -14 -9 -23t-23 -9h-168q-23 -144 -129 -234t-276 -110q167 -178 459 -536q14 -16 4 -34q-8 -18 -29 -18h-195q-16 0 -25 12q-306 367 -498 571q-9 9 -9 22z" />
+<glyph unicode="&#xf157;" horiz-adv-x="1027" d="M4 1360q-8 16 0 32q10 16 28 16h194q19 0 29 -18l215 -425q19 -38 56 -125q10 24 30.5 68t27.5 61l191 420q8 19 29 19h191q17 0 27 -16q9 -14 1 -31l-313 -579h215q13 0 22.5 -9.5t9.5 -22.5v-104q0 -14 -9.5 -23t-22.5 -9h-290v-85h290q13 0 22.5 -9.5t9.5 -22.5v-103 q0 -14 -9.5 -23t-22.5 -9h-290v-330q0 -13 -9.5 -22.5t-22.5 -9.5h-172q-13 0 -22.5 9t-9.5 23v330h-288q-13 0 -22.5 9t-9.5 23v103q0 13 9.5 22.5t22.5 9.5h288v85h-288q-13 0 -22.5 9t-9.5 23v104q0 13 9.5 22.5t22.5 9.5h214z" />
+<glyph unicode="&#xf158;" horiz-adv-x="1280" d="M0 256v128q0 14 9 23t23 9h224v118h-224q-14 0 -23 9t-9 23v149q0 13 9 22.5t23 9.5h224v629q0 14 9 23t23 9h539q200 0 326.5 -122t126.5 -315t-126.5 -315t-326.5 -122h-340v-118h505q14 0 23 -9t9 -23v-128q0 -14 -9 -23t-23 -9h-505v-192q0 -14 -9.5 -23t-22.5 -9 h-167q-14 0 -23 9t-9 23v192h-224q-14 0 -23 9t-9 23zM487 747h320q106 0 171 62t65 162t-65 162t-171 62h-320v-448z" />
+<glyph unicode="&#xf159;" horiz-adv-x="1792" d="M0 672v64q0 14 9 23t23 9h175l-33 128h-142q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h109l-89 344q-5 15 5 28q10 12 26 12h137q26 0 31 -24l90 -360h359l97 360q7 24 31 24h126q24 0 31 -24l98 -360h365l93 360q5 24 31 24h137q16 0 26 -12q10 -13 5 -28l-91 -344h111 q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-145l-34 -128h179q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-213l-164 -616q-7 -24 -31 -24h-159q-24 0 -31 24l-166 616h-209l-167 -616q-7 -24 -31 -24h-159q-11 0 -19.5 7t-10.5 17l-160 616h-208q-14 0 -23 9t-9 23z M373 896l32 -128h225l35 128h-292zM436 640l75 -300q1 -1 1 -3t1 -3q0 1 0.5 3.5t0.5 3.5l81 299h-159zM822 768h139l-35 128h-70zM1118 896l34 -128h230l33 128h-297zM1187 640l81 -299q0 -1 0.5 -3.5t1.5 -3.5q0 1 0.5 3t0.5 3l78 300h-162z" />
+<glyph unicode="&#xf15a;" horiz-adv-x="1280" d="M56 0l31 183h111q50 0 58 51v402h16q-6 1 -16 1v287q-13 68 -89 68h-111v164l212 -1q64 0 97 1v252h154v-247q82 2 122 2v245h154v-252q79 -7 140 -22.5t113 -45t82.5 -78t36.5 -114.5q18 -182 -131 -258q117 -28 175 -103t45 -214q-7 -71 -32.5 -125t-64.5 -89 t-97 -58.5t-121.5 -34.5t-145.5 -15v-255h-154v251q-80 0 -122 1v-252h-154v255q-18 0 -54 0.5t-55 0.5h-200zM522 182q8 0 37 -0.5t48 -0.5t53 1.5t58.5 4t57 8.5t55.5 14t47.5 21t39.5 30t24.5 40t9.5 51q0 36 -15 64t-37 46t-57.5 30.5t-65.5 18.5t-74 9t-69 3t-64.5 -1 t-47.5 -1v-338zM522 674q5 0 34.5 -0.5t46.5 0t50 2t55 5.5t51.5 11t48.5 18.5t37 27t27 38.5t9 51q0 33 -12.5 58.5t-30.5 42t-48 28t-55 16.5t-61.5 8t-58 2.5t-54 -1t-39.5 -0.5v-307z" />
+<glyph unicode="&#xf15b;" d="M0 -160v1600q0 40 28 68t68 28h800v-544q0 -40 28 -68t68 -28h544v-1056q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM1024 1024v472q22 -14 36 -28l408 -408q14 -14 28 -36h-472z" />
+<glyph unicode="&#xf15c;" d="M0 -160v1600q0 40 28 68t68 28h800v-544q0 -40 28 -68t68 -28h544v-1056q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM384 160q0 -14 9 -23t23 -9h704q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-704q-14 0 -23 -9t-9 -23v-64zM384 416q0 -14 9 -23t23 -9h704 q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-704q-14 0 -23 -9t-9 -23v-64zM384 672q0 -14 9 -23t23 -9h704q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-704q-14 0 -23 -9t-9 -23v-64zM1024 1024v472q22 -14 36 -28l408 -408q14 -14 28 -36h-472z" />
+<glyph unicode="&#xf15d;" horiz-adv-x="1664" d="M34 108q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192q14 0 23 -9t9 -23q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35zM899 768v106h70l230 662h162l230 -662h70v-106h-288v106h75l-47 144h-243l-47 -144h75v-106 h-287zM988 -166l369 529q12 18 21 27l11 9v3q-2 0 -6.5 -0.5t-7.5 -0.5q-12 -3 -30 -3h-232v-115h-120v229h567v-89l-369 -530q-6 -8 -21 -26l-11 -11v-2l14 2q9 2 30 2h248v119h121v-233h-584v90zM1191 1128h177l-72 218l-12 47q-2 16 -2 20h-4l-3 -20q0 -1 -3.5 -18 t-7.5 -29z" />
+<glyph unicode="&#xf15e;" horiz-adv-x="1664" d="M34 108q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192q14 0 23 -9t9 -23q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35zM899 -150h70l230 662h162l230 -662h70v-106h-288v106h75l-47 144h-243l-47 -144h75v-106h-287 v106zM988 768v90l369 529q12 18 21 27l11 9v3q-2 0 -6.5 -0.5t-7.5 -0.5q-12 -3 -30 -3h-232v-115h-120v229h567v-89l-369 -530q-6 -8 -21 -26l-11 -10v-3l14 3q9 1 30 1h248v119h121v-233h-584zM1191 104h177l-72 218l-12 47q-2 16 -2 20h-4l-3 -20q0 -1 -3.5 -18t-7.5 -29 z" />
+<glyph unicode="&#xf160;" horiz-adv-x="1792" d="M34 108q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192q14 0 23 -9t9 -23q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35zM896 -32q0 14 9 23t23 9h832q14 0 23 -9t9 -23v-192q0 -14 -9 -23t-23 -9h-832q-14 0 -23 9 t-9 23v192zM896 288v192q0 14 9 23t23 9h640q14 0 23 -9t9 -23v-192q0 -14 -9 -23t-23 -9h-640q-14 0 -23 9t-9 23zM896 800v192q0 14 9 23t23 9h448q14 0 23 -9t9 -23v-192q0 -14 -9 -23t-23 -9h-448q-14 0 -23 9t-9 23zM896 1312v192q0 14 9 23t23 9h256q14 0 23 -9t9 -23 v-192q0 -14 -9 -23t-23 -9h-256q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf161;" horiz-adv-x="1792" d="M34 108q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192q14 0 23 -9t9 -23q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35zM896 -32q0 14 9 23t23 9h256q14 0 23 -9t9 -23v-192q0 -14 -9 -23t-23 -9h-256q-14 0 -23 9 t-9 23v192zM896 288v192q0 14 9 23t23 9h448q14 0 23 -9t9 -23v-192q0 -14 -9 -23t-23 -9h-448q-14 0 -23 9t-9 23zM896 800v192q0 14 9 23t23 9h640q14 0 23 -9t9 -23v-192q0 -14 -9 -23t-23 -9h-640q-14 0 -23 9t-9 23zM896 1312v192q0 14 9 23t23 9h832q14 0 23 -9t9 -23 v-192q0 -14 -9 -23t-23 -9h-832q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf162;" d="M34 108q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192q14 0 23 -9t9 -23q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35zM946 261q0 105 72 178t181 73q123 0 205 -94.5t82 -252.5q0 -62 -13 -121.5t-41 -114 t-68 -95.5t-98.5 -65.5t-127.5 -24.5q-62 0 -108 16q-24 8 -42 15l39 113q15 -7 31 -11q37 -13 75 -13q84 0 134.5 58.5t66.5 145.5h-2q-21 -23 -61.5 -37t-84.5 -14q-106 0 -173 71.5t-67 172.5zM976 1351l192 185h123v-654h165v-114h-469v114h167v432q0 7 0.5 19t0.5 17 v16h-2l-7 -12q-8 -13 -26 -31l-62 -58zM1085 261q0 -57 36.5 -95t104.5 -38q50 0 85 27t35 68q0 63 -44 116t-103 53q-52 0 -83 -37t-31 -94z" />
+<glyph unicode="&#xf163;" d="M34 108q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192q14 0 23 -9t9 -23q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35zM946 1285q0 105 72 178t181 73q123 0 205 -94.5t82 -252.5q0 -62 -13 -121.5t-41 -114 t-68 -95.5t-98.5 -65.5t-127.5 -24.5q-62 0 -108 16q-24 8 -42 15l39 113q15 -7 31 -11q37 -13 75 -13q84 0 134.5 58.5t66.5 145.5h-2q-21 -23 -61.5 -37t-84.5 -14q-106 0 -173 71.5t-67 172.5zM976 327l192 185h123v-654h165v-114h-469v114h167v432q0 7 0.5 19t0.5 17v16 h-2l-7 -12q-8 -13 -26 -31l-62 -58zM1085 1285q0 -57 36.5 -95t104.5 -38q50 0 85 27t35 68q0 63 -44 116t-103 53q-52 0 -83 -37t-31 -94z" />
+<glyph unicode="&#xf164;" horiz-adv-x="1664" d="M0 64v640q0 26 19 45t45 19h288q26 0 45 -19t19 -45v-640q0 -26 -19 -45t-45 -19h-288q-26 0 -45 19t-19 45zM128 192q0 -27 18.5 -45.5t45.5 -18.5q26 0 45 18.5t19 45.5q0 26 -19 45t-45 19q-27 0 -45.5 -19t-18.5 -45zM480 64v641q0 25 18 43.5t43 20.5q24 2 76 59 t101 121q68 87 101 120q18 18 31 48t17.5 48.5t13.5 60.5q7 39 12.5 61t19.5 52t34 50q19 19 45 19q46 0 82.5 -10.5t60 -26t40 -40.5t24 -45t12 -50t5 -45t0.5 -39q0 -38 -9.5 -76t-19 -60t-27.5 -56q-3 -6 -10 -18t-11 -22t-8 -24h277q78 0 135 -57t57 -135 q0 -86 -55 -149q15 -44 15 -76q3 -76 -43 -137q17 -56 0 -117q-15 -57 -54 -94q9 -112 -49 -181q-64 -76 -197 -78h-36h-76h-17q-66 0 -144 15.5t-121.5 29t-120.5 39.5q-123 43 -158 44q-26 1 -45 19.5t-19 44.5z" />
+<glyph unicode="&#xf165;" horiz-adv-x="1664" d="M0 448q0 -26 19 -45t45 -19h288q26 0 45 19t19 45v640q0 26 -19 45t-45 19h-288q-26 0 -45 -19t-19 -45v-640zM128 960q0 27 18.5 45.5t45.5 18.5q26 0 45 -18.5t19 -45.5q0 -26 -19 -45t-45 -19q-27 0 -45.5 19t-18.5 45zM480 447v641q0 26 19 44.5t45 19.5q35 1 158 44 q77 26 120.5 39.5t121.5 29t144 15.5h17h76h36q133 -2 197 -78q58 -69 49 -181q39 -37 54 -94q17 -61 0 -117q46 -61 43 -137q0 -32 -15 -76q55 -61 55 -149q-1 -78 -57.5 -135t-134.5 -57h-277q4 -14 8 -24t11 -22t10 -18q18 -37 27 -57t19 -58.5t10 -76.5q0 -24 -0.5 -39 t-5 -45t-12 -50t-24 -45t-40 -40.5t-60 -26t-82.5 -10.5q-26 0 -45 19q-20 20 -34 50t-19.5 52t-12.5 61q-9 42 -13.5 60.5t-17.5 48.5t-31 48q-33 33 -101 120q-49 64 -101 121t-76 59q-25 2 -43 20.5t-18 43.5z" />
+<glyph unicode="&#xf166;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM218 366q0 -176 20 -260q10 -43 42.5 -73t75.5 -35q137 -15 412 -15t412 15q43 5 75.5 35t42.5 73 q20 84 20 260q0 177 -19 260q-10 44 -43 73.5t-76 34.5q-136 15 -412 15q-275 0 -411 -15q-44 -5 -76.5 -34.5t-42.5 -73.5q-20 -87 -20 -260zM300 551v70h232v-70h-80v-423h-74v423h-78zM396 1313l24 -69t23 -69q35 -103 46 -158v-201h74v201l90 296h-75l-51 -195l-53 195 h-78zM542 205v290h66v-270q0 -24 1 -26q1 -15 15 -15q20 0 42 31v280h67v-367h-67v40q-39 -45 -76 -45q-33 0 -42 28q-6 16 -6 54zM654 936q0 -58 21 -87q27 -38 78 -38q49 0 78 38q21 27 21 87v130q0 58 -21 87q-29 38 -78 38q-51 0 -78 -38q-21 -29 -21 -87v-130zM721 923 v156q0 52 32 52t32 -52v-156q0 -51 -32 -51t-32 51zM790 128v493h67v-161q32 40 68 40q41 0 53 -42q7 -21 7 -74v-146q0 -52 -7 -73q-12 -42 -53 -42q-35 0 -68 41v-36h-67zM857 200q16 -16 33 -16q29 0 29 49v157q0 50 -29 50q-17 0 -33 -16v-224zM907 893q0 -37 6 -55 q11 -27 43 -27q36 0 77 45v-40h67v370h-67v-283q-22 -31 -42 -31q-15 0 -16 16q-1 2 -1 26v272h-67v-293zM1037 247v129q0 59 20 86q29 38 80 38t78 -38q21 -28 21 -86v-76h-133v-65q0 -51 34 -51q24 0 30 26q0 1 0.5 7t0.5 16.5v21.5h68v-9q0 -29 -2 -43q-3 -22 -15 -40 q-27 -40 -80 -40q-52 0 -81 38q-21 27 -21 86zM1103 355h66v34q0 51 -33 51t-33 -51v-34z" />
+<glyph unicode="&#xf167;" d="M27 260q0 234 26 350q14 59 58 99t103 47q183 20 554 20t555 -20q58 -7 102.5 -47t57.5 -99q26 -112 26 -350q0 -234 -26 -350q-14 -59 -58 -99t-102 -46q-184 -21 -555 -21t-555 21q-58 6 -102.5 46t-57.5 99q-26 112 -26 350zM138 509h105v-569h100v569h107v94h-312 v-94zM266 1536h106l71 -263l68 263h102l-121 -399v-271h-100v271q-14 74 -61 212q-37 103 -65 187zM463 43q0 -49 8 -73q12 -37 58 -37q48 0 102 61v-54h89v494h-89v-378q-30 -42 -57 -42q-18 0 -21 21q-1 3 -1 35v364h-89v-391zM614 1028v175q0 80 28 117q38 51 105 51 q69 0 106 -51q28 -37 28 -117v-175q0 -81 -28 -118q-37 -51 -106 -51q-67 0 -105 51q-28 38 -28 118zM704 1011q0 -70 43 -70t43 70v210q0 69 -43 69t-43 -69v-210zM798 -60h89v48q45 -55 93 -55q54 0 71 55q9 27 9 100v197q0 73 -9 99q-17 56 -71 56q-50 0 -93 -54v217h-89 v-663zM887 36v301q22 22 45 22q39 0 39 -67v-211q0 -67 -39 -67q-23 0 -45 22zM955 971v394h91v-367q0 -33 1 -35q3 -22 21 -22q27 0 57 43v381h91v-499h-91v55q-53 -62 -103 -62q-46 0 -59 37q-8 24 -8 75zM1130 100q0 -79 29 -116q39 -51 108 -51q72 0 108 53q18 27 21 54 q2 9 2 58v13h-91q0 -51 -2 -61q-7 -36 -40 -36q-46 0 -46 69v87h179v103q0 79 -27 116q-39 51 -106 51q-68 0 -107 -51q-28 -37 -28 -116v-173zM1219 245v46q0 68 45 68t45 -68v-46h-90z" />
+<glyph unicode="&#xf168;" horiz-adv-x="1408" d="M5 384q-10 17 0 36l253 448q1 0 0 1l-161 279q-12 22 -1 37q9 15 32 15h239q40 0 66 -45l164 -286q-10 -18 -257 -456q-27 -46 -65 -46h-239q-21 0 -31 17zM536 539q18 32 531 942q25 45 64 45h241q22 0 31 -15q11 -16 0 -37l-528 -934v-1l336 -615q11 -20 1 -37 q-10 -15 -32 -15h-239q-42 0 -66 45z" />
+<glyph unicode="&#xf169;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM227 396q8 -13 24 -13h185q31 0 50 36l199 352q0 1 -126 222q-21 34 -52 34h-184q-18 0 -26 -11q-7 -12 1 -29 l125 -216v-1l-196 -346q-9 -14 0 -28zM638 516q1 -2 262 -481q20 -35 52 -35h184q18 0 25 12q8 13 -1 28l-260 476v1l409 723q8 16 0 28q-7 12 -24 12h-187q-30 0 -49 -35z" />
+<glyph unicode="&#xf16a;" horiz-adv-x="1792" d="M0 640q0 96 1 150t8.5 136.5t22.5 147.5q16 73 69 123t124 58q222 25 671 25t671 -25q71 -8 124.5 -58t69.5 -123q14 -65 21.5 -147.5t8.5 -136.5t1 -150t-1 -150t-8.5 -136.5t-22.5 -147.5q-16 -73 -69 -123t-124 -58q-222 -25 -671 -25t-671 25q-71 8 -124.5 58 t-69.5 123q-14 65 -21.5 147.5t-8.5 136.5t-1 150zM640 320q0 -38 33 -56q16 -8 31 -8q20 0 34 10l512 320q30 17 30 54t-30 54l-512 320q-31 20 -65 2q-33 -18 -33 -56v-640z" />
+<glyph unicode="&#xf16b;" horiz-adv-x="1792" d="M64 558l338 271l494 -305l-342 -285zM64 1099l490 319l342 -285l-494 -304zM407 166v108l147 -96l342 284v2l1 -1l1 1v-2l343 -284l147 96v-108l-490 -293v-1l-1 1l-1 -1v1zM896 524l494 305l338 -271l-489 -319zM896 1133l343 285l489 -319l-338 -270z" />
+<glyph unicode="&#xf16c;" horiz-adv-x="1408" d="M0 -255v736h121v-618h928v618h120v-701l-1 -35v-1h-1132l-35 1h-1zM221 -17v151l707 1v-151zM227 243l14 150l704 -65l-13 -150zM270 563l39 146l683 -183l-39 -146zM395 928l77 130l609 -360l-77 -130zM707 1303l125 86l398 -585l-124 -85zM1136 1510l149 26l121 -697 l-149 -26z" />
+<glyph unicode="&#xf16d;" d="M0 69v1142q0 81 58 139t139 58h1142q81 0 139 -58t58 -139v-1142q0 -81 -58 -139t-139 -58h-1142q-81 0 -139 58t-58 139zM171 110q0 -26 17.5 -43.5t43.5 -17.5h1069q25 0 43 17.5t18 43.5v648h-135q20 -63 20 -131q0 -126 -64 -232.5t-174 -168.5t-240 -62 q-197 0 -337 135.5t-140 327.5q0 68 20 131h-141v-648zM461 643q0 -124 90.5 -211.5t217.5 -87.5q128 0 218.5 87.5t90.5 211.5t-90.5 211.5t-218.5 87.5q-127 0 -217.5 -87.5t-90.5 -211.5zM1050 1003q0 -29 20 -49t49 -20h174q29 0 49 20t20 49v165q0 28 -20 48.5 t-49 20.5h-174q-29 0 -49 -20.5t-20 -48.5v-165z" />
+<glyph unicode="&#xf16e;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM274 640q0 -88 62 -150t150 -62t150 62t62 150t-62 150t-150 62t-150 -62t-62 -150zM838 640q0 -88 62 -150 t150 -62t150 62t62 150t-62 150t-150 62t-150 -62t-62 -150z" />
+<glyph unicode="&#xf170;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM309 384h94l104 160h522l104 -160h94l-459 691zM567 608l201 306l201 -306h-402z" />
+<glyph unicode="&#xf171;" horiz-adv-x="1408" d="M0 1222q3 26 17.5 48.5t31.5 37.5t45 30t46 22.5t48 18.5q125 46 313 64q379 37 676 -50q155 -46 215 -122q16 -20 16.5 -51t-5.5 -54q-26 -167 -111 -655q-5 -30 -27 -56t-43.5 -40t-54.5 -31q-252 -126 -610 -88q-248 27 -394 139q-15 12 -25.5 26.5t-17 35t-9 34 t-6 39.5t-5.5 35q-9 50 -26.5 150t-28 161.5t-23.5 147.5t-22 158zM173 285l6 16l18 9q223 -148 506.5 -148t507.5 148q21 -6 24 -23t-5 -45t-8 -37q-8 -26 -15.5 -76.5t-14 -84t-28.5 -70t-58 -56.5q-86 -48 -189.5 -71.5t-202 -22t-201.5 18.5q-46 8 -81.5 18t-76.5 27 t-73 43.5t-52 61.5q-25 96 -57 292zM243 1240q30 -28 76 -45.5t73.5 -22t87.5 -11.5q228 -29 448 -1q63 8 89.5 12t72.5 21.5t75 46.5q-20 27 -56 44.5t-58 22t-71 12.5q-291 47 -566 -2q-43 -7 -66 -12t-55 -22t-50 -43zM481 657q4 -91 77.5 -155t165.5 -56q91 8 152 84 t50 168q-14 107 -113 164t-197 13q-63 -28 -100.5 -88.5t-34.5 -129.5zM599 710q14 41 52 58q36 18 72.5 12t64 -35.5t27.5 -67.5q8 -63 -50.5 -101t-111.5 -6q-39 17 -53.5 58t-0.5 82z" />
+<glyph unicode="&#xf172;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM260 1060q8 -68 19 -138t29 -171t24 -137q1 -5 5 -31t7 -36t12 -27t22 -28q105 -80 284 -100q259 -28 440 63 q24 13 39.5 23t31 29t19.5 40q48 267 80 473q9 53 -8 75q-43 55 -155 88q-216 63 -487 36q-132 -12 -226 -46q-38 -15 -59.5 -25t-47 -34t-29.5 -54zM385 384q26 -154 41 -210q47 -81 204 -108q249 -46 428 53q34 19 49 51.5t22.5 85.5t12.5 71q0 7 5.5 26.5t3 32 t-17.5 16.5q-161 -106 -365 -106t-366 106l-12 -6zM436 1073q13 19 36 31t40 15.5t47 8.5q198 35 408 1q33 -5 51 -8.5t43 -16t39 -31.5q-20 -21 -53.5 -34t-53 -16t-63.5 -8q-155 -20 -324 0q-44 6 -63 9.5t-52.5 16t-54.5 32.5zM607 653q-2 49 25.5 93t72.5 64 q70 31 141.5 -10t81.5 -118q8 -66 -36 -121t-110 -61t-119 40t-56 113zM687.5 660.5q0.5 -52.5 43.5 -70.5q39 -23 81 4t36 72q0 43 -41 66t-77 1q-43 -20 -42.5 -72.5z" />
+<glyph unicode="&#xf173;" horiz-adv-x="1024" d="M78 779v217q91 30 155 84q64 55 103 132q39 78 54 196h219v-388h364v-241h-364v-394q0 -136 14 -172q13 -37 52 -60q50 -31 117 -31q117 0 232 76v-242q-102 -48 -178 -65q-77 -19 -173 -19q-105 0 -186 27q-78 25 -138 75q-58 51 -79 105q-22 54 -22 161v539h-170z" />
+<glyph unicode="&#xf174;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM413 744h127v-404q0 -78 17 -121q17 -42 59 -78q43 -37 104 -57q62 -20 140 -20q67 0 129 14q57 13 134 49v181 q-88 -56 -174 -56q-51 0 -88 23q-29 17 -39 45q-11 30 -11 129v295h274v181h-274v291h-164q-11 -90 -40 -147t-78 -99q-48 -40 -116 -63v-163z" />
+<glyph unicode="&#xf175;" horiz-adv-x="768" d="M3 237q9 19 29 19h224v1248q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1248h224q21 0 29 -19t-5 -35l-350 -384q-10 -10 -23 -10q-14 0 -24 10l-355 384q-13 16 -5 35z" />
+<glyph unicode="&#xf176;" horiz-adv-x="768" d="M3 1043q-8 19 5 35l350 384q10 10 23 10q14 0 24 -10l355 -384q13 -16 5 -35q-9 -19 -29 -19h-224v-1248q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v1248h-224q-21 0 -29 19z" />
+<glyph unicode="&#xf177;" horiz-adv-x="1792" d="M64 637q0 14 10 24l384 354q16 14 35 6q19 -9 19 -29v-224h1248q14 0 23 -9t9 -23v-192q0 -14 -9 -23t-23 -9h-1248v-224q0 -21 -19 -29t-35 5l-384 350q-10 10 -10 23z" />
+<glyph unicode="&#xf178;" horiz-adv-x="1792" d="M0 544v192q0 14 9 23t23 9h1248v224q0 21 19 29t35 -5l384 -350q10 -10 10 -23q0 -14 -10 -24l-384 -354q-16 -14 -35 -6q-19 9 -19 29v224h-1248q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf179;" horiz-adv-x="1408" d="M0 634q0 228 113 374q112 144 284 144q72 0 177 -30q104 -30 138 -30q45 0 143 34q102 34 173 34q119 0 213 -65q52 -36 104 -100q-79 -67 -114 -118q-65 -94 -65 -207q0 -124 69 -223t158 -126q-39 -125 -123 -250q-129 -196 -257 -196q-49 0 -140 32q-86 32 -151 32 q-61 0 -142 -33q-81 -34 -132 -34q-152 0 -301 259q-147 261 -147 503zM683 1131q3 149 78 257q74 107 250 148q1 -3 2.5 -11t2.5 -11q0 -4 0.5 -10t0.5 -10q0 -61 -29 -136q-30 -75 -93 -138q-54 -54 -108 -72q-37 -11 -104 -17z" />
+<glyph unicode="&#xf17a;" horiz-adv-x="1664" d="M0 -27v557h682v-651zM0 614v565l682 94v-659h-682zM757 -131v661h907v-786zM757 614v669l907 125v-794h-907z" />
+<glyph unicode="&#xf17b;" horiz-adv-x="1408" d="M0 337v430q0 42 30 72t73 30q42 0 72 -30t30 -72v-430q0 -43 -29.5 -73t-72.5 -30t-73 30t-30 73zM241 886q0 117 64 215.5t172 153.5l-71 131q-7 13 5 20q13 6 20 -6l72 -132q95 42 201 42t201 -42l72 132q7 12 20 6q12 -7 5 -20l-71 -131q107 -55 171 -153.5t64 -215.5 h-925zM245 184v666h918v-666q0 -46 -32 -78t-77 -32h-75v-227q0 -43 -30 -73t-73 -30t-73 30t-30 73v227h-138v-227q0 -43 -30 -73t-73 -30q-42 0 -72 30t-30 73l-1 227h-74q-46 0 -78 32t-32 78zM455 1092q0 -16 11 -27.5t27 -11.5t27.5 11.5t11.5 27.5t-11.5 27.5 t-27.5 11.5t-27 -11.5t-11 -27.5zM876 1092q0 -16 11.5 -27.5t27.5 -11.5t27 11.5t11 27.5t-11 27.5t-27 11.5t-27.5 -11.5t-11.5 -27.5zM1203 337v430q0 43 30 72.5t72 29.5q43 0 73 -29.5t30 -72.5v-430q0 -43 -30 -73t-73 -30q-42 0 -72 30t-30 73z" />
+<glyph unicode="&#xf17c;" d="M11 -115q-10 23 7 66.5t18 54.5q1 16 -4 40t-10 42.5t-4.5 36.5t10.5 27q14 12 57 14t60 12q30 18 42 35t12 51q21 -73 -32 -106q-32 -20 -83 -15q-34 3 -43 -10q-13 -15 5 -57q2 -6 8 -18t8.5 -18t4.5 -17t1 -22q0 -15 -17 -49t-14 -48q3 -17 37 -26q20 -6 84.5 -18.5 t99.5 -20.5q24 -6 74 -22t82.5 -23t55.5 -4q43 6 64.5 28t23 48t-7.5 58.5t-19 52t-20 36.5q-121 190 -169 242q-68 74 -113 40q-11 -9 -15 15q-3 16 -2 38q1 29 10 52t24 47t22 42q8 21 26.5 72t29.5 78t30 61t39 54q110 143 124 195q-12 112 -16 310q-2 90 24 151.5 t106 104.5q39 21 104 21q53 1 106 -13.5t89 -41.5q57 -42 91.5 -121.5t29.5 -147.5q-5 -95 30 -214q34 -113 133 -218q55 -59 99.5 -163t59.5 -191q8 -49 5 -84.5t-12 -55.5t-20 -22q-10 -2 -23.5 -19t-27 -35.5t-40.5 -33.5t-61 -14q-18 1 -31.5 5t-22.5 13.5t-13.5 15.5 t-11.5 20.5t-9 19.5q-22 37 -41 30t-28 -49t7 -97q20 -70 1 -195q-10 -65 18 -100.5t73 -33t85 35.5q59 49 89.5 66.5t103.5 42.5q53 18 77 36.5t18.5 34.5t-25 28.5t-51.5 23.5q-33 11 -49.5 48t-15 72.5t15.5 47.5q1 -31 8 -56.5t14.5 -40.5t20.5 -28.5t21 -19t21.5 -13 t16.5 -9.5q20 -12 31 -24.5t12 -24t-2.5 -22.5t-15.5 -22t-23.5 -19.5t-30 -18.5t-31.5 -16.5t-32 -15.5t-27 -13q-38 -19 -85.5 -56t-75.5 -64q-17 -16 -68 -19.5t-89 14.5q-18 9 -29.5 23.5t-16.5 25.5t-22 19.5t-47 9.5q-44 1 -130 1q-19 0 -57 -1.5t-58 -2.5 q-44 -1 -79.5 -15t-53.5 -30t-43.5 -28.5t-53.5 -11.5q-29 1 -111 31t-146 43q-19 4 -51 9.5t-50 9t-39.5 9.5t-33.5 14.5t-17 19.5zM321 495q-36 -65 10 -166q5 -12 25 -28t24 -20q20 -23 104 -90.5t93 -76.5q16 -15 17.5 -38t-14 -43t-45.5 -23q8 -15 29 -44.5t28 -54 t7 -70.5q46 24 7 92q-4 8 -10.5 16t-9.5 12t-2 6q3 5 13 9.5t20 -2.5q46 -52 166 -36q133 15 177 87q23 38 34 30q12 -6 10 -52q-1 -25 -23 -92q-9 -23 -6 -37.5t24 -15.5q3 19 14.5 77t13.5 90q2 21 -6.5 73.5t-7.5 97t23 70.5q15 18 51 18q1 37 34.5 53t72.5 10.5 t60 -22.5q0 18 -55 42q4 15 7.5 27.5t5 26t3 21.5t0.5 22.5t-1 19.5t-3.5 22t-4 20.5t-5 25t-5.5 26.5q-10 48 -47 103t-72 75q24 -20 57 -83q87 -162 54 -278q-11 -40 -50 -42q-31 -4 -38.5 18.5t-8 83.5t-11.5 107q-9 39 -19.5 69t-19.5 
45.5t-15.5 24.5t-13 15t-7.5 7 q-14 62 -31 103t-29.5 56t-23.5 33t-15 40q-4 21 6 53.5t4.5 49.5t-44.5 25q-15 3 -44.5 18t-35.5 16q-8 1 -11 26t8 51t36 27q37 3 51 -30t4 -58q-11 -19 -2 -26.5t30 -0.5q13 4 13 36v37q-5 30 -13.5 50t-21 30.5t-23.5 15t-27 7.5q-107 -8 -89 -134q0 -15 -1 -15 q-9 9 -29.5 10.5t-33 -0.5t-15.5 5q1 57 -16 90t-45 34q-27 1 -41.5 -27.5t-16.5 -59.5q-1 -15 3.5 -37t13 -37.5t15.5 -13.5q10 3 16 14q4 9 -7 8q-7 0 -15.5 14.5t-9.5 33.5q-1 22 9 37t34 14q17 0 27 -21t9.5 -39t-1.5 -22q-22 -15 -31 -29q-8 -12 -27.5 -23.5 t-20.5 -12.5q-13 -14 -15.5 -27t7.5 -18q14 -8 25 -19.5t16 -19t18.5 -13t35.5 -6.5q47 -2 102 15q2 1 23 7t34.5 10.5t29.5 13t21 17.5q9 14 20 8q5 -3 6.5 -8.5t-3 -12t-16.5 -9.5q-20 -6 -56.5 -21.5t-45.5 -19.5q-44 -19 -70 -23q-25 -5 -79 2q-10 2 -9 -2t17 -19 q25 -23 67 -22q17 1 36 7t36 14t33.5 17.5t30 17t24.5 12t17.5 2.5t8.5 -11q0 -2 -1 -4.5t-4 -5t-6 -4.5t-8.5 -5t-9 -4.5t-10 -5t-9.5 -4.5q-28 -14 -67.5 -44t-66.5 -43t-49 -1q-21 11 -63 73q-22 31 -25 22q-1 -3 -1 -10q0 -25 -15 -56.5t-29.5 -55.5t-21 -58t11.5 -63 q-23 -6 -62.5 -90t-47.5 -141q-2 -18 -1.5 -69t-5.5 -59q-8 -24 -29 -3q-32 31 -36 94q-2 28 4 56q4 19 -1 18zM372 630q4 -1 12.5 7t12.5 18q1 3 2 7t2 6t1.5 4.5t0.5 4v3t-1 2.5t-3 2q-4 1 -6 -3t-4.5 -12.5t-5.5 -13.5t-10 -13q-7 -10 -1 -12zM603 1190q2 -5 5 -6 q10 0 7 -15q-3 -20 8 -20q3 0 3 3q3 17 -2.5 30t-11.5 15q-9 2 -9 -7zM634 1110q0 12 19 15h10q-11 -1 -15.5 -10.5t-8.5 -9.5q-5 -1 -5 5zM721 1122q24 11 32 -2q3 -6 -3 -9q-4 -1 -11.5 6.5t-17.5 4.5zM835 1196l4 -2q14 -4 18 -31q0 -3 8 2l2 3q0 11 -5 19.5t-11 12.5 t-9 3q-14 -1 -7 -7zM851 1381.5q-1 -2.5 3 -8.5q4 -3 8 0t11 9t15 9q1 1 9 1t15 2t9 7q0 2 -2.5 5t-9 7t-9.5 6q-15 15 -24 15q-9 -1 -11.5 -7.5t-1 -13t-0.5 -12.5q-1 -4 -6 -10.5t-6 -9zM981 1002q-14 -16 7 -43.5t39 -31.5q9 -1 14.5 8t3.5 20q-2 8 -6.5 11.5t-13 5 t-14.5 5.5q-5 3 -9.5 8t-7 8t-5.5 6.5t-4 4t-4 -1.5z" />
+<glyph unicode="&#xf17d;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM112 640q0 -124 44 -236.5t124 -201.5q50 89 123.5 166.5t142.5 124.5t130.5 81t99.5 48l37 13 q4 1 13 3.5t13 4.5q-21 49 -53 111q-311 -93 -673 -93q-1 -7 -1 -21zM126 775q302 0 606 80q-120 213 -244 378q-138 -65 -234 -186t-128 -272zM350 134q184 -150 418 -150q132 0 256 52q-42 241 -140 498h-2l-2 -1q-16 -6 -43 -16.5t-101 -49t-137 -82t-131 -114.5 t-103 -148zM609 1276q1 1 2 1q-1 0 -2 -1zM613 1277q131 -170 246 -382q69 26 130 60.5t96.5 61.5t65.5 57t37.5 40.5l12.5 17.5q-185 164 -433 164q-76 0 -155 -19zM909 797q25 -53 44 -95q2 -6 6.5 -17.5t7.5 -16.5q36 5 74.5 7t73.5 2t69 -1.5t64 -4t56.5 -5.5t48 -6.5 t36.5 -6t25 -4.5l10 -2q-3 232 -149 410l-1 -1q-9 -12 -19 -24.5t-43.5 -44.5t-71 -60.5t-100 -65t-131.5 -64.5zM1007 565q87 -239 128 -469q111 75 185 189.5t96 250.5q-210 60 -409 29z" />
+<glyph unicode="&#xf17e;" d="M0 1024q0 159 112.5 271.5t271.5 112.5q130 0 234 -80q77 16 150 16q143 0 273.5 -55.5t225 -150t150 -225t55.5 -273.5q0 -73 -16 -150q80 -104 80 -234q0 -159 -112.5 -271.5t-271.5 -112.5q-130 0 -234 80q-77 -16 -150 -16q-143 0 -273.5 55.5t-225 150t-150 225 t-55.5 273.5q0 73 16 150q-80 104 -80 234zM376 399q0 -92 122 -157.5t291 -65.5q73 0 140 18.5t122.5 53.5t88.5 93.5t33 131.5q0 50 -19.5 91.5t-48.5 68.5t-73 49t-82.5 34t-87.5 23l-104 24q-30 7 -44 10.5t-35 11.5t-30 16t-16.5 21t-7.5 30q0 77 144 77q43 0 77 -12 t54 -28.5t38 -33.5t40 -29t48 -12q47 0 75.5 32t28.5 77q0 55 -56 99.5t-142 67.5t-182 23q-68 0 -132 -15.5t-119.5 -47t-89 -87t-33.5 -128.5q0 -61 19 -106.5t56 -75.5t80 -48.5t103 -32.5l146 -36q90 -22 112 -36q32 -20 32 -60q0 -39 -40 -64.5t-105 -25.5 q-51 0 -91.5 16t-65 38.5t-45.5 45t-46 38.5t-54 16q-50 0 -75.5 -30t-25.5 -75z" />
+<glyph unicode="&#xf180;" horiz-adv-x="1664" d="M0 640q0 75 53 128l587 587q53 53 128 53t128 -53l265 -265l-398 -399l-188 188q-42 42 -99 42q-59 0 -100 -41l-120 -121q-42 -40 -42 -99q0 -58 42 -100l406 -408q30 -28 67 -37l6 -4h28q60 0 99 41l619 619l2 -3q53 -53 53 -128t-53 -128l-587 -587 q-52 -53 -127.5 -53t-128.5 53l-587 587q-53 53 -53 128zM302 660q0 21 14 35l121 120q13 15 35 15t36 -15l252 -252l574 575q15 15 36 15t36 -15l120 -120q14 -15 14 -36t-14 -36l-730 -730q-17 -15 -37 -15q-4 0 -6 1q-18 2 -30 14l-407 408q-14 15 -14 36z" />
+<glyph unicode="&#xf181;" d="M0 -64v1408q0 26 19 45t45 19h1408q26 0 45 -19t19 -45v-1408q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45zM160 192q0 -14 9 -23t23 -9h480q14 0 23 9t9 23v1024q0 14 -9 23t-23 9h-480q-14 0 -23 -9t-9 -23v-1024zM832 576q0 -14 9 -23t23 -9h480q14 0 23 9t9 23 v640q0 14 -9 23t-23 9h-480q-14 0 -23 -9t-9 -23v-640z" />
+<glyph unicode="&#xf182;" horiz-adv-x="1280" d="M0 480q0 29 16 53l256 384q73 107 176 107h384q103 0 176 -107l256 -384q16 -24 16 -53q0 -40 -28 -68t-68 -28q-51 0 -80 43l-227 341h-45v-132l247 -411q9 -15 9 -33q0 -26 -19 -45t-45 -19h-192v-272q0 -46 -33 -79t-79 -33h-160q-46 0 -79 33t-33 79v272h-192 q-26 0 -45 19t-19 45q0 18 9 33l247 411v132h-45l-227 -341q-29 -43 -80 -43q-40 0 -68 28t-28 68zM416 1280q0 93 65.5 158.5t158.5 65.5t158.5 -65.5t65.5 -158.5t-65.5 -158.5t-158.5 -65.5t-158.5 65.5t-65.5 158.5z" />
+<glyph unicode="&#xf183;" horiz-adv-x="1024" d="M0 416v416q0 80 56 136t136 56h640q80 0 136 -56t56 -136v-416q0 -40 -28 -68t-68 -28t-68 28t-28 68v352h-64v-912q0 -46 -33 -79t-79 -33t-79 33t-33 79v464h-64v-464q0 -46 -33 -79t-79 -33t-79 33t-33 79v912h-64v-352q0 -40 -28 -68t-68 -28t-68 28t-28 68z M288 1280q0 93 65.5 158.5t158.5 65.5t158.5 -65.5t65.5 -158.5t-65.5 -158.5t-158.5 -65.5t-158.5 65.5t-65.5 158.5z" />
+<glyph unicode="&#xf184;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM399.5 766q8.5 -37 24.5 -59l349 -473l350 473q16 22 24.5 59t-6 85t-61.5 79q-40 26 -83 25.5 t-73.5 -17.5t-54.5 -45q-36 -40 -96 -40q-59 0 -95 40q-24 28 -54.5 45t-73.5 17.5t-84 -25.5q-46 -31 -60.5 -79t-6 -85z" />
+<glyph unicode="&#xf185;" horiz-adv-x="1792" d="M44 363q-5 17 4 29l180 248l-180 248q-9 13 -4 29q4 15 20 20l292 96v306q0 16 13 26q15 10 29 4l292 -94l180 248q9 12 26 12t26 -12l180 -248l292 94q14 6 29 -4q13 -10 13 -26v-306l292 -96q16 -5 20 -20q5 -16 -4 -29l-180 -248l180 -248q9 -12 4 -29q-4 -15 -20 -20 l-292 -96v-306q0 -16 -13 -26q-15 -10 -29 -4l-292 94l-180 -248q-10 -13 -26 -13t-26 13l-180 248l-292 -94q-14 -6 -29 4q-13 10 -13 26v306l-292 96q-16 5 -20 20zM320 640q0 -117 45.5 -223.5t123 -184t184 -123t223.5 -45.5t223.5 45.5t184 123t123 184t45.5 223.5 t-45.5 223.5t-123 184t-184 123t-223.5 45.5t-223.5 -45.5t-184 -123t-123 -184t-45.5 -223.5z" />
+<glyph unicode="&#xf186;" d="M0 640q0 153 57.5 292.5t156 241.5t235.5 164.5t290 68.5q44 2 61 -39q18 -41 -15 -72q-86 -78 -131.5 -181.5t-45.5 -218.5q0 -148 73 -273t198 -198t273 -73q118 0 228 51q41 18 72 -13q14 -14 17.5 -34t-4.5 -38q-94 -203 -283.5 -324.5t-413.5 -121.5q-156 0 -298 61 t-245 164t-164 245t-61 298zM128 640q0 -130 51 -248.5t136.5 -204t204 -136.5t248.5 -51q144 0 273.5 61.5t220.5 171.5q-54 -9 -110 -9q-182 0 -337 90t-245 245t-90 337q0 192 104 357q-201 -60 -328.5 -229t-127.5 -384z" />
+<glyph unicode="&#xf187;" horiz-adv-x="1792" d="M64 1088v256q0 26 19 45t45 19h1536q26 0 45 -19t19 -45v-256q0 -26 -19 -45t-45 -19h-1536q-26 0 -45 19t-19 45zM128 -64v960q0 26 19 45t45 19h1408q26 0 45 -19t19 -45v-960q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45zM704 704q0 -26 19 -45t45 -19h256 q26 0 45 19t19 45t-19 45t-45 19h-256q-26 0 -45 -19t-19 -45z" />
+<glyph unicode="&#xf188;" horiz-adv-x="1664" d="M32 576q0 26 19 45t45 19h224v294l-173 173q-19 19 -19 45t19 45t45 19t45 -19l173 -173h844l173 173q19 19 45 19t45 -19t19 -45t-19 -45l-173 -173v-294h224q26 0 45 -19t19 -45t-19 -45t-45 -19h-224q0 -171 -67 -290l208 -209q19 -19 19 -45t-19 -45q-18 -19 -45 -19 t-45 19l-198 197q-5 -5 -15 -13t-42 -28.5t-65 -36.5t-82 -29t-97 -13v896h-128v-896q-51 0 -101.5 13.5t-87 33t-66 39t-43.5 32.5l-15 14l-183 -207q-20 -21 -48 -21q-24 0 -43 16q-19 18 -20.5 44.5t15.5 46.5l202 227q-58 114 -58 274h-224q-26 0 -45 19t-19 45z M512 1152q0 133 93.5 226.5t226.5 93.5t226.5 -93.5t93.5 -226.5h-640z" />
+<glyph unicode="&#xf189;" horiz-adv-x="1920" d="M-1 1004q0 11 3 16l4 6q15 19 57 19l274 2q12 -2 23 -6.5t16 -8.5l5 -3q16 -11 24 -32q20 -50 46 -103.5t41 -81.5l16 -29q29 -60 56 -104t48.5 -68.5t41.5 -38.5t34 -14t27 5q2 1 5 5t12 22t13.5 47t9.5 81t0 125q-2 40 -9 73t-14 46l-6 12q-25 34 -85 43q-13 2 5 24 q17 19 38 30q53 26 239 24q82 -1 135 -13q20 -5 33.5 -13.5t20.5 -24t10.5 -32t3.5 -45.5t-1 -55t-2.5 -70.5t-1.5 -82.5q0 -11 -1 -42t-0.5 -48t3.5 -40.5t11.5 -39t22.5 -24.5q8 -2 17 -4t26 11t38 34.5t52 67t68 107.5q60 104 107 225q4 10 10 17.5t11 10.5l4 3l5 2.5 t13 3t20 0.5l288 2q39 5 64 -2.5t31 -16.5l6 -10q23 -64 -150 -294q-24 -32 -65 -85q-78 -100 -90 -131q-17 -41 14 -81q17 -21 81 -82h1l1 -1l1 -1l2 -2q141 -131 191 -221q3 -5 6.5 -12.5t7 -26.5t-0.5 -34t-25 -27.5t-59 -12.5l-256 -4q-24 -5 -56 5t-52 22l-20 12 q-30 21 -70 64t-68.5 77.5t-61 58t-56.5 15.5q-3 -1 -8 -3.5t-17 -14.5t-21.5 -29.5t-17 -52t-6.5 -77.5q0 -15 -3.5 -27.5t-7.5 -18.5l-4 -5q-18 -19 -53 -22h-115q-71 -4 -146 16.5t-131.5 53t-103 66t-70.5 57.5l-25 24q-10 10 -27.5 30t-71.5 91t-106 151t-122.5 211 t-130.5 272q-6 16 -6 27z" />
+<glyph unicode="&#xf18a;" horiz-adv-x="1792" d="M0 391q0 115 69.5 245t197.5 258q169 169 341.5 236t246.5 -7q65 -64 20 -209q-4 -14 -1 -20t10 -7t14.5 0.5t13.5 3.5l6 2q139 59 246 59t153 -61q45 -63 0 -178q-2 -13 -4.5 -20t4.5 -12.5t12 -7.5t17 -6q57 -18 103 -47t80 -81.5t34 -116.5q0 -68 -37 -139.5 t-109 -137t-168.5 -117.5t-226 -83t-270.5 -31t-275 33.5t-240.5 93t-171.5 151t-65 199.5zM181 320q9 -96 89 -170t208.5 -109t274.5 -21q223 23 369.5 141.5t132.5 264.5q-9 96 -89 170t-208.5 109t-274.5 21q-223 -23 -369.5 -141.5t-132.5 -264.5zM413.5 230.5 q-40.5 92.5 6.5 187.5q47 93 151.5 139t210.5 19q111 -29 158.5 -119.5t2.5 -190.5q-45 -102 -158 -150t-224 -12q-107 34 -147.5 126.5zM495 257.5q9 -34.5 43 -50.5t74.5 -2.5t62.5 47.5q21 34 11 69t-45 50q-34 14 -73 1t-60 -46q-22 -34 -13 -68.5zM705 399 q-17 -31 13 -45q14 -5 29 0.5t22 18.5q8 13 3.5 26.5t-17.5 18.5q-14 5 -28.5 -0.5t-21.5 -18.5zM1165 1274q-6 28 9.5 51.5t43.5 29.5q123 26 244 -11.5t208 -134.5q87 -96 112.5 -222.5t-13.5 -241.5q-9 -27 -34 -40t-52 -4t-40 34t-5 52q28 82 10 172t-80 158 q-62 69 -148 95.5t-173 8.5q-28 -6 -52 9.5t-30 43.5zM1224 1047q-5 24 8 44.5t37 25.5q60 13 119 -5.5t101 -65.5t54.5 -108.5t-6.5 -117.5q-8 -23 -29.5 -34t-44.5 -4q-23 8 -34 29.5t-4 44.5q20 63 -24 111t-107 35q-24 -5 -45 8t-25 37z" />
+<glyph unicode="&#xf18b;" d="M0 638q0 187 83.5 349.5t229.5 269.5t325 137v-485q0 -252 -126.5 -459.5t-330.5 -306.5q-181 215 -181 495zM398 -34q138 87 235.5 211t131.5 268q35 -144 132.5 -268t235.5 -211q-171 -94 -368 -94q-196 0 -367 94zM898 909v485q179 -30 325 -137t229.5 -269.5 t83.5 -349.5q0 -280 -181 -495q-204 99 -330.5 306.5t-126.5 459.5z" />
+<glyph unicode="&#xf18c;" horiz-adv-x="1408" d="M0 -211q0 19 13 31.5t32 12.5q173 1 322.5 107.5t251.5 294.5q-36 -14 -72 -23t-83 -13t-91 2.5t-93 28.5t-92 59t-84.5 100t-74.5 146q114 47 214 57t167.5 -7.5t124.5 -56.5t88.5 -77t56.5 -82q53 131 79 291q-7 -1 -18 -2.5t-46.5 -2.5t-69.5 0.5t-81.5 10t-88.5 23 t-84 42.5t-75 65t-54.5 94.5t-28.5 127.5q70 28 133.5 36.5t112.5 -1t92 -30t73.5 -50t56 -61t42 -63t27.5 -56t16 -39.5l4 -16q12 122 12 195q-8 6 -21.5 16t-49 44.5t-63.5 71.5t-54 93t-33 112.5t12 127t70 138.5q73 -25 127.5 -61.5t84.5 -76.5t48 -85t20.5 -89 t-0.5 -85.5t-13 -76.5t-19 -62t-17 -42l-7 -15q1 -5 1 -50.5t-1 -71.5q3 7 10 18.5t30.5 43t50.5 58t71 55.5t91.5 44.5t112 14.5t132.5 -24q-2 -78 -21.5 -141.5t-50 -104.5t-69.5 -71.5t-81.5 -45.5t-84.5 -24t-80 -9.5t-67.5 1t-46.5 4.5l-17 3q-23 -147 -73 -283 q6 7 18 18.5t49.5 41t77.5 52.5t99.5 42t117.5 20t129 -23.5t137 -77.5q-32 -80 -76 -138t-91 -88.5t-99 -46.5t-101.5 -14.5t-96.5 8.5t-86.5 22t-69.5 27.5t-46 22.5l-17 10q-113 -228 -289.5 -359.5t-384.5 -132.5q-19 0 -32 13t-13 32z" />
+<glyph unicode="&#xf18d;" horiz-adv-x="1280" d="M21 217v66h1238v-66q0 -85 -57.5 -144.5t-138.5 -59.5h-57l-260 -269v269h-529q-81 0 -138.5 59.5t-57.5 144.5zM21 354v255h1238v-255h-1238zM21 682v255h1238v-255h-1238zM21 1010v67q0 84 57.5 143.5t138.5 59.5h846q81 0 138.5 -59.5t57.5 -143.5v-67h-1238z" />
+<glyph unicode="&#xf18e;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM384 544v192q0 13 9.5 22.5t22.5 9.5h352v192q0 14 9 23t23 9q12 0 24 -10l319 -319q9 -9 9 -23t-9 -23l-320 -320q-9 -9 -23 -9q-13 0 -22.5 9.5t-9.5 22.5v192h-352q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf190;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM384 640q0 14 9 23l320 320q9 9 23 9q13 0 22.5 -9.5t9.5 -22.5v-192h352q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-352v-192q0 -14 -9 -23t-23 -9q-12 0 -24 10l-319 319q-9 9 -9 23z" />
+<glyph unicode="&#xf191;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 160q0 -13 9.5 -22.5t22.5 -9.5h960q13 0 22.5 9.5t9.5 22.5v960q0 13 -9.5 22.5t-22.5 9.5h-960 q-13 0 -22.5 -9.5t-9.5 -22.5v-960zM448 640q0 33 27 52l448 320q17 12 37 12q26 0 45 -19t19 -45v-640q0 -26 -19 -45t-45 -19q-20 0 -37 12l-448 320q-27 19 -27 52z" />
+<glyph unicode="&#xf192;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM512 640q0 106 75 181t181 75t181 -75t75 -181t-75 -181t-181 -75t-181 75t-75 181z" />
+<glyph unicode="&#xf193;" horiz-adv-x="1664" d="M0 320q0 181 104.5 330t274.5 211l17 -131q-122 -54 -195 -165.5t-73 -244.5q0 -185 131.5 -316.5t316.5 -131.5q126 0 232.5 65t165 175.5t49.5 236.5l102 -204q-58 -179 -210 -290t-339 -111q-156 0 -288.5 77.5t-210 210t-77.5 288.5zM416 1348q-2 16 6 42 q14 51 57 82.5t97 31.5q66 0 113 -47t47 -113q0 -69 -52 -117.5t-120 -41.5l37 -289h423v-128h-407l16 -128h455q40 0 57 -35l228 -455l198 99l58 -114l-256 -128q-13 -7 -29 -7q-40 0 -57 35l-239 477h-472q-24 0 -42.5 16.5t-21.5 40.5z" />
+<glyph unicode="&#xf194;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM128 806q16 -8 25.5 -26t21.5 -20q21 -3 54.5 8.5t58 10.5t41.5 -30q11 -18 18.5 -38.5t15 -48t12.5 -40.5 q17 -46 53 -187q36 -146 57 -197q42 -99 103 -125q43 -12 85 -1.5t76 31.5q131 77 250 237q104 139 172.5 292.5t82.5 226.5q16 85 -21 132q-52 65 -187 45q-17 -3 -41 -12.5t-57.5 -30.5t-64.5 -48.5t-59.5 -70t-44.5 -91.5q80 7 113.5 -16t26.5 -99q-5 -52 -52 -143 q-43 -78 -71 -99q-44 -32 -87 14q-23 24 -37.5 64.5t-19 73t-10 84t-8.5 71.5q-23 129 -34 164q-12 37 -35.5 69t-50.5 40q-57 16 -127 -25q-54 -32 -136.5 -106t-122.5 -102v-7z" />
+<glyph unicode="&#xf195;" horiz-adv-x="1152" d="M0 608v128q0 23 23 31l233 71v93l-215 -66q-3 -1 -9 -1q-10 0 -19 6q-13 10 -13 26v128q0 23 23 31l233 71v250q0 14 9 23t23 9h160q14 0 23 -9t9 -23v-181l375 116q15 5 28 -5t13 -26v-128q0 -23 -23 -31l-393 -121v-93l375 116q15 5 28 -5t13 -26v-128q0 -23 -23 -31 l-393 -121v-487q188 13 318 151t130 328q0 14 9 23t23 9h160q14 0 23 -9t9 -23q0 -191 -94.5 -353t-256.5 -256.5t-353 -94.5h-160q-14 0 -23 9t-9 23v611l-215 -66q-3 -1 -9 -1q-10 0 -19 6q-13 10 -13 26z" />
+<glyph unicode="&#xf196;" horiz-adv-x="1408" d="M0 288v832q0 119 84.5 203.5t203.5 84.5h832q119 0 203.5 -84.5t84.5 -203.5v-832q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5zM128 288q0 -66 47 -113t113 -47h832q66 0 113 47t47 113v832q0 66 -47 113t-113 47h-832q-66 0 -113 -47 t-47 -113v-832zM256 672v64q0 14 9 23t23 9h352v352q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-352h352q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-352v-352q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v352h-352q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf197;" horiz-adv-x="2176" d="M0 576q0 12 38.5 20.5t96.5 10.5q-7 25 -7 49q0 33 9.5 56.5t22.5 23.5h64v64h128q158 0 268 -64h1113q42 -7 106.5 -18t80.5 -14q89 -15 150 -40.5t83.5 -47.5t22.5 -40t-22.5 -40t-83.5 -47.5t-150 -40.5q-16 -3 -80.5 -14t-106.5 -18h-1113q-110 -64 -268 -64h-128v64 h-64q-13 0 -22.5 23.5t-9.5 56.5q0 24 7 49q-58 2 -96.5 10.5t-38.5 20.5zM323 336h29q157 0 273 64h1015q-217 -38 -456 -80q-57 0 -113 -24t-83 -48l-28 -24l-288 -288q-26 -26 -70.5 -45t-89.5 -19h-96zM323 816l93 464h96q46 0 90 -19t70 -45l288 -288q4 -4 11 -10.5 t30.5 -23t48.5 -29t61.5 -23t72.5 -10.5l456 -80h-1015q-116 64 -273 64h-29zM1739 484l81 -30q68 48 68 122t-68 122l-81 -30q53 -36 53 -92t-53 -92z" />
+<glyph unicode="&#xf198;" horiz-adv-x="1664" d="M0 796q0 47 27.5 85t71.5 53l157 53l-53 159q-8 24 -8 47q0 60 42 102.5t102 42.5q47 0 85 -27t53 -72l54 -160l310 105l-54 160q-8 24 -8 47q0 59 42.5 102t101.5 43q47 0 85.5 -27.5t53.5 -71.5l53 -161l162 55q21 6 43 6q60 0 102.5 -39.5t42.5 -98.5q0 -45 -30 -81.5 t-74 -51.5l-157 -54l105 -316l164 56q24 8 46 8q62 0 103.5 -40.5t41.5 -101.5q0 -97 -93 -130l-172 -59l56 -167q7 -21 7 -47q0 -59 -42 -102t-101 -43q-47 0 -85.5 27t-53.5 72l-55 165l-310 -106l55 -164q8 -24 8 -47q0 -59 -42 -102t-102 -43q-47 0 -85 27t-53 72 l-55 163l-153 -53q-29 -9 -50 -9q-61 0 -101.5 40t-40.5 101q0 47 27.5 85t71.5 53l156 53l-105 313l-156 -54q-26 -8 -48 -8q-60 0 -101 40.5t-41 100.5zM620 811l105 -313l310 105l-105 315z" />
+<glyph unicode="&#xf199;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 352q0 -40 28 -68t68 -28h832q40 0 68 28t28 68v436q-31 -35 -64 -55q-34 -22 -132.5 -85t-151.5 -99 q-98 -69 -164 -69t-164 69q-46 32 -141.5 92.5t-142.5 92.5q-12 8 -33 27t-31 27v-436zM256 928q0 -37 30.5 -76.5t67.5 -64.5q47 -32 137.5 -89t129.5 -83q3 -2 17 -11.5t21 -14t21 -13t23.5 -13t21.5 -9.5t22.5 -7.5t20.5 -2.5t20.5 2.5t22.5 7.5t21.5 9.5t23.5 13t21 13 t21 14t17 11.5l267 174q35 23 66.5 62.5t31.5 73.5q0 41 -27.5 70t-68.5 29h-832q-40 0 -68 -28t-28 -68z" />
+<glyph unicode="&#xf19a;" horiz-adv-x="1792" d="M0 640q0 182 71 348t191 286t286 191t348 71t348 -71t286 -191t191 -286t71 -348t-71 -348t-191 -286t-286 -191t-348 -71t-348 71t-286 191t-191 286t-71 348zM41 640q0 -173 68 -331.5t182.5 -273t273 -182.5t331.5 -68t331.5 68t273 182.5t182.5 273t68 331.5 t-68 331.5t-182.5 273t-273 182.5t-331.5 68t-331.5 -68t-273 -182.5t-182.5 -273t-68 -331.5zM127 640q0 163 67 313l367 -1005q-196 95 -315 281t-119 411zM254 1062q105 160 274.5 253.5t367.5 93.5q147 0 280.5 -53t238.5 -149h-10q-55 0 -92 -40.5t-37 -95.5 q0 -12 2 -24t4 -21.5t8 -23t9 -21t12 -22.5t12.5 -21t14.5 -24t14 -23q63 -107 63 -212q0 -19 -2.5 -38.5t-10 -49.5t-11.5 -44t-17.5 -59t-17.5 -58l-76 -256l-278 826q46 3 88 8q19 2 26 18.5t-2.5 31t-28.5 13.5l-205 -10q-75 1 -202 10q-12 1 -20.5 -5t-11.5 -15 t-1.5 -18.5t9 -16.5t19.5 -8l80 -8l120 -328l-168 -504l-280 832q46 3 88 8q19 2 26 18.5t-2.5 31t-28.5 13.5l-205 -10q-7 0 -23 0.5t-26 0.5zM679 -97l230 670l237 -647q1 -6 5 -11q-126 -44 -255 -44q-112 0 -217 32zM1282 -24l235 678q59 169 59 276q0 42 -6 79 q95 -174 95 -369q0 -209 -104 -385.5t-279 -278.5z" />
+<glyph unicode="&#xf19b;" horiz-adv-x="1792" d="M0 455q0 140 100.5 263.5t275 205.5t391.5 108v-172q-217 -38 -356.5 -150t-139.5 -255q0 -152 154.5 -267t388.5 -145v1360l272 133v-1536l-272 -128q-228 20 -414 102t-293 208.5t-107 272.5zM1134 860v172q277 -33 481 -157l140 79l37 -390l-525 114l147 83 q-119 70 -280 99z" />
+<glyph unicode="&#xf19c;" horiz-adv-x="2048" d="M0 -128q0 26 20.5 45t48.5 19h1782q28 0 48.5 -19t20.5 -45v-128h-1920v128zM0 1024v128l960 384l960 -384v-128h-128q0 -26 -20.5 -45t-48.5 -19h-1526q-28 0 -48.5 19t-20.5 45h-128zM128 0v64q0 26 20.5 45t48.5 19h59v768h256v-768h128v768h256v-768h128v768h256 v-768h128v768h256v-768h59q28 0 48.5 -19t20.5 -45v-64h-1664z" />
+<glyph unicode="&#xf19d;" horiz-adv-x="2304" d="M0 1024q0 23 22 31l1120 352q4 1 10 1t10 -1l1120 -352q22 -8 22 -31t-22 -31l-1120 -352q-4 -1 -10 -1t-10 1l-652 206q-43 -34 -71 -111.5t-34 -178.5q63 -36 63 -109q0 -69 -58 -107l58 -433q2 -14 -8 -25q-9 -11 -24 -11h-192q-15 0 -24 11q-10 11 -8 25l58 433 q-58 38 -58 107q0 73 65 111q11 207 98 330l-333 104q-22 8 -22 31zM512 384l18 316l574 -181q22 -7 48 -7t48 7l574 181l18 -316q4 -69 -82 -128t-235 -93.5t-323 -34.5t-323 34.5t-235 93.5t-82 128z" />
+<glyph unicode="&#xf19e;" d="M109 1536q58 -15 108 -15q43 0 111 15q63 -111 133.5 -229.5t167 -276.5t138.5 -227q37 61 109.5 177.5t117.5 190t105 176t107 189.5q54 -14 107 -14q56 0 114 14q-28 -39 -60 -88.5t-49.5 -78.5t-56.5 -96t-49 -84q-146 -248 -353 -610l13 -707q-62 11 -105 11 q-41 0 -105 -11l13 707q-40 69 -168.5 295.5t-216.5 374.5t-181 287z" />
+<glyph unicode="&#xf1a0;" horiz-adv-x="1280" d="M111 182q0 81 44.5 150t118.5 115q131 82 404 100q-32 41 -47.5 73.5t-15.5 73.5q0 40 21 85q-46 -4 -68 -4q-148 0 -249.5 96.5t-101.5 244.5q0 82 36 159t99 131q76 66 182 98t218 32h417l-137 -88h-132q75 -63 113 -133t38 -160q0 -72 -24.5 -129.5t-59.5 -93 t-69.5 -65t-59 -61.5t-24.5 -66q0 -36 32 -70.5t77 -68t90.5 -73.5t77.5 -104t32 -142q0 -91 -49 -173q-71 -122 -209.5 -179.5t-298.5 -57.5q-132 0 -246.5 41.5t-172.5 137.5q-36 59 -36 131zM297 228q0 -56 23.5 -102t61 -75.5t87 -50t100 -29t101.5 -8.5q58 0 111.5 13 t99 39t73 73t27.5 109q0 25 -7 49t-14.5 42t-27 41.5t-29.5 35t-38.5 34.5t-36.5 29t-41.5 30t-36.5 26q-16 2 -49 2q-53 0 -104.5 -7t-107 -25t-97 -46t-68.5 -74.5t-27 -105.5zM403 1222q0 -46 10 -97.5t31.5 -103t52 -92.5t75 -67t96.5 -26q37 0 77.5 16.5t65.5 43.5 q53 56 53 159q0 59 -17 125.5t-48 129t-84 103.5t-117 41q-42 0 -82.5 -19.5t-66.5 -52.5q-46 -59 -46 -160z" />
+<glyph unicode="&#xf1a1;" horiz-adv-x="1984" d="M0 722q0 94 66 160t160 66q83 0 148 -55q248 158 592 164l134 423q4 14 17.5 21.5t28.5 4.5l347 -82q22 50 68.5 81t102.5 31q77 0 131.5 -54.5t54.5 -131.5t-54.5 -132t-131.5 -55q-76 0 -130.5 54t-55.5 131l-315 74l-116 -366q327 -14 560 -166q64 58 151 58 q94 0 160 -66t66 -160q0 -62 -31 -114t-83 -82q5 -33 5 -61q0 -121 -68.5 -230.5t-197.5 -193.5q-125 -82 -285.5 -125.5t-335.5 -43.5q-176 0 -336.5 43.5t-284.5 125.5q-129 84 -197.5 193t-68.5 231q0 29 5 66q-48 31 -77 81.5t-29 109.5zM77 722q0 -67 51 -111 q49 131 180 235q-36 25 -82 25q-62 0 -105.5 -43.5t-43.5 -105.5zM178 465q0 -101 59.5 -194t171.5 -166q116 -75 265.5 -115.5t313.5 -40.5t313.5 40.5t265.5 115.5q112 73 171.5 166t59.5 194t-59.5 193.5t-171.5 165.5q-116 75 -265.5 115.5t-313.5 40.5t-313.5 -40.5 t-265.5 -115.5q-112 -73 -171.5 -165.5t-59.5 -193.5zM555 572q0 57 41.5 98t97.5 41t96.5 -41t40.5 -98q0 -56 -40.5 -96t-96.5 -40q-57 0 -98 40t-41 96zM661 209.5q0 16.5 11 27.5t27 11t27 -11q77 -77 265 -77h2q188 0 265 77q11 11 27 11t27 -11t11 -27.5t-11 -27.5 q-99 -99 -319 -99h-2q-220 0 -319 99q-11 11 -11 27.5zM1153 572q0 57 41.5 98t97.5 41t96.5 -41t40.5 -98q0 -56 -40.5 -96t-96.5 -40q-57 0 -98 40t-41 96zM1555 1350q0 -45 32 -77t77 -32t77 32t32 77t-32 77t-77 32t-77 -32t-32 -77zM1672 843q131 -105 178 -238 q57 46 57 117q0 62 -43.5 105.5t-105.5 43.5q-49 0 -86 -28z" />
+<glyph unicode="&#xf1a2;" d="M0 193v894q0 133 94 227t226 94h896q132 0 226 -94t94 -227v-894q0 -133 -94 -227t-226 -94h-896q-132 0 -226 94t-94 227zM155 709q0 -37 19.5 -67.5t52.5 -45.5q-7 -25 -7 -54q0 -98 74 -181.5t201.5 -132t278.5 -48.5q150 0 277.5 48.5t201.5 132t74 181.5q0 27 -6 54 q35 14 57 45.5t22 70.5q0 51 -36 87.5t-87 36.5q-60 0 -98 -48q-151 107 -375 115l83 265l206 -49q1 -50 36.5 -85t84.5 -35q50 0 86 35.5t36 85.5t-36 86t-86 36q-36 0 -66 -20.5t-45 -53.5l-227 54q-9 2 -17.5 -2.5t-11.5 -14.5l-95 -302q-224 -4 -381 -113q-36 43 -93 43 q-51 0 -87 -36.5t-36 -87.5zM493 613q0 37 26 63t63 26t63 -26t26 -63t-26 -64t-63 -27t-63 27t-26 64zM560 375q0 11 8 18q7 7 17.5 7t17.5 -7q49 -51 172 -51h1h1q122 0 173 51q7 7 17.5 7t17.5 -7t7 -18t-7 -18q-65 -64 -208 -64h-1h-1q-143 0 -207 64q-8 7 -8 18z M882 613q0 37 26 63t63 26t63 -26t26 -63t-26 -64t-63 -27t-63 27t-26 64zM1143 1120q0 30 21 51t50 21q30 0 51 -21t21 -51q0 -29 -21 -50t-51 -21q-29 0 -50 21t-21 50z" />
+<glyph unicode="&#xf1a3;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM320 502q0 -82 57.5 -139t139.5 -57q81 0 138.5 56.5t57.5 136.5v280q0 19 13.5 33t33.5 14 q19 0 32.5 -14t13.5 -33v-54l60 -28l90 27v62q0 79 -58 135t-138 56t-138 -55.5t-58 -134.5v-283q0 -20 -14 -33.5t-33 -13.5t-32.5 13.5t-13.5 33.5v120h-151v-122zM806 500q0 -80 58 -137t139 -57t138.5 57t57.5 139v122h-150v-126q0 -20 -13.5 -33.5t-33.5 -13.5 q-19 0 -32.5 14t-13.5 33v123l-90 -26l-60 28v-123z" />
+<glyph unicode="&#xf1a4;" horiz-adv-x="1920" d="M0 336v266h328v-262q0 -43 30 -72.5t72 -29.5t72 29.5t30 72.5v620q0 171 126.5 292t301.5 121q176 0 302 -122t126 -294v-136l-195 -58l-131 61v118q0 42 -30 72t-72 30t-72 -30t-30 -72v-612q0 -175 -126 -299t-303 -124q-178 0 -303.5 125.5t-125.5 303.5zM1062 332 v268l131 -61l195 58v-270q0 -42 30 -71.5t72 -29.5t72 29.5t30 71.5v275h328v-266q0 -178 -125.5 -303.5t-303.5 -125.5q-177 0 -303 124.5t-126 300.5z" />
+<glyph unicode="&#xf1a5;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM64 640h704v-704h480q93 0 158.5 65.5t65.5 158.5v480h-704v704h-480q-93 0 -158.5 -65.5t-65.5 -158.5v-480z " />
+<glyph unicode="&#xf1a6;" horiz-adv-x="2048" d="M0 271v697h328v286h204v-983h-532zM205 435h123v369h-123v-369zM614 271h205v697h-205v-697zM614 1050h205v204h-205v-204zM901 26v163h328v82h-328v697h533v-942h-533zM1106 435h123v369h-123v-369zM1516 26v163h327v82h-327v697h532v-942h-532zM1720 435h123v369h-123 v-369z" />
+<glyph unicode="&#xf1a7;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM293 388l211 41v206q55 -19 116 -19q125 0 213.5 95t88.5 229t-88.5 229t-213.5 95q-74 0 -141 -36h-186v-840z M504 804v277q28 17 70 17q53 0 91 -45t38 -109t-38 -109.5t-91 -45.5q-43 0 -70 15zM636 -39l211 41v206q51 -19 117 -19q125 0 213 95t88 229t-88 229t-213 95q-20 0 -39 -3q-23 -78 -78 -136q-87 -95 -211 -101v-636zM847 377v277q28 17 70 17q53 0 91 -45.5t38 -109.5 t-38 -109t-91 -45q-43 0 -70 15z" />
+<glyph unicode="&#xf1a8;" horiz-adv-x="2038" d="M41 455q0 15 8.5 26.5t22.5 14.5l486 106q-8 14 -8 25t5.5 17.5t16 11.5t20 7t23 4.5t18.5 4.5q4 1 15.5 7.5t17.5 6.5q15 0 28 -16t20 -33q163 37 172 37q17 0 29.5 -11t12.5 -28q0 -15 -8.5 -26t-23.5 -14l-182 -40l-1 -16q-1 -26 81.5 -117.5t104.5 -91.5q47 0 119 80 t72 129q0 36 -23.5 53t-51 18.5t-51 11.5t-23.5 34q0 16 10 34l-68 19q43 44 43 117q0 26 -5 58q82 16 144 16q44 0 71.5 -1.5t48.5 -8.5t31 -13.5t20.5 -24.5t15.5 -33.5t17 -47.5t24 -60l50 25q-3 -40 -23 -60t-42.5 -21t-40 -6.5t-16.5 -20.5l1 -21q75 3 143.5 -20.5 t118 -58.5t101 -94.5t84 -108t75.5 -120.5q33 -56 78.5 -109t75.5 -80.5t99 -88.5q-48 -30 -108.5 -57.5t-138.5 -59t-114 -47.5q-44 37 -74 115t-43.5 164.5t-33 180.5t-42.5 168.5t-72.5 123t-122.5 48.5l-10 -2l-6 -4q4 -5 13 -14q6 -5 28 -23.5t25.5 -22t19 -18 t18 -20.5t11.5 -21t10.5 -27.5t4.5 -31t4 -40.5l1 -33q1 -26 -2.5 -57.5t-7.5 -52t-12.5 -58.5t-11.5 -53q-35 1 -101 -9.5t-98 -10.5q-39 0 -72 10q-2 16 -2 47q0 74 3 96q2 13 31.5 41.5t57 59t26.5 51.5q-24 2 -43 -24q-36 -53 -111.5 -99.5t-136.5 -46.5q-25 0 -75.5 63 t-106.5 139.5t-84 96.5q-6 4 -27 30q-482 -112 -513 -112q-16 0 -28 11t-12 27zM764 676q10 1 32.5 7t34.5 6q19 0 35 -10l-96 -20zM822 568l48 12l109 -177l-73 -48zM859 884q16 30 36 46.5t54 29.5t65.5 36t46 36.5t50 55t43.5 50.5q12 -9 28 -31.5t32 -36.5t38 -13l12 1 v-76l22 -1q247 95 371 190q28 21 50 39t42.5 37.5t33 31t29.5 34t24 31t24.5 37t23 38t27 47.5t29.5 53l7 9q-2 -53 -43 -139q-79 -165 -205 -264t-306 -142q-14 -3 -42 -7.5t-50 -9.5t-39 -14q3 -19 24.5 -46t21.5 -34q0 -11 -26 -30q-5 5 -13.5 15.5t-12 14.5t-10.5 11.5 t-10 10.5l-8 8t-8.5 7.5t-8 5t-8.5 4.5q-7 3 -14.5 5t-20.5 2.5t-22 0.5h-32.5h-37.5q-126 0 -217 -43zM1061 45h31l10 -83l-41 -12v95zM1061 -79q39 26 131.5 47.5t146.5 21.5q9 0 22.5 -15.5t28 -42.5t26 -50t24 -51t14.5 -33q-121 -45 -244 -45q-61 0 -125 11zM1116 29 q21 2 60.5 8.5t72 10t60.5 3.5h14q3 -15 3 -16q0 -7 -17.5 -14.5t-46 -13t-54 -9.5t-53.5 -7.5t-32 -4.5zM1947 1528l1 3l2 4l-1 -5zM1950 1535v1v-1zM1950 1535l1 1z" />
+<glyph unicode="&#xf1a9;" d="M0 520q0 89 19.5 172.5t49 145.5t70.5 118.5t78.5 94t78.5 69.5t64.5 46.5t42.5 24.5q14 8 51 26.5t54.5 28.5t48 30t60.5 44q36 28 58 72.5t30 125.5q129 -155 186 -193q44 -29 130 -68t129 -66q21 -13 39 -25t60.5 -46.5t76 -70.5t75 -95t69 -122t47 -148.5 t19.5 -177.5q0 -164 -62 -304.5t-166 -236t-242.5 -149.5t-290.5 -54t-293 57.5t-247.5 157t-170.5 241.5t-64 302zM333 256q-2 -112 74 -164q29 -20 62.5 -28.5t103.5 -8.5q57 0 132 32.5t134 71t120 70.5t93 31q26 -1 65 -31.5t71.5 -67t68 -67.5t55.5 -32q35 -3 58.5 14 t55.5 63q28 41 42.5 101t14.5 106q0 22 -5 44.5t-16.5 45t-34 36.5t-52.5 14q-33 0 -97 -41.5t-129 -83.5t-101 -42q-27 -1 -63.5 19t-76 49t-83.5 58t-100 49t-111 19q-115 -1 -197 -78.5t-84 -178.5zM685.5 -76q-0.5 -10 7.5 -20q34 -32 87.5 -46t102.5 -12.5t99 4.5 q41 4 84.5 20.5t65 30t28.5 20.5q12 12 7 29q-5 19 -24 5q-30 -22 -87 -39t-131 -17q-129 0 -193 49q-5 4 -13 4q-11 0 -26 -12q-7 -6 -7.5 -16zM852 31q9 -8 17.5 -4.5t31.5 23.5q3 2 10.5 8.5t10.5 8.5t10 7t11.5 7t12.5 5t15 4.5t16.5 2.5t20.5 1q27 0 44.5 -7.5 t23 -14.5t13.5 -22q10 -17 12.5 -20t12.5 1q23 12 14 34q-19 47 -39 61q-23 15 -76 15q-47 0 -71 -10q-29 -12 -78 -56q-26 -24 -12 -44z" />
+<glyph unicode="&#xf1aa;" d="M0 78q0 72 44.5 128t113.5 72q-22 86 1 173t88 152l12 12l151 -152l-11 -11q-37 -37 -37 -89t37 -90q37 -37 89 -37t89 37l30 30l151 152l161 160l151 -152l-160 -160l-151 -152l-30 -30q-65 -64 -151.5 -87t-171.5 -2q-16 -70 -72 -115t-129 -45q-85 0 -145 60.5 t-60 145.5zM2 1202q0 85 60 145.5t145 60.5q76 0 133.5 -49t69.5 -123q84 20 169.5 -3.5t149.5 -87.5l12 -12l-152 -152l-12 12q-37 37 -89 37t-89 -37t-37 -89.5t37 -89.5l29 -29l152 -152l160 -160l-151 -152l-161 160l-151 152l-30 30q-68 67 -90 159.5t5 179.5 q-70 15 -115 71t-45 129zM446 803l161 160l152 152l29 30q67 67 159 89.5t178 -3.5q11 75 68.5 126t135.5 51q85 0 145 -60.5t60 -145.5q0 -77 -51 -135t-127 -69q26 -85 3 -176.5t-90 -158.5l-12 -12l-151 152l12 12q37 37 37 89t-37 89t-89 37t-89 -37l-30 -30l-152 -152 l-160 -160zM776 793l152 152l160 -160l152 -152l29 -30q64 -64 87.5 -150.5t2.5 -171.5q76 -11 126.5 -68.5t50.5 -134.5q0 -85 -60 -145.5t-145 -60.5q-74 0 -131 47t-71 118q-86 -28 -179.5 -6t-161.5 90l-11 12l151 152l12 -12q37 -37 89 -37t89 37t37 89t-37 89l-30 30 l-152 152z" />
+<glyph unicode="&#xf1ab;" d="M0 -16v1078q3 9 4 10q5 6 20 11q106 35 149 50v384l558 -198q2 0 160.5 55t316 108.5t161.5 53.5q20 0 20 -21v-418l147 -47v-1079l-774 246q-14 -6 -375 -127.5t-368 -121.5q-13 0 -18 13q0 1 -1 3zM39 15l694 232v1032l-694 -233v-1031zM147 293q6 4 82 92 q21 24 85.5 115t78.5 118q17 30 51 98.5t36 77.5q-8 1 -110 -33q-8 -2 -27.5 -7.5t-34.5 -9.5t-17 -5q-2 -2 -2 -10.5t-1 -9.5q-5 -10 -31 -15q-23 -7 -47 0q-18 4 -28 21q-4 6 -5 23q6 2 24.5 5t29.5 6q58 16 105 32q100 35 102 35q10 2 43 19.5t44 21.5q9 3 21.5 8 t14.5 5.5t6 -0.5q2 -12 -1 -33q0 -2 -12.5 -27t-26.5 -53.5t-17 -33.5q-25 -50 -77 -131l64 -28q12 -6 74.5 -32t67.5 -28q4 -1 10.5 -25.5t4.5 -30.5q-1 -3 -12.5 0.5t-31.5 11.5l-20 9q-44 20 -87 49q-7 5 -41 31.5t-38 28.5q-67 -103 -134 -181q-81 -95 -105 -110 q-4 -2 -19.5 -4t-18.5 0zM268 933l1 3q3 -3 19.5 -5t26.5 0t58 16q36 12 55 14q17 0 21 -17q3 -15 -4 -28q-12 -23 -50 -38q-30 -12 -60 -12q-26 3 -49 26q-14 15 -18 41zM310 -116q0 8 5 13.5t13 5.5q4 0 18 -7.5t30.5 -16.5t20.5 -11q73 -37 159.5 -61.5t157.5 -24.5 q95 0 167 14.5t157 50.5q15 7 30.5 15.5t34 19t28.5 16.5l-43 73l158 -13l-54 -160l-40 66q-130 -83 -276 -108q-58 -12 -91 -12h-84q-79 0 -199.5 39t-183.5 85q-8 7 -8 16zM777 1294l573 -184v380zM885 453l102 -31l45 110l211 -65l37 -135l102 -31l-181 657l-100 31z M1071 630l76 185l63 -227z" />
+<glyph unicode="&#xf1ac;" horiz-adv-x="1792" d="M0 -96v1088q0 66 47 113t113 47h128q66 0 113 -47t47 -113v-1088q0 -66 -47 -113t-113 -47h-128q-66 0 -113 47t-47 113zM512 -96v1536q0 40 28 68t68 28h672q40 0 88 -20t76 -48l152 -152q28 -28 48 -76t20 -88v-163q58 -34 93 -93t35 -128v-768q0 -106 -75 -181 t-181 -75h-864q-66 0 -113 47t-47 113zM640 896h896v256h-160q-40 0 -68 28t-28 68v160h-640v-512zM736 0q0 -14 9 -23t23 -9h128q14 0 23 9t9 23v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-128zM736 256q0 -14 9 -23t23 -9h128q14 0 23 9t9 23v128q0 14 -9 23t-23 9 h-128q-14 0 -23 -9t-9 -23v-128zM736 512q0 -14 9 -23t23 -9h128q14 0 23 9t9 23v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-128zM992 0q0 -14 9 -23t23 -9h128q14 0 23 9t9 23v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-128zM992 256q0 -14 9 -23t23 -9h128 q14 0 23 9t9 23v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-128zM992 512q0 -14 9 -23t23 -9h128q14 0 23 9t9 23v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-128zM1248 0q0 -14 9 -23t23 -9h128q14 0 23 9t9 23v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23 v-128zM1248 256q0 -14 9 -23t23 -9h128q14 0 23 9t9 23v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-128zM1248 512q0 -14 9 -23t23 -9h128q14 0 23 9t9 23v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-128z" />
+<glyph unicode="&#xf1ad;" d="M0 -192v1664q0 26 19 45t45 19h1280q26 0 45 -19t19 -45v-1664q0 -26 -19 -45t-45 -19h-1280q-26 0 -45 19t-19 45zM256 160q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM256 416q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64 q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM256 672q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM256 928q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM256 1184q0 -14 9 -23 t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM512 96v-192q0 -14 9 -23t23 -9h320q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23zM512 416q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9 t-9 -23v-64zM512 672q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM512 928q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM512 1184q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64 q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM768 416q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM768 672q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM768 928q0 -14 9 -23 t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM768 1184q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM1024 160q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9 t-9 -23v-64zM1024 416q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM1024 672q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM1024 928q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64 q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM1024 1184q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64z" />
+<glyph unicode="&#xf1ae;" horiz-adv-x="1280" d="M64 1056q0 40 28 68t68 28t68 -28l228 -228h368l228 228q28 28 68 28t68 -28t28 -68t-28 -68l-292 -292v-824q0 -46 -33 -79t-79 -33t-79 33t-33 79v384h-64v-384q0 -46 -33 -79t-79 -33t-79 33t-33 79v824l-292 292q-28 28 -28 68zM416 1152q0 93 65.5 158.5t158.5 65.5 t158.5 -65.5t65.5 -158.5t-65.5 -158.5t-158.5 -65.5t-158.5 65.5t-65.5 158.5z" />
+<glyph unicode="&#xf1b0;" horiz-adv-x="1664" d="M0 724q0 80 42 139.5t119 59.5q76 0 141.5 -55.5t100.5 -134t35 -152.5q0 -80 -42 -139t-119 -59q-76 0 -141.5 55.5t-100.5 133.5t-35 152zM256 19q0 86 56 191.5t139.5 192.5t187.5 146t193 59q118 0 255 -97.5t229 -237t92 -254.5q0 -46 -17 -76.5t-48.5 -45 t-64.5 -20t-76 -5.5q-68 0 -187.5 45t-182.5 45q-66 0 -192.5 -44.5t-200.5 -44.5q-183 0 -183 146zM333 1163q0 60 19 113.5t63 92.5t105 39q77 0 138.5 -57.5t91.5 -135t30 -151.5q0 -60 -19 -113.5t-63 -92.5t-105 -39q-76 0 -138 57.5t-92 135.5t-30 151zM884 1064 q0 74 30 151.5t91.5 135t138.5 57.5q61 0 105 -39t63 -92.5t19 -113.5q0 -73 -30 -151t-92 -135.5t-138 -57.5q-61 0 -105 39t-63 92.5t-19 113.5zM1226 581q0 74 35 152.5t100.5 134t141.5 55.5q77 0 119 -59.5t42 -139.5q0 -74 -35 -152t-100.5 -133.5t-141.5 -55.5 q-77 0 -119 59t-42 139z" />
+<glyph unicode="&#xf1b1;" horiz-adv-x="768" d="M64 1008q0 128 42.5 249.5t117.5 200t160 78.5t160 -78.5t117.5 -200t42.5 -249.5q0 -145 -57 -243.5t-152 -135.5l45 -821q2 -26 -16 -45t-44 -19h-192q-26 0 -44 19t-16 45l45 821q-95 37 -152 135.5t-57 243.5z" />
+<glyph unicode="&#xf1b2;" horiz-adv-x="1792" d="M0 256v768q0 40 23 73t61 47l704 256q22 8 44 8t44 -8l704 -256q38 -14 61 -47t23 -73v-768q0 -35 -18 -65t-49 -47l-704 -384q-28 -16 -61 -16t-61 16l-704 384q-31 17 -49 47t-18 65zM134 1026l698 -254l698 254l-698 254zM896 -93l640 349v636l-640 -233v-752z" />
+<glyph unicode="&#xf1b3;" horiz-adv-x="2304" d="M0 96v416q0 38 21.5 70t56.5 48l434 186v400q0 38 21.5 70t56.5 48l448 192q23 10 50 10t50 -10l448 -192q35 -16 56.5 -48t21.5 -70v-400l434 -186q36 -16 57 -48t21 -70v-416q0 -36 -19 -67t-52 -47l-448 -224q-25 -14 -57 -14t-57 14l-448 224q-5 2 -7 4q-2 -2 -7 -4 l-448 -224q-25 -14 -57 -14t-57 14l-448 224q-33 16 -52 47t-19 67zM172 531l404 -173l404 173l-404 173zM640 -96l384 192v314l-384 -164v-342zM647 1219l441 -189l441 189l-441 189zM1152 651l384 165v266l-384 -164v-267zM1196 531l404 -173l404 173l-404 173zM1664 -96 l384 192v314l-384 -164v-342z" />
+<glyph unicode="&#xf1b4;" horiz-adv-x="2048" d="M0 22v1260h594q87 0 155 -14t126.5 -47.5t90 -96.5t31.5 -154q0 -181 -172 -263q114 -32 172 -115t58 -204q0 -75 -24.5 -136.5t-66 -103.5t-98.5 -71t-121 -42t-134 -13h-611zM277 236h296q205 0 205 167q0 180 -199 180h-302v-347zM277 773h281q78 0 123.5 36.5 t45.5 113.5q0 144 -190 144h-260v-294zM1137 477q0 208 130.5 345.5t336.5 137.5q138 0 240.5 -68t153 -179t50.5 -248q0 -17 -2 -47h-658q0 -111 57.5 -171.5t166.5 -60.5q63 0 122 32t76 87h221q-100 -307 -427 -307q-214 0 -340.5 132t-126.5 347zM1337 1073h511v124 h-511v-124zM1388 576h408q-18 195 -200 195q-90 0 -146 -52.5t-62 -142.5z" />
+<glyph unicode="&#xf1b5;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM128 254h382q117 0 197 57.5t80 170.5q0 158 -143 200q107 52 107 164q0 57 -19.5 96.5t-56.5 60.5t-79 29.5 t-97 8.5h-371v-787zM301 388v217h189q124 0 124 -113q0 -104 -128 -104h-185zM301 723v184h163q119 0 119 -90q0 -94 -106 -94h-176zM838 538q0 -135 79 -217t213 -82q205 0 267 191h-138q-11 -34 -47.5 -54t-75.5 -20q-68 0 -104 38t-36 107h411q1 10 1 30 q0 132 -74.5 220.5t-203.5 88.5q-128 0 -210 -86t-82 -216zM964 911v77h319v-77h-319zM996 600q4 56 39 89t91 33q113 0 124 -122h-254z" />
+<glyph unicode="&#xf1b6;" horiz-adv-x="2048" d="M0 764q0 86 61 146.5t146 60.5q73 0 130 -46t73 -117l783 -315q49 29 106 29q14 0 21 -1l173 248q1 114 82 194.5t195 80.5q115 0 196.5 -81t81.5 -196t-81.5 -196.5t-196.5 -81.5l-265 -194q-8 -80 -67.5 -133.5t-138.5 -53.5q-73 0 -130 46t-73 117l-783 315 q-51 -30 -106 -30q-85 0 -146 61t-61 147zM55 764q0 -64 44.5 -108.5t107.5 -44.5q11 0 33 4l-64 26q-33 14 -52.5 44.5t-19.5 66.5q0 50 35.5 85.5t85.5 35.5q20 0 41 -8v1l76 -31q-20 37 -56.5 59t-78.5 22q-63 0 -107.5 -44.5t-44.5 -107.5zM1164 244q19 -37 55.5 -59 t79.5 -22q63 0 107.5 44.5t44.5 107.5t-44.5 108t-107.5 45q-13 0 -33 -4q2 -1 20 -8t21.5 -8.5t18.5 -8.5t19 -10t16 -11t15.5 -13.5t11 -14.5t10 -18t5 -21t2.5 -25q0 -50 -35.5 -85.5t-85.5 -35.5q-14 0 -31.5 4.5t-29 9t-31.5 13.5t-28 12zM1584 767q0 -77 54.5 -131.5 t131.5 -54.5t132 54.5t55 131.5t-55 131.5t-132 54.5q-76 0 -131 -54.5t-55 -131.5zM1623 767q0 62 43.5 105.5t104.5 43.5t105 -44t44 -105t-43.5 -104.5t-105.5 -43.5q-61 0 -104.5 43.5t-43.5 104.5z" />
+<glyph unicode="&#xf1b7;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM128 693q0 -53 38 -91t92 -38q36 0 66 18l489 -197q10 -44 45.5 -73t81.5 -29q50 0 86.5 34t41.5 83l167 122 q71 0 122 50.5t51 122.5t-51 123t-122 51q-72 0 -122.5 -50.5t-51.5 -121.5l-108 -155q-2 0 -6.5 0.5t-6.5 0.5q-35 0 -67 -19l-489 197q-10 44 -45.5 73t-80.5 29q-54 0 -92 -38t-38 -92zM162 693q0 40 28 68t68 28q27 0 49.5 -14t34.5 -37l-48 19q-29 11 -56.5 -2 t-38.5 -41q-12 -29 -0.5 -57t39.5 -40v-1l40 -16q-14 -2 -20 -2q-40 0 -68 27.5t-28 67.5zM855 369q5 -2 47 -19q29 -12 58 0.5t41 41.5q11 29 -1 57.5t-41 40.5l-40 16q14 2 21 2q39 0 67 -27.5t28 -67.5t-28 -67.5t-67 -27.5q-59 0 -85 51zM1118 695q0 48 34 82t83 34 q48 0 82 -34t34 -82t-34 -82t-82 -34q-49 0 -83 34t-34 82zM1142 696q0 -39 27.5 -66t65.5 -27t65.5 27t27.5 66q0 38 -27.5 65.5t-65.5 27.5t-65.5 -27.5t-27.5 -65.5z" />
+<glyph unicode="&#xf1b8;" horiz-adv-x="1792" d="M16 970l433 -17l180 -379l-147 92q-63 -72 -111.5 -144.5t-72.5 -125t-39.5 -94.5t-18.5 -63l-4 -21l-190 357q-17 26 -18 56t6 47l8 18q35 63 114 188zM270.5 158q-3.5 28 4 65t12 55t21.5 64t19 53q78 -12 509 -28l-15 -368l-2 -22l-420 29q-36 3 -67 31.5t-47 65.5 q-11 27 -14.5 55zM294 1124l225 356q20 31 60 45t80 10q24 -2 48.5 -12t42 -21t41.5 -33t36 -34.5t36 -39.5t32 -35q-47 -63 -265 -435l-317 187zM782 1524l405 -1q31 3 58 -10.5t39 -28.5l11 -15q39 -61 112 -190l142 83l-220 -373l-419 20l151 86q-34 89 -75 166 t-75.5 123.5t-64.5 80t-47 46.5zM953 197l211 362l7 -173q170 -16 283 -5t170 33l56 22l-188 -359q-12 -29 -36.5 -46.5t-43.5 -20.5l-18 -4q-71 -7 -219 -12l8 -164zM1218 847l313 195l19 11l212 -363q18 -37 12.5 -76t-27.5 -74q-13 -20 -33 -37t-38 -28t-48.5 -22 t-47 -16t-51.5 -14t-46 -12q-34 72 -265 436z" />
+<glyph unicode="&#xf1b9;" horiz-adv-x="1984" d="M0 160v384q0 93 65.5 158.5t158.5 65.5h28l105 419q23 94 104 157.5t179 63.5h704q98 0 179 -63.5t104 -157.5l105 -419h28q93 0 158.5 -65.5t65.5 -158.5v-384q0 -14 -9 -23t-23 -9h-128v-128q0 -80 -56 -136t-136 -56t-136 56t-56 136v128h-928v-128q0 -80 -56 -136 t-136 -56t-136 56t-56 136v128h-96q-14 0 -23 9t-9 23zM160 448q0 -66 47 -113t113 -47t113 47t47 113t-47 113t-113 47t-113 -47t-47 -113zM516 768h952l-89 357q-2 8 -14 17.5t-21 9.5h-704q-9 0 -21 -9.5t-14 -17.5zM1472 448q0 -66 47 -113t113 -47t113 47t47 113 t-47 113t-113 47t-113 -47t-47 -113z" />
+<glyph unicode="&#xf1ba;" horiz-adv-x="1984" d="M0 32v384q0 93 65.5 158.5t158.5 65.5h28l105 419q23 94 104 157.5t179 63.5h128v224q0 14 9 23t23 9h448q14 0 23 -9t9 -23v-224h64q98 0 179 -63.5t104 -157.5l105 -419h28q93 0 158.5 -65.5t65.5 -158.5v-384q0 -14 -9 -23t-23 -9h-128v-64q0 -80 -56 -136t-136 -56 t-136 56t-56 136v64h-928v-64q0 -80 -56 -136t-136 -56t-136 56t-56 136v64h-96q-14 0 -23 9t-9 23zM160 320q0 -66 47 -113t113 -47t113 47t47 113t-47 113t-113 47t-113 -47t-47 -113zM516 640h952l-89 357q-2 8 -14 17.5t-21 9.5h-704q-9 0 -21 -9.5t-14 -17.5zM1472 320 q0 -66 47 -113t113 -47t113 47t47 113t-47 113t-113 47t-113 -47t-47 -113z" />
+<glyph unicode="&#xf1bb;" d="M32 64q0 26 19 45l402 403h-229q-26 0 -45 19t-19 45t19 45l402 403h-197q-26 0 -45 19t-19 45t19 45l384 384q19 19 45 19t45 -19l384 -384q19 -19 19 -45t-19 -45t-45 -19h-197l402 -403q19 -19 19 -45t-19 -45t-45 -19h-229l402 -403q19 -19 19 -45t-19 -45t-45 -19 h-462q1 -17 6 -87.5t5 -108.5q0 -25 -18 -42.5t-43 -17.5h-320q-25 0 -43 17.5t-18 42.5q0 38 5 108.5t6 87.5h-462q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf1bc;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM237 886q0 -31 20.5 -52t51.5 -21q11 0 40 8q133 37 307 37q159 0 309.5 -34t253.5 -95q21 -12 40 -12 q29 0 50.5 20.5t21.5 51.5q0 47 -40 70q-126 73 -293 110.5t-343 37.5q-204 0 -364 -47q-23 -7 -38.5 -25.5t-15.5 -48.5zM289 637q0 -25 17.5 -42.5t42.5 -17.5q7 0 37 8q122 33 251 33q279 0 488 -124q24 -13 38 -13q25 0 42.5 17.5t17.5 42.5q0 40 -35 61 q-237 141 -548 141q-153 0 -303 -42q-48 -13 -48 -64zM321 406q0 -20 13.5 -34.5t35.5 -14.5q5 0 37 8q132 27 243 27q226 0 397 -103q19 -11 33 -11q19 0 33 13.5t14 34.5q0 32 -30 51q-193 115 -447 115q-133 0 -287 -34q-42 -9 -42 -52z" />
+<glyph unicode="&#xf1bd;" d="M0 11v1258q0 58 40.5 98.5t98.5 40.5h1258q58 0 98.5 -40.5t40.5 -98.5v-1258q0 -58 -40.5 -98.5t-98.5 -40.5h-1258q-58 0 -98.5 40.5t-40.5 98.5zM71 11q0 -28 20 -48t48 -20h1258q28 0 48 20t20 48v1258q0 28 -20 48t-48 20h-1258q-28 0 -48 -20t-20 -48v-1258z M121 11v141l711 195l-212 439q4 1 12 2.5t12 1.5q170 32 303.5 21.5t221 -46t143.5 -94.5q27 -28 -25 -42q-64 -16 -256 -62l-97 198q-111 7 -240 -16l188 -387l533 145v-496q0 -7 -5.5 -12.5t-12.5 -5.5h-1258q-7 0 -12.5 5.5t-5.5 12.5zM121 709v560q0 7 5.5 12.5 t12.5 5.5h1258q7 0 12.5 -5.5t5.5 -12.5v-428q-85 30 -188 52q-294 64 -645 12l-18 -3l-65 134h-233l85 -190q-132 -51 -230 -137zM246 413q-24 203 166 305l129 -270l-255 -61q-14 -3 -26 4.5t-14 21.5z" />
+<glyph unicode="&#xf1be;" horiz-adv-x="2304" d="M0 405l17 128q2 9 9 9t9 -9l20 -128l-20 -126q-2 -9 -9 -9t-9 9zM79 405l23 207q0 9 9 9q8 0 10 -9l26 -207l-26 -203q-2 -9 -10 -9q-9 0 -9 10zM169 405l21 245q2 12 12 12q11 0 11 -12l25 -245l-25 -237q0 -11 -11 -11q-10 0 -12 11zM259 405l21 252q0 13 13 13 q12 0 14 -13l23 -252l-23 -244q-2 -13 -14 -13q-13 0 -13 13zM350 405l20 234q0 6 4.5 10.5t10.5 4.5q14 0 16 -15l21 -234l-21 -246q-2 -16 -16 -16q-6 0 -10.5 4.5t-4.5 11.5zM401 159zM442 405l18 380q2 18 18 18q7 0 12 -5.5t5 -12.5l21 -380l-21 -246q0 -7 -5 -12.5 t-12 -5.5q-16 0 -18 18zM534 403l16 468q2 19 20 19q8 0 13.5 -5.5t5.5 -13.5l19 -468l-19 -244q0 -8 -5.5 -13.5t-13.5 -5.5q-18 0 -20 19zM628 405l16 506q0 9 6.5 15.5t14.5 6.5q9 0 15 -6.5t7 -15.5l18 -506l-18 -242q-2 -21 -22 -21q-19 0 -21 21zM723 405l14 -241 q1 -10 7.5 -16.5t15.5 -6.5q22 0 24 23l16 241l-16 523q-1 10 -7.5 17t-16.5 7q-9 0 -16 -7t-7 -17zM784 164zM817 405l14 510q0 11 7.5 18t17.5 7t17.5 -7t7.5 -18l15 -510l-15 -239q0 -10 -7.5 -17.5t-17.5 -7.5t-17 7t-8 18zM913 404l12 492q1 12 9 20t19 8t18.5 -8 t8.5 -20l14 -492l-14 -236q0 -11 -8 -19t-19 -8t-19 8t-9 19zM1010 405q0 -1 11 -236v-1q0 -10 6 -17q9 -11 23 -11q11 0 20 9q9 7 9 20l1 24l11 211l-12 586q0 16 -13 24q-8 5 -16 5t-16 -5q-13 -8 -13 -24l-1 -6zM1079 169zM1103 404l12 636v3q2 15 12 24q9 7 20 7 q8 0 15 -5q14 -8 16 -26l14 -639l-14 -231q0 -13 -9 -22t-22 -9t-22 9t-10 22l-6 114zM1204 174v899q0 23 28 33q85 34 181 34q195 0 338 -131.5t160 -323.5q53 22 110 22q117 0 200 -83t83 -201q0 -117 -83 -199.5t-200 -82.5h-786q-13 2 -22 11t-9 22z" />
+<glyph unicode="&#xf1c0;" d="M0 0v170q119 -84 325 -127t443 -43t443 43t325 127v-170q0 -69 -103 -128t-280 -93.5t-385 -34.5t-385 34.5t-280 93.5t-103 128zM0 384v170q119 -84 325 -127t443 -43t443 43t325 127v-170q0 -69 -103 -128t-280 -93.5t-385 -34.5t-385 34.5t-280 93.5t-103 128zM0 768 v170q119 -84 325 -127t443 -43t443 43t325 127v-170q0 -69 -103 -128t-280 -93.5t-385 -34.5t-385 34.5t-280 93.5t-103 128zM0 1152v128q0 69 103 128t280 93.5t385 34.5t385 -34.5t280 -93.5t103 -128v-128q0 -69 -103 -128t-280 -93.5t-385 -34.5t-385 34.5t-280 93.5 t-103 128z" />
+<glyph unicode="&#xf1c1;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM257 60q9 40 56 91.5t132 96.5q14 9 23 -6q2 -2 2 -4 q52 85 107 197q68 136 104 262q-24 82 -30.5 159.5t6.5 127.5q11 40 42 40h21h1q23 0 35 -15q18 -21 9 -68q-2 -6 -4 -8q1 -3 1 -8v-30q-2 -123 -14 -192q55 -164 146 -238q33 -26 84 -56q59 7 117 7q147 0 177 -49q16 -22 2 -52q0 -1 -1 -2l-2 -2v-1q-6 -38 -71 -38 q-48 0 -115 20t-130 53q-221 -24 -392 -83q-153 -262 -242 -262q-15 0 -28 7l-24 12q-1 1 -6 5q-10 10 -6 36zM318 54q52 24 137 158q-51 -40 -87.5 -84t-49.5 -74zM592 313q135 54 284 81q-2 1 -13 9.5t-16 13.5q-76 67 -127 176q-27 -86 -83 -197q-30 -56 -45 -83z M714 842q1 7 7 44q0 3 7 43q1 4 4 8q-1 1 -1 2t-0.5 1.5t-0.5 1.5q-1 22 -13 36q0 -1 -1 -2v-2q-15 -42 -2 -132zM1024 1024h376q-10 29 -22 41l-313 313q-12 12 -41 22v-376zM1098 353q76 -28 124 -28q14 0 18 1q0 1 -2 3q-24 24 -140 24z" />
+<glyph unicode="&#xf1c2;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM233 661h70l164 -661h159l128 485q7 20 10 46q2 16 2 24 h4l3 -24q1 -3 3.5 -20t5.5 -26l128 -485h159l164 661h70v107h-300v-107h90l-99 -438q-5 -20 -7 -46l-2 -21h-4l-3 21q-1 5 -4 21t-5 25l-144 545h-114l-144 -545q-2 -9 -4.5 -24.5t-3.5 -21.5l-4 -21h-4l-2 21q-2 26 -7 46l-99 438h90v107h-300v-107zM1024 1024h376 q-10 29 -22 41l-313 313q-12 12 -41 22v-376z" />
+<glyph unicode="&#xf1c3;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM429 0h281v106h-75l103 161q5 7 10 16.5t7.5 13.5t3.5 4 h2q1 -4 5 -10q2 -4 4.5 -7.5t6 -8t6.5 -8.5l107 -161h-76v-106h291v106h-68l-192 273l195 282h67v107h-279v-107h74l-103 -159q-4 -7 -10 -16.5t-9 -13.5l-2 -3h-2q-1 4 -5 10q-6 11 -17 23l-106 159h76v107h-290v-107h68l189 -272l-194 -283h-68v-106zM1024 1024h376 q-10 29 -22 41l-313 313q-12 12 -41 22v-376z" />
+<glyph unicode="&#xf1c4;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM416 0h327v106h-93v167h137q76 0 118 15q67 23 106.5 87 t39.5 146q0 81 -37 141t-100 87q-48 19 -130 19h-368v-107h92v-555h-92v-106zM650 386v268h120q52 0 83 -18q56 -33 56 -115q0 -89 -62 -120q-31 -15 -78 -15h-119zM1024 1024h376q-10 29 -22 41l-313 313q-12 12 -41 22v-376z" />
+<glyph unicode="&#xf1c5;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM256 0v192l192 192l128 -128l384 384l320 -320v-320 h-1024zM256 704q0 80 56 136t136 56t136 -56t56 -136t-56 -136t-136 -56t-136 56t-56 136zM1024 1024h376q-10 29 -22 41l-313 313q-12 12 -41 22v-376z" />
+<glyph unicode="&#xf1c6;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-128v-128h-128v128h-512v-1536zM384 192q0 25 8 52q21 63 120 396 v128h128v-128h79q22 0 39 -13t23 -34l107 -349q8 -27 8 -52q0 -83 -72.5 -137.5t-183.5 -54.5t-183.5 54.5t-72.5 137.5zM512 192q0 -26 37.5 -45t90.5 -19t90.5 19t37.5 45t-37.5 45t-90.5 19t-90.5 -19t-37.5 -45zM512 896h128v128h-128v-128zM512 1152h128v128h-128v-128 zM640 768h128v128h-128v-128zM640 1024h128v128h-128v-128zM1024 1024h376q-10 29 -22 41l-313 313q-12 12 -41 22v-376z" />
+<glyph unicode="&#xf1c7;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM256 288v192q0 14 9 23t23 9h131l166 167q16 15 35 7 q20 -8 20 -30v-544q0 -22 -20 -30q-8 -2 -12 -2q-12 0 -23 9l-166 167h-131q-14 0 -23 9t-9 23zM762 206.5q1 -26.5 20 -44.5q20 -17 44 -17q27 0 47 20q87 93 87 219t-87 219q-18 19 -45 20t-46 -17t-20 -44.5t18 -46.5q52 -57 52 -131t-52 -131q-19 -20 -18 -46.5z M973.5 54.5q2.5 -26.5 23.5 -42.5q18 -15 40 -15q31 0 50 24q129 159 129 363t-129 363q-16 21 -43 24t-47 -14q-21 -17 -23.5 -43.5t14.5 -47.5q100 -123 100 -282t-100 -282q-17 -21 -14.5 -47.5zM1024 1024h376q-10 29 -22 41l-313 313q-12 12 -41 22v-376z" />
+<glyph unicode="&#xf1c8;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM256 256v384q0 52 38 90t90 38h384q52 0 90 -38t38 -90 v-384q0 -52 -38 -90t-90 -38h-384q-52 0 -90 38t-38 90zM960 403v90l265 266q9 9 23 9q4 0 12 -2q20 -8 20 -30v-576q0 -22 -20 -30q-8 -2 -12 -2q-14 0 -23 9zM1024 1024h376q-10 29 -22 41l-313 313q-12 12 -41 22v-376z" />
+<glyph unicode="&#xf1c9;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM254 429q-14 19 0 38l226 301q8 11 21 12.5t24 -6.5 l51 -38q11 -8 12.5 -21t-6.5 -24l-182 -243l182 -243q8 -11 6.5 -24t-12.5 -21l-51 -38q-11 -8 -24 -6.5t-21 12.5zM636 43l138 831q2 13 13 20.5t24 5.5l63 -10q13 -2 20.5 -13t5.5 -24l-138 -831q-2 -13 -13 -20.5t-24 -5.5l-63 10q-13 2 -20.5 13t-5.5 24zM947.5 181 q-1.5 13 6.5 24l182 243l-182 243q-8 11 -6.5 24t12.5 21l51 38q11 8 24 6.5t21 -12.5l226 -301q14 -19 0 -38l-226 -301q-8 -11 -21 -12.5t-24 6.5l-51 38q-11 8 -12.5 21zM1024 1024h376q-10 29 -22 41l-313 313q-12 12 -41 22v-376z" />
+<glyph unicode="&#xf1ca;" d="M39 1286h283q26 -218 70 -398.5t104.5 -317t121.5 -235.5t140 -195q169 169 287 406q-142 72 -223 220t-81 333q0 192 104 314.5t284 122.5q178 0 273 -105.5t95 -297.5q0 -159 -58 -286q-7 -1 -19.5 -3t-46 -2t-63 6t-62 25.5t-50.5 51.5q31 103 31 184q0 87 -29 132 t-79 45q-53 0 -85 -49.5t-32 -140.5q0 -186 105 -293.5t267 -107.5q62 0 121 14v-198q-101 -23 -198 -23q-65 -136 -165.5 -271t-181.5 -215.5t-128 -106.5q-80 -45 -162 3q-28 17 -60.5 43.5t-85 83.5t-102.5 128.5t-107.5 184t-105.5 244t-91.5 314.5t-70.5 390z" />
+<glyph unicode="&#xf1cb;" horiz-adv-x="1792" d="M0 367v546q0 41 34 64l819 546q21 13 43 13t43 -13l819 -546q34 -23 34 -64v-546q0 -41 -34 -64l-819 -546q-21 -13 -43 -13t-43 13l-819 546q-34 23 -34 64zM154 511l193 129l-193 129v-258zM216 367l603 -402v359l-334 223zM216 913l269 -180l334 223v359zM624 640 l272 -182l272 182l-272 182zM973 -35l603 402l-269 180l-334 -223v-359zM973 956l334 -223l269 180l-603 402v-359zM1445 640l193 -129v258z" />
+<glyph unicode="&#xf1cc;" horiz-adv-x="2048" d="M0 407q0 110 55 203t147 147q-12 39 -12 82q0 115 82 196t199 81q95 0 172 -58q75 154 222.5 248t326.5 94q166 0 306 -80.5t221.5 -218.5t81.5 -301q0 -6 -0.5 -18t-0.5 -18q111 -46 179.5 -145.5t68.5 -221.5q0 -164 -118 -280.5t-285 -116.5q-4 0 -11.5 0.5t-10.5 0.5 h-1209h-1h-2h-5q-170 10 -288 125.5t-118 280.5zM468 498q0 -122 84 -193t208 -71q137 0 240 99q-16 20 -47.5 56.5t-43.5 50.5q-67 -65 -144 -65q-55 0 -93.5 33.5t-38.5 87.5q0 53 38.5 87t91.5 34q44 0 84.5 -21t73 -55t65 -75t69 -82t77 -75t97 -55t121.5 -21 q121 0 204.5 71.5t83.5 190.5q0 121 -84 192t-207 71q-143 0 -241 -97q14 -16 29.5 -34t34.5 -40t29 -34q66 64 142 64q52 0 92 -33t40 -84q0 -57 -37 -91.5t-94 -34.5q-43 0 -82.5 21t-72 55t-65.5 75t-69.5 82t-77.5 75t-96.5 55t-118.5 21q-122 0 -207 -70.5t-85 -189.5z " />
+<glyph unicode="&#xf1cd;" horiz-adv-x="1792" d="M0 640q0 182 71 348t191 286t286 191t348 71t348 -71t286 -191t191 -286t71 -348t-71 -348t-191 -286t-286 -191t-348 -71t-348 71t-286 191t-191 286t-71 348zM128 640q0 -190 90 -361l194 194q-28 82 -28 167t28 167l-194 194q-90 -171 -90 -361zM512 640 q0 -159 112.5 -271.5t271.5 -112.5t271.5 112.5t112.5 271.5t-112.5 271.5t-271.5 112.5t-271.5 -112.5t-112.5 -271.5zM535 -38q171 -90 361 -90t361 90l-194 194q-82 -28 -167 -28t-167 28zM535 1318l194 -194q82 28 167 28t167 -28l194 194q-171 90 -361 90t-361 -90z M1380 473l194 -194q90 171 90 361t-90 361l-194 -194q28 -82 28 -167t-28 -167z" />
+<glyph unicode="&#xf1ce;" horiz-adv-x="1792" d="M0 640q0 222 101 414.5t276.5 317t390.5 155.5v-260q-221 -45 -366.5 -221t-145.5 -406q0 -130 51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5t136.5 204t51 248.5q0 230 -145.5 406t-366.5 221v260q215 -31 390.5 -155.5t276.5 -317t101 -414.5 q0 -182 -71 -348t-191 -286t-286 -191t-348 -71t-348 71t-286 191t-191 286t-71 348z" />
+<glyph unicode="&#xf1d0;" horiz-adv-x="1792" d="M19 662q8 217 116 406t305 318h5q0 -1 -1 -3q-8 -8 -28 -33.5t-52 -76.5t-60 -110.5t-44.5 -135.5t-14 -150.5t39 -157.5t108.5 -154q50 -50 102 -69.5t90.5 -11.5t69.5 23.5t47 32.5l16 16q39 51 53 116.5t6.5 122.5t-21 107t-26.5 80l-14 29q-10 25 -30.5 49.5t-43 41 t-43.5 29.5t-35 19l-13 6l104 115q39 -17 78 -52t59 -61l19 -27q1 48 -18.5 103.5t-40.5 87.5l-20 31l161 183l160 -181q-33 -46 -52.5 -102.5t-22.5 -90.5l-4 -33q22 37 61.5 72.5t67.5 52.5l28 17l103 -115q-44 -14 -85 -50t-60 -65l-19 -29q-31 -56 -48 -133.5t-7 -170 t57 -156.5q33 -45 77.5 -60.5t85 -5.5t76 26.5t57.5 33.5l21 16q60 53 96.5 115t48.5 121.5t10 121.5t-18 118t-37 107.5t-45.5 93t-45 72t-34.5 47.5l-13 17q-14 13 -7 13l10 -3q40 -29 62.5 -46t62 -50t64 -58t58.5 -65t55.5 -77t45.5 -88t38 -103t23.5 -117t10.5 -136 q3 -259 -108 -465t-312 -321t-456 -115q-185 0 -351 74t-283.5 198t-184 293t-60.5 353z" />
+<glyph unicode="&#xf1d1;" horiz-adv-x="1792" d="M0 640q0 182 71 348t191 286t286 191t348 71t348 -71t286 -191t191 -286t71 -348t-71 -348t-191 -286t-286 -191t-348 -71t-348 71t-286 191t-191 286t-71 348zM44 640q0 -173 67.5 -331t181.5 -272t272 -181.5t331 -67.5t331 67.5t272 181.5t181.5 272t67.5 331 t-67.5 331t-181.5 272t-272 181.5t-331 67.5t-331 -67.5t-272 -181.5t-181.5 -272t-67.5 -331zM87 640q0 205 98 385l57 -33q-30 -56 -49 -112l82 -28q-35 -100 -35 -212q0 -109 36 -212l-83 -28q22 -60 49 -112l-57 -33q-98 180 -98 385zM206 217l58 34q29 -49 73 -99 l65 57q148 -168 368 -212l-17 -86q65 -12 121 -13v-66q-208 6 -385 109.5t-283 275.5zM207 1063q106 172 282 275.5t385 109.5v-66q-65 -2 -121 -13l17 -86q-220 -42 -368 -211l-65 56q-38 -42 -73 -98zM415 805q33 93 99 169l185 -162q59 68 147 86l-48 240q44 10 98 10 t98 -10l-48 -240q88 -18 147 -86l185 162q66 -76 99 -169l-233 -80q14 -42 14 -85t-14 -85l232 -80q-31 -92 -98 -169l-185 162q-57 -67 -147 -85l48 -241q-52 -10 -98 -10t-98 10l48 241q-90 18 -147 85l-185 -162q-67 77 -98 169l232 80q-14 42 -14 85t14 85zM918 -102 q56 1 121 13l-17 86q220 44 368 212l65 -57q44 50 73 99l58 -34q-106 -172 -283 -275.5t-385 -109.5v66zM918 1382v66q209 -6 385 -109.5t282 -275.5l-57 -33q-35 56 -73 98l-65 -56q-148 169 -368 211l17 86q-56 11 -121 13zM1516 428q36 103 36 212q0 112 -35 212l82 28 q-19 56 -49 112l57 33q98 -180 98 -385t-98 -385l-57 33q27 52 49 112z" />
+<glyph unicode="&#xf1d2;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 218q0 -45 20 -78.5t54 -51t72 -25.5t81 -8q224 0 224 188q0 67 -48 99t-126 46q-27 5 -51.5 20.5 t-24.5 39.5q0 44 49 52q77 15 122 70t45 134q0 24 -10 52q37 9 49 13v125q-78 -29 -135 -29q-50 29 -110 29q-86 0 -145 -57t-59 -143q0 -50 29.5 -102t73.5 -67v-3q-38 -17 -38 -85q0 -53 41 -77v-3q-113 -37 -113 -139zM382 225q0 64 98 64q102 0 102 -61q0 -66 -93 -66 q-107 0 -107 63zM395 693q0 90 77 90q36 0 55 -25.5t19 -63.5q0 -85 -74 -85q-77 0 -77 84zM755 1072q0 -36 25 -62.5t60 -26.5t59.5 27t24.5 62q0 36 -24 63.5t-60 27.5t-60.5 -27t-24.5 -64zM771 350h137q-2 27 -2 82v387q0 46 2 69h-137q3 -23 3 -71v-392q0 -50 -3 -75z M966 771q36 3 37 3q3 0 11 -0.5t12 -0.5v-2h-2v-217q0 -37 2.5 -64t11.5 -56.5t24.5 -48.5t43.5 -31t66 -12q64 0 108 24v121q-30 -21 -68 -21q-53 0 -53 82v225h52q9 0 26.5 -1t26.5 -1v117h-105q0 82 3 102h-140q4 -24 4 -55v-47h-60v-117z" />
+<glyph unicode="&#xf1d3;" horiz-adv-x="1792" d="M68 7q0 165 182 225v4q-67 41 -67 126q0 109 63 137v4q-72 24 -119.5 108.5t-47.5 165.5q0 139 95 231.5t235 92.5q96 0 178 -47q98 0 218 47v-202q-36 -12 -79 -22q16 -43 16 -84q0 -127 -73 -216.5t-197 -112.5q-40 -8 -59.5 -27t-19.5 -58q0 -31 22.5 -51.5t58 -32 t78.5 -22t86 -25.5t78.5 -37.5t58 -64t22.5 -98.5q0 -304 -363 -304q-69 0 -130 12.5t-116 41t-87.5 82t-32.5 127.5zM272 18q0 -101 172 -101q151 0 151 105q0 100 -165 100q-158 0 -158 -104zM293 775q0 -135 124 -135q119 0 119 137q0 61 -30 102t-89 41 q-124 0 -124 -145zM875 1389q0 59 39.5 103t98.5 44q58 0 96.5 -44.5t38.5 -102.5t-39 -101.5t-96 -43.5q-58 0 -98 43.5t-40 101.5zM901 220q4 45 4 134v609q0 94 -4 128h222q-4 -33 -4 -124v-613q0 -89 4 -134h-222zM1217 901v190h96v76q0 54 -6 89h227q-6 -41 -6 -165 h171v-190q-15 0 -43.5 2t-42.5 2h-85v-365q0 -131 87 -131q61 0 109 33v-196q-71 -39 -174 -39q-62 0 -107 20t-70 50t-39.5 78t-18.5 92t-4 103v351h2v4q-7 0 -19 1t-18 1q-21 0 -59 -6z" />
+<glyph unicode="&#xf1d4;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM368 1135l323 -589v-435h134v436l343 588h-150q-21 -39 -63.5 -118.5t-68 -128.5t-59.5 -118.5t-60 -128.5h-3 q-21 48 -44.5 97t-52 105.5t-46.5 92t-54 104.5t-49 95h-150z" />
+<glyph unicode="&#xf1d5;" horiz-adv-x="1280" d="M57 953q0 119 46.5 227t124.5 186t186 124t226 46q158 0 292.5 -78t212.5 -212.5t78 -292.5t-78 -292t-212.5 -212t-292.5 -78q-64 0 -131 14q-21 5 -32.5 23.5t-6.5 39.5q5 20 23 31.5t39 7.5q51 -13 108 -13q97 0 186 38t153 102t102 153t38 186t-38 186t-102 153 t-153 102t-186 38t-186 -38t-153 -102t-102 -153t-38 -186q0 -114 52 -218q10 -20 3.5 -40t-25.5 -30t-39.5 -3t-30.5 26q-64 123 -64 265zM113.5 38.5q10.5 121.5 29.5 217t54 186t69 155.5t74 125q61 90 132 165q-16 35 -16 77q0 80 56.5 136.5t136.5 56.5t136.5 -56.5 t56.5 -136.5t-57 -136.5t-136 -56.5q-60 0 -111 35q-62 -67 -115 -146q-247 -371 -202 -859q1 -22 -12.5 -38.5t-34.5 -18.5h-5q-20 0 -35 13.5t-17 33.5q-14 126 -3.5 247.5z" />
+<glyph unicode="&#xf1d6;" horiz-adv-x="1792" d="M18 264q0 275 252 466q-8 19 -8 52q0 20 11 49t24 45q-1 22 7.5 53t22.5 43q0 139 92.5 288.5t217.5 209.5q139 66 324 66q133 0 266 -55q49 -21 90 -48t71 -56t55 -68t42 -74t32.5 -84.5t25.5 -89.5t22 -98l1 -5q55 -83 55 -150q0 -14 -9 -40t-9 -38q0 -1 1.5 -3.5 t3.5 -5t2 -3.5q77 -114 120.5 -214.5t43.5 -208.5q0 -43 -19.5 -100t-55.5 -57q-9 0 -19.5 7.5t-19 17.5t-19 26t-16 26.5t-13.5 26t-9 17.5q-1 1 -3 1l-5 -4q-59 -154 -132 -223q20 -20 61.5 -38.5t69 -41.5t35.5 -65q-2 -4 -4 -16t-7 -18q-64 -97 -302 -97q-53 0 -110.5 9 t-98 20t-104.5 30q-15 5 -23 7q-14 4 -46 4.5t-40 1.5q-41 -45 -127.5 -65t-168.5 -20q-35 0 -69 1.5t-93 9t-101 20.5t-74.5 40t-32.5 64q0 40 10 59.5t41 48.5q11 2 40.5 13t49.5 12q4 0 14 2q2 2 2 4l-2 3q-48 11 -108 105.5t-73 156.5l-5 3q-4 0 -12 -20 q-18 -41 -54.5 -74.5t-77.5 -37.5h-1q-4 0 -6 4.5t-5 5.5q-23 54 -23 100z" />
+<glyph unicode="&#xf1d7;" horiz-adv-x="2048" d="M0 858q0 169 97.5 311t264 223.5t363.5 81.5q176 0 332.5 -66t262 -182.5t136.5 -260.5q-31 4 -70 4q-169 0 -311 -77t-223.5 -208.5t-81.5 -287.5q0 -78 23 -152q-35 -3 -68 -3q-26 0 -50 1.5t-55 6.5t-44.5 7t-54.5 10.5t-50 10.5l-253 -127l72 218q-290 203 -290 490z M380 1075q0 -39 33 -64.5t76 -25.5q41 0 66 24.5t25 65.5t-25 66t-66 25q-43 0 -76 -25.5t-33 -65.5zM816 404q0 143 81.5 264t223.5 191.5t311 70.5q161 0 303 -70.5t227.5 -192t85.5 -263.5q0 -117 -68.5 -223.5t-185.5 -193.5l55 -181l-199 109q-150 -37 -218 -37 q-169 0 -311 70.5t-223.5 191.5t-81.5 264zM888 1075q0 -39 33 -64.5t76 -25.5q41 0 65.5 24.5t24.5 65.5t-24.5 66t-65.5 25q-43 0 -76 -25.5t-33 -65.5zM1160 568q0 -28 22.5 -50.5t49.5 -22.5q40 0 65.5 22t25.5 51q0 28 -25.5 50t-65.5 22q-27 0 -49.5 -22.5 t-22.5 -49.5zM1559 568q0 -28 22.5 -50.5t49.5 -22.5q39 0 65 22t26 51q0 28 -26 50t-65 22q-27 0 -49.5 -22.5t-22.5 -49.5z" />
+<glyph unicode="&#xf1d8;" horiz-adv-x="1792" d="M0 508q-2 40 32 59l1664 960q15 9 32 9q20 0 36 -11q33 -24 27 -64l-256 -1536q-5 -29 -32 -45q-14 -8 -31 -8q-11 0 -24 5l-453 185l-242 -295q-18 -23 -49 -23q-13 0 -22 4q-19 7 -30.5 23.5t-11.5 36.5v349l864 1059l-1069 -925l-395 162q-37 14 -40 55z" />
+<glyph unicode="&#xf1d9;" horiz-adv-x="1792" d="M0 508q-3 39 32 59l1664 960q35 21 68 -2q33 -24 27 -64l-256 -1536q-5 -29 -32 -45q-14 -8 -31 -8q-11 0 -24 5l-527 215l-298 -327q-18 -21 -47 -21q-14 0 -23 4q-19 7 -30 23.5t-11 36.5v452l-472 193q-37 14 -40 55zM209 522l336 -137l863 639l-478 -797l492 -201 l221 1323z" />
+<glyph unicode="&#xf1da;" d="M0 832v448q0 42 40 59q39 17 69 -14l130 -129q107 101 244.5 156.5t284.5 55.5q156 0 298 -61t245 -164t164 -245t61 -298t-61 -298t-164 -245t-245 -164t-298 -61q-172 0 -327 72.5t-264 204.5q-7 10 -6.5 22.5t8.5 20.5l137 138q10 9 25 9q16 -2 23 -12 q73 -95 179 -147t225 -52q104 0 198.5 40.5t163.5 109.5t109.5 163.5t40.5 198.5t-40.5 198.5t-109.5 163.5t-163.5 109.5t-198.5 40.5q-98 0 -188 -35.5t-160 -101.5l137 -138q31 -30 14 -69q-17 -40 -59 -40h-448q-26 0 -45 19t-19 45zM512 480v64q0 14 9 23t23 9h224v352 q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-448q0 -14 -9 -23t-23 -9h-320q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf1db;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM128 640q0 -130 51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5t136.5 204t51 248.5 t-51 248.5t-136.5 204t-204 136.5t-248.5 51t-248.5 -51t-204 -136.5t-136.5 -204t-51 -248.5z" />
+<glyph unicode="&#xf1dc;" horiz-adv-x="1792" d="M62 1338q0 26 12 48t36 22q46 0 138.5 -3.5t138.5 -3.5q42 0 126.5 3.5t126.5 3.5q25 0 37.5 -22t12.5 -48q0 -30 -17 -43.5t-38.5 -14.5t-49.5 -4t-43 -13q-35 -21 -35 -160l1 -320q0 -21 1 -32q13 -3 39 -3h699q25 0 38 3q1 11 1 32l1 320q0 139 -35 160 q-18 11 -58.5 12.5t-66 13t-25.5 49.5q0 26 12.5 48t37.5 22q44 0 132 -3.5t132 -3.5q43 0 129 3.5t129 3.5q25 0 37.5 -22t12.5 -48q0 -30 -17.5 -44t-40 -14.5t-51.5 -3t-44 -12.5q-35 -23 -35 -161l1 -943q0 -119 34 -140q16 -10 46 -13.5t53.5 -4.5t41.5 -15.5t18 -44.5 q0 -26 -12 -48t-36 -22q-44 0 -132.5 3.5t-133.5 3.5q-44 0 -132 -3.5t-132 -3.5q-24 0 -37 20.5t-13 45.5q0 31 17 46t39 17t51 7t45 15q33 21 33 140l-1 391q0 21 -1 31q-13 4 -50 4h-675q-38 0 -51 -4q-1 -10 -1 -31l-1 -371q0 -142 37 -164q16 -10 48 -13t57 -3.5 t45 -15t20 -45.5q0 -26 -12.5 -48t-36.5 -22q-47 0 -139.5 3.5t-138.5 3.5q-43 0 -128 -3.5t-127 -3.5q-23 0 -35.5 21t-12.5 45q0 30 15.5 45t36 17.5t47.5 7.5t42 15q33 23 33 143l-1 57v813q0 3 0.5 26t0 36.5t-1.5 38.5t-3.5 42t-6.5 36.5t-11 31.5t-16 18 q-15 10 -45 12t-53 2t-41 14t-18 45z" />
+<glyph unicode="&#xf1dd;" horiz-adv-x="1280" d="M24 926q0 166 88 286q88 118 209 159q111 37 417 37h479q25 0 43 -18t18 -43v-73q0 -29 -18.5 -61t-42.5 -32q-50 0 -54 -1q-26 -6 -32 -31q-3 -11 -3 -64v-1152q0 -25 -18 -43t-43 -18h-108q-25 0 -43 18t-18 43v1218h-143v-1218q0 -25 -17.5 -43t-43.5 -18h-108 q-26 0 -43.5 18t-17.5 43v496q-147 12 -245 59q-126 58 -192 179q-64 117 -64 259z" />
+<glyph unicode="&#xf1de;" d="M0 736v64q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-64q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM128 -96v672h256v-672q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23zM128 960v416q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-416h-256zM512 224v64q0 40 28 68 t68 28h320q40 0 68 -28t28 -68v-64q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM640 64h256v-160q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v160zM640 448v928q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-928h-256zM1024 992v64q0 40 28 68t68 28h320q40 0 68 -28 t28 -68v-64q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM1152 -96v928h256v-928q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23zM1152 1216v160q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-160h-256z" />
+<glyph unicode="&#xf1e0;" d="M0 640q0 133 93.5 226.5t226.5 93.5q126 0 218 -86l360 180q-2 22 -2 34q0 133 93.5 226.5t226.5 93.5t226.5 -93.5t93.5 -226.5t-93.5 -226.5t-226.5 -93.5q-126 0 -218 86l-360 -180q2 -22 2 -34t-2 -34l360 -180q92 86 218 86q133 0 226.5 -93.5t93.5 -226.5 t-93.5 -226.5t-226.5 -93.5t-226.5 93.5t-93.5 226.5q0 12 2 34l-360 180q-92 -86 -218 -86q-133 0 -226.5 93.5t-93.5 226.5z" />
+<glyph unicode="&#xf1e1;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 640q0 -88 62.5 -150.5t150.5 -62.5q83 0 145 57l241 -120q-2 -16 -2 -23q0 -88 63 -150.5t151 -62.5 t150.5 62.5t62.5 150.5t-62.5 151t-150.5 63q-84 0 -145 -58l-241 120q2 16 2 23t-2 23l241 120q61 -58 145 -58q88 0 150.5 63t62.5 151t-62.5 150.5t-150.5 62.5t-151 -62.5t-63 -150.5q0 -7 2 -23l-241 -120q-62 57 -145 57q-88 0 -150.5 -62.5t-62.5 -150.5z" />
+<glyph unicode="&#xf1e2;" horiz-adv-x="1792" d="M0 448q0 143 55.5 273.5t150 225t225 150t273.5 55.5q182 0 343 -89l64 64q19 19 45.5 19t45.5 -19l68 -68l243 244l46 -46l-244 -243l68 -68q19 -19 19 -45.5t-19 -45.5l-64 -64q89 -161 89 -343q0 -143 -55.5 -273.5t-150 -225t-225 -150t-273.5 -55.5t-273.5 55.5 t-225 150t-150 225t-55.5 273.5zM170 615q10 -24 35 -34q13 -5 24 -5q42 0 60 40q34 84 98.5 148.5t148.5 98.5q25 11 35 35t0 49t-34 35t-49 0q-108 -44 -191 -127t-127 -191q-10 -25 0 -49zM1376 1472q0 13 9 23q10 9 23 9t23 -9l90 -91q10 -9 10 -22.5t-10 -22.5 q-10 -10 -22 -10q-13 0 -23 10l-91 90q-9 10 -9 23zM1536 1408v96q0 14 9 23t23 9t23 -9t9 -23v-96q0 -14 -9 -23t-23 -9t-23 9t-9 23zM1605 1242.5q0 13.5 10 22.5q9 10 22.5 10t22.5 -10l91 -90q9 -10 9 -23t-9 -23q-11 -9 -23 -9t-23 9l-90 91q-10 9 -10 22.5z M1605 1381.5q0 13.5 10 22.5l90 91q10 9 23 9t23 -9q9 -10 9 -23t-9 -23l-91 -90q-10 -10 -22 -10q-13 0 -23 10q-10 9 -10 22.5zM1632 1312q0 14 9 23t23 9h96q14 0 23 -9t9 -23t-9 -23t-23 -9h-96q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf1e3;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1e4;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1e5;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1e6;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1e7;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1e8;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1e9;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1ea;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1eb;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1ec;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1ed;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1ee;" horiz-adv-x="1792" />
+<glyph unicode="&#xf500;" horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+</font>
+</defs></svg> 
\ No newline at end of file
diff --git a/doc/fonts/fontawesome-webfont.ttf b/doc/fonts/fontawesome-webfont.ttf
new file mode 100755 (executable)
index 0000000..5cd6cff
Binary files /dev/null and b/doc/fonts/fontawesome-webfont.ttf differ
diff --git a/doc/fonts/fontawesome-webfont.woff b/doc/fonts/fontawesome-webfont.woff
new file mode 100755 (executable)
index 0000000..9eaecb3
Binary files /dev/null and b/doc/fonts/fontawesome-webfont.woff differ
diff --git a/doc/fonts/glyphicons-halflings-regular.eot b/doc/fonts/glyphicons-halflings-regular.eot
new file mode 100644 (file)
index 0000000..423bd5d
Binary files /dev/null and b/doc/fonts/glyphicons-halflings-regular.eot differ
diff --git a/doc/fonts/glyphicons-halflings-regular.svg b/doc/fonts/glyphicons-halflings-regular.svg
new file mode 100644 (file)
index 0000000..4469488
--- /dev/null
@@ -0,0 +1,229 @@
+<?xml version="1.0" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" >
+<svg xmlns="http://www.w3.org/2000/svg">
+<metadata></metadata>
+<defs>
+<font id="glyphicons_halflingsregular" horiz-adv-x="1200" >
+<font-face units-per-em="1200" ascent="960" descent="-240" />
+<missing-glyph horiz-adv-x="500" />
+<glyph />
+<glyph />
+<glyph unicode="&#xd;" />
+<glyph unicode=" " />
+<glyph unicode="*" d="M100 500v200h259l-183 183l141 141l183 -183v259h200v-259l183 183l141 -141l-183 -183h259v-200h-259l183 -183l-141 -141l-183 183v-259h-200v259l-183 -183l-141 141l183 183h-259z" />
+<glyph unicode="+" d="M0 400v300h400v400h300v-400h400v-300h-400v-400h-300v400h-400z" />
+<glyph unicode="&#xa0;" />
+<glyph unicode="&#x2000;" horiz-adv-x="652" />
+<glyph unicode="&#x2001;" horiz-adv-x="1304" />
+<glyph unicode="&#x2002;" horiz-adv-x="652" />
+<glyph unicode="&#x2003;" horiz-adv-x="1304" />
+<glyph unicode="&#x2004;" horiz-adv-x="434" />
+<glyph unicode="&#x2005;" horiz-adv-x="326" />
+<glyph unicode="&#x2006;" horiz-adv-x="217" />
+<glyph unicode="&#x2007;" horiz-adv-x="217" />
+<glyph unicode="&#x2008;" horiz-adv-x="163" />
+<glyph unicode="&#x2009;" horiz-adv-x="260" />
+<glyph unicode="&#x200a;" horiz-adv-x="72" />
+<glyph unicode="&#x202f;" horiz-adv-x="260" />
+<glyph unicode="&#x205f;" horiz-adv-x="326" />
+<glyph unicode="&#x20ac;" d="M100 500l100 100h113q0 47 5 100h-218l100 100h135q37 167 112 257q117 141 297 141q242 0 354 -189q60 -103 66 -209h-181q0 55 -25.5 99t-63.5 68t-75 36.5t-67 12.5q-24 0 -52.5 -10t-62.5 -32t-65.5 -67t-50.5 -107h379l-100 -100h-300q-6 -46 -6 -100h406l-100 -100 h-300q9 -74 33 -132t52.5 -91t62 -54.5t59 -29t46.5 -7.5q29 0 66 13t75 37t63.5 67.5t25.5 96.5h174q-31 -172 -128 -278q-107 -117 -274 -117q-205 0 -324 158q-36 46 -69 131.5t-45 205.5h-217z" />
+<glyph unicode="&#x2212;" d="M200 400h900v300h-900v-300z" />
+<glyph unicode="&#x2601;" d="M-14 494q0 -80 56.5 -137t135.5 -57h750q120 0 205 86t85 208q0 120 -85 206.5t-205 86.5q-46 0 -90 -14q-44 97 -134.5 156.5t-200.5 59.5q-152 0 -260 -107.5t-108 -260.5q0 -25 2 -37q-66 -14 -108.5 -67.5t-42.5 -122.5z" />
+<glyph unicode="&#x2709;" d="M0 100l400 400l200 -200l200 200l400 -400h-1200zM0 300v600l300 -300zM0 1100l600 -603l600 603h-1200zM900 600l300 300v-600z" />
+<glyph unicode="&#x270f;" d="M-13 -13l333 112l-223 223zM187 403l214 -214l614 614l-214 214zM887 1103l214 -214l99 92q13 13 13 32.5t-13 33.5l-153 153q-15 13 -33 13t-33 -13z" />
+<glyph unicode="&#xe000;" horiz-adv-x="500" d="M0 0z" />
+<glyph unicode="&#xe001;" d="M0 1200h1200l-500 -550v-550h300v-100h-800v100h300v550z" />
+<glyph unicode="&#xe002;" d="M14 84q18 -55 86 -75.5t147 5.5q65 21 109 69t44 90v606l600 155v-521q-64 16 -138 -7q-79 -26 -122.5 -83t-25.5 -111q17 -55 85.5 -75.5t147.5 4.5q70 23 111.5 63.5t41.5 95.5v881q0 10 -7 15.5t-17 2.5l-752 -193q-10 -3 -17 -12.5t-7 -19.5v-689q-64 17 -138 -7 q-79 -25 -122.5 -82t-25.5 -112z" />
+<glyph unicode="&#xe003;" d="M23 693q0 200 142 342t342 142t342 -142t142 -342q0 -142 -78 -261l300 -300q7 -8 7 -18t-7 -18l-109 -109q-8 -7 -18 -7t-18 7l-300 300q-119 -78 -261 -78q-200 0 -342 142t-142 342zM176 693q0 -136 97 -233t234 -97t233.5 96.5t96.5 233.5t-96.5 233.5t-233.5 96.5 t-234 -97t-97 -233z" />
+<glyph unicode="&#xe005;" d="M100 784q0 64 28 123t73 100.5t104.5 64t119 20.5t120 -38.5t104.5 -104.5q48 69 109.5 105t121.5 38t118.5 -20.5t102.5 -64t71 -100.5t27 -123q0 -57 -33.5 -117.5t-94 -124.5t-126.5 -127.5t-150 -152.5t-146 -174q-62 85 -145.5 174t-149.5 152.5t-126.5 127.5 t-94 124.5t-33.5 117.5z" />
+<glyph unicode="&#xe006;" d="M-72 800h479l146 400h2l146 -400h472l-382 -278l145 -449l-384 275l-382 -275l146 447zM168 71l2 1z" />
+<glyph unicode="&#xe007;" d="M-72 800h479l146 400h2l146 -400h472l-382 -278l145 -449l-384 275l-382 -275l146 447zM168 71l2 1zM237 700l196 -142l-73 -226l192 140l195 -141l-74 229l193 140h-235l-77 211l-78 -211h-239z" />
+<glyph unicode="&#xe008;" d="M0 0v143l400 257v100q-37 0 -68.5 74.5t-31.5 125.5v200q0 124 88 212t212 88t212 -88t88 -212v-200q0 -51 -31.5 -125.5t-68.5 -74.5v-100l400 -257v-143h-1200z" />
+<glyph unicode="&#xe009;" d="M0 0v1100h1200v-1100h-1200zM100 100h100v100h-100v-100zM100 300h100v100h-100v-100zM100 500h100v100h-100v-100zM100 700h100v100h-100v-100zM100 900h100v100h-100v-100zM300 100h600v400h-600v-400zM300 600h600v400h-600v-400zM1000 100h100v100h-100v-100z M1000 300h100v100h-100v-100zM1000 500h100v100h-100v-100zM1000 700h100v100h-100v-100zM1000 900h100v100h-100v-100z" />
+<glyph unicode="&#xe010;" d="M0 50v400q0 21 14.5 35.5t35.5 14.5h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5zM0 650v400q0 21 14.5 35.5t35.5 14.5h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400 q-21 0 -35.5 14.5t-14.5 35.5zM600 50v400q0 21 14.5 35.5t35.5 14.5h400q21 0 35.5 -14.5t14.5 -35.5v-400q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5zM600 650v400q0 21 14.5 35.5t35.5 14.5h400q21 0 35.5 -14.5t14.5 -35.5v-400 q0 -21 -14.5 -35.5t-35.5 -14.5h-400q-21 0 -35.5 14.5t-14.5 35.5z" />
+<glyph unicode="&#xe011;" d="M0 50v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM0 450v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200 q-21 0 -35.5 14.5t-14.5 35.5zM0 850v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM400 50v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5 t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM400 450v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM400 850v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5 v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM800 50v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM800 450v200q0 21 14.5 35.5t35.5 14.5h200 q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM800 850v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5z" />
+<glyph unicode="&#xe012;" d="M0 50v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM0 450q0 -21 14.5 -35.5t35.5 -14.5h200q21 0 35.5 14.5t14.5 35.5v200q0 21 -14.5 35.5t-35.5 14.5h-200q-21 0 -35.5 -14.5 t-14.5 -35.5v-200zM0 850v200q0 21 14.5 35.5t35.5 14.5h200q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5zM400 50v200q0 21 14.5 35.5t35.5 14.5h700q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5 t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5zM400 450v200q0 21 14.5 35.5t35.5 14.5h700q21 0 35.5 -14.5t14.5 -35.5v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5zM400 850v200q0 21 14.5 35.5t35.5 14.5h700q21 0 35.5 -14.5t14.5 -35.5 v-200q0 -21 -14.5 -35.5t-35.5 -14.5h-700q-21 0 -35.5 14.5t-14.5 35.5z" />
+<glyph unicode="&#xe013;" d="M29 454l419 -420l818 820l-212 212l-607 -607l-206 207z" />
+<glyph unicode="&#xe014;" d="M106 318l282 282l-282 282l212 212l282 -282l282 282l212 -212l-282 -282l282 -282l-212 -212l-282 282l-282 -282z" />
+<glyph unicode="&#xe015;" d="M23 693q0 200 142 342t342 142t342 -142t142 -342q0 -142 -78 -261l300 -300q7 -8 7 -18t-7 -18l-109 -109q-8 -7 -18 -7t-18 7l-300 300q-119 -78 -261 -78q-200 0 -342 142t-142 342zM176 693q0 -136 97 -233t234 -97t233.5 96.5t96.5 233.5t-96.5 233.5t-233.5 96.5 t-234 -97t-97 -233zM300 600v200h100v100h200v-100h100v-200h-100v-100h-200v100h-100z" />
+<glyph unicode="&#xe016;" d="M23 694q0 200 142 342t342 142t342 -142t142 -342q0 -141 -78 -262l300 -299q7 -7 7 -18t-7 -18l-109 -109q-8 -8 -18 -8t-18 8l-300 299q-120 -77 -261 -77q-200 0 -342 142t-142 342zM176 694q0 -136 97 -233t234 -97t233.5 97t96.5 233t-96.5 233t-233.5 97t-234 -97 t-97 -233zM300 601h400v200h-400v-200z" />
+<glyph unicode="&#xe017;" d="M23 600q0 183 105 331t272 210v-166q-103 -55 -165 -155t-62 -220q0 -177 125 -302t302 -125t302 125t125 302q0 120 -62 220t-165 155v166q167 -62 272 -210t105 -331q0 -118 -45.5 -224.5t-123 -184t-184 -123t-224.5 -45.5t-224.5 45.5t-184 123t-123 184t-45.5 224.5 zM500 750q0 -21 14.5 -35.5t35.5 -14.5h100q21 0 35.5 14.5t14.5 35.5v400q0 21 -14.5 35.5t-35.5 14.5h-100q-21 0 -35.5 -14.5t-14.5 -35.5v-400z" />
+<glyph unicode="&#xe018;" d="M100 1h200v300h-200v-300zM400 1v500h200v-500h-200zM700 1v800h200v-800h-200zM1000 1v1200h200v-1200h-200z" />
+<glyph unicode="&#xe019;" d="M26 601q0 -33 6 -74l151 -38l2 -6q14 -49 38 -93l3 -5l-80 -134q45 -59 105 -105l133 81l5 -3q45 -26 94 -39l5 -2l38 -151q40 -5 74 -5q27 0 74 5l38 151l6 2q46 13 93 39l5 3l134 -81q56 44 104 105l-80 134l3 5q24 44 39 93l1 6l152 38q5 40 5 74q0 28 -5 73l-152 38 l-1 6q-16 51 -39 93l-3 5l80 134q-44 58 -104 105l-134 -81l-5 3q-45 25 -93 39l-6 1l-38 152q-40 5 -74 5q-27 0 -74 -5l-38 -152l-5 -1q-50 -14 -94 -39l-5 -3l-133 81q-59 -47 -105 -105l80 -134l-3 -5q-25 -47 -38 -93l-2 -6l-151 -38q-6 -48 -6 -73zM385 601 q0 88 63 151t152 63t152 -63t63 -151q0 -89 -63 -152t-152 -63t-152 63t-63 152z" />
+<glyph unicode="&#xe020;" d="M100 1025v50q0 10 7.5 17.5t17.5 7.5h275v100q0 41 29.5 70.5t70.5 29.5h300q41 0 70.5 -29.5t29.5 -70.5v-100h275q10 0 17.5 -7.5t7.5 -17.5v-50q0 -11 -7 -18t-18 -7h-1050q-11 0 -18 7t-7 18zM200 100v800h900v-800q0 -41 -29.5 -71t-70.5 -30h-700q-41 0 -70.5 30 t-29.5 71zM300 100h100v700h-100v-700zM500 100h100v700h-100v-700zM500 1100h300v100h-300v-100zM700 100h100v700h-100v-700zM900 100h100v700h-100v-700z" />
+<glyph unicode="&#xe021;" d="M1 601l656 644l644 -644h-200v-600h-300v400h-300v-400h-300v600h-200z" />
+<glyph unicode="&#xe022;" d="M100 25v1150q0 11 7 18t18 7h475v-500h400v-675q0 -11 -7 -18t-18 -7h-850q-11 0 -18 7t-7 18zM700 800v300l300 -300h-300z" />
+<glyph unicode="&#xe023;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM500 500v400h100 v-300h200v-100h-300z" />
+<glyph unicode="&#xe024;" d="M-100 0l431 1200h209l-21 -300h162l-20 300h208l431 -1200h-538l-41 400h-242l-40 -400h-539zM488 500h224l-27 300h-170z" />
+<glyph unicode="&#xe025;" d="M0 0v400h490l-290 300h200v500h300v-500h200l-290 -300h490v-400h-1100zM813 200h175v100h-175v-100z" />
+<glyph unicode="&#xe026;" d="M1 600q0 122 47.5 233t127.5 191t191 127.5t233 47.5t233 -47.5t191 -127.5t127.5 -191t47.5 -233t-47.5 -233t-127.5 -191t-191 -127.5t-233 -47.5t-233 47.5t-191 127.5t-127.5 191t-47.5 233zM188 600q0 -170 121 -291t291 -121t291 121t121 291t-121 291t-291 121 t-291 -121t-121 -291zM350 600h150v300h200v-300h150l-250 -300z" />
+<glyph unicode="&#xe027;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM350 600l250 300 l250 -300h-150v-300h-200v300h-150z" />
+<glyph unicode="&#xe028;" d="M0 25v475l200 700h800q199 -700 200 -700v-475q0 -11 -7 -18t-18 -7h-1150q-11 0 -18 7t-7 18zM200 500h200l50 -200h300l50 200h200l-97 500h-606z" />
+<glyph unicode="&#xe029;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -172 121.5 -293t292.5 -121t292.5 121t121.5 293q0 171 -121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM500 397v401 l297 -200z" />
+<glyph unicode="&#xe030;" d="M23 600q0 -118 45.5 -224.5t123 -184t184 -123t224.5 -45.5t224.5 45.5t184 123t123 184t45.5 224.5h-150q0 -177 -125 -302t-302 -125t-302 125t-125 302t125 302t302 125q136 0 246 -81l-146 -146h400v400l-145 -145q-157 122 -355 122q-118 0 -224.5 -45.5t-184 -123 t-123 -184t-45.5 -224.5z" />
+<glyph unicode="&#xe031;" d="M23 600q0 118 45.5 224.5t123 184t184 123t224.5 45.5q198 0 355 -122l145 145v-400h-400l147 147q-112 80 -247 80q-177 0 -302 -125t-125 -302h-150zM100 0v400h400l-147 -147q112 -80 247 -80q177 0 302 125t125 302h150q0 -118 -45.5 -224.5t-123 -184t-184 -123 t-224.5 -45.5q-198 0 -355 122z" />
+<glyph unicode="&#xe032;" d="M100 0h1100v1200h-1100v-1200zM200 100v900h900v-900h-900zM300 200v100h100v-100h-100zM300 400v100h100v-100h-100zM300 600v100h100v-100h-100zM300 800v100h100v-100h-100zM500 200h500v100h-500v-100zM500 400v100h500v-100h-500zM500 600v100h500v-100h-500z M500 800v100h500v-100h-500z" />
+<glyph unicode="&#xe033;" d="M0 100v600q0 41 29.5 70.5t70.5 29.5h100v200q0 82 59 141t141 59h300q82 0 141 -59t59 -141v-200h100q41 0 70.5 -29.5t29.5 -70.5v-600q0 -41 -29.5 -70.5t-70.5 -29.5h-900q-41 0 -70.5 29.5t-29.5 70.5zM400 800h300v150q0 21 -14.5 35.5t-35.5 14.5h-200 q-21 0 -35.5 -14.5t-14.5 -35.5v-150z" />
+<glyph unicode="&#xe034;" d="M100 0v1100h100v-1100h-100zM300 400q60 60 127.5 84t127.5 17.5t122 -23t119 -30t110 -11t103 42t91 120.5v500q-40 -81 -101.5 -115.5t-127.5 -29.5t-138 25t-139.5 40t-125.5 25t-103 -29.5t-65 -115.5v-500z" />
+<glyph unicode="&#xe035;" d="M0 275q0 -11 7 -18t18 -7h50q11 0 18 7t7 18v300q0 127 70.5 231.5t184.5 161.5t245 57t245 -57t184.5 -161.5t70.5 -231.5v-300q0 -11 7 -18t18 -7h50q11 0 18 7t7 18v300q0 116 -49.5 227t-131 192.5t-192.5 131t-227 49.5t-227 -49.5t-192.5 -131t-131 -192.5 t-49.5 -227v-300zM200 20v460q0 8 6 14t14 6h160q8 0 14 -6t6 -14v-460q0 -8 -6 -14t-14 -6h-160q-8 0 -14 6t-6 14zM800 20v460q0 8 6 14t14 6h160q8 0 14 -6t6 -14v-460q0 -8 -6 -14t-14 -6h-160q-8 0 -14 6t-6 14z" />
+<glyph unicode="&#xe036;" d="M0 400h300l300 -200v800l-300 -200h-300v-400zM688 459l141 141l-141 141l71 71l141 -141l141 141l71 -71l-141 -141l141 -141l-71 -71l-141 141l-141 -141z" />
+<glyph unicode="&#xe037;" d="M0 400h300l300 -200v800l-300 -200h-300v-400zM700 857l69 53q111 -135 111 -310q0 -169 -106 -302l-67 54q86 110 86 248q0 146 -93 257z" />
+<glyph unicode="&#xe038;" d="M0 401v400h300l300 200v-800l-300 200h-300zM702 858l69 53q111 -135 111 -310q0 -170 -106 -303l-67 55q86 110 86 248q0 145 -93 257zM889 951l7 -8q123 -151 123 -344q0 -189 -119 -339l-7 -8l81 -66l6 8q142 178 142 405q0 230 -144 408l-6 8z" />
+<glyph unicode="&#xe039;" d="M0 0h500v500h-200v100h-100v-100h-200v-500zM0 600h100v100h400v100h100v100h-100v300h-500v-600zM100 100v300h300v-300h-300zM100 800v300h300v-300h-300zM200 200v100h100v-100h-100zM200 900h100v100h-100v-100zM500 500v100h300v-300h200v-100h-100v-100h-200v100 h-100v100h100v200h-200zM600 0v100h100v-100h-100zM600 1000h100v-300h200v-300h300v200h-200v100h200v500h-600v-200zM800 800v300h300v-300h-300zM900 0v100h300v-100h-300zM900 900v100h100v-100h-100zM1100 200v100h100v-100h-100z" />
+<glyph unicode="&#xe040;" d="M0 200h100v1000h-100v-1000zM100 0v100h300v-100h-300zM200 200v1000h100v-1000h-100zM500 0v91h100v-91h-100zM500 200v1000h200v-1000h-200zM700 0v91h100v-91h-100zM800 200v1000h100v-1000h-100zM900 0v91h200v-91h-200zM1000 200v1000h200v-1000h-200z" />
+<glyph unicode="&#xe041;" d="M1 700v475q0 10 7.5 17.5t17.5 7.5h474l700 -700l-500 -500zM148 953q0 -42 29 -71q30 -30 71.5 -30t71.5 30q29 29 29 71t-29 71q-30 30 -71.5 30t-71.5 -30q-29 -29 -29 -71z" />
+<glyph unicode="&#xe042;" d="M2 700v475q0 11 7 18t18 7h474l700 -700l-500 -500zM148 953q0 -42 30 -71q29 -30 71 -30t71 30q30 29 30 71t-30 71q-29 30 -71 30t-71 -30q-30 -29 -30 -71zM701 1200h100l700 -700l-500 -500l-50 50l450 450z" />
+<glyph unicode="&#xe043;" d="M100 0v1025l175 175h925v-1000l-100 -100v1000h-750l-100 -100h750v-1000h-900z" />
+<glyph unicode="&#xe044;" d="M200 0l450 444l450 -443v1150q0 20 -14.5 35t-35.5 15h-800q-21 0 -35.5 -15t-14.5 -35v-1151z" />
+<glyph unicode="&#xe045;" d="M0 100v700h200l100 -200h600l100 200h200v-700h-200v200h-800v-200h-200zM253 829l40 -124h592l62 124l-94 346q-2 11 -10 18t-18 7h-450q-10 0 -18 -7t-10 -18zM281 24l38 152q2 10 11.5 17t19.5 7h500q10 0 19.5 -7t11.5 -17l38 -152q2 -10 -3.5 -17t-15.5 -7h-600 q-10 0 -15.5 7t-3.5 17z" />
+<glyph unicode="&#xe046;" d="M0 200q0 -41 29.5 -70.5t70.5 -29.5h1000q41 0 70.5 29.5t29.5 70.5v600q0 41 -29.5 70.5t-70.5 29.5h-150q-4 8 -11.5 21.5t-33 48t-53 61t-69 48t-83.5 21.5h-200q-41 0 -82 -20.5t-70 -50t-52 -59t-34 -50.5l-12 -20h-150q-41 0 -70.5 -29.5t-29.5 -70.5v-600z M356 500q0 100 72 172t172 72t172 -72t72 -172t-72 -172t-172 -72t-172 72t-72 172zM494 500q0 -44 31 -75t75 -31t75 31t31 75t-31 75t-75 31t-75 -31t-31 -75zM900 700v100h100v-100h-100z" />
+<glyph unicode="&#xe047;" d="M53 0h365v66q-41 0 -72 11t-49 38t1 71l92 234h391l82 -222q16 -45 -5.5 -88.5t-74.5 -43.5v-66h417v66q-34 1 -74 43q-18 19 -33 42t-21 37l-6 13l-385 998h-93l-399 -1006q-24 -48 -52 -75q-12 -12 -33 -25t-36 -20l-15 -7v-66zM416 521l178 457l46 -140l116 -317h-340 z" />
+<glyph unicode="&#xe048;" d="M100 0v89q41 7 70.5 32.5t29.5 65.5v827q0 28 -1 39.5t-5.5 26t-15.5 21t-29 14t-49 14.5v70h471q120 0 213 -88t93 -228q0 -55 -11.5 -101.5t-28 -74t-33.5 -47.5t-28 -28l-12 -7q8 -3 21.5 -9t48 -31.5t60.5 -58t47.5 -91.5t21.5 -129q0 -84 -59 -156.5t-142 -111 t-162 -38.5h-500zM400 200h161q89 0 153 48.5t64 132.5q0 90 -62.5 154.5t-156.5 64.5h-159v-400zM400 700h139q76 0 130 61.5t54 138.5q0 82 -84 130.5t-239 48.5v-379z" />
+<glyph unicode="&#xe049;" d="M200 0v57q77 7 134.5 40.5t65.5 80.5l173 849q10 56 -10 74t-91 37q-6 1 -10.5 2.5t-9.5 2.5v57h425l2 -57q-33 -8 -62 -25.5t-46 -37t-29.5 -38t-17.5 -30.5l-5 -12l-128 -825q-10 -52 14 -82t95 -36v-57h-500z" />
+<glyph unicode="&#xe050;" d="M-75 200h75v800h-75l125 167l125 -167h-75v-800h75l-125 -167zM300 900v300h150h700h150v-300h-50q0 29 -8 48.5t-18.5 30t-33.5 15t-39.5 5.5t-50.5 1h-200v-850l100 -50v-100h-400v100l100 50v850h-200q-34 0 -50.5 -1t-40 -5.5t-33.5 -15t-18.5 -30t-8.5 -48.5h-49z " />
+<glyph unicode="&#xe051;" d="M33 51l167 125v-75h800v75l167 -125l-167 -125v75h-800v-75zM100 901v300h150h700h150v-300h-50q0 29 -8 48.5t-18 30t-33.5 15t-40 5.5t-50.5 1h-200v-650l100 -50v-100h-400v100l100 50v650h-200q-34 0 -50.5 -1t-39.5 -5.5t-33.5 -15t-18.5 -30t-8 -48.5h-50z" />
+<glyph unicode="&#xe052;" d="M0 50q0 -20 14.5 -35t35.5 -15h1100q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM0 350q0 -20 14.5 -35t35.5 -15h800q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-800q-21 0 -35.5 -14.5t-14.5 -35.5 v-100zM0 650q0 -20 14.5 -35t35.5 -15h1000q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-1000q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM0 950q0 -20 14.5 -35t35.5 -15h600q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-600q-21 0 -35.5 -14.5 t-14.5 -35.5v-100z" />
+<glyph unicode="&#xe053;" d="M0 50q0 -20 14.5 -35t35.5 -15h1100q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM0 650q0 -20 14.5 -35t35.5 -15h1100q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5 v-100zM200 350q0 -20 14.5 -35t35.5 -15h700q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-700q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM200 950q0 -20 14.5 -35t35.5 -15h700q21 0 35.5 15t14.5 35v100q0 21 -14.5 35.5t-35.5 14.5h-700q-21 0 -35.5 -14.5 t-14.5 -35.5v-100z" />
+<glyph unicode="&#xe054;" d="M0 50v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100q-21 0 -35.5 15t-14.5 35zM100 650v100q0 21 14.5 35.5t35.5 14.5h1000q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1000q-21 0 -35.5 15 t-14.5 35zM300 350v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-800q-21 0 -35.5 15t-14.5 35zM500 950v100q0 21 14.5 35.5t35.5 14.5h600q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-600 q-21 0 -35.5 15t-14.5 35z" />
+<glyph unicode="&#xe055;" d="M0 50v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100q-21 0 -35.5 15t-14.5 35zM0 350v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100q-21 0 -35.5 15 t-14.5 35zM0 650v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100q-21 0 -35.5 15t-14.5 35zM0 950v100q0 21 14.5 35.5t35.5 14.5h1100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-1100 q-21 0 -35.5 15t-14.5 35z" />
+<glyph unicode="&#xe056;" d="M0 50v100q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-100q-21 0 -35.5 15t-14.5 35zM0 350v100q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-100q-21 0 -35.5 15 t-14.5 35zM0 650v100q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-100q-21 0 -35.5 15t-14.5 35zM0 950v100q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-100q-21 0 -35.5 15 t-14.5 35zM300 50v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-800q-21 0 -35.5 15t-14.5 35zM300 350v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-800 q-21 0 -35.5 15t-14.5 35zM300 650v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15h-800q-21 0 -35.5 15t-14.5 35zM300 950v100q0 21 14.5 35.5t35.5 14.5h800q21 0 35.5 -14.5t14.5 -35.5v-100q0 -20 -14.5 -35t-35.5 -15 h-800q-21 0 -35.5 15t-14.5 35z" />
+<glyph unicode="&#xe057;" d="M-101 500v100h201v75l166 -125l-166 -125v75h-201zM300 0h100v1100h-100v-1100zM500 50q0 -20 14.5 -35t35.5 -15h600q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-600q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM500 350q0 -20 14.5 -35t35.5 -15h300q20 0 35 15t15 35 v100q0 21 -15 35.5t-35 14.5h-300q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM500 650q0 -20 14.5 -35t35.5 -15h500q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-500q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM500 950q0 -20 14.5 -35t35.5 -15h100q20 0 35 15t15 35v100 q0 21 -15 35.5t-35 14.5h-100q-21 0 -35.5 -14.5t-14.5 -35.5v-100z" />
+<glyph unicode="&#xe058;" d="M1 50q0 -20 14.5 -35t35.5 -15h600q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-600q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM1 350q0 -20 14.5 -35t35.5 -15h300q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-300q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM1 650 q0 -20 14.5 -35t35.5 -15h500q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-500q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM1 950q0 -20 14.5 -35t35.5 -15h100q20 0 35 15t15 35v100q0 21 -15 35.5t-35 14.5h-100q-21 0 -35.5 -14.5t-14.5 -35.5v-100zM801 0v1100h100v-1100 h-100zM934 550l167 -125v75h200v100h-200v75z" />
+<glyph unicode="&#xe059;" d="M0 275v650q0 31 22 53t53 22h750q31 0 53 -22t22 -53v-650q0 -31 -22 -53t-53 -22h-750q-31 0 -53 22t-22 53zM900 600l300 300v-600z" />
+<glyph unicode="&#xe060;" d="M0 44v1012q0 18 13 31t31 13h1112q19 0 31.5 -13t12.5 -31v-1012q0 -18 -12.5 -31t-31.5 -13h-1112q-18 0 -31 13t-13 31zM100 263l247 182l298 -131l-74 156l293 318l236 -288v500h-1000v-737zM208 750q0 56 39 95t95 39t95 -39t39 -95t-39 -95t-95 -39t-95 39t-39 95z " />
+<glyph unicode="&#xe062;" d="M148 745q0 124 60.5 231.5t165 172t226.5 64.5q123 0 227 -63t164.5 -169.5t60.5 -229.5t-73 -272q-73 -114 -166.5 -237t-150.5 -189l-57 -66q-10 9 -27 26t-66.5 70.5t-96 109t-104 135.5t-100.5 155q-63 139 -63 262zM342 772q0 -107 75.5 -182.5t181.5 -75.5 q107 0 182.5 75.5t75.5 182.5t-75.5 182t-182.5 75t-182 -75.5t-75 -181.5z" />
+<glyph unicode="&#xe063;" d="M1 600q0 122 47.5 233t127.5 191t191 127.5t233 47.5t233 -47.5t191 -127.5t127.5 -191t47.5 -233t-47.5 -233t-127.5 -191t-191 -127.5t-233 -47.5t-233 47.5t-191 127.5t-127.5 191t-47.5 233zM173 600q0 -177 125.5 -302t301.5 -125v854q-176 0 -301.5 -125 t-125.5 -302z" />
+<glyph unicode="&#xe064;" d="M117 406q0 94 34 186t88.5 172.5t112 159t115 177t87.5 194.5q21 -71 57.5 -142.5t76 -130.5t83 -118.5t82 -117t70 -116t50 -125.5t18.5 -136q0 -89 -39 -165.5t-102 -126.5t-140 -79.5t-156 -33.5q-114 6 -211.5 53t-161.5 138.5t-64 210.5zM243 414q14 -82 59.5 -136 t136.5 -80l16 98q-7 6 -18 17t-34 48t-33 77q-15 73 -14 143.5t10 122.5l9 51q-92 -110 -119.5 -185t-12.5 -156z" />
+<glyph unicode="&#xe065;" d="M0 400v300q0 165 117.5 282.5t282.5 117.5q366 -6 397 -14l-186 -186h-311q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v125l200 200v-225q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5 t-117.5 282.5zM436 341l161 50l412 412l-114 113l-405 -405zM995 1015l113 -113l113 113l-21 85l-92 28z" />
+<glyph unicode="&#xe066;" d="M0 400v300q0 165 117.5 282.5t282.5 117.5h261l2 -80q-133 -32 -218 -120h-145q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5l200 153v-53q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5t-117.5 282.5 zM423 524q30 38 81.5 64t103 35.5t99 14t77.5 3.5l29 -1v-209l360 324l-359 318v-216q-7 0 -19 -1t-48 -8t-69.5 -18.5t-76.5 -37t-76.5 -59t-62 -88t-39.5 -121.5z" />
+<glyph unicode="&#xe067;" d="M0 400v300q0 165 117.5 282.5t282.5 117.5h300q60 0 127 -23l-178 -177h-349q-41 0 -70.5 -29.5t-29.5 -70.5v-500q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v69l200 200v-169q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5 t-117.5 282.5zM342 632l283 -284l566 567l-136 137l-430 -431l-147 147z" />
+<glyph unicode="&#xe068;" d="M0 603l300 296v-198h200v200h-200l300 300l295 -300h-195v-200h200v198l300 -296l-300 -300v198h-200v-200h195l-295 -300l-300 300h200v200h-200v-198z" />
+<glyph unicode="&#xe069;" d="M200 50v1000q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-437l500 487v-1100l-500 488v-438q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5z" />
+<glyph unicode="&#xe070;" d="M0 50v1000q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-437l500 487v-487l500 487v-1100l-500 488v-488l-500 488v-438q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5z" />
+<glyph unicode="&#xe071;" d="M136 550l564 550v-487l500 487v-1100l-500 488v-488z" />
+<glyph unicode="&#xe072;" d="M200 0l900 550l-900 550v-1100z" />
+<glyph unicode="&#xe073;" d="M200 150q0 -21 14.5 -35.5t35.5 -14.5h200q21 0 35.5 14.5t14.5 35.5v800q0 21 -14.5 35.5t-35.5 14.5h-200q-21 0 -35.5 -14.5t-14.5 -35.5v-800zM600 150q0 -21 14.5 -35.5t35.5 -14.5h200q21 0 35.5 14.5t14.5 35.5v800q0 21 -14.5 35.5t-35.5 14.5h-200 q-21 0 -35.5 -14.5t-14.5 -35.5v-800z" />
+<glyph unicode="&#xe074;" d="M200 150q0 -20 14.5 -35t35.5 -15h800q21 0 35.5 15t14.5 35v800q0 21 -14.5 35.5t-35.5 14.5h-800q-21 0 -35.5 -14.5t-14.5 -35.5v-800z" />
+<glyph unicode="&#xe075;" d="M0 0v1100l500 -487v487l564 -550l-564 -550v488z" />
+<glyph unicode="&#xe076;" d="M0 0v1100l500 -487v487l500 -487v437q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-1000q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v438l-500 -488v488z" />
+<glyph unicode="&#xe077;" d="M300 0v1100l500 -487v437q0 21 14.5 35.5t35.5 14.5h100q21 0 35.5 -14.5t14.5 -35.5v-1000q0 -21 -14.5 -35.5t-35.5 -14.5h-100q-21 0 -35.5 14.5t-14.5 35.5v438z" />
+<glyph unicode="&#xe078;" d="M100 250v100q0 21 14.5 35.5t35.5 14.5h1000q21 0 35.5 -14.5t14.5 -35.5v-100q0 -21 -14.5 -35.5t-35.5 -14.5h-1000q-21 0 -35.5 14.5t-14.5 35.5zM100 500h1100l-550 564z" />
+<glyph unicode="&#xe079;" d="M185 599l592 -592l240 240l-353 353l353 353l-240 240z" />
+<glyph unicode="&#xe080;" d="M272 194l353 353l-353 353l241 240l572 -571l21 -22l-1 -1v-1l-592 -591z" />
+<glyph unicode="&#xe081;" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -300t-217.5 -218t-299.5 -80t-299.5 80t-217.5 218t-80 300zM300 500h200v-200h200v200h200v200h-200v200h-200v-200h-200v-200z" />
+<glyph unicode="&#xe082;" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -300t-217.5 -218t-299.5 -80t-299.5 80t-217.5 218t-80 300zM300 500h600v200h-600v-200z" />
+<glyph unicode="&#xe083;" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -300t-217.5 -218t-299.5 -80t-299.5 80t-217.5 218t-80 300zM246 459l213 -213l141 142l141 -142l213 213l-142 141l142 141l-213 212l-141 -141l-141 142l-212 -213l141 -141z" />
+<glyph unicode="&#xe084;" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -299.5t-217.5 -217.5t-299.5 -80t-299.5 80t-217.5 217.5t-80 299.5zM270 551l276 -277l411 411l-175 174l-236 -236l-102 102z" />
+<glyph unicode="&#xe085;" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -300t-217.5 -218t-299.5 -80t-299.5 80t-217.5 218t-80 300zM363 700h144q4 0 11.5 -1t11 -1t6.5 3t3 9t1 11t3.5 8.5t3.5 6t5.5 4t6.5 2.5t9 1.5t9 0.5h11.5h12.5q19 0 30 -10t11 -26 q0 -22 -4 -28t-27 -22q-5 -1 -12.5 -3t-27 -13.5t-34 -27t-26.5 -46t-11 -68.5h200q5 3 14 8t31.5 25.5t39.5 45.5t31 69t14 94q0 51 -17.5 89t-42 58t-58.5 32t-58.5 15t-51.5 3q-105 0 -172 -56t-67 -183zM500 300h200v100h-200v-100z" />
+<glyph unicode="&#xe086;" d="M3 600q0 162 80 299.5t217.5 217.5t299.5 80t299.5 -80t217.5 -217.5t80 -299.5t-80 -300t-217.5 -218t-299.5 -80t-299.5 80t-217.5 218t-80 300zM400 300h400v100h-100v300h-300v-100h100v-200h-100v-100zM500 800h200v100h-200v-100z" />
+<glyph unicode="&#xe087;" d="M0 500v200h194q15 60 36 104.5t55.5 86t88 69t126.5 40.5v200h200v-200q54 -20 113 -60t112.5 -105.5t71.5 -134.5h203v-200h-203q-25 -102 -116.5 -186t-180.5 -117v-197h-200v197q-140 27 -208 102.5t-98 200.5h-194zM290 500q24 -73 79.5 -127.5t130.5 -78.5v206h200 v-206q149 48 201 206h-201v200h200q-25 74 -76 127.5t-124 76.5v-204h-200v203q-75 -24 -130 -77.5t-79 -125.5h209v-200h-210z" />
+<glyph unicode="&#xe088;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM356 465l135 135 l-135 135l109 109l135 -135l135 135l109 -109l-135 -135l135 -135l-109 -109l-135 135l-135 -135z" />
+<glyph unicode="&#xe089;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM322 537l141 141 l87 -87l204 205l142 -142l-346 -345z" />
+<glyph unicode="&#xe090;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -115 62 -215l568 567q-100 62 -216 62q-171 0 -292.5 -121.5t-121.5 -292.5zM391 245q97 -59 209 -59q171 0 292.5 121.5t121.5 292.5 q0 112 -59 209z" />
+<glyph unicode="&#xe091;" d="M0 547l600 453v-300h600v-300h-600v-301z" />
+<glyph unicode="&#xe092;" d="M0 400v300h600v300l600 -453l-600 -448v301h-600z" />
+<glyph unicode="&#xe093;" d="M204 600l450 600l444 -600h-298v-600h-300v600h-296z" />
+<glyph unicode="&#xe094;" d="M104 600h296v600h300v-600h298l-449 -600z" />
+<glyph unicode="&#xe095;" d="M0 200q6 132 41 238.5t103.5 193t184 138t271.5 59.5v271l600 -453l-600 -448v301q-95 -2 -183 -20t-170 -52t-147 -92.5t-100 -135.5z" />
+<glyph unicode="&#xe096;" d="M0 0v400l129 -129l294 294l142 -142l-294 -294l129 -129h-400zM635 777l142 -142l294 294l129 -129v400h-400l129 -129z" />
+<glyph unicode="&#xe097;" d="M34 176l295 295l-129 129h400v-400l-129 130l-295 -295zM600 600v400l129 -129l295 295l142 -141l-295 -295l129 -130h-400z" />
+<glyph unicode="&#xe101;" d="M23 600q0 118 45.5 224.5t123 184t184 123t224.5 45.5t224.5 -45.5t184 -123t123 -184t45.5 -224.5t-45.5 -224.5t-123 -184t-184 -123t-224.5 -45.5t-224.5 45.5t-184 123t-123 184t-45.5 224.5zM456 851l58 -302q4 -20 21.5 -34.5t37.5 -14.5h54q20 0 37.5 14.5 t21.5 34.5l58 302q4 20 -8 34.5t-33 14.5h-207q-20 0 -32 -14.5t-8 -34.5zM500 300h200v100h-200v-100z" />
+<glyph unicode="&#xe102;" d="M0 800h100v-200h400v300h200v-300h400v200h100v100h-111v6t-1 15t-3 18l-34 172q-11 39 -41.5 63t-69.5 24q-32 0 -61 -17l-239 -144q-22 -13 -40 -35q-19 24 -40 36l-238 144q-33 18 -62 18q-39 0 -69.5 -23t-40.5 -61l-35 -177q-2 -8 -3 -18t-1 -15v-6h-111v-100z M100 0h400v400h-400v-400zM200 900q-3 0 14 48t35 96l18 47l214 -191h-281zM700 0v400h400v-400h-400zM731 900l202 197q5 -12 12 -32.5t23 -64t25 -72t7 -28.5h-269z" />
+<glyph unicode="&#xe103;" d="M0 -22v143l216 193q-9 53 -13 83t-5.5 94t9 113t38.5 114t74 124q47 60 99.5 102.5t103 68t127.5 48t145.5 37.5t184.5 43.5t220 58.5q0 -189 -22 -343t-59 -258t-89 -181.5t-108.5 -120t-122 -68t-125.5 -30t-121.5 -1.5t-107.5 12.5t-87.5 17t-56.5 7.5l-99 -55z M238.5 300.5q19.5 -6.5 86.5 76.5q55 66 367 234q70 38 118.5 69.5t102 79t99 111.5t86.5 148q22 50 24 60t-6 19q-7 5 -17 5t-26.5 -14.5t-33.5 -39.5q-35 -51 -113.5 -108.5t-139.5 -89.5l-61 -32q-369 -197 -458 -401q-48 -111 -28.5 -117.5z" />
+<glyph unicode="&#xe104;" d="M111 408q0 -33 5 -63q9 -56 44 -119.5t105 -108.5q31 -21 64 -16t62 23.5t57 49.5t48 61.5t35 60.5q32 66 39 184.5t-13 157.5q79 -80 122 -164t26 -184q-5 -33 -20.5 -69.5t-37.5 -80.5q-10 -19 -14.5 -29t-12 -26t-9 -23.5t-3 -19t2.5 -15.5t11 -9.5t19.5 -5t30.5 2.5 t42 8q57 20 91 34t87.5 44.5t87 64t65.5 88.5t47 122q38 172 -44.5 341.5t-246.5 278.5q22 -44 43 -129q39 -159 -32 -154q-15 2 -33 9q-79 33 -120.5 100t-44 175.5t48.5 257.5q-13 -8 -34 -23.5t-72.5 -66.5t-88.5 -105.5t-60 -138t-8 -166.5q2 -12 8 -41.5t8 -43t6 -39.5 t3.5 -39.5t-1 -33.5t-6 -31.5t-13.5 -24t-21 -20.5t-31 -12q-38 -10 -67 13t-40.5 61.5t-15 81.5t10.5 75q-52 -46 -83.5 -101t-39 -107t-7.5 -85z" />
+<glyph unicode="&#xe105;" d="M-61 600l26 40q6 10 20 30t49 63.5t74.5 85.5t97 90t116.5 83.5t132.5 59t145.5 23.5t145.5 -23.5t132.5 -59t116.5 -83.5t97 -90t74.5 -85.5t49 -63.5t20 -30l26 -40l-26 -40q-6 -10 -20 -30t-49 -63.5t-74.5 -85.5t-97 -90t-116.5 -83.5t-132.5 -59t-145.5 -23.5 t-145.5 23.5t-132.5 59t-116.5 83.5t-97 90t-74.5 85.5t-49 63.5t-20 30zM120 600q7 -10 40.5 -58t56 -78.5t68 -77.5t87.5 -75t103 -49.5t125 -21.5t123.5 20t100.5 45.5t85.5 71.5t66.5 75.5t58 81.5t47 66q-1 1 -28.5 37.5t-42 55t-43.5 53t-57.5 63.5t-58.5 54 q49 -74 49 -163q0 -124 -88 -212t-212 -88t-212 88t-88 212q0 85 46 158q-102 -87 -226 -258zM377 656q49 -124 154 -191l105 105q-37 24 -75 72t-57 84l-20 36z" />
+<glyph unicode="&#xe106;" d="M-61 600l26 40q6 10 20 30t49 63.5t74.5 85.5t97 90t116.5 83.5t132.5 59t145.5 23.5q61 0 121 -17l37 142h148l-314 -1200h-148l37 143q-82 21 -165 71.5t-140 102t-109.5 112t-72 88.5t-29.5 43zM120 600q210 -282 393 -336l37 141q-107 18 -178.5 101.5t-71.5 193.5 q0 85 46 158q-102 -87 -226 -258zM377 656q49 -124 154 -191l47 47l23 87q-30 28 -59 69t-44 68l-14 26zM780 161l38 145q22 15 44.5 34t46 44t40.5 44t41 50.5t33.5 43.5t33 44t24.5 34q-97 127 -140 175l39 146q67 -54 131.5 -125.5t87.5 -103.5t36 -52l26 -40l-26 -40 q-7 -12 -25.5 -38t-63.5 -79.5t-95.5 -102.5t-124 -100t-146.5 -79z" />
+<glyph unicode="&#xe107;" d="M-97.5 34q13.5 -34 50.5 -34h1294q37 0 50.5 35.5t-7.5 67.5l-642 1056q-20 33 -48 36t-48 -29l-642 -1066q-21 -32 -7.5 -66zM155 200l445 723l445 -723h-345v100h-200v-100h-345zM500 600l100 -300l100 300v100h-200v-100z" />
+<glyph unicode="&#xe108;" d="M100 262v41q0 20 11 44.5t26 38.5l363 325v339q0 62 44 106t106 44t106 -44t44 -106v-339l363 -325q15 -14 26 -38.5t11 -44.5v-41q0 -20 -12 -26.5t-29 5.5l-359 249v-263q100 -91 100 -113v-64q0 -21 -13 -29t-32 1l-94 78h-222l-94 -78q-19 -9 -32 -1t-13 29v64 q0 22 100 113v263l-359 -249q-17 -12 -29 -5.5t-12 26.5z" />
+<glyph unicode="&#xe109;" d="M0 50q0 -20 14.5 -35t35.5 -15h1000q21 0 35.5 15t14.5 35v750h-1100v-750zM0 900h1100v150q0 21 -14.5 35.5t-35.5 14.5h-150v100h-100v-100h-500v100h-100v-100h-150q-21 0 -35.5 -14.5t-14.5 -35.5v-150zM100 100v100h100v-100h-100zM100 300v100h100v-100h-100z M100 500v100h100v-100h-100zM300 100v100h100v-100h-100zM300 300v100h100v-100h-100zM300 500v100h100v-100h-100zM500 100v100h100v-100h-100zM500 300v100h100v-100h-100zM500 500v100h100v-100h-100zM700 100v100h100v-100h-100zM700 300v100h100v-100h-100zM700 500 v100h100v-100h-100zM900 100v100h100v-100h-100zM900 300v100h100v-100h-100zM900 500v100h100v-100h-100z" />
+<glyph unicode="&#xe110;" d="M0 200v200h259l600 600h241v198l300 -295l-300 -300v197h-159l-600 -600h-341zM0 800h259l122 -122l141 142l-181 180h-341v-200zM678 381l141 142l122 -123h159v198l300 -295l-300 -300v197h-241z" />
+<glyph unicode="&#xe111;" d="M0 400v600q0 41 29.5 70.5t70.5 29.5h1000q41 0 70.5 -29.5t29.5 -70.5v-600q0 -41 -29.5 -70.5t-70.5 -29.5h-596l-304 -300v300h-100q-41 0 -70.5 29.5t-29.5 70.5z" />
+<glyph unicode="&#xe112;" d="M100 600v200h300v-250q0 -113 6 -145q17 -92 102 -117q39 -11 92 -11q37 0 66.5 5.5t50 15.5t36 24t24 31.5t14 37.5t7 42t2.5 45t0 47v25v250h300v-200q0 -42 -3 -83t-15 -104t-31.5 -116t-58 -109.5t-89 -96.5t-129 -65.5t-174.5 -25.5t-174.5 25.5t-129 65.5t-89 96.5 t-58 109.5t-31.5 116t-15 104t-3 83zM100 900v300h300v-300h-300zM800 900v300h300v-300h-300z" />
+<glyph unicode="&#xe113;" d="M-30 411l227 -227l352 353l353 -353l226 227l-578 579z" />
+<glyph unicode="&#xe114;" d="M70 797l580 -579l578 579l-226 227l-353 -353l-352 353z" />
+<glyph unicode="&#xe115;" d="M-198 700l299 283l300 -283h-203v-400h385l215 -200h-800v600h-196zM402 1000l215 -200h381v-400h-198l299 -283l299 283h-200v600h-796z" />
+<glyph unicode="&#xe116;" d="M18 939q-5 24 10 42q14 19 39 19h896l38 162q5 17 18.5 27.5t30.5 10.5h94q20 0 35 -14.5t15 -35.5t-15 -35.5t-35 -14.5h-54l-201 -961q-2 -4 -6 -10.5t-19 -17.5t-33 -11h-31v-50q0 -20 -14.5 -35t-35.5 -15t-35.5 15t-14.5 35v50h-300v-50q0 -20 -14.5 -35t-35.5 -15 t-35.5 15t-14.5 35v50h-50q-21 0 -35.5 15t-14.5 35q0 21 14.5 35.5t35.5 14.5h535l48 200h-633q-32 0 -54.5 21t-27.5 43z" />
+<glyph unicode="&#xe117;" d="M0 0v800h1200v-800h-1200zM0 900v100h200q0 41 29.5 70.5t70.5 29.5h300q41 0 70.5 -29.5t29.5 -70.5h500v-100h-1200z" />
+<glyph unicode="&#xe118;" d="M1 0l300 700h1200l-300 -700h-1200zM1 400v600h200q0 41 29.5 70.5t70.5 29.5h300q41 0 70.5 -29.5t29.5 -70.5h500v-200h-1000z" />
+<glyph unicode="&#xe119;" d="M302 300h198v600h-198l298 300l298 -300h-198v-600h198l-298 -300z" />
+<glyph unicode="&#xe120;" d="M0 600l300 298v-198h600v198l300 -298l-300 -297v197h-600v-197z" />
+<glyph unicode="&#xe121;" d="M0 100v100q0 41 29.5 70.5t70.5 29.5h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5zM31 400l172 739q5 22 23 41.5t38 19.5h672q19 0 37.5 -22.5t23.5 -45.5l172 -732h-1138zM800 100h100v100h-100v-100z M1000 100h100v100h-100v-100z" />
+<glyph unicode="&#xe122;" d="M-101 600v50q0 24 25 49t50 38l25 13v-250l-11 5.5t-24 14t-30 21.5t-24 27.5t-11 31.5zM99 500v250v5q0 13 0.5 18.5t2.5 13t8 10.5t15 3h200l675 250v-850l-675 200h-38l47 -276q2 -12 -3 -17.5t-11 -6t-21 -0.5h-8h-83q-20 0 -34.5 14t-18.5 35q-56 337 -56 351z M1100 200v850q0 21 14.5 35.5t35.5 14.5q20 0 35 -14.5t15 -35.5v-850q0 -20 -15 -35t-35 -15q-21 0 -35.5 15t-14.5 35z" />
+<glyph unicode="&#xe123;" d="M74 350q0 21 13.5 35.5t33.5 14.5h17l118 173l63 327q15 77 76 140t144 83l-18 32q-6 19 3 32t29 13h94q20 0 29 -10.5t3 -29.5l-18 -37q83 -19 144 -82.5t76 -140.5l63 -327l118 -173h17q20 0 33.5 -14.5t13.5 -35.5q0 -20 -13 -40t-31 -27q-22 -9 -63 -23t-167.5 -37 t-251.5 -23t-245.5 20.5t-178.5 41.5l-58 20q-18 7 -31 27.5t-13 40.5zM497 110q12 -49 40 -79.5t63 -30.5t63 30.5t39 79.5q-48 -6 -102 -6t-103 6z" />
+<glyph unicode="&#xe124;" d="M21 445l233 -45l-78 -224l224 78l45 -233l155 179l155 -179l45 233l224 -78l-78 224l234 45l-180 155l180 156l-234 44l78 225l-224 -78l-45 233l-155 -180l-155 180l-45 -233l-224 78l78 -225l-233 -44l179 -156z" />
+<glyph unicode="&#xe125;" d="M0 200h200v600h-200v-600zM300 275q0 -75 100 -75h61q123 -100 139 -100h250q46 0 83 57l238 344q29 31 29 74v100q0 44 -30.5 84.5t-69.5 40.5h-328q28 118 28 125v150q0 44 -30.5 84.5t-69.5 40.5h-50q-27 0 -51 -20t-38 -48l-96 -198l-145 -196q-20 -26 -20 -63v-400z M400 300v375l150 212l100 213h50v-175l-50 -225h450v-125l-250 -375h-214l-136 100h-100z" />
+<glyph unicode="&#xe126;" d="M0 400v600h200v-600h-200zM300 525v400q0 75 100 75h61q123 100 139 100h250q46 0 83 -57l238 -344q29 -31 29 -74v-100q0 -44 -30.5 -84.5t-69.5 -40.5h-328q28 -118 28 -125v-150q0 -44 -30.5 -84.5t-69.5 -40.5h-50q-27 0 -51 20t-38 48l-96 198l-145 196 q-20 26 -20 63zM400 525l150 -212l100 -213h50v175l-50 225h450v125l-250 375h-214l-136 -100h-100v-375z" />
+<glyph unicode="&#xe127;" d="M8 200v600h200v-600h-200zM308 275v525q0 17 14 35.5t28 28.5l14 9l362 230q14 6 25 6q17 0 29 -12l109 -112q14 -14 14 -34q0 -18 -11 -32l-85 -121h302q85 0 138.5 -38t53.5 -110t-54.5 -111t-138.5 -39h-107l-130 -339q-7 -22 -20.5 -41.5t-28.5 -19.5h-341 q-7 0 -90 81t-83 94zM408 289l100 -89h293l131 339q6 21 19.5 41t28.5 20h203q16 0 25 15t9 36q0 20 -9 34.5t-25 14.5h-457h-6.5h-7.5t-6.5 0.5t-6 1t-5 1.5t-5.5 2.5t-4 4t-4 5.5q-5 12 -5 20q0 14 10 27l147 183l-86 83l-339 -236v-503z" />
+<glyph unicode="&#xe128;" d="M-101 651q0 72 54 110t139 37h302l-85 121q-11 16 -11 32q0 21 14 34l109 113q13 12 29 12q11 0 25 -6l365 -230q7 -4 16.5 -10.5t26 -26t16.5 -36.5v-526q0 -13 -85.5 -93.5t-93.5 -80.5h-342q-15 0 -28.5 20t-19.5 41l-131 339h-106q-84 0 -139 39t-55 111zM-1 601h222 q15 0 28.5 -20.5t19.5 -40.5l131 -339h293l106 89v502l-342 237l-87 -83l145 -184q10 -11 10 -26q0 -11 -5 -20q-1 -3 -3.5 -5.5l-4 -4t-5 -2.5t-5.5 -1.5t-6.5 -1t-6.5 -0.5h-7.5h-6.5h-476v-100zM999 201v600h200v-600h-200z" />
+<glyph unicode="&#xe129;" d="M97 719l230 -363q4 -6 10.5 -15.5t26 -25t36.5 -15.5h525q13 0 94 83t81 90v342q0 15 -20 28.5t-41 19.5l-339 131v106q0 84 -39 139t-111 55t-110 -53.5t-38 -138.5v-302l-121 84q-15 12 -33.5 11.5t-32.5 -13.5l-112 -110q-22 -22 -6 -53zM172 739l83 86l183 -146 q22 -18 47 -5q3 1 5.5 3.5l4 4t2.5 5t1.5 5.5t1 6.5t0.5 6v7.5v7v456q0 22 25 31t50 -0.5t25 -30.5v-202q0 -16 20 -29.5t41 -19.5l339 -130v-294l-89 -100h-503zM400 0v200h600v-200h-600z" />
+<glyph unicode="&#xe130;" d="M1 585q-15 -31 7 -53l112 -110q13 -13 32 -13.5t34 10.5l121 85l-1 -302q0 -84 38.5 -138t110.5 -54t111 55t39 139v106l339 131q20 6 40.5 19.5t20.5 28.5v342q0 7 -81 90t-94 83h-525q-17 0 -35.5 -14t-28.5 -28l-10 -15zM76 565l237 339h503l89 -100v-294l-340 -130 q-20 -6 -40 -20t-20 -29v-202q0 -22 -25 -31t-50 0t-25 31v456v14.5t-1.5 11.5t-5 12t-9.5 7q-24 13 -46 -5l-184 -146zM305 1104v200h600v-200h-600z" />
+<glyph unicode="&#xe131;" d="M5 597q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5q162 0 299.5 -80t217.5 -218t80 -300t-80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5zM300 500h300l-2 -194l402 294l-402 298v-197h-298v-201z" />
+<glyph unicode="&#xe132;" d="M0 597q0 122 47.5 232.5t127.5 190.5t190.5 127.5t231.5 47.5q122 0 232.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-218 -217.5t-300 -80t-299.5 80t-217.5 217.5t-80 299.5zM200 600l400 -294v194h302v201h-300v197z" />
+<glyph unicode="&#xe133;" d="M5 597q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5q121 0 231.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5zM300 600h200v-300h200v300h200l-300 400z" />
+<glyph unicode="&#xe134;" d="M5 597q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5q121 0 231.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5zM300 600l300 -400l300 400h-200v300h-200v-300h-200z" />
+<glyph unicode="&#xe135;" d="M5 597q0 122 47.5 232.5t127.5 190.5t190.5 127.5t232.5 47.5q121 0 231.5 -47.5t190.5 -127.5t127.5 -190.5t47.5 -232.5q0 -162 -80 -299.5t-217.5 -217.5t-299.5 -80t-300 80t-218 217.5t-80 299.5zM254 780q-8 -34 5.5 -93t7.5 -87q0 -9 17 -44t16 -60q12 0 23 -5.5 t23 -15t20 -13.5q20 -10 108 -42q22 -8 53 -31.5t59.5 -38.5t57.5 -11q8 -18 -15 -55.5t-20 -57.5q12 -21 22.5 -34.5t28 -27t36.5 -17.5q0 -6 -3 -15.5t-3.5 -14.5t4.5 -17q101 -2 221 111q31 30 47 48t34 49t21 62q-14 9 -37.5 9.5t-35.5 7.5q-14 7 -49 15t-52 19 q-9 0 -39.5 -0.5t-46.5 -1.5t-39 -6.5t-39 -16.5q-50 -35 -66 -12q-4 2 -3.5 25.5t0.5 25.5q-6 13 -26.5 17t-24.5 7q2 22 -2 41t-16.5 28t-38.5 -20q-23 -25 -42 4q-19 28 -8 58q8 16 22 22q6 -1 26 -1.5t33.5 -4.5t19.5 -13q12 -19 32 -37.5t34 -27.5l14 -8q0 3 9.5 39.5 t5.5 57.5q-4 23 14.5 44.5t22.5 31.5q5 14 10 35t8.5 31t15.5 22.5t34 21.5q-6 18 10 37q8 0 23.5 -1.5t24.5 -1.5t20.5 4.5t20.5 15.5q-10 23 -30.5 42.5t-38 30t-49 26.5t-43.5 23q11 41 1 44q31 -13 58.5 -14.5t39.5 3.5l11 4q6 36 -17 53.5t-64 28.5t-56 23 q-19 -3 -37 0q-15 -12 -36.5 -21t-34.5 -12t-44 -8t-39 -6q-15 -3 -46 0t-45 -3q-20 -6 -51.5 -25.5t-34.5 -34.5q-3 -11 6.5 -22.5t8.5 -18.5q-3 -34 -27.5 -91t-29.5 -79zM518 915q3 12 16 30.5t16 25.5q10 -10 18.5 -10t14 6t14.5 14.5t16 12.5q0 -18 8 -42.5t16.5 -44 t9.5 -23.5q-6 1 -39 5t-53.5 10t-36.5 16z" />
+<glyph unicode="&#xe136;" d="M0 164.5q0 21.5 15 37.5l600 599q-33 101 6 201.5t135 154.5q164 92 306 -9l-259 -138l145 -232l251 126q13 -175 -151 -267q-123 -70 -253 -23l-596 -596q-15 -16 -36.5 -16t-36.5 16l-111 110q-15 15 -15 36.5z" />
+<glyph unicode="&#xe137;" horiz-adv-x="1220" d="M0 196v100q0 41 29.5 70.5t70.5 29.5h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5zM0 596v100q0 41 29.5 70.5t70.5 29.5h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000 q-41 0 -70.5 29.5t-29.5 70.5zM0 996v100q0 41 29.5 70.5t70.5 29.5h1000q41 0 70.5 -29.5t29.5 -70.5v-100q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5zM600 596h500v100h-500v-100zM800 196h300v100h-300v-100zM900 996h200v100h-200v-100z" />
+<glyph unicode="&#xe138;" d="M100 1100v100h1000v-100h-1000zM150 1000h900l-350 -500v-300l-200 -200v500z" />
+<glyph unicode="&#xe139;" d="M0 200v200h1200v-200q0 -41 -29.5 -70.5t-70.5 -29.5h-1000q-41 0 -70.5 29.5t-29.5 70.5zM0 500v400q0 41 29.5 70.5t70.5 29.5h300v100q0 41 29.5 70.5t70.5 29.5h200q41 0 70.5 -29.5t29.5 -70.5v-100h300q41 0 70.5 -29.5t29.5 -70.5v-400h-500v100h-200v-100h-500z M500 1000h200v100h-200v-100z" />
+<glyph unicode="&#xe140;" d="M0 0v400l129 -129l200 200l142 -142l-200 -200l129 -129h-400zM0 800l129 129l200 -200l142 142l-200 200l129 129h-400v-400zM729 329l142 142l200 -200l129 129v-400h-400l129 129zM729 871l200 200l-129 129h400v-400l-129 129l-200 -200z" />
+<glyph unicode="&#xe141;" d="M0 596q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM182 596q0 -172 121.5 -293t292.5 -121t292.5 121t121.5 293q0 171 -121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM291 655 q0 23 15.5 38.5t38.5 15.5t39 -16t16 -38q0 -23 -16 -39t-39 -16q-22 0 -38 16t-16 39zM400 850q0 22 16 38.5t39 16.5q22 0 38 -16t16 -39t-16 -39t-38 -16q-23 0 -39 16.5t-16 38.5zM513 609q0 32 21 56.5t52 29.5l122 126l1 1q-9 14 -9 28q0 22 16 38.5t39 16.5 q22 0 38 -16t16 -39t-16 -39t-38 -16q-16 0 -29 10l-55 -145q17 -22 17 -51q0 -36 -25.5 -61.5t-61.5 -25.5q-37 0 -62.5 25.5t-25.5 61.5zM800 655q0 22 16 38t39 16t38.5 -15.5t15.5 -38.5t-16 -39t-38 -16q-23 0 -39 16t-16 39z" />
+<glyph unicode="&#xe142;" d="M-40 375q-13 -95 35 -173q35 -57 94 -89t129 -32q63 0 119 28q33 16 65 40.5t52.5 45.5t59.5 64q40 44 57 61l394 394q35 35 47 84t-3 96q-27 87 -117 104q-20 2 -29 2q-46 0 -79.5 -17t-67.5 -51l-388 -396l-7 -7l69 -67l377 373q20 22 39 38q23 23 50 23q38 0 53 -36 q16 -39 -20 -75l-547 -547q-52 -52 -125 -52q-55 0 -100 33t-54 96q-5 35 2.5 66t31.5 63t42 50t56 54q24 21 44 41l348 348q52 52 82.5 79.5t84 54t107.5 26.5q25 0 48 -4q95 -17 154 -94.5t51 -175.5q-7 -101 -98 -192l-252 -249l-253 -256l7 -7l69 -60l517 511 q67 67 95 157t11 183q-16 87 -67 154t-130 103q-69 33 -152 33q-107 0 -197 -55q-40 -24 -111 -95l-512 -512q-68 -68 -81 -163z" />
+<glyph unicode="&#xe143;" d="M79 784q0 131 99 229.5t230 98.5q144 0 242 -129q103 129 245 129q130 0 227 -98.5t97 -229.5q0 -46 -17.5 -91t-61 -99t-77 -89.5t-104.5 -105.5q-197 -191 -293 -322l-17 -23l-16 23q-43 58 -100 122.5t-92 99.5t-101 100l-84.5 84.5t-68 74t-60 78t-33.5 70.5t-15 78z M250 784q0 -27 30.5 -70t61.5 -75.5t95 -94.5l22 -22q93 -90 190 -201q82 92 195 203l12 12q64 62 97.5 97t64.5 79t31 72q0 71 -48 119.5t-106 48.5q-73 0 -131 -83l-118 -171l-114 174q-51 80 -124 80q-59 0 -108.5 -49.5t-49.5 -118.5z" />
+<glyph unicode="&#xe144;" d="M57 353q0 -94 66 -160l141 -141q66 -66 159 -66q95 0 159 66l283 283q66 66 66 159t-66 159l-141 141q-12 12 -19 17l-105 -105l212 -212l-389 -389l-247 248l95 95l-18 18q-46 45 -75 101l-55 -55q-66 -66 -66 -159zM269 706q0 -93 66 -159l141 -141l19 -17l105 105 l-212 212l389 389l247 -247l-95 -96l18 -18q46 -46 77 -99l29 29q35 35 62.5 88t27.5 96q0 93 -66 159l-141 141q-66 66 -159 66q-95 0 -159 -66l-283 -283q-66 -64 -66 -159z" />
+<glyph unicode="&#xe145;" d="M200 100v953q0 21 30 46t81 48t129 38t163 15t162 -15t127 -38t79 -48t29 -46v-953q0 -41 -29.5 -70.5t-70.5 -29.5h-600q-41 0 -70.5 29.5t-29.5 70.5zM300 300h600v700h-600v-700zM496 150q0 -43 30.5 -73.5t73.5 -30.5t73.5 30.5t30.5 73.5t-30.5 73.5t-73.5 30.5 t-73.5 -30.5t-30.5 -73.5z" />
+<glyph unicode="&#xe146;" d="M0 0l303 380l207 208l-210 212h300l267 279l-35 36q-15 14 -15 35t15 35q14 15 35 15t35 -15l283 -282q15 -15 15 -36t-15 -35q-14 -15 -35 -15t-35 15l-36 35l-279 -267v-300l-212 210l-208 -207z" />
+<glyph unicode="&#xe148;" d="M295 433h139q5 -77 48.5 -126.5t117.5 -64.5v335l-27 7q-46 14 -79 26.5t-72 36t-62.5 52t-40 72.5t-16.5 99q0 92 44 159.5t109 101t144 40.5v78h100v-79q38 -4 72.5 -13.5t75.5 -31.5t71 -53.5t51.5 -84t24.5 -118.5h-159q-8 72 -35 109.5t-101 50.5v-307l64 -14 q34 -7 64 -16.5t70 -31.5t67.5 -52t47.5 -80.5t20 -112.5q0 -139 -89 -224t-244 -96v-77h-100v78q-152 17 -237 104q-40 40 -52.5 93.5t-15.5 139.5zM466 889q0 -29 8 -51t16.5 -34t29.5 -22.5t31 -13.5t38 -10q7 -2 11 -3v274q-61 -8 -97.5 -37.5t-36.5 -102.5zM700 237 q170 18 170 151q0 64 -44 99.5t-126 60.5v-311z" />
+<glyph unicode="&#xe149;" d="M100 600v100h166q-24 49 -44 104q-10 26 -14.5 55.5t-3 72.5t25 90t68.5 87q97 88 263 88q129 0 230 -89t101 -208h-153q0 52 -34 89.5t-74 51.5t-76 14q-37 0 -79 -14.5t-62 -35.5q-41 -44 -41 -101q0 -11 2.5 -24.5t5.5 -24t9.5 -26.5t10.5 -25t14 -27.5t14 -25.5 t15.5 -27t13.5 -24h242v-100h-197q8 -50 -2.5 -115t-31.5 -94q-41 -59 -99 -113q35 11 84 18t70 7q32 1 102 -16t104 -17q76 0 136 30l50 -147q-41 -25 -80.5 -36.5t-59 -13t-61.5 -1.5q-23 0 -128 33t-155 29q-39 -4 -82 -17t-66 -25l-24 -11l-55 145l16.5 11t15.5 10 t13.5 9.5t14.5 12t14.5 14t17.5 18.5q48 55 54 126.5t-30 142.5h-221z" />
+<glyph unicode="&#xe150;" d="M2 300l298 -300l298 300h-198v900h-200v-900h-198zM602 900l298 300l298 -300h-198v-900h-200v900h-198z" />
+<glyph unicode="&#xe151;" d="M2 300h198v900h200v-900h198l-298 -300zM700 0v200h100v-100h200v-100h-300zM700 400v100h300v-200h-99v-100h-100v100h99v100h-200zM700 700v500h300v-500h-100v100h-100v-100h-100zM801 900h100v200h-100v-200z" />
+<glyph unicode="&#xe152;" d="M2 300h198v900h200v-900h198l-298 -300zM700 0v500h300v-500h-100v100h-100v-100h-100zM700 700v200h100v-100h200v-100h-300zM700 1100v100h300v-200h-99v-100h-100v100h99v100h-200zM801 200h100v200h-100v-200z" />
+<glyph unicode="&#xe153;" d="M2 300l298 -300l298 300h-198v900h-200v-900h-198zM800 100v400h300v-500h-100v100h-200zM800 1100v100h200v-500h-100v400h-100zM901 200h100v200h-100v-200z" />
+<glyph unicode="&#xe154;" d="M2 300l298 -300l298 300h-198v900h-200v-900h-198zM800 400v100h200v-500h-100v400h-100zM800 800v400h300v-500h-100v100h-200zM901 900h100v200h-100v-200z" />
+<glyph unicode="&#xe155;" d="M2 300l298 -300l298 300h-198v900h-200v-900h-198zM700 100v200h500v-200h-500zM700 400v200h400v-200h-400zM700 700v200h300v-200h-300zM700 1000v200h200v-200h-200z" />
+<glyph unicode="&#xe156;" d="M2 300l298 -300l298 300h-198v900h-200v-900h-198zM700 100v200h200v-200h-200zM700 400v200h300v-200h-300zM700 700v200h400v-200h-400zM700 1000v200h500v-200h-500z" />
+<glyph unicode="&#xe157;" d="M0 400v300q0 165 117.5 282.5t282.5 117.5h300q162 0 281 -118.5t119 -281.5v-300q0 -165 -118.5 -282.5t-281.5 -117.5h-300q-165 0 -282.5 117.5t-117.5 282.5zM200 300q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5 h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500z" />
+<glyph unicode="&#xe158;" d="M0 400v300q0 163 119 281.5t281 118.5h300q165 0 282.5 -117.5t117.5 -282.5v-300q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-163 0 -281.5 117.5t-118.5 282.5zM200 300q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5 h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500zM400 300l333 250l-333 250v-500z" />
+<glyph unicode="&#xe159;" d="M0 400v300q0 163 117.5 281.5t282.5 118.5h300q163 0 281.5 -119t118.5 -281v-300q0 -165 -117.5 -282.5t-282.5 -117.5h-300q-165 0 -282.5 117.5t-117.5 282.5zM200 300q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5 h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500zM300 700l250 -333l250 333h-500z" />
+<glyph unicode="&#xe160;" d="M0 400v300q0 165 117.5 282.5t282.5 117.5h300q165 0 282.5 -117.5t117.5 -282.5v-300q0 -162 -118.5 -281t-281.5 -119h-300q-165 0 -282.5 118.5t-117.5 281.5zM200 300q0 -41 29.5 -70.5t70.5 -29.5h500q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5 h-500q-41 0 -70.5 -29.5t-29.5 -70.5v-500zM300 400h500l-250 333z" />
+<glyph unicode="&#xe161;" d="M0 400v300h300v200l400 -350l-400 -350v200h-300zM500 0v200h500q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5h-500v200h400q165 0 282.5 -117.5t117.5 -282.5v-300q0 -165 -117.5 -282.5t-282.5 -117.5h-400z" />
+<glyph unicode="&#xe162;" d="M216 519q10 -19 32 -19h302q-155 -438 -160 -458q-5 -21 4 -32l9 -8l9 -1q13 0 26 16l538 630q15 19 6 36q-8 18 -32 16h-300q1 4 78 219.5t79 227.5q2 17 -6 27l-8 8h-9q-16 0 -25 -15q-4 -5 -98.5 -111.5t-228 -257t-209.5 -238.5q-17 -19 -7 -40z" />
+<glyph unicode="&#xe163;" d="M0 400q0 -165 117.5 -282.5t282.5 -117.5h300q47 0 100 15v185h-500q-41 0 -70.5 29.5t-29.5 70.5v500q0 41 29.5 70.5t70.5 29.5h500v185q-14 4 -114 7.5t-193 5.5l-93 2q-165 0 -282.5 -117.5t-117.5 -282.5v-300zM600 400v300h300v200l400 -350l-400 -350v200h-300z " />
+<glyph unicode="&#xe164;" d="M0 400q0 -165 117.5 -282.5t282.5 -117.5h300q163 0 281.5 117.5t118.5 282.5v98l-78 73l-122 -123v-148q0 -41 -29.5 -70.5t-70.5 -29.5h-500q-41 0 -70.5 29.5t-29.5 70.5v500q0 41 29.5 70.5t70.5 29.5h156l118 122l-74 78h-100q-165 0 -282.5 -117.5t-117.5 -282.5 v-300zM496 709l353 342l-149 149h500v-500l-149 149l-342 -353z" />
+<glyph unicode="&#xe165;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM406 600 q0 80 57 137t137 57t137 -57t57 -137t-57 -137t-137 -57t-137 57t-57 137z" />
+<glyph unicode="&#xe166;" d="M0 0v275q0 11 7 18t18 7h1048q11 0 19 -7.5t8 -17.5v-275h-1100zM100 800l445 -500l450 500h-295v400h-300v-400h-300zM900 150h100v50h-100v-50z" />
+<glyph unicode="&#xe167;" d="M0 0v275q0 11 7 18t18 7h1048q11 0 19 -7.5t8 -17.5v-275h-1100zM100 700h300v-300h300v300h295l-445 500zM900 150h100v50h-100v-50z" />
+<glyph unicode="&#xe168;" d="M0 0v275q0 11 7 18t18 7h1048q11 0 19 -7.5t8 -17.5v-275h-1100zM100 705l305 -305l596 596l-154 155l-442 -442l-150 151zM900 150h100v50h-100v-50z" />
+<glyph unicode="&#xe169;" d="M0 0v275q0 11 7 18t18 7h1048q11 0 19 -7.5t8 -17.5v-275h-1100zM100 988l97 -98l212 213l-97 97zM200 401h700v699l-250 -239l-149 149l-212 -212l149 -149zM900 150h100v50h-100v-50z" />
+<glyph unicode="&#xe170;" d="M0 0v275q0 11 7 18t18 7h1048q11 0 19 -7.5t8 -17.5v-275h-1100zM200 612l212 -212l98 97l-213 212zM300 1200l239 -250l-149 -149l212 -212l149 148l248 -237v700h-699zM900 150h100v50h-100v-50z" />
+<glyph unicode="&#xe171;" d="M23 415l1177 784v-1079l-475 272l-310 -393v416h-392zM494 210l672 938l-672 -712v-226z" />
+<glyph unicode="&#xe172;" d="M0 150v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-850q0 -21 -15 -35.5t-35 -14.5h-150v400h-700v-400h-150q-21 0 -35.5 14.5t-14.5 35.5zM600 1000h100v200h-100v-200z" />
+<glyph unicode="&#xe173;" d="M0 150v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-218l-276 -275l-120 120l-126 -127h-378v-400h-150q-21 0 -35.5 14.5t-14.5 35.5zM581 306l123 123l120 -120l353 352l123 -123l-475 -476zM600 1000h100v200h-100v-200z" />
+<glyph unicode="&#xe174;" d="M0 150v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-269l-103 -103l-170 170l-298 -298h-329v-400h-150q-21 0 -35.5 14.5t-14.5 35.5zM600 1000h100v200h-100v-200zM700 133l170 170l-170 170l127 127l170 -170l170 170l127 -128l-170 -169l170 -170 l-127 -127l-170 170l-170 -170z" />
+<glyph unicode="&#xe175;" d="M0 150v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-300h-400v-200h-500v-400h-150q-21 0 -35.5 14.5t-14.5 35.5zM600 300l300 -300l300 300h-200v300h-200v-300h-200zM600 1000v200h100v-200h-100z" />
+<glyph unicode="&#xe176;" d="M0 150v1000q0 20 14.5 35t35.5 15h250v-300h500v300h100l200 -200v-402l-200 200l-298 -298h-402v-400h-150q-21 0 -35.5 14.5t-14.5 35.5zM600 300h200v-300h200v300h200l-300 300zM600 1000v200h100v-200h-100z" />
+<glyph unicode="&#xe177;" d="M0 250q0 -21 14.5 -35.5t35.5 -14.5h1100q21 0 35.5 14.5t14.5 35.5v550h-1200v-550zM0 900h1200v150q0 21 -14.5 35.5t-35.5 14.5h-1100q-21 0 -35.5 -14.5t-14.5 -35.5v-150zM100 300v200h400v-200h-400z" />
+<glyph unicode="&#xe178;" d="M0 400l300 298v-198h400v-200h-400v-198zM100 800v200h100v-200h-100zM300 800v200h100v-200h-100zM500 800v200h400v198l300 -298l-300 -298v198h-400zM800 300v200h100v-200h-100zM1000 300h100v200h-100v-200z" />
+<glyph unicode="&#xe179;" d="M100 700v400l50 100l50 -100v-300h100v300l50 100l50 -100v-300h100v300l50 100l50 -100v-400l-100 -203v-447q0 -21 -14.5 -35.5t-35.5 -14.5h-200q-21 0 -35.5 14.5t-14.5 35.5v447zM800 597q0 -29 10.5 -55.5t25 -43t29 -28.5t25.5 -18l10 -5v-397q0 -21 14.5 -35.5 t35.5 -14.5h200q21 0 35.5 14.5t14.5 35.5v1106q0 31 -18 40.5t-44 -7.5l-276 -117q-25 -16 -43.5 -50.5t-18.5 -65.5v-359z" />
+<glyph unicode="&#xe180;" d="M100 0h400v56q-75 0 -87.5 6t-12.5 44v394h500v-394q0 -38 -12.5 -44t-87.5 -6v-56h400v56q-4 0 -11 0.5t-24 3t-30 7t-24 15t-11 24.5v888q0 22 25 34.5t50 13.5l25 2v56h-400v-56q75 0 87.5 -6t12.5 -44v-394h-500v394q0 38 12.5 44t87.5 6v56h-400v-56q4 0 11 -0.5 t24 -3t30 -7t24 -15t11 -24.5v-888q0 -22 -25 -34.5t-50 -13.5l-25 -2v-56z" />
+<glyph unicode="&#xe181;" d="M0 300q0 -41 29.5 -70.5t70.5 -29.5h300q41 0 70.5 29.5t29.5 70.5v500q0 41 -29.5 70.5t-70.5 29.5h-300q-41 0 -70.5 -29.5t-29.5 -70.5v-500zM100 100h400l200 200h105l295 98v-298h-425l-100 -100h-375zM100 300v200h300v-200h-300zM100 600v200h300v-200h-300z M100 1000h400l200 -200v-98l295 98h105v200h-425l-100 100h-375zM700 402v163l400 133v-163z" />
+<glyph unicode="&#xe182;" d="M16.5 974.5q0.5 -21.5 16 -90t46.5 -140t104 -177.5t175 -208q103 -103 207.5 -176t180 -103.5t137 -47t92.5 -16.5l31 1l163 162q16 17 13 40.5t-22 37.5l-192 136q-19 14 -45 12t-42 -19l-119 -118q-143 103 -267 227q-126 126 -227 268l118 118q17 17 20 41.5 t-11 44.5l-139 194q-14 19 -36.5 22t-40.5 -14l-162 -162q-1 -11 -0.5 -32.5z" />
+<glyph unicode="&#xe183;" d="M0 50v212q0 20 10.5 45.5t24.5 39.5l365 303v50q0 4 1 10.5t12 22.5t30 28.5t60 23t97 10.5t97 -10t60 -23.5t30 -27.5t12 -24l1 -10v-50l365 -303q14 -14 24.5 -39.5t10.5 -45.5v-212q0 -21 -15 -35.5t-35 -14.5h-1100q-21 0 -35.5 14.5t-14.5 35.5zM0 712 q0 -21 14.5 -33.5t34.5 -8.5l202 33q20 4 34.5 21t14.5 38v146q141 24 300 24t300 -24v-146q0 -21 14.5 -38t34.5 -21l202 -33q20 -4 34.5 8.5t14.5 33.5v200q-6 8 -19 20.5t-63 45t-112 57t-171 45t-235 20.5q-92 0 -175 -10.5t-141.5 -27t-108.5 -36.5t-81.5 -40 t-53.5 -36.5t-31 -27.5l-9 -10v-200z" />
+<glyph unicode="&#xe184;" d="M100 0v100h1100v-100h-1100zM175 200h950l-125 150v250l100 100v400h-100v-200h-100v200h-200v-200h-100v200h-200v-200h-100v200h-100v-400l100 -100v-250z" />
+<glyph unicode="&#xe185;" d="M100 0h300v400q0 41 -29.5 70.5t-70.5 29.5h-100q-41 0 -70.5 -29.5t-29.5 -70.5v-400zM500 0v1000q0 41 29.5 70.5t70.5 29.5h100q41 0 70.5 -29.5t29.5 -70.5v-1000h-300zM900 0v700q0 41 29.5 70.5t70.5 29.5h100q41 0 70.5 -29.5t29.5 -70.5v-700h-300z" />
+<glyph unicode="&#xe186;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300h300v300h-200v100h200v100h-300v-300h200v-100h-200v-100zM600 300h200v100h100v300h-100v100h-200v-500 zM700 400v300h100v-300h-100z" />
+<glyph unicode="&#xe187;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300h100v200h100v-200h100v500h-100v-200h-100v200h-100v-500zM600 300h200v100h100v300h-100v100h-200v-500 zM700 400v300h100v-300h-100z" />
+<glyph unicode="&#xe188;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300h300v100h-200v300h200v100h-300v-500zM600 300h300v100h-200v300h200v100h-300v-500z" />
+<glyph unicode="&#xe189;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 550l300 -150v300zM600 400l300 150l-300 150v-300z" />
+<glyph unicode="&#xe190;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300v500h700v-500h-700zM300 400h130q41 0 68 42t27 107t-28.5 108t-66.5 43h-130v-300zM575 549 q0 -65 27 -107t68 -42h130v300h-130q-38 0 -66.5 -43t-28.5 -108z" />
+<glyph unicode="&#xe191;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300h300v300h-200v100h200v100h-300v-300h200v-100h-200v-100zM601 300h100v100h-100v-100zM700 700h100 v-400h100v500h-200v-100z" />
+<glyph unicode="&#xe192;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 300h300v400h-200v100h-100v-500zM301 400v200h100v-200h-100zM601 300h100v100h-100v-100zM700 700h100 v-400h100v500h-200v-100z" />
+<glyph unicode="&#xe193;" d="M-100 300v500q0 124 88 212t212 88h700q124 0 212 -88t88 -212v-500q0 -124 -88 -212t-212 -88h-700q-124 0 -212 88t-88 212zM100 200h900v700h-900v-700zM200 700v100h300v-300h-99v-100h-100v100h99v200h-200zM201 300v100h100v-100h-100zM601 300v100h100v-100h-100z M700 700v100h200v-500h-100v400h-100z" />
+<glyph unicode="&#xe194;" d="M4 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM186 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM400 500v200 l100 100h300v-100h-300v-200h300v-100h-300z" />
+<glyph unicode="&#xe195;" d="M0 600q0 162 80 299t217 217t299 80t299 -80t217 -217t80 -299t-80 -299t-217 -217t-299 -80t-299 80t-217 217t-80 299zM182 600q0 -171 121.5 -292.5t292.5 -121.5t292.5 121.5t121.5 292.5t-121.5 292.5t-292.5 121.5t-292.5 -121.5t-121.5 -292.5zM400 400v400h300 l100 -100v-100h-100v100h-200v-100h200v-100h-200v-100h-100zM700 400v100h100v-100h-100z" />
+<glyph unicode="&#xe197;" d="M-14 494q0 -80 56.5 -137t135.5 -57h222v300h400v-300h128q120 0 205 86t85 208q0 120 -85 206.5t-205 86.5q-46 0 -90 -14q-44 97 -134.5 156.5t-200.5 59.5q-152 0 -260 -107.5t-108 -260.5q0 -25 2 -37q-66 -14 -108.5 -67.5t-42.5 -122.5zM300 200h200v300h200v-300 h200l-300 -300z" />
+<glyph unicode="&#xe198;" d="M-14 494q0 -80 56.5 -137t135.5 -57h8l414 414l403 -403q94 26 154.5 104t60.5 178q0 121 -85 207.5t-205 86.5q-46 0 -90 -14q-44 97 -134.5 156.5t-200.5 59.5q-152 0 -260 -107.5t-108 -260.5q0 -25 2 -37q-66 -14 -108.5 -67.5t-42.5 -122.5zM300 200l300 300 l300 -300h-200v-300h-200v300h-200z" />
+<glyph unicode="&#xe199;" d="M100 200h400v-155l-75 -45h350l-75 45v155h400l-270 300h170l-270 300h170l-300 333l-300 -333h170l-270 -300h170z" />
+<glyph unicode="&#xe200;" d="M121 700q0 -53 28.5 -97t75.5 -65q-4 -16 -4 -38q0 -74 52.5 -126.5t126.5 -52.5q56 0 100 30v-306l-75 -45h350l-75 45v306q46 -30 100 -30q74 0 126.5 52.5t52.5 126.5q0 24 -9 55q50 32 79.5 83t29.5 112q0 90 -61.5 155.5t-150.5 71.5q-26 89 -99.5 145.5 t-167.5 56.5q-116 0 -197.5 -81.5t-81.5 -197.5q0 -4 1 -12t1 -11q-14 2 -23 2q-74 0 -126.5 -52.5t-52.5 -126.5z" />
+</font>
+</defs></svg> 
\ No newline at end of file
diff --git a/doc/fonts/glyphicons-halflings-regular.ttf b/doc/fonts/glyphicons-halflings-regular.ttf
new file mode 100644 (file)
index 0000000..a498ef4
Binary files /dev/null and b/doc/fonts/glyphicons-halflings-regular.ttf differ
diff --git a/doc/fonts/glyphicons-halflings-regular.woff b/doc/fonts/glyphicons-halflings-regular.woff
new file mode 100644 (file)
index 0000000..d83c539
Binary files /dev/null and b/doc/fonts/glyphicons-halflings-regular.woff differ
diff --git a/doc/gen_api_method_docs.py b/doc/gen_api_method_docs.py
new file mode 100755 (executable)
index 0000000..afbe9e7
--- /dev/null
@@ -0,0 +1,127 @@
+#! /usr/bin/env python
+
+# gen_api_method_docs.py
+#
+# Generate docs for Arvados methods.
+#
+# This script will retrieve the discovery document at
+# https://localhost:9900/discovery/v1/apis/arvados/v1/rest
+# and will generate Textile documentation files in the current
+# directory.
+
+import argparse
+import pprint
+import re
+import requests
+import os
+import sys #debugging
+
+p = argparse.ArgumentParser(description='Generate Arvados API method documentation.')
+
+p.add_argument('--host',
+               type=str,
+               default='localhost',
+               help="The hostname or IP address of the API server")
+
+p.add_argument('--port',
+               type=int,
+               default=9900,
+               help="The port of the API server")
+
+p.add_argument('--output-dir',
+               type=str,
+               default='.',
+               help="Directory in which to write output files.")
+
+args = p.parse_args()
+
+api_url = 'https://{host}:{port}/discovery/v1/apis/arvados/v1/rest'.format(**vars(args))
+
+r = requests.get(api_url, verify=False)
+if r.status_code != 200:
+    raise Exception('Bad status code %d: %s' % (r.status_code, r.text))
+
+if 'application/json' not in r.headers.get('content-type', ''):
+    raise Exception('Unexpected content type: %s: %s' %
+                    (r.headers.get('content-type', ''), r.text))
+
+api = r.json()
+
+resource_num = 0
+for resource in sorted(api[u'resources']):
+    resource_num = resource_num + 1
+    out_fname = os.path.join(args.output_dir, resource + '.textile')
+    if os.path.exists(out_fname):
+        backup_name = out_fname + '.old'
+        try:
+            os.rename(out_fname, backup_name)
+        except OSError as e:
+            print "WARNING: could not back up {1} as {2}: {3}".format(
+                out_fname, backup_name, e)
+    outf = open(out_fname, 'w')
+    outf.write(
+"""---
+navsection: api
+navmenu: API Methods
+title: "{resource}"
+navorder: {resource_num}
+---
+
+h1. {resource}
+
+Required arguments are displayed in %{{background:#ccffcc}}green%.
+
+""".format(resource_num=resource_num, resource=resource))
+
+    methods = api['resources'][resource]['methods']
+    for method in sorted(methods.keys()):
+        methodinfo = methods[method]
+        outf.write(
+"""
+h2. {method}
+
+{description}
+
+Arguments:
+
+table(table table-bordered table-condensed).
+|_. Argument |_. Type |_. Description |_. Location |_. Example |
+""".format(
+    method=method, description=methodinfo['description']))
+
+        required = []
+        notrequired = []
+        for param, paraminfo in methodinfo['parameters'].iteritems():
+            paraminfo.setdefault(u'description', '')
+            paraminfo.setdefault(u'location', '')
+            limit = ''
+            if paraminfo.get('minimum', '') or paraminfo.get('maximum', ''):
+                limit = "range {0}-{1}".format(
+                    paraminfo.get('minimum', ''),
+                    paraminfo.get('maximum', 'unlimited'))
+            if paraminfo.get('default', ''):
+                if limit:
+                    limit = limit + '; '
+                limit = limit + 'default %d' % paraminfo['default']
+            if limit:
+                paraminfo['type'] = '{0} ({1})'.format(
+                    paraminfo['type'], limit)
+
+            row = "|{param}|{type}|{description}|{location}||\n".format(
+                param=param, **paraminfo)
+            if paraminfo.get('required', False):
+                required.append(row)
+            else:
+                notrequired.append(row)
+
+        for row in sorted(required):
+            outf.write("{background:#ccffcc}." + row)
+        for row in sorted(notrequired):
+            outf.write(row)
+
+        # pprint.pprint(methodinfo)
+
+    outf.close()
+    print "wrote ", out_fname
+
+
diff --git a/doc/gen_api_schema_docs.py b/doc/gen_api_schema_docs.py
new file mode 100755 (executable)
index 0000000..239beda
--- /dev/null
@@ -0,0 +1,76 @@
+#! /usr/bin/env python
+
+# gen_api_schema_docs.py
+#
+# Generate Textile documentation pages for Arvados schema resources: fetch
+# /arvados/v1/schema from a local dev API server and write one
+# <resource>.textile skeleton page per schema entry in the current directory.
+
+import requests
+import re
+import os
+
+r = requests.get('https://localhost:9900/arvados/v1/schema',
+                 verify=False)  # dev API server uses a self-signed certificate
+if r.status_code != 200:
+    raise Exception('Bad status code %d: %s' % (r.status_code, r.text))
+
+if 'application/json' not in r.headers.get('content-type', ''):
+    raise Exception('Unexpected content type: %s: %s' %
+                    (r.headers.get('content-type', ''), r.text))
+
+schema = r.json()
+navorder = 0
+for resource in sorted(schema.keys()):  # stable, deterministic page order
+    navorder = navorder + 1
+    properties = schema[resource]
+    res_api_endpoint = re.sub(r'([a-z])([A-Z])', r'\1_\2', resource).lower()  # CamelCase -> snake_case endpoint path
+    outfile = "{}.textile".format(resource)
+    if os.path.exists(outfile):
+        outfile = "{}_new.textile".format(resource)  # never clobber an existing, possibly hand-edited, page
+    print outfile, "..."
+    with open(outfile, "w") as f:
+        f.write("""---
+layout: default
+navsection: api
+navmenu: Schema
+title: {resource}
+---
+
+h1. {resource}
+
+A **{resource}** represents...
+
+h2. Methods
+
+        See "REST methods for working with Arvados resources":{{{{site.baseurl}}}}/api/methods.html
+
+API endpoint base: @https://{{{{ site.arvados_api_host }}}}/arvados/v1/{res_api_endpoint}@
+
+h2. Creation
+
+h3. Prerequisites
+
+Prerequisites for creating a {resource}.
+
+h3. Side effects
+
+Side effects of creating a {resource}.
+
+h2. Resources
+
+Each {resource} has, in addition to the usual "attributes of Arvados resources":resources.html:
+
+table(table table-bordered table-condensed).
+|_. Attribute|_. Type|_. Description|_. Example|
+""".format(
+    resource=resource,
+    navorder=navorder,  # NOTE(review): the template has no {navorder} placeholder, so this kwarg is unused
+    res_api_endpoint=res_api_endpoint))
+
+        for prop in properties:  # skip the attributes common to every Arvados resource
+            if prop not in ['id', 'uuid', 'href', 'kind', 'etag', 'self_link',
+                            'owner_uuid', 'created_at',
+                            'modified_by_client_uuid',
+                            'modified_by_user_uuid',
+                            'modified_at']:
+                f.write('|{name}|{type}|||\n'.format(**prop))  # NOTE(review): **prop assumes prop is a mapping with 'name'/'type' keys; if schema[resource] is a dict this loop yields key strings and this line raises — confirm the /schema response shape
+
diff --git a/doc/images/dax-reading-book.png b/doc/images/dax-reading-book.png
new file mode 100644 (file)
index 0000000..d10d3be
Binary files /dev/null and b/doc/images/dax-reading-book.png differ
diff --git a/doc/images/dax.png b/doc/images/dax.png
new file mode 100644 (file)
index 0000000..c511f0e
Binary files /dev/null and b/doc/images/dax.png differ
diff --git a/doc/images/doc-bg.jpg b/doc/images/doc-bg.jpg
new file mode 100644 (file)
index 0000000..e3abc50
Binary files /dev/null and b/doc/images/doc-bg.jpg differ
diff --git a/doc/images/favicon.ico b/doc/images/favicon.ico
new file mode 100644 (file)
index 0000000..4c763b6
Binary files /dev/null and b/doc/images/favicon.ico differ
diff --git a/doc/images/glyphicons-halflings-white.png b/doc/images/glyphicons-halflings-white.png
new file mode 100644 (file)
index 0000000..3bf6484
Binary files /dev/null and b/doc/images/glyphicons-halflings-white.png differ
diff --git a/doc/images/glyphicons-halflings.png b/doc/images/glyphicons-halflings.png
new file mode 100644 (file)
index 0000000..a996999
Binary files /dev/null and b/doc/images/glyphicons-halflings.png differ
diff --git a/doc/images/ssh-adding-public-key.png b/doc/images/ssh-adding-public-key.png
new file mode 100644 (file)
index 0000000..8aea827
Binary files /dev/null and b/doc/images/ssh-adding-public-key.png differ
diff --git a/doc/images/workbench-dashboard.png b/doc/images/workbench-dashboard.png
new file mode 100644 (file)
index 0000000..76df32c
Binary files /dev/null and b/doc/images/workbench-dashboard.png differ
diff --git a/doc/images/workbench-move-selected.png b/doc/images/workbench-move-selected.png
new file mode 100644 (file)
index 0000000..5ed1ef5
Binary files /dev/null and b/doc/images/workbench-move-selected.png differ
diff --git a/doc/index.html.liquid b/doc/index.html.liquid
new file mode 100644 (file)
index 0000000..a4255b6
--- /dev/null
@@ -0,0 +1,45 @@
+---
+layout: default
+no_nav_left: true
+navsection: top
+title: Arvados | Documentation
+...
+
+<div class="jumbotron">
+  <div class="container">
+    <div class="row">
+      <div class="col-sm-6">
+        <h1>ARVADOS</h1>
+        <p>manuals, guides, and references</p>
+      </div>
+      <div class="col-sm-6">
+        <img src="images/dax-reading-book.png" style="max-height: 10em" alt="Dax reading a book" />
+      </div>
+    </div>
+  </div>
+</div>
+
+<div class="container-fluid">
+  <div class="row">
+    <div class="col-sm-5">
+      <p>This site contains documentation for the <a href="https://arvados.org/">Arvados platform</a>. The documentation is being developed as part of the open source project. It is a work in progress that has just gotten started. You can get involved by <a href="https://arvados.org/projects/arvados/wiki/Documentation">joining the documentation effort</a>.
+    </div>
+    <div class="col-sm-7" style="border-left: solid; border-width: 1px">
+      <p>
+        <a href="{{ site.baseurl }}/user/index.html">User Guide</a> &mdash; How to manage data and do analysis with Arvados.
+      </p>
+      <p>
+        <a href="{{ site.baseurl }}/sdk/index.html">SDK Reference</a> &mdash; Details about accessing Arvados from various programming languages.
+      </p>
+      <p>
+        <a href="{{ site.baseurl }}/api/index.html">API Reference</a> &mdash; Details about the Arvados REST API.
+      </p>
+      <p>
+        <a href="{{ site.baseurl }}/admin/index.html">Admin Guide</a> &mdash; How to administer an Arvados system.
+      </p>
+      <p>
+        <a href="{{ site.baseurl }}/install/index.html">Install Guide</a> &mdash; How to install Arvados on a cloud platform.
+      </p>
+    </div>
+  </div>
+</div>
diff --git a/doc/install/client.html.textile.liquid b/doc/install/client.html.textile.liquid
new file mode 100644 (file)
index 0000000..c2fc966
--- /dev/null
@@ -0,0 +1,9 @@
+---
+layout: default
+navsection: installguide
+title: Install client libraries
+
+...
+
+The "SDK Reference":{{site.baseurl}}/sdk/index.html page has installation instructions for each of the SDKs.
+
diff --git a/doc/install/create-standard-objects.html.textile.liquid b/doc/install/create-standard-objects.html.textile.liquid
new file mode 100644 (file)
index 0000000..4e105e8
--- /dev/null
@@ -0,0 +1,27 @@
+---
+layout: default
+navsection: installguide
+title: Add an Arvados repository
+
+...
+
+Next, we're going to use the Arvados CLI tools on the <strong>shell server</strong> to create a few Arvados objects. These objects set up a hosted clone of the arvados repository on this cluster.
+
+This will be readable by the "All users" group, and therefore by every active user. This makes it possible for users to run the bundled Crunch scripts by specifying @"script_version":"master","repository":"arvados"@ rather than pulling the Arvados source tree into their own repositories.
+
+<notextile>
+<pre><code>~$ <span class="userinput">prefix=`arv --format=uuid user current | cut -d- -f1`</span>
+~$ <span class="userinput">echo "Site prefix is '$prefix'"</span>
+~$ <span class="userinput">all_users_group_uuid="$prefix-j7d0g-fffffffffffffff"</span>
+~$ <span class="userinput">repo_uuid=`arv --format=uuid repository create --repository '{"name":"arvados"}'`</span>
+~$ <span class="userinput">echo "Arvados repository uuid is '$repo_uuid'"</span>
+~$ <span class="userinput">read -rd $'\000' newlink &lt;&lt;EOF; arv link create --link "$newlink"</span>
+<span class="userinput">{
+ "tail_uuid":"$all_users_group_uuid",
+ "head_uuid":"$repo_uuid",
+ "link_class":"permission",
+ "name":"can_read" 
+}                                         
+EOF</span>
+</code></pre></notextile>
+
diff --git a/doc/install/index.html.textile.liquid b/doc/install/index.html.textile.liquid
new file mode 100644 (file)
index 0000000..500f6f6
--- /dev/null
@@ -0,0 +1,27 @@
+---
+layout: default
+navsection: installguide
+title: Installation overview
+...
+
+Arvados can be installed in multiple ways. Arvados does not depend on any particular cloud operating stack. Arvados runs on one or more GNU/Linux system(s). Arvados is being developed on Debian and Ubuntu GNU/Linux.
+
+h2. Quick installation
+
+Make sure curl and docker are installed on your system. Then, as a user in the docker group, execute
+
+<notextile>
+<pre><code>~$ <span class="userinput">\curl -sSL http://get.arvados.org | bash</span>
+</code></pre></notextile>
+
+This command will download the latest copy of the Arvados docker images. It also gets the arvdock command and saves it in the current working directory. It then uses arvdock to spin up Arvados. Depending on the speed of your internet connection, it can take a while to download the Arvados docker images.
+
+This installation method assumes your web browser and the Arvados docker containers run on the same host. 
+
+If you prefer, you can also download the installation script and inspect it before running it. The @http://get.arvados.org@ url redirects to <a href="https://raw.githubusercontent.com/curoverse/arvados-dev/master/install/easy-docker-install.sh">https://raw.githubusercontent.com/curoverse/arvados-dev/master/install/easy-docker-install.sh</a>, which is the installation script.
+
+h2. Installation from source
+
+It is also possible to build the Arvados docker images from source. The instructions are available "here":install-docker.html.
+
+For production use or evaluation at scale a "Manual Installation":install-manual-overview.html is more appropriate. This installation method assumes you have a number of (virtual) machines at your disposal to install the different Arvados components onto.
diff --git a/doc/install/install-api-server.html.textile.liquid b/doc/install/install-api-server.html.textile.liquid
new file mode 100644 (file)
index 0000000..6ce4de6
--- /dev/null
@@ -0,0 +1,187 @@
+---
+layout: default
+navsection: installguide
+title: Install the API server
+...
+
+This installation guide assumes you are on a 64 bit Debian or Ubuntu system.
+
+h2. Install prerequisites
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install \
+    bison build-essential gettext libcurl3 libcurl3-gnutls \
+    libcurl4-openssl-dev libpcre3-dev libpq-dev libreadline-dev \
+    libssl-dev libxslt1.1 postgresql sudo wget zlib1g-dev
+</span></code></pre></notextile>
+
+Also make sure you have "Ruby and bundler":install-manual-prerequisites-ruby.html installed.
+
+h2. Download the source tree
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd $HOME</span> # (or wherever you want to install)
+~$ <span class="userinput">git clone https://github.com/curoverse/arvados.git</span>
+</code></pre></notextile>
+
+See also: "Downloading the source code":https://arvados.org/projects/arvados/wiki/Download on the Arvados wiki.
+
+The API server is in @services/api@ in the source tree.
+
+h2. Install gem dependencies
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd arvados/services/api</span>
+~/arvados/services/api$ <span class="userinput">bundle install</span>
+</code></pre></notextile>
+
+h2. Choose your environment
+
+The API server can be run in @development@ or in @production@ mode. Unless this installation is going to be used for development on the Arvados API server itself, you should run it in @production@ mode.
+
+Copy the example environment file for your environment. For example, if you choose @production@:
+
+<notextile>
+<pre><code>~/arvados/services/api$ <span class="userinput">cp -i config/environments/production.rb.example config/environments/production.rb</span>
+</code></pre></notextile>
+
+h2. Configure the API server
+
+First, copy the example configuration file:
+
+<notextile>
+<pre><code>~/arvados/services/api$ <span class="userinput">cp -i config/application.yml.example config/application.yml</span>
+</code></pre></notextile>
+
+The API server reads the @config/application.yml@ file, as well as the @config/application.default.yml@ file. Values in @config/application.yml@ take precedence over the defaults that are defined in @config/application.default.yml@. The @config/application.yml.example@ file is not read by the API server and is provided for installation convenience, only.
+
+Consult @config/application.default.yml@ for a full list of configuration options. Always put your local configuration in @config/application.yml@, never edit @config/application.default.yml@.
+
+h3(#uuid_prefix). uuid_prefix
+
+It is recommended to explicitly define your @uuid_prefix@ in @config/application.yml@, by setting the 'uuid_prefix' field in the section for your environment.
+
+h3(#git_repositories_dir). git_repositories_dir
+
+This field defaults to @/var/lib/arvados/git@. You can override the value by defining it in @config/application.yml@.
+
+Make sure a clone of the arvados repository exists in @git_repositories_dir@.
+
+<notextile>
+<pre><code>~/arvados/services/api$ <span class="userinput">sudo mkdir -p /var/lib/arvados/git</span>
+~/arvados/services/api$ <span class="userinput">sudo git clone --bare ../../.git /var/lib/arvados/git/arvados.git</span>
+</code></pre></notextile>
+
+h3. secret_token
+
+Generate a new secret token for signing cookies:
+
+<notextile>
+<pre><code>~/arvados/services/api$ <span class="userinput">rake secret</span>
+zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
+</code></pre></notextile>
+
+Then put that value in the @secret_token@ field.
+
+h3. blob_signing_key
+
+If you want access control on your "Keepstore":install-keepstore.html server(s), you should set @blob_signing_key@ to the same value as the permission key you provide to your Keepstore daemon(s).
+
+h3. workbench_address
+
+Fill in the url of your workbench application in @workbench_address@, for example
+
+&nbsp;&nbsp;https://workbench.@prefix_uuid@.your.domain
+
+h3. other options
+
+Consult @application.default.yml@ for a full list of configuration options. Always put your local configuration in @application.yml@ instead of editing @application.default.yml@.
+
+h2. Set up the database
+
+Generate a new database password. Nobody ever needs to memorize it or type it, so we'll make a strong one:
+
+<notextile>
+<pre><code>~/arvados/services/api$ <span class="userinput">ruby -e 'puts rand(2**128).to_s(36)'</span>
+6gqa1vu492idd7yca9tfandj3
+</code></pre></notextile>
+
+Create a new database user with permission to create its own databases.
+
+<notextile>
+<pre><code>~/arvados/services/api$ <span class="userinput">sudo -u postgres createuser --createdb --encrypted --pwprompt arvados</span>
+[sudo] password for <b>you</b>: <span class="userinput">yourpassword</span>
+Enter password for new role: <span class="userinput">paste-password-you-generated</span>
+Enter it again: <span class="userinput">paste-password-again</span>
+Shall the new role be a superuser? (y/n) <span class="userinput">n</span>
+Shall the new role be allowed to create more new roles? (y/n) <span class="userinput">n</span>
+</code></pre></notextile>
+
+Configure API server to connect to your database by creating and updating @config/database.yml@. Replace the @xxxxxxxx@ database password placeholders with the new password you generated above.
+
+<notextile>
+<pre><code>~/arvados/services/api$ <span class="userinput">cp -i config/database.yml.sample config/database.yml</span>
+~/arvados/services/api$ <span class="userinput">edit config/database.yml</span>
+</code></pre></notextile>
+
+Create and initialize the database. If you are planning a production system, choose the @production@ rails environment, otherwise use @development@.
+
+<notextile>
+<pre><code>~/arvados/services/api$ <span class="userinput">RAILS_ENV=production bundle exec rake db:setup</span>
+</code></pre></notextile>
+
+Alternatively, if the database user you intend to use for the API server is not allowed to create new databases, you can create the database first and then populate it with rake. Be sure to adjust the database name if you are using the @development@ environment. This sequence of commands is functionally equivalent to the rake db:setup command above.
+
+<notextile>
+<pre><code>~/arvados/services/api$ <span class="userinput">sudo -u postgres createdb arvados_production -E UTF8 -O arvados</span>
+~/arvados/services/api$ <span class="userinput">RAILS_ENV=production bundle exec rake db:structure:load</span>
+~/arvados/services/api$ <span class="userinput">RAILS_ENV=production bundle exec rake db:seed</span>
+</code></pre></notextile>
+
+<div class="alert alert-block alert-info">
+  <button type="button" class="close" data-dismiss="alert">&times;</button>
+  <h4>Note!</h4>
+You can safely ignore the following error message you may see when loading the database structure:
+<notextile>
+<pre><code>ERROR:  must be owner of extension plpgsql</code></pre></notextile>
+</div>
+
+h2(#omniauth). Set up omniauth
+
+First copy the omniauth configuration file:
+
+<notextile>
+<pre><code>~/arvados/services/api$ <span class="userinput">cp -i config/initializers/omniauth.rb.example config/initializers/omniauth.rb</span>
+</code></pre></notextile>
+
+Edit @config/initializers/omniauth.rb@ to configure the SSO server for authentication.  @APP_ID@ and @APP_SECRET@ correspond to the @app_id@ and @app_secret@ set in "Create arvados-server client for Single Sign On (SSO)":install-sso.html#client and @CUSTOM_PROVIDER_URL@ is the address of your SSO server.
+
+<notextile>
+<pre><code>APP_ID = 'arvados-server'
+APP_SECRET = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
+CUSTOM_PROVIDER_URL = 'https://sso.example.com/'
+</code></pre>
+</notextile>
+
+h2. Start the API server
+
+h3. Development environment
+
+If you plan to run in development mode, you can now run the development server this way:
+
+<notextile>
+<pre><code>~/arvados/services/api$ <span class="userinput">bundle exec rails server --port=3030</span>
+</code></pre></notextile>
+
+h3. Production environment
+
+We recommend "Passenger":https://www.phusionpassenger.com/ to run the API server in production.
+
+Point it to the services/api directory in the source tree.
+
+To enable streaming so users can monitor crunch jobs in real time, make sure to add the following to your Passenger configuration:
+
+<notextile>
+<pre><code><span class="userinput">PassengerBufferResponse off</span>
+</code></pre>
+</notextile>
diff --git a/doc/install/install-crunch-dispatch.html.textile.liquid b/doc/install/install-crunch-dispatch.html.textile.liquid
new file mode 100644 (file)
index 0000000..231d1f4
--- /dev/null
@@ -0,0 +1,88 @@
+---
+layout: default
+navsection: installguide
+title: Install the Crunch dispatcher
+
+...
+
+
+
+The dispatcher normally runs on the same host/VM as the API server.
+
+h4. Perl SDK dependencies
+
+Install the Perl SDK on the controller.
+
+* See "Perl SDK":{{site.baseurl}}/sdk/perl/index.html page for details.
+
+h4. Python SDK dependencies
+
+Install the Python SDK and CLI tools on controller and all compute nodes.
+
+* See "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html page for details.
+
+h4. Likely crunch job dependencies
+
+On compute nodes:
+
+* @pip install --upgrade pyvcf@
+
+h4. Crunch user account
+
+On compute nodes and controller:
+
+* @adduser crunch@
+
+The crunch user should have the same UID, GID, and home directory on all compute nodes and on the controller.
+
+h4. Repositories
+
+Crunch scripts must be in Git repositories in the directory configured as @git_repositories_dir@/*.git (see the "API server installation":install-api-server.html#git_repositories_dir).
+
+Once you have a repository with commits -- and you have read access to the repository -- you should be able to create a new job:
+
+<pre>
+read -rd $'\000' newjob <<EOF; arv job create --job "$newjob"
+{"script_parameters":{"input":"f815ec01d5d2f11cb12874ab2ed50daa"},
+ "script_version":"master",
+ "script":"hash",
+ "repository":"arvados"}
+EOF
+</pre>
+
+Without getting this error:
+
+<pre>
+ArgumentError: Specified script_version does not resolve to a commit
+</pre>
+
+h4. Running jobs
+
+* @services/api/script/crunch-dispatch.rb@ must be running.
+* @crunch-dispatch.rb@ needs @services/crunch/crunch-job@ in its @PATH@.
+* @crunch-job@ needs @sdk/perl/lib@ and @warehouse-apps/libwarehouse-perl/lib@ in its @PERLLIB@
+* @crunch-job@ needs @ARVADOS_API_HOST@ (and, if necessary in a development environment, @ARVADOS_API_HOST_INSECURE@)
+
+Example @/var/service/arvados_crunch_dispatch/run@ script:
+
+<pre>
+#!/bin/sh
+set -e
+
+rvmexec=""
+## uncomment this line if you use rvm:
+#rvmexec="/usr/local/rvm/bin/rvm-exec 2.1.1"
+
+export PATH="$PATH":/path/to/arvados/services/crunch
+export ARVADOS_API_HOST={{ site.arvados_api_host }}
+export CRUNCH_DISPATCH_LOCKFILE=/var/lock/crunch-dispatch
+
+fuser -TERM -k $CRUNCH_DISPATCH_LOCKFILE || true
+
+## Only if your SSL cert is unverifiable:
+# export ARVADOS_API_HOST_INSECURE=yes
+
+cd /path/to/arvados/services/api
+export RAILS_ENV=production
+exec $rvmexec bundle exec ./script/crunch-dispatch.rb 2>&1
+</pre>
diff --git a/doc/install/install-docker.html.textile.liquid b/doc/install/install-docker.html.textile.liquid
new file mode 100644 (file)
index 0000000..00f84eb
--- /dev/null
@@ -0,0 +1,194 @@
+---
+layout: default
+navsection: installguide
+title: Installing with Docker
+...
+
+h2. Purpose
+
+This installation method is appropriate for local testing, evaluation, and development. For production use, this method is not recommended.
+
+h2. Prerequisites
+
+# A GNU/Linux (virtual) machine
+# A working Docker installation (see "Installing Docker":https://docs.docker.com/installation/)
+
+h2. Download the source tree
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd $HOME</span> # (or wherever you want to install)
+~$ <span class="userinput">git clone https://github.com/curoverse/arvados.git</span>
+</code></pre></notextile>
+
+See also: "Downloading the source code":https://arvados.org/projects/arvados/wiki/Download on the Arvados wiki.
+
+h2. Building the Arvados Docker containers
+
+First of all, a suitable @config.yml@ file is needed. The easiest way to generate one based on the provided @config.yml.example@ file is to run the @build.sh@ script. If no @config.yml@ file exists, it will will prompt for a few inputs, write the @config.yml@ file, and then proceed to build all the Docker containers. If @config.yml@ exists, invoking @build.sh@ will simply build all Docker containers or update those that need to be updated.
+
+Run @build.sh@ without arguments to generate @config.yml@ and build all Docker containers (this will take a while!):
+
+<notextile>
+<pre><code>
+~$ <span class="userinput">./build.sh</span>
+Generating config.yml.
+Arvados needs to know the email address of the administrative user,
+so that when that user logs in they are automatically made an admin.
+This should be an email address associated with a Google account.
+
+Enter your Google ID email address here:
+
+...
+
+Step 7 : ADD generated/setup.sh /usr/local/bin/setup.sh
+ ---> d7c0e7fdf7ab
+Removing intermediate container f3d81180795d
+Step 8 : CMD ["/usr/bin/supervisord", "-n"]
+ ---> Running in 84c64cb9f0d5
+ ---> d6cbb5002604
+Removing intermediate container 84c64cb9f0d5
+Successfully built d6cbb5002604
+date >shell-image
+</code></pre></notextile>
+
+If all goes well, you should now have a number of docker images built:
+
+<notextile>
+<pre><code>~$ <span class="userinput">docker.io images</span>
+REPOSITORY              TAG                 IMAGE ID            CREATED             VIRTUAL SIZE
+arvados/shell           latest              d6cbb5002604        10 minutes ago      1.613 GB
+arvados/sso             latest              377f1fa0108e        11 minutes ago      1.807 GB
+arvados/keep            latest              ade0e0d2dd00        12 minutes ago      210.8 MB
+arvados/workbench       latest              b0e4fb6da385        12 minutes ago      2.218 GB
+arvados/doc             latest              4b64daec9454        12 minutes ago      1.524 GB
+arvados/compute         latest              7f1f5f7faf54        13 minutes ago      1.862 GB
+arvados/slurm           latest              f5bfd1008e6b        17 minutes ago      1.573 GB
+arvados/api             latest              6b93c5f5fc42        17 minutes ago      2.274 GB
+arvados/passenger       latest              add2d11fdf24        18 minutes ago      1.738 GB
+arvados/base            latest              81eaadd0c6f5        22 minutes ago      1.463 GB
+arvados/debian          7.6                 f339ce275c01        6 days ago          116.8 MB
+arvados/debian          latest              f339ce275c01        6 days ago          116.8 MB
+arvados/debian          wheezy              f339ce275c01        6 days ago          116.8 MB
+crosbymichael/skydock   latest              e985023521f6        3 months ago        510.7 MB
+crosbymichael/skydns    next                79c99a4608ed        3 months ago        525 MB
+crosbymichael/skydns    latest              1923ce648d4c        5 months ago        137.5 MB
+</code></pre></notextile>
+
+h2. Updating the Arvados Docker containers
+
+@build.sh@ takes a few optional arguments:
+
+<notextile>
+<pre><code>
+~$ <span class="userinput"> ./build.sh --help</span>
+
+usage: ./build.sh [options]
+
+Calling ./build.sh without arguments will build all Arvados docker images
+
+./build.sh options:
+  -h, --help   Print this help text
+  clean        Clear all build information
+  realclean    clean and remove all Arvados Docker images except arvados/debian
+  deepclean    realclean and remove arvados/debian, crosbymichael/skydns and
+               crosbymichael/skydock Docker images
+</code></pre></notextile>
+
+If there has been an update to the Arvados Docker building code, it is safest to rebuild the Arvados Docker images from scratch. All build information can be cleared with the 'clean' option to build.sh:
+
+<notextile>
+<pre><code>~$ <span class="userinput">./build.sh clean</span></code></pre>
+</notextile>
+
+You can also use 'realclean', which does what 'clean' does and in addition removes all Arvados Docker containers and images from your system, with the exception of the arvados/debian image.
+
+<notextile>
+<pre><code>~$ <span class="userinput">./build.sh realclean</span></code></pre>
+</notextile>
+
+Finally, the 'deepclean' option does what 'realclean' does, and also removes the arvados/debian, crosbymichael/skydns and crosbymichael/skydock images.
+
+<notextile>
+<pre><code>~$ <span class="userinput">./build.sh deepclean</span></code></pre>
+</notextile>
+
+h2. Running the Arvados Docker containers
+
+The @arvdock@ command can be used to start and stop the docker containers. It has a number of options:
+
+<notextile>
+<pre><code>
+~$ <span class="userinput">./arvdock -h</span>
+
+usage: ./arvdock (start|stop|restart|test) [options]
+
+./arvdock start/stop/restart options:
+  -d[port], --doc[=port]        Documentation server (default port 9898)
+  -w[port], --workbench[=port]  Workbench server (default port 9899)
+  -s[port], --sso[=port]        SSO server (default port 9901)
+  -a[port], --api[=port]        API server (default port 9900)
+  -c, --compute                 Compute nodes (starts 2)
+  -v, --vm                      Shell server
+  -n, --nameserver              Nameserver
+  -k, --keep                    Keep servers
+  --ssh                         Enable SSH access to server containers
+  -h, --help                    Display this help and exit
+
+  If no options are given, the action is applied to all servers.
+
+./arvdock test [testname] [testname] ...
+  By default, all tests are run.
+</code>
+</pre>
+</notextile>
+
+The @--ssh@ option can be useful to debug issues with the Docker containers; it allows you to ssh into the running containers as the @root@ user, provided you have access to the private key that matches the public key specified in @config.yml@'s PUBLIC_KEY_PATH variable.
+
+Start the docker containers:
+
+<notextile>
+<pre><code>
+~$ <span class="userinput">./arvdock start</span>
+sso_server
+Starting container:
+  /usr/bin/docker.io run -d -i -t -p 9901:443 --name sso_server arvados/sso
+api_server
+Starting container:
+  /usr/bin/docker.io run -d -i -t -p 9900:443 --name api_server --link sso_server:sso arvados/api
+keep_server_0
+Starting container:
+  /usr/bin/docker.io run -d -i -t -p 25107:25107 --name keep_server_0 -v /tmp/tmp.aCSx8Pq6Wb:/dev/keep-0 --link api_server:api arvados/keep
+keep_server_1
+Starting container:
+  /usr/bin/docker.io run -d -i -t -p 25108:25107 --name keep_server_1 -v /tmp/tmp.m4OQ9WB73G:/dev/keep-0 --link api_server:api arvados/keep
+doc_server
+Starting container:
+  /usr/bin/docker.io run -d -i -t -p 9898:80 --name doc_server arvados/doc
+
+*****************************************************************
+You can access the Arvados documentation at http://localhost:9898
+*****************************************************************
+
+workbench_server
+Starting container:
+  /usr/bin/docker.io run -d -i -t -p 9899:80 --name workbench_server --link api_server:api arvados/workbench
+
+*****************************************************************
+You can access the Arvados workbench at http://localhost:9899
+*****************************************************************
+</code></pre></notextile>
+
+h2. Accessing workbench
+
+Point your browser to the Dockerized workbench:
+
+<notextile>
+<pre><code><span class="userinput">https://localhost:9899</span>
+</code></pre>
+</notextile>
+
+Now use the google account you specified as @API_AUTO_ADMIN_USER@ in @config.yml@ to log in.
+
+You will be prompted by your browser that you are accessing a site with an untrusted SSL certificate. This is normal; by default the Arvados Docker installation uses self-signed SSL certificates for the SSO and API servers, respectively. If you use a local SSO server in a Docker container, you will be prompted *twice*. The default is to use the Curoverse SSO server.
+
+
diff --git a/doc/install/install-keepproxy.html.textile.liquid b/doc/install/install-keepproxy.html.textile.liquid
new file mode 100644 (file)
index 0000000..43c1c67
--- /dev/null
@@ -0,0 +1,83 @@
+---
+layout: default
+navsection: installguide
+title: Install Keepproxy server
+...
+
+This installation guide assumes you are on a 64 bit Debian or Ubuntu system.
+
+The Keepproxy server is a gateway into your Keep storage. Unlike the Keepstore servers, which are only accessible on the local LAN, Keepproxy is designed to provide secure access into Keep from anywhere on the internet.
+
+By convention, we use the following hostname for the Keepproxy:
+
+<div class="offset1">
+table(table table-bordered table-condensed).
+|_Hostname_|
+|keep.@uuid_prefix@.your.domain|
+</div>
+
+This hostname should resolve from anywhere on the internet.
+
+h2. Install Keepproxy
+
+First add the Arvados apt repository, and then install the Keepproxy package.
+
+<notextile>
+<pre><code>~$ <span class="userinput">echo "deb http://apt.arvados.org/ wheezy main" | sudo tee /etc/apt/sources.list.d/apt.arvados.org.list</span>
+~$ <span class="userinput">sudo /usr/bin/apt-key adv --keyserver pool.sks-keyservers.net --recv 1078ECD7</span>
+~$ <span class="userinput">sudo /usr/bin/apt-get update</span>
+~$ <span class="userinput">sudo /usr/bin/apt-get install keepproxy</span>
+</code></pre>
+</notextile>
+
+Verify that Keepproxy is functional:
+
+<notextile>
+<pre><code>~$ <span class="userinput">keepproxy -h</span>
+Usage of default:
+  -default-replicas=2: Default number of replicas to write if not specified by the client.
+  -listen=":25107": Interface on which to listen for requests, in the format ipaddr:port. e.g. -listen=10.0.1.24:8000. Use -listen=:port to listen on all network interfaces.
+  -no-get=false: If set, disable GET operations
+  -no-put=false: If set, disable PUT operations
+  -pid="": Path to write pid file
+</code></pre>
+</notextile>
+
+It's recommended to run Keepproxy under "runit":https://packages.debian.org/search?keywords=runit or something similar.
+
+h3. Create an API token for the Keepproxy server
+
+The Keepproxy server needs a token to talk to the API server.
+
+On the <strong>API server</strong>, use the following command to create the token:
+
+<notextile>
+<pre><code>~/arvados/services/api/script$ <span class="userinput">RAILS_ENV=production ./get_anonymous_user_token.rb</span>
+hoShoomoo2bai3Ju1xahg6aeng1siquuaZ1yae2gi2Uhaeng2r
+</code></pre></notextile>
+
+The value for the @api_token@ field should be added to Keepproxy's environment as ARVADOS_API_TOKEN. Make sure to also set ARVADOS_API_HOST to @uuid_prefix@.your.domain.
+
+h3. Set up a reverse proxy with SSL support
+
+Because the Keepproxy is intended for access from anywhere on the internet, it is recommended to use SSL for transport encryption.
+
+This is best achieved by putting a reverse proxy with SSL support in front of Keepproxy. Keepproxy itself runs on port 25107 by default; your reverse proxy can run on port 443 and pass requests to Keepproxy on port 25107.
+
+h3. Tell the API server about the Keepproxy server
+
+The API server needs to be informed about the presence of your Keepproxy server. Please execute the following commands on your <strong>shell server</strong>.
+
+<notextile>
+<pre><code>~$ <span class="userinput">prefix=`arv --format=uuid user current | cut -d- -f1`</span>
+~$ <span class="userinput">echo "Site prefix is '$prefix'"</span>
+~$ <span class="userinput">read -rd $'\000' keepservice &lt;&lt;EOF; arv keep_service create --keep-service "$keepservice"</span>
+<span class="userinput">{
+ "service_host":"keep.$prefix.your.domain",
+ "service_port":443,
+ "service_ssl_flag":true,
+ "service_type":"proxy"
+}
+EOF</span>
+</code></pre></notextile>
+
diff --git a/doc/install/install-keepstore.html.textile.liquid b/doc/install/install-keepstore.html.textile.liquid
new file mode 100644 (file)
index 0000000..7fb810d
--- /dev/null
@@ -0,0 +1,89 @@
+---
+layout: default
+navsection: installguide
+title: Install Keepstore servers
+...
+
+This installation guide assumes you are on a 64 bit Debian or Ubuntu system.
+
+We are going to install two Keepstore servers. By convention, we use the following hostname pattern:
+
+<div class="offset1">
+table(table table-bordered table-condensed).
+|_Hostname_|
+|keep0.@uuid_prefix@.your.domain|
+|keep1.@uuid_prefix@.your.domain|
+</div>
+
+Because the Keepstore servers are not directly accessible from the internet, these hostnames only need to resolve on the local network.
+
+h2. Install Keepstore
+
+First add the Arvados apt repository, and then install the Keepstore package.
+
+<notextile>
+<pre><code>~$ <span class="userinput">echo "deb http://apt.arvados.org/ wheezy main" | sudo tee /etc/apt/sources.list.d/apt.arvados.org.list</span>
+~$ <span class="userinput">sudo /usr/bin/apt-key adv --keyserver pool.sks-keyservers.net --recv 1078ECD7</span>
+~$ <span class="userinput">sudo /usr/bin/apt-get update</span>
+~$ <span class="userinput">sudo /usr/bin/apt-get install keepstore</span>
+</code></pre>
+</notextile>
+
+Verify that Keepstore is functional:
+
+<notextile>
+<pre><code>~$ <span class="userinput">keepstore -h</span>
+2014/10/29 14:23:38 Keep started: pid 6848
+Usage of keepstore:
+  -data-manager-token-file="": File with the API token used by the Data Manager. All DELETE requests or GET /index requests must carry this token.
+  -enforce-permissions=false: Enforce permission signatures on requests.
+  -listen=":25107": Interface on which to listen for requests, in the format ipaddr:port. e.g. -listen=10.0.1.24:8000. Use -listen=:port to listen on all network interfaces.
+  -never-delete=false: If set, nothing will be deleted. HTTP 405 will be returned for valid DELETE requests.
+  -permission-key-file="": File containing the secret key for generating and verifying permission signatures.
+  -permission-ttl=1209600: Expiration time (in seconds) for newly generated permission signatures.
+  -pid="": Path to write pid file
+  -serialize=false: If set, all read and write operations on local Keep volumes will be serialized.
+  -volumes="": Comma-separated list of directories to use for Keep volumes, e.g. -volumes=/var/keep1,/var/keep2. If empty or not supplied, Keep will scan mounted filesystems for volumes with a /keep top-level directory.
+</code></pre>
+</notextile>
+
+If you want access control on your Keepstore server(s), you should provide a permission key. The @-permission-key-file@ argument should contain the path to a file that contains a single line with a long random alphanumeric string. It should be the same as the @blob_signing_key@ that can be set in the "API server":install-api-server.html config/application.yml file.
+
+Prepare one or more volumes for Keepstore to use. Simply create a /keep directory on all the partitions you would like Keepstore to use, and then start Keepstore. For example, using 2 tmpfs volumes:
+
+<notextile>
+<pre><code>~$ <span class="userinput">keepstore</span>
+2014/10/29 11:41:37 Keep started: pid 20736
+2014/10/29 11:41:37 adding Keep volume: /tmp/tmp.vwSCtUCyeH/keep
+2014/10/29 11:41:37 adding Keep volume: /tmp/tmp.Lsn4w8N3Xv/keep
+2014/10/29 11:41:37 Running without a PermissionSecret. Block locators returned by this server will not be signed, and will be rejected by a server that enforces permissions.
+2014/10/29 11:41:37 To fix this, run Keep with --permission-key-file=<path> to define the location of a file containing the permission key.
+
+</code></pre>
+</notextile>
+
+It's recommended to run Keepstore under "runit":https://packages.debian.org/search?keywords=runit or something similar.
+
+Repeat this section for each Keepstore server you are setting up.
+
+h3. Tell the API server about the Keepstore servers
+
+The API server needs to be informed about the presence of your Keepstore servers. For each of the Keepstore servers you have created, please execute the following commands on your <strong>shell server</strong>.
+
+Make sure to update the @service_host@ value to match each of your Keepstore servers.
+
+<notextile>
+<pre><code>~$ <span class="userinput">prefix=`arv --format=uuid user current | cut -d- -f1`</span>
+~$ <span class="userinput">echo "Site prefix is '$prefix'"</span>
+~$ <span class="userinput">read -rd $'\000' keepservice &lt;&lt;EOF; arv keep_service create --keep-service "$keepservice"</span>
+<span class="userinput">{
+ "service_host":"keep0.$prefix.your.domain",
+ "service_port":25107,
+ "service_ssl_flag":false,
+ "service_type":"disk"
+}
+EOF</span>
+</code></pre></notextile>
+
+
+
diff --git a/doc/install/install-manual-overview.html.textile.liquid b/doc/install/install-manual-overview.html.textile.liquid
new file mode 100644 (file)
index 0000000..1ba9451
--- /dev/null
@@ -0,0 +1,16 @@
+---
+layout: default
+navsection: installguide
+title: Overview
+...
+
+{% include 'alert_stub' %}
+
+The manual installation guide will walk you through setting up a basic Arvados cluster on a number of (virtual) GNU/Linux systems. This installation method is intended for evaluation or production use at scale.
+
+<div class="alert alert-block alert-info">
+  <button type="button" class="close" data-dismiss="alert">&times;</button>
+  <h4>Note</h4>
+  <p>If you are looking to evaluate Arvados on one machine, we recommend the "Docker installation method":install-docker.html instead.</p>
+</div>
+
diff --git a/doc/install/install-manual-prerequisites-ruby.html.textile.liquid b/doc/install/install-manual-prerequisites-ruby.html.textile.liquid
new file mode 100644 (file)
index 0000000..0db1e43
--- /dev/null
@@ -0,0 +1,31 @@
+---
+layout: default
+navsection: installguide
+title: Install Ruby and bundler
+...
+
+We recommend Ruby >= 2.1.
+
+h2(#rvm). Option 1: Install with rvm
+
+<notextile>
+<pre><code>~$ <span class="userinput">\curl -sSL https://get.rvm.io | bash -s stable --ruby=2.1</span>
+~$ <span class="userinput">gem install bundler
+</span></code></pre></notextile>
+
+h2(#fromsource). Option 2: Install from source
+
+<notextile>
+<pre><code><span class="userinput">mkdir -p ~/src
+cd ~/src
+wget http://cache.ruby-lang.org/pub/ruby/2.1/ruby-2.1.3.tar.gz
+tar xzf ruby-2.1.3.tar.gz
+cd ruby-2.1.3
+./configure
+make
+sudo make install
+
+sudo gem install bundler</span>
+</code></pre></notextile>
+
+
diff --git a/doc/install/install-manual-prerequisites.html.textile.liquid b/doc/install/install-manual-prerequisites.html.textile.liquid
new file mode 100644 (file)
index 0000000..e5b28d9
--- /dev/null
@@ -0,0 +1,46 @@
+---
+layout: default
+navsection: installguide
+title: Prerequisites
+...
+
+h2. Hardware (or virtual machines)
+
+This guide assumes you have seven systems available in the same network subnet:
+
+<div class="offset1">
+table(table table-bordered table-condensed).
+|_Function_|_Number of nodes_|
+|Arvados REST API, Websockets, Workbench and Crunch dispatcher|1|
+|Arvados SSO server|1|
+|Arvados Keepproxy server|1|
+|Arvados Keepstore servers|2|
+|Arvados shell server|1|
+|Arvados compute node|1|
+</div>
+
+The number of Keepstore, shell and compute nodes listed above is a minimum. In a real production installation, you will likely run many more of each of those types of nodes. In such a scenario, you would probably also want to dedicate a node to the Workbench server and Crunch dispatcher, respectively. For performance reasons, you may want to run the database server on a separate node as well.
+
+h2. A unique identifier
+
+Each Arvados installation should have a globally unique identifier, which is a unique 5-character alphanumeric string. Here is a snippet of ruby that generates such a string based on the hostname of your computer:
+
+<pre>
+Digest::MD5.hexdigest(`hostname`).to_i(16).to_s(36)[0..4]
+</pre>
+
+You may also use a different method to pick the unique identifier. The unique identifier will be part of the hostname of the services in your Arvados cluster. The rest of this documentation will refer to it as your @uuid_prefix@. 
+
+
+h2. SSL certificates
+
+There are four public-facing services that will require an SSL certificate. If you do not have official SSL certificates, you can use self-signed certificates. By convention, we use the following hostname pattern:
+
+<div class="offset1">
+table(table table-bordered table-condensed).
+|_Function_|_Hostname_|
+|Arvados REST API|@uuid_prefix@.your.domain|
+|Arvados Websockets endpoint|ws.@uuid_prefix@.your.domain|
+|Arvados Keepproxy server|keep.@uuid_prefix@.your.domain|
+|Arvados Workbench|workbench.@uuid_prefix@.your.domain|
+</div>
diff --git a/doc/install/install-shell-server.html.textile.liquid b/doc/install/install-shell-server.html.textile.liquid
new file mode 100644 (file)
index 0000000..25ddf7b
--- /dev/null
@@ -0,0 +1,17 @@
+---
+layout: default
+navsection: installguide
+title: Install a shell server
+...
+
+This installation guide assumes you are on a 64 bit Debian or Ubuntu system.
+
+There is nothing inherently special about an Arvados shell server. It is just a GNU/Linux machine with the Arvados SDKs installed. For optimal performance, the Arvados shell server should be on the same LAN as the Arvados cluster, but that is not required.
+
+h2. Install API tokens
+
+Please follow the "API token guide":{{site.baseurl}}/user/reference/api-tokens.html to get API tokens for your user and install them on your shell server. We will use those tokens to test the SDKs as we install them.
+
+h2. Install the SDKs
+
+Install the "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html and the "Command line SDK":{{site.baseurl}}/sdk/cli/install.html
diff --git a/doc/install/install-sso.html.textile.liquid b/doc/install/install-sso.html.textile.liquid
new file mode 100644 (file)
index 0000000..f272d85
--- /dev/null
@@ -0,0 +1,77 @@
+---
+layout: default
+navsection: installguide
+title: Install Single Sign On (SSO) server
+...
+
+h2(#dependencies). Install dependencies
+
+Make sure you have "Ruby and Bundler":install-manual-prerequisites-ruby.html installed.
+
+h2(#install). Install SSO server
+
+h3. Get SSO server code and create database
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd $HOME</span> # (or wherever you want to install)
+~$ <span class="userinput">git clone https://github.com/curoverse/sso-devise-omniauth-provider.git</span>
+~$ <span class="userinput">cd sso-devise-omniauth-provider</span>
+~/sso-devise-omniauth-provider$ <span class="userinput">bundle install</span>
+~/sso-devise-omniauth-provider$ <span class="userinput">RAILS_ENV=production bundle exec rake db:create</span>
+~/sso-devise-omniauth-provider$ <span class="userinput">RAILS_ENV=production bundle exec rake db:migrate</span>
+</code></pre>
+</notextile>
+
+h3. Configure Rails secret
+
+Create a secret:
+
+<notextile>
+<pre><code>~/sso-devise-omniauth-provider$ <span class="userinput">cp -i config/initializers/secret_token.rb.example config/initializers/secret_token.rb</span>
+~/sso-devise-omniauth-provider$ <span class="userinput">rake secret</span>
+zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
+</code></pre>
+</notextile>
+
+Edit @config/initializers/secret_token.rb@ to set @config.secret_token@ to the string produced by @rake secret@.
+
+h3. Configure upstream authentication provider
+
+<notextile>
+<pre><code>~/sso-devise-omniauth-provider$ <span class="userinput">cp -i config/environments/production.rb.example config/environments/production.rb</span>
+</code></pre>
+</notextile>
+
+Edit @config/environments/production.rb@ to set @config.google_oauth2_client_id@ and @config.google_oauth2_client_secret@.  See "Omniauth Google OAuth2 gem documentation":https://github.com/zquestz/omniauth-google-oauth2 and "Using OAuth 2.0 to Access Google APIs":https://developers.google.com/accounts/docs/OAuth2 for information about using the "Google Developers Console":https://console.developers.google.com to get a Google client id and client secret.
+
+h3(#client). Create arvados-server client
+
+Use @rails console@ to create a @Client@ record that will be used by the Arvados API server.  The values of @app_id@ and @app_secret@ correspond to the @APP_ID@ and @APP_SECRET@ that must be set in "Setting up Omniauth in the API server.":install-api-server.html#omniauth
+
+<notextile>
+<pre><code>~/sso-devise-omniauth-provider$ <span class="userinput">rake secret</span>
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+~/sso-devise-omniauth-provider$ <span class="userinput">RAILS_ENV=production bundle exec rails console</span>
+irb(main):001:0&gt; <span class="userinput">c = Client.new</span>
+irb(main):002:0&gt; <span class="userinput">c.name = "joshid"</span>
+irb(main):003:0&gt; <span class="userinput">c.app_id = "arvados-server"</span>
+irb(main):004:0&gt; <span class="userinput">c.app_secret = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"</span>
+irb(main):005:0&gt; <span class="userinput">c.save!</span>
+irb(main):006:0&gt; <span class="userinput">quit</span>
+</code></pre>
+</notextile>
+
+h2. Start the SSO server
+
+h3. Run a simple standalone server
+
+You can use the Webrick server that is bundled with Ruby to quickly verify that your installation is functioning:
+
+<notextile>
+<pre><code>~/arvados/services/api$ <span class="userinput">RAILS_ENV=production bundle exec rails server</span>
+</code></pre>
+</notextile>
+
+h3. Production environment
+
+As a Ruby on Rails application, the SSO server should be compatible with any Ruby application server that supports Rack applications.  We recommend "Passenger":https://www.phusionpassenger.com/ to run the SSO server in production.
diff --git a/doc/install/install-workbench-app.html.textile.liquid b/doc/install/install-workbench-app.html.textile.liquid
new file mode 100644 (file)
index 0000000..00f33ac
--- /dev/null
@@ -0,0 +1,162 @@
+---
+layout: default
+navsection: installguide
+title: Install Workbench
+...
+
+This installation guide assumes you are on a 64 bit Debian or Ubuntu system.
+
+h2. Install prerequisites
+
+<notextile>
+<pre><code>~$ <span class="userinput">sudo apt-get install \
+    bison build-essential gettext libcurl3 libcurl3-gnutls \
+    libcurl4-openssl-dev libpcre3-dev libpq-dev libreadline-dev \
+    libssl-dev libxslt1.1 sudo wget zlib1g-dev graphviz
+</span></code></pre></notextile>
+
+Also make sure you have "Ruby and bundler":install-manual-prerequisites-ruby.html installed.
+
+Workbench doesn't need its own database, so it does not need to have PostgreSQL installed.
+
+h2. Download the source tree
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd $HOME</span> # (or wherever you want to install)
+~$ <span class="userinput">git clone https://github.com/curoverse/arvados.git</span>
+</code></pre></notextile>
+
+See also: "Downloading the source code":https://arvados.org/projects/arvados/wiki/Download on the Arvados wiki.
+
+The Workbench application is in @apps/workbench@ in the source tree.
+
+h2. Install gem dependencies
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd arvados/apps/workbench</span>
+~/arvados/apps/workbench$ <span class="userinput">bundle install</span>
+</code></pre>
+</notextile>
+
+Alternatively, if you don't have sudo/root privileges on the host, install the gems in your own directory instead of installing them system-wide:
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd arvados/apps/workbench</span>
+~/arvados/apps/workbench$ <span class="userinput">bundle install --path=vendor/bundle</span>
+</code></pre></notextile>
+
+The @bundle install@ command might produce a warning about the themes_for_rails gem. This is OK:
+
+<notextile>
+<pre><code>themes_for_rails at /home/<b>you</b>/.rvm/gems/ruby-2.1.1/bundler/gems/themes_for_rails-1fd2d7897d75 did not have a valid gemspec.
+This prevents bundler from installing bins or native extensions, but that may not affect its functionality.
+The validation message from Rubygems was:
+  duplicate dependency on rails (= 3.0.11, development), (>= 3.0.0) use:
+    add_runtime_dependency 'rails', '= 3.0.11', '>= 3.0.0'
+Using themes_for_rails (0.5.1) from https://github.com/holtkampw/themes_for_rails (at 1fd2d78)
+</code></pre></notextile>
+
+h2. Choose your environment
+
+The Workbench application can be run in @development@ or in @production@ mode. Unless this installation is going to be used for development on the Workbench application itself, you should run it in @production@ mode.
+
+Copy the example environment file for your environment. For example, if you choose @production@:
+
+<notextile>
+<pre><code>~/arvados/apps/workbench$ <span class="userinput">cp -i config/environments/production.rb.example config/environments/production.rb</span>
+</code></pre></notextile>
+
+h2. Configure the Workbench application
+
+First, copy the example configuration file:
+
+<notextile>
+<pre><code>~/arvados/apps/workbench$ <span class="userinput">cp -i config/application.yml.example config/application.yml</span>
+</code></pre></notextile>
+
+The Workbench application reads the @config/application.yml@ file, as well as the @config/application.default.yml@ file. Values in @config/application.yml@ take precedence over the defaults that are defined in @config/application.default.yml@. The @config/application.yml.example@ file is not read by the Workbench application and is provided for installation convenience, only.
+
+Consult @config/application.default.yml@ for a full list of configuration options. Always put your local configuration in @config/application.yml@, never edit @config/application.default.yml@.
+
+h3. secret_token
+
+This application needs a secret token. Generate a new secret:
+
+<notextile>
+<pre><code>~/arvados/apps/workbench$ <span class="userinput">rake secret</span>
+aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+</code></pre>
+</notextile>
+
+Then put that value in the @secret_token@ field.
+
+h3. arvados_login_base and arvados_v1_base
+
+Point @arvados_login_base@ and @arvados_v1_base@ at your "API server":install-api-server.html. For example like this:
+
+<notextile>
+<pre><code>arvados_login_base: https://prefix_uuid.your.domain/login
+arvados_v1_base: https://prefix_uuid.your.domain/arvados/v1
+</code></pre>
+</notextile>
+
+h3. site_name
+
+@site_name@ can be set to any arbitrary string. It is used to identify this Workbench to people visiting it.
+
+h3. arvados_insecure_https
+
+If the SSL certificate you use for your API server isn't an official certificate signed by a CA, make sure @arvados_insecure_https@ is @true@.
+
+h3. other options
+
+Consult @application.default.yml@ for a full list of configuration options. Always put your local configuration in @application.yml@ instead of editing @application.default.yml@.
+
+Copy @config/piwik.yml.example@ to @config/piwik.yml@ and edit to suit.
+
+h2. Start the Workbench application
+
+h3. Development environment
+
+If you plan to run in development mode, you can now run the development server this way:
+
+<notextile>
+<pre><code>~/arvados/apps/workbench$ <span class="userinput">bundle exec rails server --port=3031</span>
+</code></pre></notextile>
+
+h3. Production environment
+
+We recommend "Passenger":https://www.phusionpassenger.com/ to run the API server in production.
+
+Point it to the apps/workbench directory in the source tree.
+
+h2. Trusted client setting
+
+Log in to Workbench once to ensure that the Arvados API server has a record of the Workbench client. (It's OK if Workbench says your account hasn't been activated yet. We'll deal with that next.)
+
+In the <strong>API server</strong> project root, start the rails console.  Locate the ApiClient record for your Workbench installation (typically, while you're setting this up, the @last@ one in the database is the one you want), then set the @is_trusted@ flag for the appropriate client record:
+
+<notextile><pre><code>~/arvados/services/api$ <span class="userinput">RAILS_ENV=production bundle exec rails console</span>
+irb(main):001:0&gt; <span class="userinput">wb = ApiClient.all.last; [wb.url_prefix, wb.created_at]</span>
+=&gt; ["https://workbench.example.com/", Sat, 19 Apr 2014 03:35:12 UTC +00:00]
+irb(main):002:0&gt; <span class="userinput">include CurrentApiClient</span>
+=&gt; true
+irb(main):003:0&gt; <span class="userinput">act_as_system_user do wb.update_attributes!(is_trusted: true) end</span>
+=&gt; true
+</code></pre>
+</notextile>
+
+h2(#admin-user). Add an admin user
+
+Next, we're going to use the rails console on the <strong>API server</strong> to activate your own account and give yourself admin privileges:
+
+<notextile>
+<pre><code>~/arvados/services/api$ <span class="userinput">RAILS_ENV=production bundle exec rails console</span>
+irb(main):001:0&gt; <span class="userinput">Thread.current[:user] = User.all.select(&:identity_url).last</span>
+irb(main):002:0&gt; <span class="userinput">Thread.current[:user].is_admin = true</span>
+irb(main):003:0&gt; <span class="userinput">Thread.current[:user].update_attributes is_admin: true, is_active: true</span>
+irb(main):004:0&gt; <span class="userinput">User.where(is_admin: true).collect &:email</span>
+=&gt; ["root", "<b>your_address@example.com</b>"]
+</code></pre></notextile>
+
+At this point, you should have a working Workbench login with administrator privileges. Revisit your Workbench URL in a browser and reload the page to access it.
diff --git a/doc/js/bootstrap.js b/doc/js/bootstrap.js
new file mode 100644 (file)
index 0000000..39ec471
--- /dev/null
@@ -0,0 +1,1951 @@
+/*!
+ * Bootstrap v3.1.0 (http://getbootstrap.com)
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ */
+
+if (typeof jQuery === 'undefined') { throw new Error('Bootstrap requires jQuery') }
+
+/* ========================================================================
+ * Bootstrap: transition.js v3.1.0
+ * http://getbootstrap.com/javascript/#transitions
+ * ========================================================================
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // CSS TRANSITION SUPPORT (Shoutout: http://www.modernizr.com/)
+  // ============================================================
+
+  function transitionEnd() {
+    var el = document.createElement('bootstrap')
+
+    var transEndEventNames = {
+      'WebkitTransition' : 'webkitTransitionEnd',
+      'MozTransition'    : 'transitionend',
+      'OTransition'      : 'oTransitionEnd otransitionend',
+      'transition'       : 'transitionend'
+    }
+
+    for (var name in transEndEventNames) {
+      if (el.style[name] !== undefined) {
+        return { end: transEndEventNames[name] }
+      }
+    }
+
+    return false // explicit for ie8 (  ._.)
+  }
+
+  // http://blog.alexmaccaw.com/css-transitions
+  $.fn.emulateTransitionEnd = function (duration) {
+    var called = false, $el = this
+    $(this).one($.support.transition.end, function () { called = true })
+    var callback = function () { if (!called) $($el).trigger($.support.transition.end) }
+    setTimeout(callback, duration)
+    return this
+  }
+
+  $(function () {
+    $.support.transition = transitionEnd()
+  })
+
+}(jQuery);
+
+/* ========================================================================
+ * Bootstrap: alert.js v3.1.0
+ * http://getbootstrap.com/javascript/#alerts
+ * ========================================================================
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // ALERT CLASS DEFINITION
+  // ======================
+
+  var dismiss = '[data-dismiss="alert"]'
+  var Alert   = function (el) {
+    $(el).on('click', dismiss, this.close)
+  }
+
+  Alert.prototype.close = function (e) {
+    var $this    = $(this)
+    var selector = $this.attr('data-target')
+
+    if (!selector) {
+      selector = $this.attr('href')
+      selector = selector && selector.replace(/.*(?=#[^\s]*$)/, '') // strip for ie7
+    }
+
+    var $parent = $(selector)
+
+    if (e) e.preventDefault()
+
+    if (!$parent.length) {
+      $parent = $this.hasClass('alert') ? $this : $this.parent()
+    }
+
+    $parent.trigger(e = $.Event('close.bs.alert'))
+
+    if (e.isDefaultPrevented()) return
+
+    $parent.removeClass('in')
+
+    function removeElement() {
+      $parent.trigger('closed.bs.alert').remove()
+    }
+
+    $.support.transition && $parent.hasClass('fade') ?
+      $parent
+        .one($.support.transition.end, removeElement)
+        .emulateTransitionEnd(150) :
+      removeElement()
+  }
+
+
+  // ALERT PLUGIN DEFINITION
+  // =======================
+
+  var old = $.fn.alert
+
+  $.fn.alert = function (option) {
+    return this.each(function () {
+      var $this = $(this)
+      var data  = $this.data('bs.alert')
+
+      if (!data) $this.data('bs.alert', (data = new Alert(this)))
+      if (typeof option == 'string') data[option].call($this)
+    })
+  }
+
+  $.fn.alert.Constructor = Alert
+
+
+  // ALERT NO CONFLICT
+  // =================
+
+  $.fn.alert.noConflict = function () {
+    $.fn.alert = old
+    return this
+  }
+
+
+  // ALERT DATA-API
+  // ==============
+
+  $(document).on('click.bs.alert.data-api', dismiss, Alert.prototype.close)
+
+}(jQuery);
+
+/* ========================================================================
+ * Bootstrap: button.js v3.1.0
+ * http://getbootstrap.com/javascript/#buttons
+ * ========================================================================
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // BUTTON PUBLIC CLASS DEFINITION
+  // ==============================
+
+  var Button = function (element, options) {
+    this.$element  = $(element)
+    this.options   = $.extend({}, Button.DEFAULTS, options)
+    this.isLoading = false
+  }
+
+  Button.DEFAULTS = {
+    loadingText: 'loading...'
+  }
+
+  Button.prototype.setState = function (state) {
+    var d    = 'disabled'
+    var $el  = this.$element
+    var val  = $el.is('input') ? 'val' : 'html'
+    var data = $el.data()
+
+    state = state + 'Text'
+
+    if (!data.resetText) $el.data('resetText', $el[val]())
+
+    $el[val](data[state] || this.options[state])
+
+    // push to event loop to allow forms to submit
+    setTimeout($.proxy(function () {
+      if (state == 'loadingText') {
+        this.isLoading = true
+        $el.addClass(d).attr(d, d)
+      } else if (this.isLoading) {
+        this.isLoading = false
+        $el.removeClass(d).removeAttr(d)
+      }
+    }, this), 0)
+  }
+
+  Button.prototype.toggle = function () {
+    var changed = true
+    var $parent = this.$element.closest('[data-toggle="buttons"]')
+
+    if ($parent.length) {
+      var $input = this.$element.find('input')
+      if ($input.prop('type') == 'radio') {
+        if ($input.prop('checked') && this.$element.hasClass('active')) changed = false
+        else $parent.find('.active').removeClass('active')
+      }
+      if (changed) $input.prop('checked', !this.$element.hasClass('active')).trigger('change')
+    }
+
+    if (changed) this.$element.toggleClass('active')
+  }
+
+
+  // BUTTON PLUGIN DEFINITION
+  // ========================
+
+  var old = $.fn.button
+
+  // jQuery plugin: $(el).button('toggle' | stateName | optionsObject).
+  // Lazily instantiates one Button per element under data('bs.button').
+  $.fn.button = function (option) {
+    return this.each(function () {
+      var $this   = $(this)
+      var data    = $this.data('bs.button')
+      var options = typeof option == 'object' && option
+
+      if (!data) $this.data('bs.button', (data = new Button(this, options)))
+
+      if (option == 'toggle') data.toggle()
+      else if (option) data.setState(option)
+    })
+  }
+
+  $.fn.button.Constructor = Button
+
+
+  // BUTTON NO CONFLICT
+  // ==================
+
+  // Restore whatever previously owned $.fn.button and return this plugin.
+  $.fn.button.noConflict = function () {
+    $.fn.button = old
+    return this
+  }
+
+
+  // BUTTON DATA-API
+  // ===============
+
+  // Delegated click handler: any [data-toggle^=button] element (or its
+  // closest .btn ancestor) is toggled without page-level JS wiring.
+  $(document).on('click.bs.button.data-api', '[data-toggle^=button]', function (e) {
+    var $btn = $(e.target)
+    if (!$btn.hasClass('btn')) $btn = $btn.closest('.btn')
+    $btn.button('toggle')
+    e.preventDefault()
+  })
+
+}(jQuery);
+
+/* ========================================================================
+ * Bootstrap: carousel.js v3.1.0
+ * http://getbootstrap.com/javascript/#carousel
+ * ========================================================================
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // CAROUSEL CLASS DEFINITION
+  // =========================
+
+  // Carousel(element, options): cycling slideshow. Caches the indicator
+  // list and, when options.pause == 'hover', suspends cycling while the
+  // pointer is over the carousel.
+  var Carousel = function (element, options) {
+    this.$element    = $(element)
+    this.$indicators = this.$element.find('.carousel-indicators')
+    this.options     = options
+    this.paused      =
+    this.sliding     =
+    this.interval    =
+    this.$active     =
+    this.$items      = null
+
+    this.options.pause == 'hover' && this.$element
+      .on('mouseenter', $.proxy(this.pause, this))
+      .on('mouseleave', $.proxy(this.cycle, this))
+  }
+
+  Carousel.DEFAULTS = {
+    interval: 5000,   // ms between slides; a falsy value disables auto-cycling
+    pause: 'hover',
+    wrap: true        // wrap around from the last item back to the first
+  }
+
+  // (Re)start auto-cycling. Called without an event argument it clears the
+  // paused flag; called from mouseleave (with an event) the flag is kept.
+  Carousel.prototype.cycle =  function (e) {
+    e || (this.paused = false)
+
+    this.interval && clearInterval(this.interval)
+
+    this.options.interval
+      && !this.paused
+      && (this.interval = setInterval($.proxy(this.next, this), this.options.interval))
+
+    return this
+  }
+
+  // Index of the '.item.active' slide among its siblings.
+  Carousel.prototype.getActiveIndex = function () {
+    this.$active = this.$element.find('.item.active')
+    this.$items  = this.$active.parent().children()
+
+    return this.$items.index(this.$active)
+  }
+
+  // Go to slide `pos` (0-based). If a slide is in flight, defer until the
+  // current transition's 'slid' event fires.
+  Carousel.prototype.to = function (pos) {
+    var that        = this
+    var activeIndex = this.getActiveIndex()
+
+    if (pos > (this.$items.length - 1) || pos < 0) return
+
+    if (this.sliding)       return this.$element.one('slid.bs.carousel', function () { that.to(pos) })
+    if (activeIndex == pos) return this.pause().cycle()
+
+    return this.slide(pos > activeIndex ? 'next' : 'prev', $(this.$items[pos]))
+  }
+
+  // Stop auto-cycling. If a CSS transition is mid-flight, fire the
+  // transition-end handler immediately so state is not left half-slid.
+  Carousel.prototype.pause = function (e) {
+    e || (this.paused = true)
+
+    if (this.$element.find('.next, .prev').length && $.support.transition) {
+      this.$element.trigger($.support.transition.end)
+      this.cycle(true)
+    }
+
+    this.interval = clearInterval(this.interval)   // clearInterval returns undefined
+
+    return this
+  }
+
+  Carousel.prototype.next = function () {
+    if (this.sliding) return
+    return this.slide('next')
+  }
+
+  Carousel.prototype.prev = function () {
+    if (this.sliding) return
+    return this.slide('prev')
+  }
+
+  // Core transition: move to the next/previous item (or an explicit $next),
+  // firing cancellable 'slide.bs.carousel' before and 'slid.bs.carousel'
+  // after. Uses a CSS transition when supported and the 'slide' class is set.
+  Carousel.prototype.slide = function (type, next) {
+    var $active   = this.$element.find('.item.active')
+    var $next     = next || $active[type]()
+    var isCycling = this.interval
+    var direction = type == 'next' ? 'left' : 'right'
+    var fallback  = type == 'next' ? 'first' : 'last'
+    var that      = this
+
+    if (!$next.length) {
+      if (!this.options.wrap) return
+      $next = this.$element.find('.item')[fallback]()
+    }
+
+    if ($next.hasClass('active')) return this.sliding = false
+
+    var e = $.Event('slide.bs.carousel', { relatedTarget: $next[0], direction: direction })
+    this.$element.trigger(e)
+    if (e.isDefaultPrevented()) return
+
+    this.sliding = true
+
+    isCycling && this.pause()
+
+    if (this.$indicators.length) {
+      this.$indicators.find('.active').removeClass('active')
+      this.$element.one('slid.bs.carousel', function () {
+        var $nextIndicator = $(that.$indicators.children()[that.getActiveIndex()])
+        $nextIndicator && $nextIndicator.addClass('active')
+      })
+    }
+
+    if ($.support.transition && this.$element.hasClass('slide')) {
+      $next.addClass(type)
+      $next[0].offsetWidth // force reflow
+      $active.addClass(direction)
+      $next.addClass(direction)
+      $active
+        .one($.support.transition.end, function () {
+          $next.removeClass([type, direction].join(' ')).addClass('active')
+          $active.removeClass(['active', direction].join(' '))
+          that.sliding = false
+          setTimeout(function () { that.$element.trigger('slid.bs.carousel') }, 0)
+        })
+        .emulateTransitionEnd($active.css('transition-duration').slice(0, -1) * 1000)
+    } else {
+      $active.removeClass('active')
+      $next.addClass('active')
+      this.sliding = false
+      this.$element.trigger('slid.bs.carousel')
+    }
+
+    isCycling && this.cycle()
+
+    return this
+  }
+
+
+  // CAROUSEL PLUGIN DEFINITION
+  // ==========================
+
+  var old = $.fn.carousel
+
+  // jQuery plugin: a numeric option jumps to that slide, a string option
+  // calls the method of that name, otherwise auto-cycling starts per options.
+  $.fn.carousel = function (option) {
+    return this.each(function () {
+      var $this   = $(this)
+      var data    = $this.data('bs.carousel')
+      var options = $.extend({}, Carousel.DEFAULTS, $this.data(), typeof option == 'object' && option)
+      var action  = typeof option == 'string' ? option : options.slide
+
+      if (!data) $this.data('bs.carousel', (data = new Carousel(this, options)))
+      if (typeof option == 'number') data.to(option)
+      else if (action) data[action]()
+      else if (options.interval) data.pause().cycle()
+    })
+  }
+
+  $.fn.carousel.Constructor = Carousel
+
+
+  // CAROUSEL NO CONFLICT
+  // ====================
+
+  // Restore the previous $.fn.carousel and return this plugin.
+  $.fn.carousel.noConflict = function () {
+    $.fn.carousel = old
+    return this
+  }
+
+
+  // CAROUSEL DATA-API
+  // =================
+
+  // Delegated click handler for [data-slide]/[data-slide-to] controls:
+  // resolves the target carousel from data-target or href, merges data
+  // attributes into options, and jumps to an explicit slide index if given.
+  $(document).on('click.bs.carousel.data-api', '[data-slide], [data-slide-to]', function (e) {
+    var $this   = $(this), href
+    var $target = $($this.attr('data-target') || (href = $this.attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '')) //strip for ie7
+    var options = $.extend({}, $target.data(), $this.data())
+    var slideIndex = $this.attr('data-slide-to')
+    if (slideIndex) options.interval = false   // don't auto-cycle when jumping to a fixed slide
+
+    $target.carousel(options)
+
+    if (slideIndex = $this.attr('data-slide-to')) {
+      $target.data('bs.carousel').to(slideIndex)
+    }
+
+    e.preventDefault()
+  })
+
+  // Auto-initialise carousels marked data-ride="carousel" on window load.
+  $(window).on('load', function () {
+    $('[data-ride="carousel"]').each(function () {
+      var $carousel = $(this)
+      $carousel.carousel($carousel.data())
+    })
+  })
+
+}(jQuery);
+
+/* ========================================================================
+ * Bootstrap: collapse.js v3.1.0
+ * http://getbootstrap.com/javascript/#collapse
+ * ========================================================================
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // COLLAPSE PUBLIC CLASS DEFINITION
+  // ================================
+
+  // Collapse(element, options): animated show/hide of a panel along its
+  // width or height. options.parent enables accordion behaviour;
+  // options.toggle (default true) toggles immediately on construction.
+  var Collapse = function (element, options) {
+    this.$element      = $(element)
+    this.options       = $.extend({}, Collapse.DEFAULTS, options)
+    this.transitioning = null
+
+    if (this.options.parent) this.$parent = $(this.options.parent)
+    if (this.options.toggle) this.toggle()
+  }
+
+  Collapse.DEFAULTS = {
+    toggle: true
+  }
+
+  // Collapse along 'width' when the element has the width class, else 'height'.
+  Collapse.prototype.dimension = function () {
+    var hasWidth = this.$element.hasClass('width')
+    return hasWidth ? 'width' : 'height'
+  }
+
+  // Expand the element, firing cancellable 'show.bs.collapse' first and
+  // 'shown.bs.collapse' when the transition (or immediate change) is done.
+  // In accordion mode any open sibling panel is collapsed first.
+  Collapse.prototype.show = function () {
+    if (this.transitioning || this.$element.hasClass('in')) return
+
+    var startEvent = $.Event('show.bs.collapse')
+    this.$element.trigger(startEvent)
+    if (startEvent.isDefaultPrevented()) return
+
+    var actives = this.$parent && this.$parent.find('> .panel > .in')
+
+    if (actives && actives.length) {
+      var hasData = actives.data('bs.collapse')
+      if (hasData && hasData.transitioning) return
+      actives.collapse('hide')
+      hasData || actives.data('bs.collapse', null)
+    }
+
+    var dimension = this.dimension()
+
+    this.$element
+      .removeClass('collapse')
+      .addClass('collapsing')
+      [dimension](0)
+
+    this.transitioning = 1
+
+    var complete = function () {
+      this.$element
+        .removeClass('collapsing')
+        .addClass('collapse in')
+        [dimension]('auto')
+      this.transitioning = 0
+      this.$element.trigger('shown.bs.collapse')
+    }
+
+    if (!$.support.transition) return complete.call(this)
+
+    var scrollSize = $.camelCase(['scroll', dimension].join('-'))
+
+    this.$element
+      .one($.support.transition.end, $.proxy(complete, this))
+      .emulateTransitionEnd(350)
+      [dimension](this.$element[0][scrollSize])   // animate from 0 to full scroll size
+  }
+
+  // Collapse the element; mirror image of show().
+  Collapse.prototype.hide = function () {
+    if (this.transitioning || !this.$element.hasClass('in')) return
+
+    var startEvent = $.Event('hide.bs.collapse')
+    this.$element.trigger(startEvent)
+    if (startEvent.isDefaultPrevented()) return
+
+    var dimension = this.dimension()
+
+    this.$element
+      [dimension](this.$element[dimension]())
+      [0].offsetHeight   // force reflow so the starting size is committed
+
+    this.$element
+      .addClass('collapsing')
+      .removeClass('collapse')
+      .removeClass('in')
+
+    this.transitioning = 1
+
+    var complete = function () {
+      this.transitioning = 0
+      this.$element
+        .trigger('hidden.bs.collapse')
+        .removeClass('collapsing')
+        .addClass('collapse')
+    }
+
+    if (!$.support.transition) return complete.call(this)
+
+    this.$element
+      [dimension](0)
+      .one($.support.transition.end, $.proxy(complete, this))
+      .emulateTransitionEnd(350)
+  }
+
+  Collapse.prototype.toggle = function () {
+    this[this.$element.hasClass('in') ? 'hide' : 'show']()
+  }
+
+
+  // COLLAPSE PLUGIN DEFINITION
+  // ==========================
+
+  var old = $.fn.collapse
+
+  // jQuery plugin: a string option calls that method; an object option is
+  // merged over Collapse.DEFAULTS and the element's data attributes.
+  $.fn.collapse = function (option) {
+    return this.each(function () {
+      var $this   = $(this)
+      var data    = $this.data('bs.collapse')
+      var options = $.extend({}, Collapse.DEFAULTS, $this.data(), typeof option == 'object' && option)
+
+      // avoid double-toggling: constructing with toggle:true already shows
+      if (!data && options.toggle && option == 'show') option = !option
+      if (!data) $this.data('bs.collapse', (data = new Collapse(this, options)))
+      if (typeof option == 'string') data[option]()
+    })
+  }
+
+  $.fn.collapse.Constructor = Collapse
+
+
+  // COLLAPSE NO CONFLICT
+  // ====================
+
+  // Restore the previous $.fn.collapse and return this plugin.
+  $.fn.collapse.noConflict = function () {
+    $.fn.collapse = old
+    return this
+  }
+
+
+  // COLLAPSE DATA-API
+  // =================
+
+  // Delegated click handler for [data-toggle=collapse] triggers: resolves
+  // the target from data-target or href and toggles it, keeping the
+  // trigger's 'collapsed' class and any accordion siblings in sync.
+  $(document).on('click.bs.collapse.data-api', '[data-toggle=collapse]', function (e) {
+    var $this   = $(this), href
+    var target  = $this.attr('data-target')
+        || e.preventDefault()
+        || (href = $this.attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '') //strip for ie7
+    var $target = $(target)
+    var data    = $target.data('bs.collapse')
+    var option  = data ? 'toggle' : $this.data()
+    var parent  = $this.attr('data-parent')
+    var $parent = parent && $(parent)
+
+    if (!data || !data.transitioning) {
+      if ($parent) $parent.find('[data-toggle=collapse][data-parent="' + parent + '"]').not($this).addClass('collapsed')
+      $this[$target.hasClass('in') ? 'addClass' : 'removeClass']('collapsed')
+    }
+
+    $target.collapse(option)
+  })
+
+}(jQuery);
+
+/* ========================================================================
+ * Bootstrap: dropdown.js v3.1.0
+ * http://getbootstrap.com/javascript/#dropdowns
+ * ========================================================================
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // DROPDOWN CLASS DEFINITION
+  // =========================
+
+  var backdrop = '.dropdown-backdrop'
+  var toggle   = '[data-toggle=dropdown]'
+  // Dropdown(element): binds the toggle handler to the trigger element.
+  var Dropdown = function (element) {
+    $(element).on('click.bs.dropdown', this.toggle)
+  }
+
+  // Open/close the menu. NOTE: runs with `this` bound to the clicked DOM
+  // element (it is installed as an event handler), not a Dropdown instance.
+  Dropdown.prototype.toggle = function (e) {
+    var $this = $(this)
+
+    if ($this.is('.disabled, :disabled')) return
+
+    var $parent  = getParent($this)
+    var isActive = $parent.hasClass('open')
+
+    clearMenus()
+
+    if (!isActive) {
+      if ('ontouchstart' in document.documentElement && !$parent.closest('.navbar-nav').length) {
+        // if mobile we use a backdrop because click events don't delegate
+        $('<div class="dropdown-backdrop"/>').insertAfter($(this)).on('click', clearMenus)
+      }
+
+      var relatedTarget = { relatedTarget: this }
+      $parent.trigger(e = $.Event('show.bs.dropdown', relatedTarget))
+
+      if (e.isDefaultPrevented()) return
+
+      $parent
+        .toggleClass('open')
+        .trigger('shown.bs.dropdown', relatedTarget)
+
+      $this.focus()
+    }
+
+    return false
+  }
+
+  // Keyboard support: up (38) / down (40) move focus through visible menu
+  // items; esc (27) closes the menu and refocuses the toggle.
+  Dropdown.prototype.keydown = function (e) {
+    if (!/(38|40|27)/.test(e.keyCode)) return
+
+    var $this = $(this)
+
+    e.preventDefault()
+    e.stopPropagation()
+
+    if ($this.is('.disabled, :disabled')) return
+
+    var $parent  = getParent($this)
+    var isActive = $parent.hasClass('open')
+
+    if (!isActive || (isActive && e.keyCode == 27)) {
+      if (e.which == 27) $parent.find(toggle).focus()
+      return $this.click()
+    }
+
+    var desc = ' li:not(.divider):visible a'
+    var $items = $parent.find('[role=menu]' + desc + ', [role=listbox]' + desc)
+
+    if (!$items.length) return
+
+    var index = $items.index($items.filter(':focus'))
+
+    if (e.keyCode == 38 && index > 0)                 index--                        // up
+    if (e.keyCode == 40 && index < $items.length - 1) index++                        // down
+    if (!~index)                                      index = 0                      // nothing focused yet
+
+    $items.eq(index).focus()
+  }
+
+  // Close every open dropdown, firing a cancellable 'hide.bs.dropdown' per menu.
+  function clearMenus(e) {
+    $(backdrop).remove()
+    $(toggle).each(function () {
+      var $parent = getParent($(this))
+      var relatedTarget = { relatedTarget: this }
+      if (!$parent.hasClass('open')) return
+      $parent.trigger(e = $.Event('hide.bs.dropdown', relatedTarget))
+      if (e.isDefaultPrevented()) return
+      $parent.removeClass('open').trigger('hidden.bs.dropdown', relatedTarget)
+    })
+  }
+
+  // Resolve the menu container for a toggle: data-target, then a #fragment
+  // href, falling back to the toggle's parent element.
+  function getParent($this) {
+    var selector = $this.attr('data-target')
+
+    if (!selector) {
+      selector = $this.attr('href')
+      selector = selector && /#[A-Za-z]/.test(selector) && selector.replace(/.*(?=#[^\s]*$)/, '') //strip for ie7
+    }
+
+    var $parent = selector && $(selector)
+
+    return $parent && $parent.length ? $parent : $this.parent()
+  }
+
+
+  // DROPDOWN PLUGIN DEFINITION
+  // ==========================
+
+  var old = $.fn.dropdown
+
+  // jQuery plugin: lazily attaches a Dropdown; a string option invokes the
+  // prototype method of that name with the element as context.
+  $.fn.dropdown = function (option) {
+    return this.each(function () {
+      var $this = $(this)
+      var data  = $this.data('bs.dropdown')
+
+      if (!data) $this.data('bs.dropdown', (data = new Dropdown(this)))
+      if (typeof option == 'string') data[option].call($this)
+    })
+  }
+
+  $.fn.dropdown.Constructor = Dropdown
+
+
+  // DROPDOWN NO CONFLICT
+  // ====================
+
+  // Restore the previous $.fn.dropdown and return this plugin.
+  $.fn.dropdown.noConflict = function () {
+    $.fn.dropdown = old
+    return this
+  }
+
+
+  // APPLY TO STANDARD DROPDOWN ELEMENTS
+  // ===================================
+
+  // Any document click closes open menus; clicks inside a dropdown form
+  // are stopped so they don't close their own menu.
+  $(document)
+    .on('click.bs.dropdown.data-api', clearMenus)
+    .on('click.bs.dropdown.data-api', '.dropdown form', function (e) { e.stopPropagation() })
+    .on('click.bs.dropdown.data-api', toggle, Dropdown.prototype.toggle)
+    .on('keydown.bs.dropdown.data-api', toggle + ', [role=menu], [role=listbox]', Dropdown.prototype.keydown)
+
+}(jQuery);
+
+/* ========================================================================
+ * Bootstrap: modal.js v3.1.0
+ * http://getbootstrap.com/javascript/#modals
+ * ========================================================================
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // MODAL CLASS DEFINITION
+  // ======================
+
+  // Modal(element, options): dialog controller. options.remote loads
+  // content into .modal-content and fires 'loaded.bs.modal' when done.
+  var Modal = function (element, options) {
+    this.options   = options
+    this.$element  = $(element)
+    this.$backdrop =
+    this.isShown   = null
+
+    if (this.options.remote) {
+      this.$element
+        .find('.modal-content')
+        .load(this.options.remote, $.proxy(function () {
+          this.$element.trigger('loaded.bs.modal')
+        }, this))
+    }
+  }
+
+  Modal.DEFAULTS = {
+    backdrop: true,   // true, false, or 'static' (backdrop that doesn't dismiss)
+    keyboard: true,   // dismiss on escape
+    show: true        // show immediately when initialised via the plugin
+  }
+
+  Modal.prototype.toggle = function (_relatedTarget) {
+    return this[!this.isShown ? 'show' : 'hide'](_relatedTarget)
+  }
+
+  // Show the modal: fires cancellable 'show.bs.modal', inserts the
+  // backdrop, then reveals the dialog and fires 'shown.bs.modal' after the
+  // fade transition (when transitions are supported).
+  Modal.prototype.show = function (_relatedTarget) {
+    var that = this
+    var e    = $.Event('show.bs.modal', { relatedTarget: _relatedTarget })
+
+    this.$element.trigger(e)
+
+    if (this.isShown || e.isDefaultPrevented()) return
+
+    this.isShown = true
+
+    this.escape()
+
+    this.$element.on('click.dismiss.bs.modal', '[data-dismiss="modal"]', $.proxy(this.hide, this))
+
+    this.backdrop(function () {
+      var transition = $.support.transition && that.$element.hasClass('fade')
+
+      if (!that.$element.parent().length) {
+        that.$element.appendTo(document.body) // don't move modals dom position
+      }
+
+      that.$element
+        .show()
+        .scrollTop(0)
+
+      if (transition) {
+        that.$element[0].offsetWidth // force reflow
+      }
+
+      that.$element
+        .addClass('in')
+        .attr('aria-hidden', false)
+
+      that.enforceFocus()
+
+      var e = $.Event('shown.bs.modal', { relatedTarget: _relatedTarget })
+
+      transition ?
+        that.$element.find('.modal-dialog') // wait for modal to slide in
+          .one($.support.transition.end, function () {
+            that.$element.focus().trigger(e)
+          })
+          .emulateTransitionEnd(300) :
+        that.$element.focus().trigger(e)
+    })
+  }
+
+  // Hide the modal: fires cancellable 'hide.bs.modal', fades out, then
+  // finishes in hideModal(). `e` may be a DOM event (from data-dismiss).
+  Modal.prototype.hide = function (e) {
+    if (e) e.preventDefault()
+
+    e = $.Event('hide.bs.modal')
+
+    this.$element.trigger(e)
+
+    if (!this.isShown || e.isDefaultPrevented()) return
+
+    this.isShown = false
+
+    this.escape()
+
+    $(document).off('focusin.bs.modal')
+
+    this.$element
+      .removeClass('in')
+      .attr('aria-hidden', true)
+      .off('click.dismiss.bs.modal')
+
+    $.support.transition && this.$element.hasClass('fade') ?
+      this.$element
+        .one($.support.transition.end, $.proxy(this.hideModal, this))
+        .emulateTransitionEnd(300) :
+      this.hideModal()
+  }
+
+  // Keep keyboard focus inside the open modal.
+  Modal.prototype.enforceFocus = function () {
+    $(document)
+      .off('focusin.bs.modal') // guard against infinite focus loop
+      .on('focusin.bs.modal', $.proxy(function (e) {
+        if (this.$element[0] !== e.target && !this.$element.has(e.target).length) {
+          this.$element.focus()
+        }
+      }, this))
+  }
+
+  // Bind/unbind escape-key dismissal according to visibility and options.
+  Modal.prototype.escape = function () {
+    if (this.isShown && this.options.keyboard) {
+      this.$element.on('keyup.dismiss.bs.modal', $.proxy(function (e) {
+        e.which == 27 && this.hide()
+      }, this))
+    } else if (!this.isShown) {
+      this.$element.off('keyup.dismiss.bs.modal')
+    }
+  }
+
+  // Final teardown after the hide transition: remove the backdrop and
+  // announce 'hidden.bs.modal'.
+  Modal.prototype.hideModal = function () {
+    var that = this
+    this.$element.hide()
+    this.backdrop(function () {
+      that.removeBackdrop()
+      that.$element.trigger('hidden.bs.modal')
+    })
+  }
+
+  Modal.prototype.removeBackdrop = function () {
+    this.$backdrop && this.$backdrop.remove()
+    this.$backdrop = null
+  }
+
+  // Create or tear down the backdrop element, invoking `callback` once the
+  // backdrop's own fade transition has finished.
+  Modal.prototype.backdrop = function (callback) {
+    var animate = this.$element.hasClass('fade') ? 'fade' : ''
+
+    if (this.isShown && this.options.backdrop) {
+      var doAnimate = $.support.transition && animate
+
+      this.$backdrop = $('<div class="modal-backdrop ' + animate + '" />')
+        .appendTo(document.body)
+
+      this.$element.on('click.dismiss.bs.modal', $.proxy(function (e) {
+        if (e.target !== e.currentTarget) return   // ignore clicks inside the dialog
+        this.options.backdrop == 'static'
+          ? this.$element[0].focus.call(this.$element[0])
+          : this.hide.call(this)
+      }, this))
+
+      if (doAnimate) this.$backdrop[0].offsetWidth // force reflow
+
+      this.$backdrop.addClass('in')
+
+      if (!callback) return
+
+      doAnimate ?
+        this.$backdrop
+          .one($.support.transition.end, callback)
+          .emulateTransitionEnd(150) :
+        callback()
+
+    } else if (!this.isShown && this.$backdrop) {
+      this.$backdrop.removeClass('in')
+
+      $.support.transition && this.$element.hasClass('fade') ?
+        this.$backdrop
+          .one($.support.transition.end, callback)
+          .emulateTransitionEnd(150) :
+        callback()
+
+    } else if (callback) {
+      callback()
+    }
+  }
+
+
+  // MODAL PLUGIN DEFINITION
+  // =======================
+
+  var old = $.fn.modal
+
+  // jQuery plugin: a string option calls that method (passing
+  // _relatedTarget through); otherwise create and show per options.show.
+  $.fn.modal = function (option, _relatedTarget) {
+    return this.each(function () {
+      var $this   = $(this)
+      var data    = $this.data('bs.modal')
+      var options = $.extend({}, Modal.DEFAULTS, $this.data(), typeof option == 'object' && option)
+
+      if (!data) $this.data('bs.modal', (data = new Modal(this, options)))
+      if (typeof option == 'string') data[option](_relatedTarget)
+      else if (options.show) data.show(_relatedTarget)
+    })
+  }
+
+  $.fn.modal.Constructor = Modal
+
+
+  // MODAL NO CONFLICT
+  // =================
+
+  // Restore the previous $.fn.modal and return this plugin.
+  $.fn.modal.noConflict = function () {
+    $.fn.modal = old
+    return this
+  }
+
+
+  // MODAL DATA-API
+  // ==============
+
+  // Delegated click handler for [data-toggle="modal"] triggers: resolves
+  // the target from data-target or href; a non-fragment href becomes the
+  // 'remote' option. Focus returns to the trigger when the modal hides.
+  $(document).on('click.bs.modal.data-api', '[data-toggle="modal"]', function (e) {
+    var $this   = $(this)
+    var href    = $this.attr('href')
+    var $target = $($this.attr('data-target') || (href && href.replace(/.*(?=#[^\s]+$)/, ''))) //strip for ie7
+    var option  = $target.data('bs.modal') ? 'toggle' : $.extend({ remote: !/#/.test(href) && href }, $target.data(), $this.data())
+
+    if ($this.is('a')) e.preventDefault()
+
+    $target
+      .modal(option, this)
+      .one('hide', function () {
+        $this.is(':visible') && $this.focus()
+      })
+  })
+
+  // Toggle a body class so page scrolling can be suppressed while open.
+  $(document)
+    .on('show.bs.modal', '.modal', function () { $(document.body).addClass('modal-open') })
+    .on('hidden.bs.modal', '.modal', function () { $(document.body).removeClass('modal-open') })
+
+}(jQuery);
+
+/* ========================================================================
+ * Bootstrap: tooltip.js v3.1.0
+ * http://getbootstrap.com/javascript/#tooltip
+ * Inspired by the original jQuery.tipsy by Jason Frame
+ * ========================================================================
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // TOOLTIP PUBLIC CLASS DEFINITION
+  // ===============================
+
+  // Tooltip(element, options): positioned overlay showing the element's
+  // title. init() does the real work so subclasses can reuse it with a
+  // different type name.
+  var Tooltip = function (element, options) {
+    this.type       =
+    this.options    =
+    this.enabled    =
+    this.timeout    =
+    this.hoverState =
+    this.$element   = null
+
+    this.init('tooltip', element, options)
+  }
+
+  Tooltip.DEFAULTS = {
+    animation: true,
+    placement: 'top',
+    selector: false,
+    template: '<div class="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>',
+    trigger: 'hover focus',
+    title: '',
+    delay: 0,
+    html: false,
+    container: false
+  }
+
+  // Bind the configured triggers. 'click' toggles; 'hover'/'focus' map to
+  // enter/leave; 'manual' binds nothing. With options.selector the events
+  // are delegated and _options records the overrides for per-target tips.
+  Tooltip.prototype.init = function (type, element, options) {
+    this.enabled  = true
+    this.type     = type
+    this.$element = $(element)
+    this.options  = this.getOptions(options)
+
+    var triggers = this.options.trigger.split(' ')
+
+    for (var i = triggers.length; i--;) {
+      var trigger = triggers[i]
+
+      if (trigger == 'click') {
+        this.$element.on('click.' + this.type, this.options.selector, $.proxy(this.toggle, this))
+      } else if (trigger != 'manual') {
+        var eventIn  = trigger == 'hover' ? 'mouseenter' : 'focusin'
+        var eventOut = trigger == 'hover' ? 'mouseleave' : 'focusout'
+
+        this.$element.on(eventIn  + '.' + this.type, this.options.selector, $.proxy(this.enter, this))
+        this.$element.on(eventOut + '.' + this.type, this.options.selector, $.proxy(this.leave, this))
+      }
+    }
+
+    this.options.selector ?
+      (this._options = $.extend({}, this.options, { trigger: 'manual', selector: '' })) :
+      this.fixTitle()
+  }
+
+  Tooltip.prototype.getDefaults = function () {
+    return Tooltip.DEFAULTS
+  }
+
+  // Merge defaults, data-* attributes and caller options; a numeric delay
+  // is normalised to the { show, hide } object form.
+  Tooltip.prototype.getOptions = function (options) {
+    options = $.extend({}, this.getDefaults(), this.$element.data(), options)
+
+    if (options.delay && typeof options.delay == 'number') {
+      options.delay = {
+        show: options.delay,
+        hide: options.delay
+      }
+    }
+
+    return options
+  }
+
+  // Options that differ from the defaults — used when lazily creating
+  // delegated per-target instances so only explicit overrides propagate.
+  Tooltip.prototype.getDelegateOptions = function () {
+    var options  = {}
+    var defaults = this.getDefaults()
+
+    this._options && $.each(this._options, function (key, value) {
+      if (defaults[key] != value) options[key] = value
+    })
+
+    return options
+  }
+
+  // Pointer/focus entered: resolve (or lazily create) the instance for the
+  // actual target, then show immediately or after options.delay.show.
+  Tooltip.prototype.enter = function (obj) {
+    var self = obj instanceof this.constructor ?
+      obj : $(obj.currentTarget)[this.type](this.getDelegateOptions()).data('bs.' + this.type)
+
+    clearTimeout(self.timeout)
+
+    self.hoverState = 'in'
+
+    if (!self.options.delay || !self.options.delay.show) return self.show()
+
+    self.timeout = setTimeout(function () {
+      if (self.hoverState == 'in') self.show()   // still hovered after the delay
+    }, self.options.delay.show)
+  }
+
+  // Pointer/focus left: mirror of enter(), hiding after options.delay.hide.
+  Tooltip.prototype.leave = function (obj) {
+    var self = obj instanceof this.constructor ?
+      obj : $(obj.currentTarget)[this.type](this.getDelegateOptions()).data('bs.' + this.type)
+
+    clearTimeout(self.timeout)
+
+    self.hoverState = 'out'
+
+    if (!self.options.delay || !self.options.delay.hide) return self.hide()
+
+    self.timeout = setTimeout(function () {
+      if (self.hoverState == 'out') self.hide()
+    }, self.options.delay.hide)
+  }
+
+  // Render and display the tip: fires cancellable 'show.bs.' + type,
+  // resolves placement (including the 'auto' modifier, flipped against the
+  // container/viewport), positions the tip, and fires 'shown' after any fade.
+  Tooltip.prototype.show = function () {
+    var e = $.Event('show.bs.' + this.type)
+
+    if (this.hasContent() && this.enabled) {
+      this.$element.trigger(e)
+
+      if (e.isDefaultPrevented()) return
+      var that = this;
+
+      var $tip = this.tip()
+
+      this.setContent()
+
+      if (this.options.animation) $tip.addClass('fade')
+
+      var placement = typeof this.options.placement == 'function' ?
+        this.options.placement.call(this, $tip[0], this.$element[0]) :
+        this.options.placement
+
+      var autoToken = /\s?auto?\s?/i
+      var autoPlace = autoToken.test(placement)
+      if (autoPlace) placement = placement.replace(autoToken, '') || 'top'
+
+      $tip
+        .detach()
+        .css({ top: 0, left: 0, display: 'block' })
+        .addClass(placement)
+
+      this.options.container ? $tip.appendTo(this.options.container) : $tip.insertAfter(this.$element)
+
+      var pos          = this.getPosition()
+      var actualWidth  = $tip[0].offsetWidth
+      var actualHeight = $tip[0].offsetHeight
+
+      if (autoPlace) {
+        var $parent = this.$element.parent()
+
+        // flip the requested placement when the tip would overflow its container
+        var orgPlacement = placement
+        var docScroll    = document.documentElement.scrollTop || document.body.scrollTop
+        var parentWidth  = this.options.container == 'body' ? window.innerWidth  : $parent.outerWidth()
+        var parentHeight = this.options.container == 'body' ? window.innerHeight : $parent.outerHeight()
+        var parentLeft   = this.options.container == 'body' ? 0 : $parent.offset().left
+
+        placement = placement == 'bottom' && pos.top   + pos.height  + actualHeight - docScroll > parentHeight  ? 'top'    :
+                    placement == 'top'    && pos.top   - docScroll   - actualHeight < 0                         ? 'bottom' :
+                    placement == 'right'  && pos.right + actualWidth > parentWidth                              ? 'left'   :
+                    placement == 'left'   && pos.left  - actualWidth < parentLeft                               ? 'right'  :
+                    placement
+
+        $tip
+          .removeClass(orgPlacement)
+          .addClass(placement)
+      }
+
+      var calculatedOffset = this.getCalculatedOffset(placement, pos, actualWidth, actualHeight)
+
+      this.applyPlacement(calculatedOffset, placement)
+      this.hoverState = null
+
+      var complete = function() {
+        that.$element.trigger('shown.bs.' + that.type)
+      }
+
+      $.support.transition && this.$tip.hasClass('fade') ?
+        $tip
+          .one($.support.transition.end, complete)
+          .emulateTransitionEnd(150) :
+        complete()
+    }
+  }
+
+  // Move the tip to `offset`, then compensate for margins, for any resize
+  // the new position caused, and for left-edge clipping; finally re-centre
+  // the arrow via replaceArrow().
+  Tooltip.prototype.applyPlacement = function (offset, placement) {
+    var replace
+    var $tip   = this.tip()
+    var width  = $tip[0].offsetWidth
+    var height = $tip[0].offsetHeight
+
+    // manually read margins because getBoundingClientRect includes difference
+    var marginTop = parseInt($tip.css('margin-top'), 10)
+    var marginLeft = parseInt($tip.css('margin-left'), 10)
+
+    // we must check for NaN for ie 8/9
+    if (isNaN(marginTop))  marginTop  = 0
+    if (isNaN(marginLeft)) marginLeft = 0
+
+    offset.top  = offset.top  + marginTop
+    offset.left = offset.left + marginLeft
+
+    // $.fn.offset doesn't round pixel values
+    // so we use setOffset directly with our own function B-0
+    $.offset.setOffset($tip[0], $.extend({
+      using: function (props) {
+        $tip.css({
+          top: Math.round(props.top),
+          left: Math.round(props.left)
+        })
+      }
+    }, offset), 0)
+
+    $tip.addClass('in')
+
+    // check to see if placing tip in new offset caused the tip to resize itself
+    var actualWidth  = $tip[0].offsetWidth
+    var actualHeight = $tip[0].offsetHeight
+
+    if (placement == 'top' && actualHeight != height) {
+      replace = true
+      offset.top = offset.top + height - actualHeight
+    }
+
+    if (/bottom|top/.test(placement)) {
+      var delta = 0
+
+      if (offset.left < 0) {   // tip clipped at the left edge: shift it back in
+        delta       = offset.left * -2
+        offset.left = 0
+
+        $tip.offset(offset)
+
+        actualWidth  = $tip[0].offsetWidth
+        actualHeight = $tip[0].offsetHeight
+      }
+
+      this.replaceArrow(delta - width + actualWidth, actualWidth, 'left')
+    } else {
+      this.replaceArrow(actualHeight - height, actualHeight, 'top')
+    }
+
+    if (replace) $tip.offset(offset)
+  }
+
+  // Re-position the arrow (as a percentage along `position`) so it keeps
+  // pointing at the trigger after the tip itself was shifted by `delta` px.
+  Tooltip.prototype.replaceArrow = function (delta, dimension, position) {
+    this.arrow().css(position, delta ? (50 * (1 - delta / dimension) + '%') : '')
+  }
+
+  // Fill the tip with the current title (html or text per options.html)
+  // and reset any leftover placement/animation classes.
+  Tooltip.prototype.setContent = function () {
+    var $tip  = this.tip()
+    var title = this.getTitle()
+
+    $tip.find('.tooltip-inner')[this.options.html ? 'html' : 'text'](title)
+    $tip.removeClass('fade in top bottom left right')
+  }
+
+  // Hide the tip: fires cancellable 'hide.bs.' + type, fades out, and
+  // detaches the tip element once the transition ends (unless re-entered
+  // in the meantime — hoverState 'in' keeps it attached).
+  Tooltip.prototype.hide = function () {
+    var that = this
+    var $tip = this.tip()
+    var e    = $.Event('hide.bs.' + this.type)
+
+    function complete() {
+      if (that.hoverState != 'in') $tip.detach()
+      that.$element.trigger('hidden.bs.' + that.type)
+    }
+
+    this.$element.trigger(e)
+
+    if (e.isDefaultPrevented()) return
+
+    $tip.removeClass('in')
+
+    $.support.transition && this.$tip.hasClass('fade') ?
+      $tip
+        .one($.support.transition.end, complete)
+        .emulateTransitionEnd(150) :
+      complete()
+
+    this.hoverState = null
+
+    return this
+  }
+
+  // Stash the title attribute in data-original-title so the browser's own
+  // tooltip is suppressed while this widget owns the element.
+  Tooltip.prototype.fixTitle = function () {
+    var $e = this.$element
+    if ($e.attr('title') || typeof($e.attr('data-original-title')) != 'string') {
+      $e.attr('data-original-title', $e.attr('title') || '').attr('title', '')
+    }
+  }
+
+  Tooltip.prototype.hasContent = function () {
+    return this.getTitle()
+  }
+
+  // Element geometry: the bounding rect (when the browser provides one)
+  // merged with the jQuery document-relative offset.
+  Tooltip.prototype.getPosition = function () {
+    var el = this.$element[0]
+    return $.extend({}, (typeof el.getBoundingClientRect == 'function') ? el.getBoundingClientRect() : {
+      width: el.offsetWidth,
+      height: el.offsetHeight
+    }, this.$element.offset())
+  }
+
+  Tooltip.prototype.getCalculatedOffset = function (placement, pos, actualWidth, actualHeight) {
+    return placement == 'bottom' ? { top: pos.top + pos.height,   left: pos.left + pos.width / 2 - actualWidth / 2  } :
+           placement == 'top'    ? { top: pos.top - actualHeight, left: pos.left + pos.width / 2 - actualWidth / 2  } :
+           placement == 'left'   ? { top: pos.top + pos.height / 2 - actualHeight / 2, left: pos.left - actualWidth } :
+        /* placement == 'right' */ { top: pos.top + pos.height / 2 - actualHeight / 2, left: pos.left + pos.width   }
+  }
+
+  Tooltip.prototype.getTitle = function () {
+    var title
+    var $e = this.$element
+    var o  = this.options
+
+    title = $e.attr('data-original-title')
+      || (typeof o.title == 'function' ? o.title.call($e[0]) :  o.title)
+
+    return title
+  }
+
+  Tooltip.prototype.tip = function () {
+    return this.$tip = this.$tip || $(this.options.template)
+  }
+
+  Tooltip.prototype.arrow = function () {
+    return this.$arrow = this.$arrow || this.tip().find('.tooltip-arrow')
+  }
+
+  Tooltip.prototype.validate = function () {
+    if (!this.$element[0].parentNode) {
+      this.hide()
+      this.$element = null
+      this.options  = null
+    }
+  }
+
+  Tooltip.prototype.enable = function () {
+    this.enabled = true
+  }
+
+  Tooltip.prototype.disable = function () {
+    this.enabled = false
+  }
+
+  Tooltip.prototype.toggleEnabled = function () {
+    this.enabled = !this.enabled
+  }
+
+  Tooltip.prototype.toggle = function (e) {
+    var self = e ? $(e.currentTarget)[this.type](this.getDelegateOptions()).data('bs.' + this.type) : this
+    self.tip().hasClass('in') ? self.leave(self) : self.enter(self)
+  }
+
+  Tooltip.prototype.destroy = function () {
+    clearTimeout(this.timeout)
+    this.hide().$element.off('.' + this.type).removeData('bs.' + this.type)
+  }
+
+
+  // TOOLTIP PLUGIN DEFINITION
+  // =========================
+
+  var old = $.fn.tooltip
+
+  $.fn.tooltip = function (option) {
+    return this.each(function () {
+      var $this   = $(this)
+      var data    = $this.data('bs.tooltip')
+      var options = typeof option == 'object' && option
+
+      if (!data && option == 'destroy') return
+      if (!data) $this.data('bs.tooltip', (data = new Tooltip(this, options)))
+      if (typeof option == 'string') data[option]()
+    })
+  }
+
+  $.fn.tooltip.Constructor = Tooltip
+
+
+  // TOOLTIP NO CONFLICT
+  // ===================
+
+  $.fn.tooltip.noConflict = function () {
+    $.fn.tooltip = old
+    return this
+  }
+
+}(jQuery);
+
+/* ========================================================================
+ * Bootstrap: popover.js v3.1.0
+ * http://getbootstrap.com/javascript/#popovers
+ * ========================================================================
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // POPOVER PUBLIC CLASS DEFINITION
+  // ===============================
+
+  var Popover = function (element, options) {
+    this.init('popover', element, options)
+  }
+
+  if (!$.fn.tooltip) throw new Error('Popover requires tooltip.js')
+
+  Popover.DEFAULTS = $.extend({}, $.fn.tooltip.Constructor.DEFAULTS, {
+    placement: 'right',
+    trigger: 'click',
+    content: '',
+    template: '<div class="popover"><div class="arrow"></div><h3 class="popover-title"></h3><div class="popover-content"></div></div>'
+  })
+
+
+  // NOTE: POPOVER EXTENDS tooltip.js
+  // ================================
+
+  Popover.prototype = $.extend({}, $.fn.tooltip.Constructor.prototype)
+
+  Popover.prototype.constructor = Popover
+
+  Popover.prototype.getDefaults = function () {
+    return Popover.DEFAULTS
+  }
+
+  Popover.prototype.setContent = function () {
+    var $tip    = this.tip()
+    var title   = this.getTitle()
+    var content = this.getContent()
+
+    $tip.find('.popover-title')[this.options.html ? 'html' : 'text'](title)
+    $tip.find('.popover-content')[ // we use append for html objects to maintain js events
+      this.options.html ? (typeof content == 'string' ? 'html' : 'append') : 'text'
+    ](content)
+
+    $tip.removeClass('fade top bottom left right in')
+
+    // IE8 doesn't accept hiding via the `:empty` pseudo selector, we have to do
+    // this manually by checking the contents.
+    if (!$tip.find('.popover-title').html()) $tip.find('.popover-title').hide()
+  }
+
+  Popover.prototype.hasContent = function () {
+    return this.getTitle() || this.getContent()
+  }
+
+  Popover.prototype.getContent = function () {
+    var $e = this.$element
+    var o  = this.options
+
+    return $e.attr('data-content')
+      || (typeof o.content == 'function' ?
+            o.content.call($e[0]) :
+            o.content)
+  }
+
+  Popover.prototype.arrow = function () {
+    return this.$arrow = this.$arrow || this.tip().find('.arrow')
+  }
+
+  Popover.prototype.tip = function () {
+    if (!this.$tip) this.$tip = $(this.options.template)
+    return this.$tip
+  }
+
+
+  // POPOVER PLUGIN DEFINITION
+  // =========================
+
+  var old = $.fn.popover
+
+  $.fn.popover = function (option) {
+    return this.each(function () {
+      var $this   = $(this)
+      var data    = $this.data('bs.popover')
+      var options = typeof option == 'object' && option
+
+      if (!data && option == 'destroy') return
+      if (!data) $this.data('bs.popover', (data = new Popover(this, options)))
+      if (typeof option == 'string') data[option]()
+    })
+  }
+
+  $.fn.popover.Constructor = Popover
+
+
+  // POPOVER NO CONFLICT
+  // ===================
+
+  $.fn.popover.noConflict = function () {
+    $.fn.popover = old
+    return this
+  }
+
+}(jQuery);
+
+/* ========================================================================
+ * Bootstrap: scrollspy.js v3.1.0
+ * http://getbootstrap.com/javascript/#scrollspy
+ * ========================================================================
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // SCROLLSPY CLASS DEFINITION
+  // ==========================
+
+  function ScrollSpy(element, options) {
+    var href
+    var process  = $.proxy(this.process, this)
+
+    this.$element       = $(element).is('body') ? $(window) : $(element)
+    this.$body          = $('body')
+    this.$scrollElement = this.$element.on('scroll.bs.scroll-spy.data-api', process)
+    this.options        = $.extend({}, ScrollSpy.DEFAULTS, options)
+    this.selector       = (this.options.target
+      || ((href = $(element).attr('href')) && href.replace(/.*(?=#[^\s]+$)/, '')) //strip for ie7
+      || '') + ' .nav li > a'
+    this.offsets        = $([])
+    this.targets        = $([])
+    this.activeTarget   = null
+
+    this.refresh()
+    this.process()
+  }
+
+  ScrollSpy.DEFAULTS = {
+    offset: 10
+  }
+
+  ScrollSpy.prototype.refresh = function () {
+    var offsetMethod = this.$element[0] == window ? 'offset' : 'position'
+
+    this.offsets = $([])
+    this.targets = $([])
+
+    var self     = this
+    var $targets = this.$body
+      .find(this.selector)
+      .map(function () {
+        var $el   = $(this)
+        var href  = $el.data('target') || $el.attr('href')
+        var $href = /^#./.test(href) && $(href)
+
+        return ($href
+          && $href.length
+          && $href.is(':visible')
+          && [[ $href[offsetMethod]().top + (!$.isWindow(self.$scrollElement.get(0)) && self.$scrollElement.scrollTop()), href ]]) || null
+      })
+      .sort(function (a, b) { return a[0] - b[0] })
+      .each(function () {
+        self.offsets.push(this[0])
+        self.targets.push(this[1])
+      })
+  }
+
+  ScrollSpy.prototype.process = function () {
+    var scrollTop    = this.$scrollElement.scrollTop() + this.options.offset
+    var scrollHeight = this.$scrollElement[0].scrollHeight || this.$body[0].scrollHeight
+    var maxScroll    = scrollHeight - this.$scrollElement.height()
+    var offsets      = this.offsets
+    var targets      = this.targets
+    var activeTarget = this.activeTarget
+    var i
+
+    if (scrollTop >= maxScroll) {
+      return activeTarget != (i = targets.last()[0]) && this.activate(i)
+    }
+
+    if (activeTarget && scrollTop <= offsets[0]) {
+      return activeTarget != (i = targets[0]) && this.activate(i)
+    }
+
+    for (i = offsets.length; i--;) {
+      activeTarget != targets[i]
+        && scrollTop >= offsets[i]
+        && (!offsets[i + 1] || scrollTop <= offsets[i + 1])
+        && this.activate( targets[i] )
+    }
+  }
+
+  ScrollSpy.prototype.activate = function (target) {
+    this.activeTarget = target
+
+    $(this.selector)
+      .parentsUntil(this.options.target, '.active')
+      .removeClass('active')
+
+    var selector = this.selector +
+        '[data-target="' + target + '"],' +
+        this.selector + '[href="' + target + '"]'
+
+    var active = $(selector)
+      .parents('li')
+      .addClass('active')
+
+    if (active.parent('.dropdown-menu').length) {
+      active = active
+        .closest('li.dropdown')
+        .addClass('active')
+    }
+
+    active.trigger('activate.bs.scrollspy')
+  }
+
+
+  // SCROLLSPY PLUGIN DEFINITION
+  // ===========================
+
+  var old = $.fn.scrollspy
+
+  $.fn.scrollspy = function (option) {
+    return this.each(function () {
+      var $this   = $(this)
+      var data    = $this.data('bs.scrollspy')
+      var options = typeof option == 'object' && option
+
+      if (!data) $this.data('bs.scrollspy', (data = new ScrollSpy(this, options)))
+      if (typeof option == 'string') data[option]()
+    })
+  }
+
+  $.fn.scrollspy.Constructor = ScrollSpy
+
+
+  // SCROLLSPY NO CONFLICT
+  // =====================
+
+  $.fn.scrollspy.noConflict = function () {
+    $.fn.scrollspy = old
+    return this
+  }
+
+
+  // SCROLLSPY DATA-API
+  // ==================
+
+  $(window).on('load', function () {
+    $('[data-spy="scroll"]').each(function () {
+      var $spy = $(this)
+      $spy.scrollspy($spy.data())
+    })
+  })
+
+}(jQuery);
+
+/* ========================================================================
+ * Bootstrap: tab.js v3.1.0
+ * http://getbootstrap.com/javascript/#tabs
+ * ========================================================================
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // TAB CLASS DEFINITION
+  // ====================
+
+  var Tab = function (element) {
+    this.element = $(element)
+  }
+
+  Tab.prototype.show = function () {
+    var $this    = this.element
+    var $ul      = $this.closest('ul:not(.dropdown-menu)')
+    var selector = $this.data('target')
+
+    if (!selector) {
+      selector = $this.attr('href')
+      selector = selector && selector.replace(/.*(?=#[^\s]*$)/, '') //strip for ie7
+    }
+
+    if ($this.parent('li').hasClass('active')) return
+
+    var previous = $ul.find('.active:last a')[0]
+    var e        = $.Event('show.bs.tab', {
+      relatedTarget: previous
+    })
+
+    $this.trigger(e)
+
+    if (e.isDefaultPrevented()) return
+
+    var $target = $(selector)
+
+    this.activate($this.parent('li'), $ul)
+    this.activate($target, $target.parent(), function () {
+      $this.trigger({
+        type: 'shown.bs.tab',
+        relatedTarget: previous
+      })
+    })
+  }
+
+  Tab.prototype.activate = function (element, container, callback) {
+    var $active    = container.find('> .active')
+    var transition = callback
+      && $.support.transition
+      && $active.hasClass('fade')
+
+    function next() {
+      $active
+        .removeClass('active')
+        .find('> .dropdown-menu > .active')
+        .removeClass('active')
+
+      element.addClass('active')
+
+      if (transition) {
+        element[0].offsetWidth // reflow for transition
+        element.addClass('in')
+      } else {
+        element.removeClass('fade')
+      }
+
+      if (element.parent('.dropdown-menu')) {
+        element.closest('li.dropdown').addClass('active')
+      }
+
+      callback && callback()
+    }
+
+    transition ?
+      $active
+        .one($.support.transition.end, next)
+        .emulateTransitionEnd(150) :
+      next()
+
+    $active.removeClass('in')
+  }
+
+
+  // TAB PLUGIN DEFINITION
+  // =====================
+
+  var old = $.fn.tab
+
+  $.fn.tab = function ( option ) {
+    return this.each(function () {
+      var $this = $(this)
+      var data  = $this.data('bs.tab')
+
+      if (!data) $this.data('bs.tab', (data = new Tab(this)))
+      if (typeof option == 'string') data[option]()
+    })
+  }
+
+  $.fn.tab.Constructor = Tab
+
+
+  // TAB NO CONFLICT
+  // ===============
+
+  $.fn.tab.noConflict = function () {
+    $.fn.tab = old
+    return this
+  }
+
+
+  // TAB DATA-API
+  // ============
+
+  $(document).on('click.bs.tab.data-api', '[data-toggle="tab"], [data-toggle="pill"]', function (e) {
+    e.preventDefault()
+    $(this).tab('show')
+  })
+
+}(jQuery);
+
+/* ========================================================================
+ * Bootstrap: affix.js v3.1.0
+ * http://getbootstrap.com/javascript/#affix
+ * ========================================================================
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ * ======================================================================== */
+
+
++function ($) {
+  'use strict';
+
+  // AFFIX CLASS DEFINITION
+  // ======================
+
+  var Affix = function (element, options) {
+    this.options = $.extend({}, Affix.DEFAULTS, options)
+    this.$window = $(window)
+      .on('scroll.bs.affix.data-api', $.proxy(this.checkPosition, this))
+      .on('click.bs.affix.data-api',  $.proxy(this.checkPositionWithEventLoop, this))
+
+    this.$element     = $(element)
+    this.affixed      =
+    this.unpin        =
+    this.pinnedOffset = null
+
+    this.checkPosition()
+  }
+
+  Affix.RESET = 'affix affix-top affix-bottom'
+
+  Affix.DEFAULTS = {
+    offset: 0
+  }
+
+  Affix.prototype.getPinnedOffset = function () {
+    if (this.pinnedOffset) return this.pinnedOffset
+    this.$element.removeClass(Affix.RESET).addClass('affix')
+    var scrollTop = this.$window.scrollTop()
+    var position  = this.$element.offset()
+    return (this.pinnedOffset = position.top - scrollTop)
+  }
+
+  Affix.prototype.checkPositionWithEventLoop = function () {
+    setTimeout($.proxy(this.checkPosition, this), 1)
+  }
+
+  Affix.prototype.checkPosition = function () {
+    if (!this.$element.is(':visible')) return
+
+    var scrollHeight = $(document).height()
+    var scrollTop    = this.$window.scrollTop()
+    var position     = this.$element.offset()
+    var offset       = this.options.offset
+    var offsetTop    = offset.top
+    var offsetBottom = offset.bottom
+
+    if (this.affixed == 'top') position.top += scrollTop
+
+    if (typeof offset != 'object')         offsetBottom = offsetTop = offset
+    if (typeof offsetTop == 'function')    offsetTop    = offset.top(this.$element)
+    if (typeof offsetBottom == 'function') offsetBottom = offset.bottom(this.$element)
+
+    var affix = this.unpin   != null && (scrollTop + this.unpin <= position.top) ? false :
+                offsetBottom != null && (position.top + this.$element.height() >= scrollHeight - offsetBottom) ? 'bottom' :
+                offsetTop    != null && (scrollTop <= offsetTop) ? 'top' : false
+
+    if (this.affixed === affix) return
+    if (this.unpin) this.$element.css('top', '')
+
+    var affixType = 'affix' + (affix ? '-' + affix : '')
+    var e         = $.Event(affixType + '.bs.affix')
+
+    this.$element.trigger(e)
+
+    if (e.isDefaultPrevented()) return
+
+    this.affixed = affix
+    this.unpin = affix == 'bottom' ? this.getPinnedOffset() : null
+
+    this.$element
+      .removeClass(Affix.RESET)
+      .addClass(affixType)
+      .trigger($.Event(affixType.replace('affix', 'affixed')))
+
+    if (affix == 'bottom') {
+      this.$element.offset({ top: scrollHeight - offsetBottom - this.$element.height() })
+    }
+  }
+
+
+  // AFFIX PLUGIN DEFINITION
+  // =======================
+
+  var old = $.fn.affix
+
+  $.fn.affix = function (option) {
+    return this.each(function () {
+      var $this   = $(this)
+      var data    = $this.data('bs.affix')
+      var options = typeof option == 'object' && option
+
+      if (!data) $this.data('bs.affix', (data = new Affix(this, options)))
+      if (typeof option == 'string') data[option]()
+    })
+  }
+
+  $.fn.affix.Constructor = Affix
+
+
+  // AFFIX NO CONFLICT
+  // =================
+
+  $.fn.affix.noConflict = function () {
+    $.fn.affix = old
+    return this
+  }
+
+
+  // AFFIX DATA-API
+  // ==============
+
+  $(window).on('load', function () {
+    $('[data-spy="affix"]').each(function () {
+      var $spy = $(this)
+      var data = $spy.data()
+
+      data.offset = data.offset || {}
+
+      if (data.offsetBottom) data.offset.bottom = data.offsetBottom
+      if (data.offsetTop)    data.offset.top    = data.offsetTop
+
+      $spy.affix(data)
+    })
+  })
+
+}(jQuery);
diff --git a/doc/js/bootstrap.min.js b/doc/js/bootstrap.min.js
new file mode 100644 (file)
index 0000000..1d4a4ed
--- /dev/null
@@ -0,0 +1,6 @@
+/*!
+ * Bootstrap v3.1.0 (http://getbootstrap.com)
+ * Copyright 2011-2014 Twitter, Inc.
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
+ */
+if("undefined"==typeof jQuery)throw new Error("Bootstrap requires jQuery");+function(a){"use strict";function b(){var a=document.createElement("bootstrap"),b={WebkitTransition:"webkitTransitionEnd",MozTransition:"transitionend",OTransition:"oTransitionEnd otransitionend",transition:"transitionend"};for(var c in b)if(void 0!==a.style[c])return{end:b[c]};return!1}a.fn.emulateTransitionEnd=function(b){var c=!1,d=this;a(this).one(a.support.transition.end,function(){c=!0});var e=function(){c||a(d).trigger(a.support.transition.end)};return setTimeout(e,b),this},a(function(){a.support.transition=b()})}(jQuery),+function(a){"use strict";var b='[data-dismiss="alert"]',c=function(c){a(c).on("click",b,this.close)};c.prototype.close=function(b){function c(){f.trigger("closed.bs.alert").remove()}var d=a(this),e=d.attr("data-target");e||(e=d.attr("href"),e=e&&e.replace(/.*(?=#[^\s]*$)/,""));var f=a(e);b&&b.preventDefault(),f.length||(f=d.hasClass("alert")?d:d.parent()),f.trigger(b=a.Event("close.bs.alert")),b.isDefaultPrevented()||(f.removeClass("in"),a.support.transition&&f.hasClass("fade")?f.one(a.support.transition.end,c).emulateTransitionEnd(150):c())};var d=a.fn.alert;a.fn.alert=function(b){return this.each(function(){var d=a(this),e=d.data("bs.alert");e||d.data("bs.alert",e=new c(this)),"string"==typeof b&&e[b].call(d)})},a.fn.alert.Constructor=c,a.fn.alert.noConflict=function(){return a.fn.alert=d,this},a(document).on("click.bs.alert.data-api",b,c.prototype.close)}(jQuery),+function(a){"use strict";var b=function(c,d){this.$element=a(c),this.options=a.extend({},b.DEFAULTS,d),this.isLoading=!1};b.DEFAULTS={loadingText:"loading..."},b.prototype.setState=function(b){var 
c="disabled",d=this.$element,e=d.is("input")?"val":"html",f=d.data();b+="Text",f.resetText||d.data("resetText",d[e]()),d[e](f[b]||this.options[b]),setTimeout(a.proxy(function(){"loadingText"==b?(this.isLoading=!0,d.addClass(c).attr(c,c)):this.isLoading&&(this.isLoading=!1,d.removeClass(c).removeAttr(c))},this),0)},b.prototype.toggle=function(){var a=!0,b=this.$element.closest('[data-toggle="buttons"]');if(b.length){var c=this.$element.find("input");"radio"==c.prop("type")&&(c.prop("checked")&&this.$element.hasClass("active")?a=!1:b.find(".active").removeClass("active")),a&&c.prop("checked",!this.$element.hasClass("active")).trigger("change")}a&&this.$element.toggleClass("active")};var c=a.fn.button;a.fn.button=function(c){return this.each(function(){var d=a(this),e=d.data("bs.button"),f="object"==typeof c&&c;e||d.data("bs.button",e=new b(this,f)),"toggle"==c?e.toggle():c&&e.setState(c)})},a.fn.button.Constructor=b,a.fn.button.noConflict=function(){return a.fn.button=c,this},a(document).on("click.bs.button.data-api","[data-toggle^=button]",function(b){var c=a(b.target);c.hasClass("btn")||(c=c.closest(".btn")),c.button("toggle"),b.preventDefault()})}(jQuery),+function(a){"use strict";var b=function(b,c){this.$element=a(b),this.$indicators=this.$element.find(".carousel-indicators"),this.options=c,this.paused=this.sliding=this.interval=this.$active=this.$items=null,"hover"==this.options.pause&&this.$element.on("mouseenter",a.proxy(this.pause,this)).on("mouseleave",a.proxy(this.cycle,this))};b.DEFAULTS={interval:5e3,pause:"hover",wrap:!0},b.prototype.cycle=function(b){return b||(this.paused=!1),this.interval&&clearInterval(this.interval),this.options.interval&&!this.paused&&(this.interval=setInterval(a.proxy(this.next,this),this.options.interval)),this},b.prototype.getActiveIndex=function(){return this.$active=this.$element.find(".item.active"),this.$items=this.$active.parent().children(),this.$items.index(this.$active)},b.prototype.to=function(b){var 
c=this,d=this.getActiveIndex();return b>this.$items.length-1||0>b?void 0:this.sliding?this.$element.one("slid.bs.carousel",function(){c.to(b)}):d==b?this.pause().cycle():this.slide(b>d?"next":"prev",a(this.$items[b]))},b.prototype.pause=function(b){return b||(this.paused=!0),this.$element.find(".next, .prev").length&&a.support.transition&&(this.$element.trigger(a.support.transition.end),this.cycle(!0)),this.interval=clearInterval(this.interval),this},b.prototype.next=function(){return this.sliding?void 0:this.slide("next")},b.prototype.prev=function(){return this.sliding?void 0:this.slide("prev")},b.prototype.slide=function(b,c){var d=this.$element.find(".item.active"),e=c||d[b](),f=this.interval,g="next"==b?"left":"right",h="next"==b?"first":"last",i=this;if(!e.length){if(!this.options.wrap)return;e=this.$element.find(".item")[h]()}if(e.hasClass("active"))return this.sliding=!1;var j=a.Event("slide.bs.carousel",{relatedTarget:e[0],direction:g});return this.$element.trigger(j),j.isDefaultPrevented()?void 0:(this.sliding=!0,f&&this.pause(),this.$indicators.length&&(this.$indicators.find(".active").removeClass("active"),this.$element.one("slid.bs.carousel",function(){var b=a(i.$indicators.children()[i.getActiveIndex()]);b&&b.addClass("active")})),a.support.transition&&this.$element.hasClass("slide")?(e.addClass(b),e[0].offsetWidth,d.addClass(g),e.addClass(g),d.one(a.support.transition.end,function(){e.removeClass([b,g].join(" ")).addClass("active"),d.removeClass(["active",g].join(" ")),i.sliding=!1,setTimeout(function(){i.$element.trigger("slid.bs.carousel")},0)}).emulateTransitionEnd(1e3*d.css("transition-duration").slice(0,-1))):(d.removeClass("active"),e.addClass("active"),this.sliding=!1,this.$element.trigger("slid.bs.carousel")),f&&this.cycle(),this)};var c=a.fn.carousel;a.fn.carousel=function(c){return this.each(function(){var d=a(this),e=d.data("bs.carousel"),f=a.extend({},b.DEFAULTS,d.data(),"object"==typeof c&&c),g="string"==typeof 
c?c:f.slide;e||d.data("bs.carousel",e=new b(this,f)),"number"==typeof c?e.to(c):g?e[g]():f.interval&&e.pause().cycle()})},a.fn.carousel.Constructor=b,a.fn.carousel.noConflict=function(){return a.fn.carousel=c,this},a(document).on("click.bs.carousel.data-api","[data-slide], [data-slide-to]",function(b){var c,d=a(this),e=a(d.attr("data-target")||(c=d.attr("href"))&&c.replace(/.*(?=#[^\s]+$)/,"")),f=a.extend({},e.data(),d.data()),g=d.attr("data-slide-to");g&&(f.interval=!1),e.carousel(f),(g=d.attr("data-slide-to"))&&e.data("bs.carousel").to(g),b.preventDefault()}),a(window).on("load",function(){a('[data-ride="carousel"]').each(function(){var b=a(this);b.carousel(b.data())})})}(jQuery),+function(a){"use strict";var b=function(c,d){this.$element=a(c),this.options=a.extend({},b.DEFAULTS,d),this.transitioning=null,this.options.parent&&(this.$parent=a(this.options.parent)),this.options.toggle&&this.toggle()};b.DEFAULTS={toggle:!0},b.prototype.dimension=function(){var a=this.$element.hasClass("width");return a?"width":"height"},b.prototype.show=function(){if(!this.transitioning&&!this.$element.hasClass("in")){var b=a.Event("show.bs.collapse");if(this.$element.trigger(b),!b.isDefaultPrevented()){var c=this.$parent&&this.$parent.find("> .panel > .in");if(c&&c.length){var d=c.data("bs.collapse");if(d&&d.transitioning)return;c.collapse("hide"),d||c.data("bs.collapse",null)}var e=this.dimension();this.$element.removeClass("collapse").addClass("collapsing")[e](0),this.transitioning=1;var f=function(){this.$element.removeClass("collapsing").addClass("collapse in")[e]("auto"),this.transitioning=0,this.$element.trigger("shown.bs.collapse")};if(!a.support.transition)return f.call(this);var g=a.camelCase(["scroll",e].join("-"));this.$element.one(a.support.transition.end,a.proxy(f,this)).emulateTransitionEnd(350)[e](this.$element[0][g])}}},b.prototype.hide=function(){if(!this.transitioning&&this.$element.hasClass("in")){var 
b=a.Event("hide.bs.collapse");if(this.$element.trigger(b),!b.isDefaultPrevented()){var c=this.dimension();this.$element[c](this.$element[c]())[0].offsetHeight,this.$element.addClass("collapsing").removeClass("collapse").removeClass("in"),this.transitioning=1;var d=function(){this.transitioning=0,this.$element.trigger("hidden.bs.collapse").removeClass("collapsing").addClass("collapse")};return a.support.transition?void this.$element[c](0).one(a.support.transition.end,a.proxy(d,this)).emulateTransitionEnd(350):d.call(this)}}},b.prototype.toggle=function(){this[this.$element.hasClass("in")?"hide":"show"]()};var c=a.fn.collapse;a.fn.collapse=function(c){return this.each(function(){var d=a(this),e=d.data("bs.collapse"),f=a.extend({},b.DEFAULTS,d.data(),"object"==typeof c&&c);!e&&f.toggle&&"show"==c&&(c=!c),e||d.data("bs.collapse",e=new b(this,f)),"string"==typeof c&&e[c]()})},a.fn.collapse.Constructor=b,a.fn.collapse.noConflict=function(){return a.fn.collapse=c,this},a(document).on("click.bs.collapse.data-api","[data-toggle=collapse]",function(b){var c,d=a(this),e=d.attr("data-target")||b.preventDefault()||(c=d.attr("href"))&&c.replace(/.*(?=#[^\s]+$)/,""),f=a(e),g=f.data("bs.collapse"),h=g?"toggle":d.data(),i=d.attr("data-parent"),j=i&&a(i);g&&g.transitioning||(j&&j.find('[data-toggle=collapse][data-parent="'+i+'"]').not(d).addClass("collapsed"),d[f.hasClass("in")?"addClass":"removeClass"]("collapsed")),f.collapse(h)})}(jQuery),+function(a){"use strict";function b(b){a(d).remove(),a(e).each(function(){var d=c(a(this)),e={relatedTarget:this};d.hasClass("open")&&(d.trigger(b=a.Event("hide.bs.dropdown",e)),b.isDefaultPrevented()||d.removeClass("open").trigger("hidden.bs.dropdown",e))})}function c(b){var c=b.attr("data-target");c||(c=b.attr("href"),c=c&&/#[A-Za-z]/.test(c)&&c.replace(/.*(?=#[^\s]*$)/,""));var d=c&&a(c);return d&&d.length?d:b.parent()}var 
d=".dropdown-backdrop",e="[data-toggle=dropdown]",f=function(b){a(b).on("click.bs.dropdown",this.toggle)};f.prototype.toggle=function(d){var e=a(this);if(!e.is(".disabled, :disabled")){var f=c(e),g=f.hasClass("open");if(b(),!g){"ontouchstart"in document.documentElement&&!f.closest(".navbar-nav").length&&a('<div class="dropdown-backdrop"/>').insertAfter(a(this)).on("click",b);var h={relatedTarget:this};if(f.trigger(d=a.Event("show.bs.dropdown",h)),d.isDefaultPrevented())return;f.toggleClass("open").trigger("shown.bs.dropdown",h),e.focus()}return!1}},f.prototype.keydown=function(b){if(/(38|40|27)/.test(b.keyCode)){var d=a(this);if(b.preventDefault(),b.stopPropagation(),!d.is(".disabled, :disabled")){var f=c(d),g=f.hasClass("open");if(!g||g&&27==b.keyCode)return 27==b.which&&f.find(e).focus(),d.click();var h=" li:not(.divider):visible a",i=f.find("[role=menu]"+h+", [role=listbox]"+h);if(i.length){var j=i.index(i.filter(":focus"));38==b.keyCode&&j>0&&j--,40==b.keyCode&&j<i.length-1&&j++,~j||(j=0),i.eq(j).focus()}}}};var g=a.fn.dropdown;a.fn.dropdown=function(b){return this.each(function(){var c=a(this),d=c.data("bs.dropdown");d||c.data("bs.dropdown",d=new f(this)),"string"==typeof b&&d[b].call(c)})},a.fn.dropdown.Constructor=f,a.fn.dropdown.noConflict=function(){return a.fn.dropdown=g,this},a(document).on("click.bs.dropdown.data-api",b).on("click.bs.dropdown.data-api",".dropdown form",function(a){a.stopPropagation()}).on("click.bs.dropdown.data-api",e,f.prototype.toggle).on("keydown.bs.dropdown.data-api",e+", [role=menu], [role=listbox]",f.prototype.keydown)}(jQuery),+function(a){"use strict";var b=function(b,c){this.options=c,this.$element=a(b),this.$backdrop=this.isShown=null,this.options.remote&&this.$element.find(".modal-content").load(this.options.remote,a.proxy(function(){this.$element.trigger("loaded.bs.modal")},this))};b.DEFAULTS={backdrop:!0,keyboard:!0,show:!0},b.prototype.toggle=function(a){return 
this[this.isShown?"hide":"show"](a)},b.prototype.show=function(b){var c=this,d=a.Event("show.bs.modal",{relatedTarget:b});this.$element.trigger(d),this.isShown||d.isDefaultPrevented()||(this.isShown=!0,this.escape(),this.$element.on("click.dismiss.bs.modal",'[data-dismiss="modal"]',a.proxy(this.hide,this)),this.backdrop(function(){var d=a.support.transition&&c.$element.hasClass("fade");c.$element.parent().length||c.$element.appendTo(document.body),c.$element.show().scrollTop(0),d&&c.$element[0].offsetWidth,c.$element.addClass("in").attr("aria-hidden",!1),c.enforceFocus();var e=a.Event("shown.bs.modal",{relatedTarget:b});d?c.$element.find(".modal-dialog").one(a.support.transition.end,function(){c.$element.focus().trigger(e)}).emulateTransitionEnd(300):c.$element.focus().trigger(e)}))},b.prototype.hide=function(b){b&&b.preventDefault(),b=a.Event("hide.bs.modal"),this.$element.trigger(b),this.isShown&&!b.isDefaultPrevented()&&(this.isShown=!1,this.escape(),a(document).off("focusin.bs.modal"),this.$element.removeClass("in").attr("aria-hidden",!0).off("click.dismiss.bs.modal"),a.support.transition&&this.$element.hasClass("fade")?this.$element.one(a.support.transition.end,a.proxy(this.hideModal,this)).emulateTransitionEnd(300):this.hideModal())},b.prototype.enforceFocus=function(){a(document).off("focusin.bs.modal").on("focusin.bs.modal",a.proxy(function(a){this.$element[0]===a.target||this.$element.has(a.target).length||this.$element.focus()},this))},b.prototype.escape=function(){this.isShown&&this.options.keyboard?this.$element.on("keyup.dismiss.bs.modal",a.proxy(function(a){27==a.which&&this.hide()},this)):this.isShown||this.$element.off("keyup.dismiss.bs.modal")},b.prototype.hideModal=function(){var a=this;this.$element.hide(),this.backdrop(function(){a.removeBackdrop(),a.$element.trigger("hidden.bs.modal")})},b.prototype.removeBackdrop=function(){this.$backdrop&&this.$backdrop.remove(),this.$backdrop=null},b.prototype.backdrop=function(b){var 
c=this.$element.hasClass("fade")?"fade":"";if(this.isShown&&this.options.backdrop){var d=a.support.transition&&c;if(this.$backdrop=a('<div class="modal-backdrop '+c+'" />').appendTo(document.body),this.$element.on("click.dismiss.bs.modal",a.proxy(function(a){a.target===a.currentTarget&&("static"==this.options.backdrop?this.$element[0].focus.call(this.$element[0]):this.hide.call(this))},this)),d&&this.$backdrop[0].offsetWidth,this.$backdrop.addClass("in"),!b)return;d?this.$backdrop.one(a.support.transition.end,b).emulateTransitionEnd(150):b()}else!this.isShown&&this.$backdrop?(this.$backdrop.removeClass("in"),a.support.transition&&this.$element.hasClass("fade")?this.$backdrop.one(a.support.transition.end,b).emulateTransitionEnd(150):b()):b&&b()};var c=a.fn.modal;a.fn.modal=function(c,d){return this.each(function(){var e=a(this),f=e.data("bs.modal"),g=a.extend({},b.DEFAULTS,e.data(),"object"==typeof c&&c);f||e.data("bs.modal",f=new b(this,g)),"string"==typeof c?f[c](d):g.show&&f.show(d)})},a.fn.modal.Constructor=b,a.fn.modal.noConflict=function(){return a.fn.modal=c,this},a(document).on("click.bs.modal.data-api",'[data-toggle="modal"]',function(b){var c=a(this),d=c.attr("href"),e=a(c.attr("data-target")||d&&d.replace(/.*(?=#[^\s]+$)/,"")),f=e.data("bs.modal")?"toggle":a.extend({remote:!/#/.test(d)&&d},e.data(),c.data());c.is("a")&&b.preventDefault(),e.modal(f,this).one("hide",function(){c.is(":visible")&&c.focus()})}),a(document).on("show.bs.modal",".modal",function(){a(document.body).addClass("modal-open")}).on("hidden.bs.modal",".modal",function(){a(document.body).removeClass("modal-open")})}(jQuery),+function(a){"use strict";var b=function(a,b){this.type=this.options=this.enabled=this.timeout=this.hoverState=this.$element=null,this.init("tooltip",a,b)};b.DEFAULTS={animation:!0,placement:"top",selector:!1,template:'<div class="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>',trigger:"hover 
focus",title:"",delay:0,html:!1,container:!1},b.prototype.init=function(b,c,d){this.enabled=!0,this.type=b,this.$element=a(c),this.options=this.getOptions(d);for(var e=this.options.trigger.split(" "),f=e.length;f--;){var g=e[f];if("click"==g)this.$element.on("click."+this.type,this.options.selector,a.proxy(this.toggle,this));else if("manual"!=g){var h="hover"==g?"mouseenter":"focusin",i="hover"==g?"mouseleave":"focusout";this.$element.on(h+"."+this.type,this.options.selector,a.proxy(this.enter,this)),this.$element.on(i+"."+this.type,this.options.selector,a.proxy(this.leave,this))}}this.options.selector?this._options=a.extend({},this.options,{trigger:"manual",selector:""}):this.fixTitle()},b.prototype.getDefaults=function(){return b.DEFAULTS},b.prototype.getOptions=function(b){return b=a.extend({},this.getDefaults(),this.$element.data(),b),b.delay&&"number"==typeof b.delay&&(b.delay={show:b.delay,hide:b.delay}),b},b.prototype.getDelegateOptions=function(){var b={},c=this.getDefaults();return this._options&&a.each(this._options,function(a,d){c[a]!=d&&(b[a]=d)}),b},b.prototype.enter=function(b){var c=b instanceof this.constructor?b:a(b.currentTarget)[this.type](this.getDelegateOptions()).data("bs."+this.type);return clearTimeout(c.timeout),c.hoverState="in",c.options.delay&&c.options.delay.show?void(c.timeout=setTimeout(function(){"in"==c.hoverState&&c.show()},c.options.delay.show)):c.show()},b.prototype.leave=function(b){var c=b instanceof this.constructor?b:a(b.currentTarget)[this.type](this.getDelegateOptions()).data("bs."+this.type);return clearTimeout(c.timeout),c.hoverState="out",c.options.delay&&c.options.delay.hide?void(c.timeout=setTimeout(function(){"out"==c.hoverState&&c.hide()},c.options.delay.hide)):c.hide()},b.prototype.show=function(){var b=a.Event("show.bs."+this.type);if(this.hasContent()&&this.enabled){if(this.$element.trigger(b),b.isDefaultPrevented())return;var c=this,d=this.tip();this.setContent(),this.options.animation&&d.addClass("fade");var 
e="function"==typeof this.options.placement?this.options.placement.call(this,d[0],this.$element[0]):this.options.placement,f=/\s?auto?\s?/i,g=f.test(e);g&&(e=e.replace(f,"")||"top"),d.detach().css({top:0,left:0,display:"block"}).addClass(e),this.options.container?d.appendTo(this.options.container):d.insertAfter(this.$element);var h=this.getPosition(),i=d[0].offsetWidth,j=d[0].offsetHeight;if(g){var k=this.$element.parent(),l=e,m=document.documentElement.scrollTop||document.body.scrollTop,n="body"==this.options.container?window.innerWidth:k.outerWidth(),o="body"==this.options.container?window.innerHeight:k.outerHeight(),p="body"==this.options.container?0:k.offset().left;e="bottom"==e&&h.top+h.height+j-m>o?"top":"top"==e&&h.top-m-j<0?"bottom":"right"==e&&h.right+i>n?"left":"left"==e&&h.left-i<p?"right":e,d.removeClass(l).addClass(e)}var q=this.getCalculatedOffset(e,h,i,j);this.applyPlacement(q,e),this.hoverState=null;var r=function(){c.$element.trigger("shown.bs."+c.type)};a.support.transition&&this.$tip.hasClass("fade")?d.one(a.support.transition.end,r).emulateTransitionEnd(150):r()}},b.prototype.applyPlacement=function(b,c){var d,e=this.tip(),f=e[0].offsetWidth,g=e[0].offsetHeight,h=parseInt(e.css("margin-top"),10),i=parseInt(e.css("margin-left"),10);isNaN(h)&&(h=0),isNaN(i)&&(i=0),b.top=b.top+h,b.left=b.left+i,a.offset.setOffset(e[0],a.extend({using:function(a){e.css({top:Math.round(a.top),left:Math.round(a.left)})}},b),0),e.addClass("in");var j=e[0].offsetWidth,k=e[0].offsetHeight;if("top"==c&&k!=g&&(d=!0,b.top=b.top+g-k),/bottom|top/.test(c)){var l=0;b.left<0&&(l=-2*b.left,b.left=0,e.offset(b),j=e[0].offsetWidth,k=e[0].offsetHeight),this.replaceArrow(l-f+j,j,"left")}else this.replaceArrow(k-g,k,"top");d&&e.offset(b)},b.prototype.replaceArrow=function(a,b,c){this.arrow().css(c,a?50*(1-a/b)+"%":"")},b.prototype.setContent=function(){var a=this.tip(),b=this.getTitle();a.find(".tooltip-inner")[this.options.html?"html":"text"](b),a.removeClass("fade in top bottom 
left right")},b.prototype.hide=function(){function b(){"in"!=c.hoverState&&d.detach(),c.$element.trigger("hidden.bs."+c.type)}var c=this,d=this.tip(),e=a.Event("hide.bs."+this.type);return this.$element.trigger(e),e.isDefaultPrevented()?void 0:(d.removeClass("in"),a.support.transition&&this.$tip.hasClass("fade")?d.one(a.support.transition.end,b).emulateTransitionEnd(150):b(),this.hoverState=null,this)},b.prototype.fixTitle=function(){var a=this.$element;(a.attr("title")||"string"!=typeof a.attr("data-original-title"))&&a.attr("data-original-title",a.attr("title")||"").attr("title","")},b.prototype.hasContent=function(){return this.getTitle()},b.prototype.getPosition=function(){var b=this.$element[0];return a.extend({},"function"==typeof b.getBoundingClientRect?b.getBoundingClientRect():{width:b.offsetWidth,height:b.offsetHeight},this.$element.offset())},b.prototype.getCalculatedOffset=function(a,b,c,d){return"bottom"==a?{top:b.top+b.height,left:b.left+b.width/2-c/2}:"top"==a?{top:b.top-d,left:b.left+b.width/2-c/2}:"left"==a?{top:b.top+b.height/2-d/2,left:b.left-c}:{top:b.top+b.height/2-d/2,left:b.left+b.width}},b.prototype.getTitle=function(){var a,b=this.$element,c=this.options;return a=b.attr("data-original-title")||("function"==typeof c.title?c.title.call(b[0]):c.title)},b.prototype.tip=function(){return this.$tip=this.$tip||a(this.options.template)},b.prototype.arrow=function(){return this.$arrow=this.$arrow||this.tip().find(".tooltip-arrow")},b.prototype.validate=function(){this.$element[0].parentNode||(this.hide(),this.$element=null,this.options=null)},b.prototype.enable=function(){this.enabled=!0},b.prototype.disable=function(){this.enabled=!1},b.prototype.toggleEnabled=function(){this.enabled=!this.enabled},b.prototype.toggle=function(b){var 
c=b?a(b.currentTarget)[this.type](this.getDelegateOptions()).data("bs."+this.type):this;c.tip().hasClass("in")?c.leave(c):c.enter(c)},b.prototype.destroy=function(){clearTimeout(this.timeout),this.hide().$element.off("."+this.type).removeData("bs."+this.type)};var c=a.fn.tooltip;a.fn.tooltip=function(c){return this.each(function(){var d=a(this),e=d.data("bs.tooltip"),f="object"==typeof c&&c;(e||"destroy"!=c)&&(e||d.data("bs.tooltip",e=new b(this,f)),"string"==typeof c&&e[c]())})},a.fn.tooltip.Constructor=b,a.fn.tooltip.noConflict=function(){return a.fn.tooltip=c,this}}(jQuery),+function(a){"use strict";var b=function(a,b){this.init("popover",a,b)};if(!a.fn.tooltip)throw new Error("Popover requires tooltip.js");b.DEFAULTS=a.extend({},a.fn.tooltip.Constructor.DEFAULTS,{placement:"right",trigger:"click",content:"",template:'<div class="popover"><div class="arrow"></div><h3 class="popover-title"></h3><div class="popover-content"></div></div>'}),b.prototype=a.extend({},a.fn.tooltip.Constructor.prototype),b.prototype.constructor=b,b.prototype.getDefaults=function(){return b.DEFAULTS},b.prototype.setContent=function(){var a=this.tip(),b=this.getTitle(),c=this.getContent();a.find(".popover-title")[this.options.html?"html":"text"](b),a.find(".popover-content")[this.options.html?"string"==typeof c?"html":"append":"text"](c),a.removeClass("fade top bottom left right in"),a.find(".popover-title").html()||a.find(".popover-title").hide()},b.prototype.hasContent=function(){return this.getTitle()||this.getContent()},b.prototype.getContent=function(){var a=this.$element,b=this.options;return a.attr("data-content")||("function"==typeof b.content?b.content.call(a[0]):b.content)},b.prototype.arrow=function(){return this.$arrow=this.$arrow||this.tip().find(".arrow")},b.prototype.tip=function(){return this.$tip||(this.$tip=a(this.options.template)),this.$tip};var c=a.fn.popover;a.fn.popover=function(c){return this.each(function(){var d=a(this),e=d.data("bs.popover"),f="object"==typeof 
c&&c;(e||"destroy"!=c)&&(e||d.data("bs.popover",e=new b(this,f)),"string"==typeof c&&e[c]())})},a.fn.popover.Constructor=b,a.fn.popover.noConflict=function(){return a.fn.popover=c,this}}(jQuery),+function(a){"use strict";function b(c,d){var e,f=a.proxy(this.process,this);this.$element=a(a(c).is("body")?window:c),this.$body=a("body"),this.$scrollElement=this.$element.on("scroll.bs.scroll-spy.data-api",f),this.options=a.extend({},b.DEFAULTS,d),this.selector=(this.options.target||(e=a(c).attr("href"))&&e.replace(/.*(?=#[^\s]+$)/,"")||"")+" .nav li > a",this.offsets=a([]),this.targets=a([]),this.activeTarget=null,this.refresh(),this.process()}b.DEFAULTS={offset:10},b.prototype.refresh=function(){var b=this.$element[0]==window?"offset":"position";this.offsets=a([]),this.targets=a([]);{var c=this;this.$body.find(this.selector).map(function(){var d=a(this),e=d.data("target")||d.attr("href"),f=/^#./.test(e)&&a(e);return f&&f.length&&f.is(":visible")&&[[f[b]().top+(!a.isWindow(c.$scrollElement.get(0))&&c.$scrollElement.scrollTop()),e]]||null}).sort(function(a,b){return a[0]-b[0]}).each(function(){c.offsets.push(this[0]),c.targets.push(this[1])})}},b.prototype.process=function(){var a,b=this.$scrollElement.scrollTop()+this.options.offset,c=this.$scrollElement[0].scrollHeight||this.$body[0].scrollHeight,d=c-this.$scrollElement.height(),e=this.offsets,f=this.targets,g=this.activeTarget;if(b>=d)return g!=(a=f.last()[0])&&this.activate(a);if(g&&b<=e[0])return g!=(a=f[0])&&this.activate(a);for(a=e.length;a--;)g!=f[a]&&b>=e[a]&&(!e[a+1]||b<=e[a+1])&&this.activate(f[a])},b.prototype.activate=function(b){this.activeTarget=b,a(this.selector).parentsUntil(this.options.target,".active").removeClass("active");var c=this.selector+'[data-target="'+b+'"],'+this.selector+'[href="'+b+'"]',d=a(c).parents("li").addClass("active");d.parent(".dropdown-menu").length&&(d=d.closest("li.dropdown").addClass("active")),d.trigger("activate.bs.scrollspy")};var 
c=a.fn.scrollspy;a.fn.scrollspy=function(c){return this.each(function(){var d=a(this),e=d.data("bs.scrollspy"),f="object"==typeof c&&c;e||d.data("bs.scrollspy",e=new b(this,f)),"string"==typeof c&&e[c]()})},a.fn.scrollspy.Constructor=b,a.fn.scrollspy.noConflict=function(){return a.fn.scrollspy=c,this},a(window).on("load",function(){a('[data-spy="scroll"]').each(function(){var b=a(this);b.scrollspy(b.data())})})}(jQuery),+function(a){"use strict";var b=function(b){this.element=a(b)};b.prototype.show=function(){var b=this.element,c=b.closest("ul:not(.dropdown-menu)"),d=b.data("target");if(d||(d=b.attr("href"),d=d&&d.replace(/.*(?=#[^\s]*$)/,"")),!b.parent("li").hasClass("active")){var e=c.find(".active:last a")[0],f=a.Event("show.bs.tab",{relatedTarget:e});if(b.trigger(f),!f.isDefaultPrevented()){var g=a(d);this.activate(b.parent("li"),c),this.activate(g,g.parent(),function(){b.trigger({type:"shown.bs.tab",relatedTarget:e})})}}},b.prototype.activate=function(b,c,d){function e(){f.removeClass("active").find("> .dropdown-menu > .active").removeClass("active"),b.addClass("active"),g?(b[0].offsetWidth,b.addClass("in")):b.removeClass("fade"),b.parent(".dropdown-menu")&&b.closest("li.dropdown").addClass("active"),d&&d()}var f=c.find("> .active"),g=d&&a.support.transition&&f.hasClass("fade");g?f.one(a.support.transition.end,e).emulateTransitionEnd(150):e(),f.removeClass("in")};var c=a.fn.tab;a.fn.tab=function(c){return this.each(function(){var d=a(this),e=d.data("bs.tab");e||d.data("bs.tab",e=new b(this)),"string"==typeof c&&e[c]()})},a.fn.tab.Constructor=b,a.fn.tab.noConflict=function(){return a.fn.tab=c,this},a(document).on("click.bs.tab.data-api",'[data-toggle="tab"], [data-toggle="pill"]',function(b){b.preventDefault(),a(this).tab("show")})}(jQuery),+function(a){"use strict";var 
b=function(c,d){this.options=a.extend({},b.DEFAULTS,d),this.$window=a(window).on("scroll.bs.affix.data-api",a.proxy(this.checkPosition,this)).on("click.bs.affix.data-api",a.proxy(this.checkPositionWithEventLoop,this)),this.$element=a(c),this.affixed=this.unpin=this.pinnedOffset=null,this.checkPosition()};b.RESET="affix affix-top affix-bottom",b.DEFAULTS={offset:0},b.prototype.getPinnedOffset=function(){if(this.pinnedOffset)return this.pinnedOffset;this.$element.removeClass(b.RESET).addClass("affix");var a=this.$window.scrollTop(),c=this.$element.offset();return this.pinnedOffset=c.top-a},b.prototype.checkPositionWithEventLoop=function(){setTimeout(a.proxy(this.checkPosition,this),1)},b.prototype.checkPosition=function(){if(this.$element.is(":visible")){var c=a(document).height(),d=this.$window.scrollTop(),e=this.$element.offset(),f=this.options.offset,g=f.top,h=f.bottom;"top"==this.affixed&&(e.top+=d),"object"!=typeof f&&(h=g=f),"function"==typeof g&&(g=f.top(this.$element)),"function"==typeof h&&(h=f.bottom(this.$element));var i=null!=this.unpin&&d+this.unpin<=e.top?!1:null!=h&&e.top+this.$element.height()>=c-h?"bottom":null!=g&&g>=d?"top":!1;if(this.affixed!==i){this.unpin&&this.$element.css("top","");var j="affix"+(i?"-"+i:""),k=a.Event(j+".bs.affix");this.$element.trigger(k),k.isDefaultPrevented()||(this.affixed=i,this.unpin="bottom"==i?this.getPinnedOffset():null,this.$element.removeClass(b.RESET).addClass(j).trigger(a.Event(j.replace("affix","affixed"))),"bottom"==i&&this.$element.offset({top:c-h-this.$element.height()}))}}};var c=a.fn.affix;a.fn.affix=function(c){return this.each(function(){var d=a(this),e=d.data("bs.affix"),f="object"==typeof c&&c;e||d.data("bs.affix",e=new b(this,f)),"string"==typeof c&&e[c]()})},a.fn.affix.Constructor=b,a.fn.affix.noConflict=function(){return a.fn.affix=c,this},a(window).on("load",function(){a('[data-spy="affix"]').each(function(){var 
b=a(this),c=b.data();c.offset=c.offset||{},c.offsetBottom&&(c.offset.bottom=c.offsetBottom),c.offsetTop&&(c.offset.top=c.offsetTop),b.affix(c)})})}(jQuery);
\ No newline at end of file
diff --git a/doc/js/jquery.min.js b/doc/js/jquery.min.js
new file mode 100644 (file)
index 0000000..006e953
--- /dev/null
@@ -0,0 +1,5 @@
+/*! jQuery v1.9.1 | (c) 2005, 2012 jQuery Foundation, Inc. | jquery.org/license
+//@ sourceMappingURL=jquery.min.map
+*/(function(e,t){var n,r,i=typeof t,o=e.document,a=e.location,s=e.jQuery,u=e.$,l={},c=[],p="1.9.1",f=c.concat,d=c.push,h=c.slice,g=c.indexOf,m=l.toString,y=l.hasOwnProperty,v=p.trim,b=function(e,t){return new b.fn.init(e,t,r)},x=/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,w=/\S+/g,T=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,N=/^(?:(<[\w\W]+>)[^>]*|#([\w-]*))$/,C=/^<(\w+)\s*\/?>(?:<\/\1>|)$/,k=/^[\],:{}\s]*$/,E=/(?:^|:|,)(?:\s*\[)+/g,S=/\\(?:["\\\/bfnrt]|u[\da-fA-F]{4})/g,A=/"[^"\\\r\n]*"|true|false|null|-?(?:\d+\.|)\d+(?:[eE][+-]?\d+|)/g,j=/^-ms-/,D=/-([\da-z])/gi,L=function(e,t){return t.toUpperCase()},H=function(e){(o.addEventListener||"load"===e.type||"complete"===o.readyState)&&(q(),b.ready())},q=function(){o.addEventListener?(o.removeEventListener("DOMContentLoaded",H,!1),e.removeEventListener("load",H,!1)):(o.detachEvent("onreadystatechange",H),e.detachEvent("onload",H))};b.fn=b.prototype={jquery:p,constructor:b,init:function(e,n,r){var i,a;if(!e)return this;if("string"==typeof e){if(i="<"===e.charAt(0)&&">"===e.charAt(e.length-1)&&e.length>=3?[null,e,null]:N.exec(e),!i||!i[1]&&n)return!n||n.jquery?(n||r).find(e):this.constructor(n).find(e);if(i[1]){if(n=n instanceof b?n[0]:n,b.merge(this,b.parseHTML(i[1],n&&n.nodeType?n.ownerDocument||n:o,!0)),C.test(i[1])&&b.isPlainObject(n))for(i in n)b.isFunction(this[i])?this[i](n[i]):this.attr(i,n[i]);return this}if(a=o.getElementById(i[2]),a&&a.parentNode){if(a.id!==i[2])return r.find(e);this.length=1,this[0]=a}return this.context=o,this.selector=e,this}return e.nodeType?(this.context=this[0]=e,this.length=1,this):b.isFunction(e)?r.ready(e):(e.selector!==t&&(this.selector=e.selector,this.context=e.context),b.makeArray(e,this))},selector:"",length:0,size:function(){return this.length},toArray:function(){return h.call(this)},get:function(e){return null==e?this.toArray():0>e?this[this.length+e]:this[e]},pushStack:function(e){var t=b.merge(this.constructor(),e);return 
t.prevObject=this,t.context=this.context,t},each:function(e,t){return b.each(this,e,t)},ready:function(e){return b.ready.promise().done(e),this},slice:function(){return this.pushStack(h.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(e){var t=this.length,n=+e+(0>e?t:0);return this.pushStack(n>=0&&t>n?[this[n]]:[])},map:function(e){return this.pushStack(b.map(this,function(t,n){return e.call(t,n,t)}))},end:function(){return this.prevObject||this.constructor(null)},push:d,sort:[].sort,splice:[].splice},b.fn.init.prototype=b.fn,b.extend=b.fn.extend=function(){var e,n,r,i,o,a,s=arguments[0]||{},u=1,l=arguments.length,c=!1;for("boolean"==typeof s&&(c=s,s=arguments[1]||{},u=2),"object"==typeof s||b.isFunction(s)||(s={}),l===u&&(s=this,--u);l>u;u++)if(null!=(o=arguments[u]))for(i in o)e=s[i],r=o[i],s!==r&&(c&&r&&(b.isPlainObject(r)||(n=b.isArray(r)))?(n?(n=!1,a=e&&b.isArray(e)?e:[]):a=e&&b.isPlainObject(e)?e:{},s[i]=b.extend(c,a,r)):r!==t&&(s[i]=r));return s},b.extend({noConflict:function(t){return e.$===b&&(e.$=u),t&&e.jQuery===b&&(e.jQuery=s),b},isReady:!1,readyWait:1,holdReady:function(e){e?b.readyWait++:b.ready(!0)},ready:function(e){if(e===!0?!--b.readyWait:!b.isReady){if(!o.body)return setTimeout(b.ready);b.isReady=!0,e!==!0&&--b.readyWait>0||(n.resolveWith(o,[b]),b.fn.trigger&&b(o).trigger("ready").off("ready"))}},isFunction:function(e){return"function"===b.type(e)},isArray:Array.isArray||function(e){return"array"===b.type(e)},isWindow:function(e){return null!=e&&e==e.window},isNumeric:function(e){return!isNaN(parseFloat(e))&&isFinite(e)},type:function(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?l[m.call(e)]||"object":typeof e},isPlainObject:function(e){if(!e||"object"!==b.type(e)||e.nodeType||b.isWindow(e))return!1;try{if(e.constructor&&!y.call(e,"constructor")&&!y.call(e.constructor.prototype,"isPrototypeOf"))return!1}catch(n){return!1}var r;for(r in e);return 
r===t||y.call(e,r)},isEmptyObject:function(e){var t;for(t in e)return!1;return!0},error:function(e){throw Error(e)},parseHTML:function(e,t,n){if(!e||"string"!=typeof e)return null;"boolean"==typeof t&&(n=t,t=!1),t=t||o;var r=C.exec(e),i=!n&&[];return r?[t.createElement(r[1])]:(r=b.buildFragment([e],t,i),i&&b(i).remove(),b.merge([],r.childNodes))},parseJSON:function(n){return e.JSON&&e.JSON.parse?e.JSON.parse(n):null===n?n:"string"==typeof n&&(n=b.trim(n),n&&k.test(n.replace(S,"@").replace(A,"]").replace(E,"")))?Function("return "+n)():(b.error("Invalid JSON: "+n),t)},parseXML:function(n){var r,i;if(!n||"string"!=typeof n)return null;try{e.DOMParser?(i=new DOMParser,r=i.parseFromString(n,"text/xml")):(r=new ActiveXObject("Microsoft.XMLDOM"),r.async="false",r.loadXML(n))}catch(o){r=t}return r&&r.documentElement&&!r.getElementsByTagName("parsererror").length||b.error("Invalid XML: "+n),r},noop:function(){},globalEval:function(t){t&&b.trim(t)&&(e.execScript||function(t){e.eval.call(e,t)})(t)},camelCase:function(e){return e.replace(j,"ms-").replace(D,L)},nodeName:function(e,t){return e.nodeName&&e.nodeName.toLowerCase()===t.toLowerCase()},each:function(e,t,n){var r,i=0,o=e.length,a=M(e);if(n){if(a){for(;o>i;i++)if(r=t.apply(e[i],n),r===!1)break}else for(i in e)if(r=t.apply(e[i],n),r===!1)break}else if(a){for(;o>i;i++)if(r=t.call(e[i],i,e[i]),r===!1)break}else for(i in e)if(r=t.call(e[i],i,e[i]),r===!1)break;return e},trim:v&&!v.call("\ufeff\u00a0")?function(e){return null==e?"":v.call(e)}:function(e){return null==e?"":(e+"").replace(T,"")},makeArray:function(e,t){var n=t||[];return null!=e&&(M(Object(e))?b.merge(n,"string"==typeof e?[e]:e):d.call(n,e)),n},inArray:function(e,t,n){var r;if(t){if(g)return g.call(t,e,n);for(r=t.length,n=n?0>n?Math.max(0,r+n):n:0;r>n;n++)if(n in t&&t[n]===e)return n}return-1},merge:function(e,n){var r=n.length,i=e.length,o=0;if("number"==typeof r)for(;r>o;o++)e[i++]=n[o];else while(n[o]!==t)e[i++]=n[o++];return 
e.length=i,e},grep:function(e,t,n){var r,i=[],o=0,a=e.length;for(n=!!n;a>o;o++)r=!!t(e[o],o),n!==r&&i.push(e[o]);return i},map:function(e,t,n){var r,i=0,o=e.length,a=M(e),s=[];if(a)for(;o>i;i++)r=t(e[i],i,n),null!=r&&(s[s.length]=r);else for(i in e)r=t(e[i],i,n),null!=r&&(s[s.length]=r);return f.apply([],s)},guid:1,proxy:function(e,n){var r,i,o;return"string"==typeof n&&(o=e[n],n=e,e=o),b.isFunction(e)?(r=h.call(arguments,2),i=function(){return e.apply(n||this,r.concat(h.call(arguments)))},i.guid=e.guid=e.guid||b.guid++,i):t},access:function(e,n,r,i,o,a,s){var u=0,l=e.length,c=null==r;if("object"===b.type(r)){o=!0;for(u in r)b.access(e,n,u,r[u],!0,a,s)}else if(i!==t&&(o=!0,b.isFunction(i)||(s=!0),c&&(s?(n.call(e,i),n=null):(c=n,n=function(e,t,n){return c.call(b(e),n)})),n))for(;l>u;u++)n(e[u],r,s?i:i.call(e[u],u,n(e[u],r)));return o?e:c?n.call(e):l?n(e[0],r):a},now:function(){return(new Date).getTime()}}),b.ready.promise=function(t){if(!n)if(n=b.Deferred(),"complete"===o.readyState)setTimeout(b.ready);else if(o.addEventListener)o.addEventListener("DOMContentLoaded",H,!1),e.addEventListener("load",H,!1);else{o.attachEvent("onreadystatechange",H),e.attachEvent("onload",H);var r=!1;try{r=null==e.frameElement&&o.documentElement}catch(i){}r&&r.doScroll&&function a(){if(!b.isReady){try{r.doScroll("left")}catch(e){return setTimeout(a,50)}q(),b.ready()}}()}return n.promise(t)},b.each("Boolean Number String Function Array Date RegExp Object Error".split(" "),function(e,t){l["[object "+t+"]"]=t.toLowerCase()});function M(e){var t=e.length,n=b.type(e);return b.isWindow(e)?!1:1===e.nodeType&&t?!0:"array"===n||"function"!==n&&(0===t||"number"==typeof t&&t>0&&t-1 in e)}r=b(o);var _={};function F(e){var t=_[e]={};return b.each(e.match(w)||[],function(e,n){t[n]=!0}),t}b.Callbacks=function(e){e="string"==typeof e?_[e]||F(e):b.extend({},e);var 
n,r,i,o,a,s,u=[],l=!e.once&&[],c=function(t){for(r=e.memory&&t,i=!0,a=s||0,s=0,o=u.length,n=!0;u&&o>a;a++)if(u[a].apply(t[0],t[1])===!1&&e.stopOnFalse){r=!1;break}n=!1,u&&(l?l.length&&c(l.shift()):r?u=[]:p.disable())},p={add:function(){if(u){var t=u.length;(function i(t){b.each(t,function(t,n){var r=b.type(n);"function"===r?e.unique&&p.has(n)||u.push(n):n&&n.length&&"string"!==r&&i(n)})})(arguments),n?o=u.length:r&&(s=t,c(r))}return this},remove:function(){return u&&b.each(arguments,function(e,t){var r;while((r=b.inArray(t,u,r))>-1)u.splice(r,1),n&&(o>=r&&o--,a>=r&&a--)}),this},has:function(e){return e?b.inArray(e,u)>-1:!(!u||!u.length)},empty:function(){return u=[],this},disable:function(){return u=l=r=t,this},disabled:function(){return!u},lock:function(){return l=t,r||p.disable(),this},locked:function(){return!l},fireWith:function(e,t){return t=t||[],t=[e,t.slice?t.slice():t],!u||i&&!l||(n?l.push(t):c(t)),this},fire:function(){return p.fireWith(this,arguments),this},fired:function(){return!!i}};return p},b.extend({Deferred:function(e){var t=[["resolve","done",b.Callbacks("once memory"),"resolved"],["reject","fail",b.Callbacks("once memory"),"rejected"],["notify","progress",b.Callbacks("memory")]],n="pending",r={state:function(){return n},always:function(){return i.done(arguments).fail(arguments),this},then:function(){var e=arguments;return b.Deferred(function(n){b.each(t,function(t,o){var a=o[0],s=b.isFunction(e[t])&&e[t];i[o[1]](function(){var e=s&&s.apply(this,arguments);e&&b.isFunction(e.promise)?e.promise().done(n.resolve).fail(n.reject).progress(n.notify):n[a+"With"](this===r?n.promise():this,s?[e]:arguments)})}),e=null}).promise()},promise:function(e){return null!=e?b.extend(e,r):r}},i={};return r.pipe=r.then,b.each(t,function(e,o){var a=o[2],s=o[3];r[o[1]]=a.add,s&&a.add(function(){n=s},t[1^e][2].disable,t[2][2].lock),i[o[0]]=function(){return 
i[o[0]+"With"](this===i?r:this,arguments),this},i[o[0]+"With"]=a.fireWith}),r.promise(i),e&&e.call(i,i),i},when:function(e){var t=0,n=h.call(arguments),r=n.length,i=1!==r||e&&b.isFunction(e.promise)?r:0,o=1===i?e:b.Deferred(),a=function(e,t,n){return function(r){t[e]=this,n[e]=arguments.length>1?h.call(arguments):r,n===s?o.notifyWith(t,n):--i||o.resolveWith(t,n)}},s,u,l;if(r>1)for(s=Array(r),u=Array(r),l=Array(r);r>t;t++)n[t]&&b.isFunction(n[t].promise)?n[t].promise().done(a(t,l,n)).fail(o.reject).progress(a(t,u,s)):--i;return i||o.resolveWith(l,n),o.promise()}}),b.support=function(){var t,n,r,a,s,u,l,c,p,f,d=o.createElement("div");if(d.setAttribute("className","t"),d.innerHTML="  <link/><table></table><a href='/a'>a</a><input type='checkbox'/>",n=d.getElementsByTagName("*"),r=d.getElementsByTagName("a")[0],!n||!r||!n.length)return{};s=o.createElement("select"),l=s.appendChild(o.createElement("option")),a=d.getElementsByTagName("input")[0],r.style.cssText="top:1px;float:left;opacity:.5",t={getSetAttribute:"t"!==d.className,leadingWhitespace:3===d.firstChild.nodeType,tbody:!d.getElementsByTagName("tbody").length,htmlSerialize:!!d.getElementsByTagName("link").length,style:/top/.test(r.getAttribute("style")),hrefNormalized:"/a"===r.getAttribute("href"),opacity:/^0.5/.test(r.style.opacity),cssFloat:!!r.style.cssFloat,checkOn:!!a.value,optSelected:l.selected,enctype:!!o.createElement("form").enctype,html5Clone:"<:nav></:nav>"!==o.createElement("nav").cloneNode(!0).outerHTML,boxModel:"CSS1Compat"===o.compatMode,deleteExpando:!0,noCloneEvent:!0,inlineBlockNeedsLayout:!1,shrinkWrapBlocks:!1,reliableMarginRight:!0,boxSizingReliable:!0,pixelPosition:!1},a.checked=!0,t.noCloneChecked=a.cloneNode(!0).checked,s.disabled=!0,t.optDisabled=!l.disabled;try{delete 
d.test}catch(h){t.deleteExpando=!1}a=o.createElement("input"),a.setAttribute("value",""),t.input=""===a.getAttribute("value"),a.value="t",a.setAttribute("type","radio"),t.radioValue="t"===a.value,a.setAttribute("checked","t"),a.setAttribute("name","t"),u=o.createDocumentFragment(),u.appendChild(a),t.appendChecked=a.checked,t.checkClone=u.cloneNode(!0).cloneNode(!0).lastChild.checked,d.attachEvent&&(d.attachEvent("onclick",function(){t.noCloneEvent=!1}),d.cloneNode(!0).click());for(f in{submit:!0,change:!0,focusin:!0})d.setAttribute(c="on"+f,"t"),t[f+"Bubbles"]=c in e||d.attributes[c].expando===!1;return d.style.backgroundClip="content-box",d.cloneNode(!0).style.backgroundClip="",t.clearCloneStyle="content-box"===d.style.backgroundClip,b(function(){var n,r,a,s="padding:0;margin:0;border:0;display:block;box-sizing:content-box;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;",u=o.getElementsByTagName("body")[0];u&&(n=o.createElement("div"),n.style.cssText="border:0;width:0;height:0;position:absolute;top:0;left:-9999px;margin-top:1px",u.appendChild(n).appendChild(d),d.innerHTML="<table><tr><td></td><td>t</td></tr></table>",a=d.getElementsByTagName("td"),a[0].style.cssText="padding:0;margin:0;border:0;display:none",p=0===a[0].offsetHeight,a[0].style.display="",a[1].style.display="none",t.reliableHiddenOffsets=p&&0===a[0].offsetHeight,d.innerHTML="",d.style.cssText="box-sizing:border-box;-moz-box-sizing:border-box;-webkit-box-sizing:border-box;padding:1px;border:1px;display:block;width:4px;margin-top:1%;position:absolute;top:1%;",t.boxSizing=4===d.offsetWidth,t.doesNotIncludeMarginInBodyOffset=1!==u.offsetTop,e.getComputedStyle&&(t.pixelPosition="1%"!==(e.getComputedStyle(d,null)||{}).top,t.boxSizingReliable="4px"===(e.getComputedStyle(d,null)||{width:"4px"}).width,r=d.appendChild(o.createElement("div")),r.style.cssText=d.style.cssText=s,r.style.marginRight=r.style.width="0",d.style.width="1px",t.reliableMarginRight=!parseFloat((e.getComputedStyle(r,null)||{}).
marginRight)),typeof d.style.zoom!==i&&(d.innerHTML="",d.style.cssText=s+"width:1px;padding:1px;display:inline;zoom:1",t.inlineBlockNeedsLayout=3===d.offsetWidth,d.style.display="block",d.innerHTML="<div></div>",d.firstChild.style.width="5px",t.shrinkWrapBlocks=3!==d.offsetWidth,t.inlineBlockNeedsLayout&&(u.style.zoom=1)),u.removeChild(n),n=d=a=r=null)}),n=s=u=l=r=a=null,t}();var O=/(?:\{[\s\S]*\}|\[[\s\S]*\])$/,B=/([A-Z])/g;function P(e,n,r,i){if(b.acceptData(e)){var o,a,s=b.expando,u="string"==typeof n,l=e.nodeType,p=l?b.cache:e,f=l?e[s]:e[s]&&s;if(f&&p[f]&&(i||p[f].data)||!u||r!==t)return f||(l?e[s]=f=c.pop()||b.guid++:f=s),p[f]||(p[f]={},l||(p[f].toJSON=b.noop)),("object"==typeof n||"function"==typeof n)&&(i?p[f]=b.extend(p[f],n):p[f].data=b.extend(p[f].data,n)),o=p[f],i||(o.data||(o.data={}),o=o.data),r!==t&&(o[b.camelCase(n)]=r),u?(a=o[n],null==a&&(a=o[b.camelCase(n)])):a=o,a}}function R(e,t,n){if(b.acceptData(e)){var r,i,o,a=e.nodeType,s=a?b.cache:e,u=a?e[b.expando]:b.expando;if(s[u]){if(t&&(o=n?s[u]:s[u].data)){b.isArray(t)?t=t.concat(b.map(t,b.camelCase)):t in o?t=[t]:(t=b.camelCase(t),t=t in o?[t]:t.split(" "));for(r=0,i=t.length;i>r;r++)delete o[t[r]];if(!(n?$:b.isEmptyObject)(o))return}(n||(delete s[u].data,$(s[u])))&&(a?b.cleanData([e],!0):b.support.deleteExpando||s!=s.window?delete s[u]:s[u]=null)}}}b.extend({cache:{},expando:"jQuery"+(p+Math.random()).replace(/\D/g,""),noData:{embed:!0,object:"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000",applet:!0},hasData:function(e){return e=e.nodeType?b.cache[e[b.expando]]:e[b.expando],!!e&&!$(e)},data:function(e,t,n){return P(e,t,n)},removeData:function(e,t){return R(e,t)},_data:function(e,t,n){return P(e,t,n,!0)},_removeData:function(e,t){return R(e,t,!0)},acceptData:function(e){if(e.nodeType&&1!==e.nodeType&&9!==e.nodeType)return!1;var t=e.nodeName&&b.noData[e.nodeName.toLowerCase()];return!t||t!==!0&&e.getAttribute("classid")===t}}),b.fn.extend({data:function(e,n){var 
r,i,o=this[0],a=0,s=null;if(e===t){if(this.length&&(s=b.data(o),1===o.nodeType&&!b._data(o,"parsedAttrs"))){for(r=o.attributes;r.length>a;a++)i=r[a].name,i.indexOf("data-")||(i=b.camelCase(i.slice(5)),W(o,i,s[i]));b._data(o,"parsedAttrs",!0)}return s}return"object"==typeof e?this.each(function(){b.data(this,e)}):b.access(this,function(n){return n===t?o?W(o,e,b.data(o,e)):null:(this.each(function(){b.data(this,e,n)}),t)},null,n,arguments.length>1,null,!0)},removeData:function(e){return this.each(function(){b.removeData(this,e)})}});function W(e,n,r){if(r===t&&1===e.nodeType){var i="data-"+n.replace(B,"-$1").toLowerCase();if(r=e.getAttribute(i),"string"==typeof r){try{r="true"===r?!0:"false"===r?!1:"null"===r?null:+r+""===r?+r:O.test(r)?b.parseJSON(r):r}catch(o){}b.data(e,n,r)}else r=t}return r}function $(e){var t;for(t in e)if(("data"!==t||!b.isEmptyObject(e[t]))&&"toJSON"!==t)return!1;return!0}b.extend({queue:function(e,n,r){var i;return e?(n=(n||"fx")+"queue",i=b._data(e,n),r&&(!i||b.isArray(r)?i=b._data(e,n,b.makeArray(r)):i.push(r)),i||[]):t},dequeue:function(e,t){t=t||"fx";var n=b.queue(e,t),r=n.length,i=n.shift(),o=b._queueHooks(e,t),a=function(){b.dequeue(e,t)};"inprogress"===i&&(i=n.shift(),r--),o.cur=i,i&&("fx"===t&&n.unshift("inprogress"),delete o.stop,i.call(e,a,o)),!r&&o&&o.empty.fire()},_queueHooks:function(e,t){var n=t+"queueHooks";return b._data(e,n)||b._data(e,n,{empty:b.Callbacks("once memory").add(function(){b._removeData(e,t+"queue"),b._removeData(e,n)})})}}),b.fn.extend({queue:function(e,n){var r=2;return"string"!=typeof e&&(n=e,e="fx",r--),r>arguments.length?b.queue(this[0],e):n===t?this:this.each(function(){var t=b.queue(this,e,n);b._queueHooks(this,e),"fx"===e&&"inprogress"!==t[0]&&b.dequeue(this,e)})},dequeue:function(e){return this.each(function(){b.dequeue(this,e)})},delay:function(e,t){return e=b.fx?b.fx.speeds[e]||e:e,t=t||"fx",this.queue(t,function(t,n){var 
r=setTimeout(t,e);n.stop=function(){clearTimeout(r)}})},clearQueue:function(e){return this.queue(e||"fx",[])},promise:function(e,n){var r,i=1,o=b.Deferred(),a=this,s=this.length,u=function(){--i||o.resolveWith(a,[a])};"string"!=typeof e&&(n=e,e=t),e=e||"fx";while(s--)r=b._data(a[s],e+"queueHooks"),r&&r.empty&&(i++,r.empty.add(u));return u(),o.promise(n)}});var I,z,X=/[\t\r\n]/g,U=/\r/g,V=/^(?:input|select|textarea|button|object)$/i,Y=/^(?:a|area)$/i,J=/^(?:checked|selected|autofocus|autoplay|async|controls|defer|disabled|hidden|loop|multiple|open|readonly|required|scoped)$/i,G=/^(?:checked|selected)$/i,Q=b.support.getSetAttribute,K=b.support.input;b.fn.extend({attr:function(e,t){return b.access(this,b.attr,e,t,arguments.length>1)},removeAttr:function(e){return this.each(function(){b.removeAttr(this,e)})},prop:function(e,t){return b.access(this,b.prop,e,t,arguments.length>1)},removeProp:function(e){return e=b.propFix[e]||e,this.each(function(){try{this[e]=t,delete this[e]}catch(n){}})},addClass:function(e){var t,n,r,i,o,a=0,s=this.length,u="string"==typeof e&&e;if(b.isFunction(e))return this.each(function(t){b(this).addClass(e.call(this,t,this.className))});if(u)for(t=(e||"").match(w)||[];s>a;a++)if(n=this[a],r=1===n.nodeType&&(n.className?(" "+n.className+" ").replace(X," "):" ")){o=0;while(i=t[o++])0>r.indexOf(" "+i+" ")&&(r+=i+" ");n.className=b.trim(r)}return this},removeClass:function(e){var t,n,r,i,o,a=0,s=this.length,u=0===arguments.length||"string"==typeof e&&e;if(b.isFunction(e))return this.each(function(t){b(this).removeClass(e.call(this,t,this.className))});if(u)for(t=(e||"").match(w)||[];s>a;a++)if(n=this[a],r=1===n.nodeType&&(n.className?(" "+n.className+" ").replace(X," "):"")){o=0;while(i=t[o++])while(r.indexOf(" "+i+" ")>=0)r=r.replace(" "+i+" "," ");n.className=e?b.trim(r):""}return this},toggleClass:function(e,t){var n=typeof e,r="boolean"==typeof t;return 
b.isFunction(e)?this.each(function(n){b(this).toggleClass(e.call(this,n,this.className,t),t)}):this.each(function(){if("string"===n){var o,a=0,s=b(this),u=t,l=e.match(w)||[];while(o=l[a++])u=r?u:!s.hasClass(o),s[u?"addClass":"removeClass"](o)}else(n===i||"boolean"===n)&&(this.className&&b._data(this,"__className__",this.className),this.className=this.className||e===!1?"":b._data(this,"__className__")||"")})},hasClass:function(e){var t=" "+e+" ",n=0,r=this.length;for(;r>n;n++)if(1===this[n].nodeType&&(" "+this[n].className+" ").replace(X," ").indexOf(t)>=0)return!0;return!1},val:function(e){var n,r,i,o=this[0];{if(arguments.length)return i=b.isFunction(e),this.each(function(n){var o,a=b(this);1===this.nodeType&&(o=i?e.call(this,n,a.val()):e,null==o?o="":"number"==typeof o?o+="":b.isArray(o)&&(o=b.map(o,function(e){return null==e?"":e+""})),r=b.valHooks[this.type]||b.valHooks[this.nodeName.toLowerCase()],r&&"set"in r&&r.set(this,o,"value")!==t||(this.value=o))});if(o)return r=b.valHooks[o.type]||b.valHooks[o.nodeName.toLowerCase()],r&&"get"in r&&(n=r.get(o,"value"))!==t?n:(n=o.value,"string"==typeof n?n.replace(U,""):null==n?"":n)}}}),b.extend({valHooks:{option:{get:function(e){var t=e.attributes.value;return!t||t.specified?e.value:e.text}},select:{get:function(e){var t,n,r=e.options,i=e.selectedIndex,o="select-one"===e.type||0>i,a=o?null:[],s=o?i+1:r.length,u=0>i?s:o?i:0;for(;s>u;u++)if(n=r[u],!(!n.selected&&u!==i||(b.support.optDisabled?n.disabled:null!==n.getAttribute("disabled"))||n.parentNode.disabled&&b.nodeName(n.parentNode,"optgroup"))){if(t=b(n).val(),o)return t;a.push(t)}return a},set:function(e,t){var n=b.makeArray(t);return b(e).find("option").each(function(){this.selected=b.inArray(b(this).val(),n)>=0}),n.length||(e.selectedIndex=-1),n}}},attr:function(e,n,r){var o,a,s,u=e.nodeType;if(e&&3!==u&&8!==u&&2!==u)return typeof e.getAttribute===i?b.prop(e,n,r):(a=1!==u||!b.isXMLDoc(e),a&&(n=n.toLowerCase(),o=b.attrHooks[n]||(J.test(n)?z:I)),r===t?o&&a&&"get"in 
o&&null!==(s=o.get(e,n))?s:(typeof e.getAttribute!==i&&(s=e.getAttribute(n)),null==s?t:s):null!==r?o&&a&&"set"in o&&(s=o.set(e,r,n))!==t?s:(e.setAttribute(n,r+""),r):(b.removeAttr(e,n),t))},removeAttr:function(e,t){var n,r,i=0,o=t&&t.match(w);if(o&&1===e.nodeType)while(n=o[i++])r=b.propFix[n]||n,J.test(n)?!Q&&G.test(n)?e[b.camelCase("default-"+n)]=e[r]=!1:e[r]=!1:b.attr(e,n,""),e.removeAttribute(Q?n:r)},attrHooks:{type:{set:function(e,t){if(!b.support.radioValue&&"radio"===t&&b.nodeName(e,"input")){var n=e.value;return e.setAttribute("type",t),n&&(e.value=n),t}}}},propFix:{tabindex:"tabIndex",readonly:"readOnly","for":"htmlFor","class":"className",maxlength:"maxLength",cellspacing:"cellSpacing",cellpadding:"cellPadding",rowspan:"rowSpan",colspan:"colSpan",usemap:"useMap",frameborder:"frameBorder",contenteditable:"contentEditable"},prop:function(e,n,r){var i,o,a,s=e.nodeType;if(e&&3!==s&&8!==s&&2!==s)return a=1!==s||!b.isXMLDoc(e),a&&(n=b.propFix[n]||n,o=b.propHooks[n]),r!==t?o&&"set"in o&&(i=o.set(e,r,n))!==t?i:e[n]=r:o&&"get"in o&&null!==(i=o.get(e,n))?i:e[n]},propHooks:{tabIndex:{get:function(e){var n=e.getAttributeNode("tabindex");return n&&n.specified?parseInt(n.value,10):V.test(e.nodeName)||Y.test(e.nodeName)&&e.href?0:t}}}}),z={get:function(e,n){var r=b.prop(e,n),i="boolean"==typeof r&&e.getAttribute(n),o="boolean"==typeof r?K&&Q?null!=i:G.test(n)?e[b.camelCase("default-"+n)]:!!i:e.getAttributeNode(n);return o&&o.value!==!1?n.toLowerCase():t},set:function(e,t,n){return t===!1?b.removeAttr(e,n):K&&Q||!G.test(n)?e.setAttribute(!Q&&b.propFix[n]||n,n):e[b.camelCase("default-"+n)]=e[n]=!0,n}},K&&Q||(b.attrHooks.value={get:function(e,n){var r=e.getAttributeNode(n);return b.nodeName(e,"input")?e.defaultValue:r&&r.specified?r.value:t},set:function(e,n,r){return b.nodeName(e,"input")?(e.defaultValue=n,t):I&&I.set(e,n,r)}}),Q||(I=b.valHooks.button={get:function(e,n){var r=e.getAttributeNode(n);return 
r&&("id"===n||"name"===n||"coords"===n?""!==r.value:r.specified)?r.value:t},set:function(e,n,r){var i=e.getAttributeNode(r);return i||e.setAttributeNode(i=e.ownerDocument.createAttribute(r)),i.value=n+="","value"===r||n===e.getAttribute(r)?n:t}},b.attrHooks.contenteditable={get:I.get,set:function(e,t,n){I.set(e,""===t?!1:t,n)}},b.each(["width","height"],function(e,n){b.attrHooks[n]=b.extend(b.attrHooks[n],{set:function(e,r){return""===r?(e.setAttribute(n,"auto"),r):t}})})),b.support.hrefNormalized||(b.each(["href","src","width","height"],function(e,n){b.attrHooks[n]=b.extend(b.attrHooks[n],{get:function(e){var r=e.getAttribute(n,2);return null==r?t:r}})}),b.each(["href","src"],function(e,t){b.propHooks[t]={get:function(e){return e.getAttribute(t,4)}}})),b.support.style||(b.attrHooks.style={get:function(e){return e.style.cssText||t},set:function(e,t){return e.style.cssText=t+""}}),b.support.optSelected||(b.propHooks.selected=b.extend(b.propHooks.selected,{get:function(e){var t=e.parentNode;return t&&(t.selectedIndex,t.parentNode&&t.parentNode.selectedIndex),null}})),b.support.enctype||(b.propFix.enctype="encoding"),b.support.checkOn||b.each(["radio","checkbox"],function(){b.valHooks[this]={get:function(e){return null===e.getAttribute("value")?"on":e.value}}}),b.each(["radio","checkbox"],function(){b.valHooks[this]=b.extend(b.valHooks[this],{set:function(e,n){return b.isArray(n)?e.checked=b.inArray(b(e).val(),n)>=0:t}})});var Z=/^(?:input|select|textarea)$/i,et=/^key/,tt=/^(?:mouse|contextmenu)|click/,nt=/^(?:focusinfocus|focusoutblur)$/,rt=/^([^.]*)(?:\.(.+)|)$/;function it(){return!0}function ot(){return!1}b.event={global:{},add:function(e,n,r,o,a){var s,u,l,c,p,f,d,h,g,m,y,v=b._data(e);if(v){r.handler&&(c=r,r=c.handler,a=c.selector),r.guid||(r.guid=b.guid++),(u=v.events)||(u=v.events={}),(f=v.handle)||(f=v.handle=function(e){return typeof 
b===i||e&&b.event.triggered===e.type?t:b.event.dispatch.apply(f.elem,arguments)},f.elem=e),n=(n||"").match(w)||[""],l=n.length;while(l--)s=rt.exec(n[l])||[],g=y=s[1],m=(s[2]||"").split(".").sort(),p=b.event.special[g]||{},g=(a?p.delegateType:p.bindType)||g,p=b.event.special[g]||{},d=b.extend({type:g,origType:y,data:o,handler:r,guid:r.guid,selector:a,needsContext:a&&b.expr.match.needsContext.test(a),namespace:m.join(".")},c),(h=u[g])||(h=u[g]=[],h.delegateCount=0,p.setup&&p.setup.call(e,o,m,f)!==!1||(e.addEventListener?e.addEventListener(g,f,!1):e.attachEvent&&e.attachEvent("on"+g,f))),p.add&&(p.add.call(e,d),d.handler.guid||(d.handler.guid=r.guid)),a?h.splice(h.delegateCount++,0,d):h.push(d),b.event.global[g]=!0;e=null}},remove:function(e,t,n,r,i){var o,a,s,u,l,c,p,f,d,h,g,m=b.hasData(e)&&b._data(e);if(m&&(c=m.events)){t=(t||"").match(w)||[""],l=t.length;while(l--)if(s=rt.exec(t[l])||[],d=g=s[1],h=(s[2]||"").split(".").sort(),d){p=b.event.special[d]||{},d=(r?p.delegateType:p.bindType)||d,f=c[d]||[],s=s[2]&&RegExp("(^|\\.)"+h.join("\\.(?:.*\\.|)")+"(\\.|$)"),u=o=f.length;while(o--)a=f[o],!i&&g!==a.origType||n&&n.guid!==a.guid||s&&!s.test(a.namespace)||r&&r!==a.selector&&("**"!==r||!a.selector)||(f.splice(o,1),a.selector&&f.delegateCount--,p.remove&&p.remove.call(e,a));u&&!f.length&&(p.teardown&&p.teardown.call(e,h,m.handle)!==!1||b.removeEvent(e,d,m.handle),delete c[d])}else for(d in c)b.event.remove(e,d+t[l],n,r,!0);b.isEmptyObject(c)&&(delete m.handle,b._removeData(e,"events"))}},trigger:function(n,r,i,a){var s,u,l,c,p,f,d,h=[i||o],g=y.call(n,"type")?n.type:n,m=y.call(n,"namespace")?n.namespace.split("."):[];if(l=f=i=i||o,3!==i.nodeType&&8!==i.nodeType&&!nt.test(g+b.event.triggered)&&(g.indexOf(".")>=0&&(m=g.split("."),g=m.shift(),m.sort()),u=0>g.indexOf(":")&&"on"+g,n=n[b.expando]?n:new b.Event(g,"object"==typeof 
n&&n),n.isTrigger=!0,n.namespace=m.join("."),n.namespace_re=n.namespace?RegExp("(^|\\.)"+m.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,n.result=t,n.target||(n.target=i),r=null==r?[n]:b.makeArray(r,[n]),p=b.event.special[g]||{},a||!p.trigger||p.trigger.apply(i,r)!==!1)){if(!a&&!p.noBubble&&!b.isWindow(i)){for(c=p.delegateType||g,nt.test(c+g)||(l=l.parentNode);l;l=l.parentNode)h.push(l),f=l;f===(i.ownerDocument||o)&&h.push(f.defaultView||f.parentWindow||e)}d=0;while((l=h[d++])&&!n.isPropagationStopped())n.type=d>1?c:p.bindType||g,s=(b._data(l,"events")||{})[n.type]&&b._data(l,"handle"),s&&s.apply(l,r),s=u&&l[u],s&&b.acceptData(l)&&s.apply&&s.apply(l,r)===!1&&n.preventDefault();if(n.type=g,!(a||n.isDefaultPrevented()||p._default&&p._default.apply(i.ownerDocument,r)!==!1||"click"===g&&b.nodeName(i,"a")||!b.acceptData(i)||!u||!i[g]||b.isWindow(i))){f=i[u],f&&(i[u]=null),b.event.triggered=g;try{i[g]()}catch(v){}b.event.triggered=t,f&&(i[u]=f)}return n.result}},dispatch:function(e){e=b.event.fix(e);var n,r,i,o,a,s=[],u=h.call(arguments),l=(b._data(this,"events")||{})[e.type]||[],c=b.event.special[e.type]||{};if(u[0]=e,e.delegateTarget=this,!c.preDispatch||c.preDispatch.call(this,e)!==!1){s=b.event.handlers.call(this,e,l),n=0;while((o=s[n++])&&!e.isPropagationStopped()){e.currentTarget=o.elem,a=0;while((i=o.handlers[a++])&&!e.isImmediatePropagationStopped())(!e.namespace_re||e.namespace_re.test(i.namespace))&&(e.handleObj=i,e.data=i.data,r=((b.event.special[i.origType]||{}).handle||i.handler).apply(o.elem,u),r!==t&&(e.result=r)===!1&&(e.preventDefault(),e.stopPropagation()))}return c.postDispatch&&c.postDispatch.call(this,e),e.result}},handlers:function(e,n){var r,i,o,a,s=[],u=n.delegateCount,l=e.target;if(u&&l.nodeType&&(!e.button||"click"!==e.type))for(;l!=this;l=l.parentNode||this)if(1===l.nodeType&&(l.disabled!==!0||"click"!==e.type)){for(o=[],a=0;u>a;a++)i=n[a],r=i.selector+" 
",o[r]===t&&(o[r]=i.needsContext?b(r,this).index(l)>=0:b.find(r,this,null,[l]).length),o[r]&&o.push(i);o.length&&s.push({elem:l,handlers:o})}return n.length>u&&s.push({elem:this,handlers:n.slice(u)}),s},fix:function(e){if(e[b.expando])return e;var t,n,r,i=e.type,a=e,s=this.fixHooks[i];s||(this.fixHooks[i]=s=tt.test(i)?this.mouseHooks:et.test(i)?this.keyHooks:{}),r=s.props?this.props.concat(s.props):this.props,e=new b.Event(a),t=r.length;while(t--)n=r[t],e[n]=a[n];return e.target||(e.target=a.srcElement||o),3===e.target.nodeType&&(e.target=e.target.parentNode),e.metaKey=!!e.metaKey,s.filter?s.filter(e,a):e},props:"altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "),fixHooks:{},keyHooks:{props:"char charCode key keyCode".split(" "),filter:function(e,t){return null==e.which&&(e.which=null!=t.charCode?t.charCode:t.keyCode),e}},mouseHooks:{props:"button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "),filter:function(e,n){var r,i,a,s=n.button,u=n.fromElement;return null==e.pageX&&null!=n.clientX&&(i=e.target.ownerDocument||o,a=i.documentElement,r=i.body,e.pageX=n.clientX+(a&&a.scrollLeft||r&&r.scrollLeft||0)-(a&&a.clientLeft||r&&r.clientLeft||0),e.pageY=n.clientY+(a&&a.scrollTop||r&&r.scrollTop||0)-(a&&a.clientTop||r&&r.clientTop||0)),!e.relatedTarget&&u&&(e.relatedTarget=u===e.target?n.toElement:u),e.which||s===t||(e.which=1&s?1:2&s?3:4&s?2:0),e}},special:{load:{noBubble:!0},click:{trigger:function(){return b.nodeName(this,"input")&&"checkbox"===this.type&&this.click?(this.click(),!1):t}},focus:{trigger:function(){if(this!==o.activeElement&&this.focus)try{return this.focus(),!1}catch(e){}},delegateType:"focusin"},blur:{trigger:function(){return 
this===o.activeElement&&this.blur?(this.blur(),!1):t},delegateType:"focusout"},beforeunload:{postDispatch:function(e){e.result!==t&&(e.originalEvent.returnValue=e.result)}}},simulate:function(e,t,n,r){var i=b.extend(new b.Event,n,{type:e,isSimulated:!0,originalEvent:{}});r?b.event.trigger(i,null,t):b.event.dispatch.call(t,i),i.isDefaultPrevented()&&n.preventDefault()}},b.removeEvent=o.removeEventListener?function(e,t,n){e.removeEventListener&&e.removeEventListener(t,n,!1)}:function(e,t,n){var r="on"+t;e.detachEvent&&(typeof e[r]===i&&(e[r]=null),e.detachEvent(r,n))},b.Event=function(e,n){return this instanceof b.Event?(e&&e.type?(this.originalEvent=e,this.type=e.type,this.isDefaultPrevented=e.defaultPrevented||e.returnValue===!1||e.getPreventDefault&&e.getPreventDefault()?it:ot):this.type=e,n&&b.extend(this,n),this.timeStamp=e&&e.timeStamp||b.now(),this[b.expando]=!0,t):new b.Event(e,n)},b.Event.prototype={isDefaultPrevented:ot,isPropagationStopped:ot,isImmediatePropagationStopped:ot,preventDefault:function(){var e=this.originalEvent;this.isDefaultPrevented=it,e&&(e.preventDefault?e.preventDefault():e.returnValue=!1)},stopPropagation:function(){var e=this.originalEvent;this.isPropagationStopped=it,e&&(e.stopPropagation&&e.stopPropagation(),e.cancelBubble=!0)},stopImmediatePropagation:function(){this.isImmediatePropagationStopped=it,this.stopPropagation()}},b.each({mouseenter:"mouseover",mouseleave:"mouseout"},function(e,t){b.event.special[e]={delegateType:t,bindType:t,handle:function(e){var n,r=this,i=e.relatedTarget,o=e.handleObj;
+return(!i||i!==r&&!b.contains(r,i))&&(e.type=o.origType,n=o.handler.apply(this,arguments),e.type=t),n}}}),b.support.submitBubbles||(b.event.special.submit={setup:function(){return b.nodeName(this,"form")?!1:(b.event.add(this,"click._submit keypress._submit",function(e){var n=e.target,r=b.nodeName(n,"input")||b.nodeName(n,"button")?n.form:t;r&&!b._data(r,"submitBubbles")&&(b.event.add(r,"submit._submit",function(e){e._submit_bubble=!0}),b._data(r,"submitBubbles",!0))}),t)},postDispatch:function(e){e._submit_bubble&&(delete e._submit_bubble,this.parentNode&&!e.isTrigger&&b.event.simulate("submit",this.parentNode,e,!0))},teardown:function(){return b.nodeName(this,"form")?!1:(b.event.remove(this,"._submit"),t)}}),b.support.changeBubbles||(b.event.special.change={setup:function(){return Z.test(this.nodeName)?(("checkbox"===this.type||"radio"===this.type)&&(b.event.add(this,"propertychange._change",function(e){"checked"===e.originalEvent.propertyName&&(this._just_changed=!0)}),b.event.add(this,"click._change",function(e){this._just_changed&&!e.isTrigger&&(this._just_changed=!1),b.event.simulate("change",this,e,!0)})),!1):(b.event.add(this,"beforeactivate._change",function(e){var t=e.target;Z.test(t.nodeName)&&!b._data(t,"changeBubbles")&&(b.event.add(t,"change._change",function(e){!this.parentNode||e.isSimulated||e.isTrigger||b.event.simulate("change",this.parentNode,e,!0)}),b._data(t,"changeBubbles",!0))}),t)},handle:function(e){var n=e.target;return this!==n||e.isSimulated||e.isTrigger||"radio"!==n.type&&"checkbox"!==n.type?e.handleObj.handler.apply(this,arguments):t},teardown:function(){return b.event.remove(this,"._change"),!Z.test(this.nodeName)}}),b.support.focusinBubbles||b.each({focus:"focusin",blur:"focusout"},function(e,t){var 
n=0,r=function(e){b.event.simulate(t,e.target,b.event.fix(e),!0)};b.event.special[t]={setup:function(){0===n++&&o.addEventListener(e,r,!0)},teardown:function(){0===--n&&o.removeEventListener(e,r,!0)}}}),b.fn.extend({on:function(e,n,r,i,o){var a,s;if("object"==typeof e){"string"!=typeof n&&(r=r||n,n=t);for(a in e)this.on(a,n,r,e[a],o);return this}if(null==r&&null==i?(i=n,r=n=t):null==i&&("string"==typeof n?(i=r,r=t):(i=r,r=n,n=t)),i===!1)i=ot;else if(!i)return this;return 1===o&&(s=i,i=function(e){return b().off(e),s.apply(this,arguments)},i.guid=s.guid||(s.guid=b.guid++)),this.each(function(){b.event.add(this,e,i,r,n)})},one:function(e,t,n,r){return this.on(e,t,n,r,1)},off:function(e,n,r){var i,o;if(e&&e.preventDefault&&e.handleObj)return i=e.handleObj,b(e.delegateTarget).off(i.namespace?i.origType+"."+i.namespace:i.origType,i.selector,i.handler),this;if("object"==typeof e){for(o in e)this.off(o,n,e[o]);return this}return(n===!1||"function"==typeof n)&&(r=n,n=t),r===!1&&(r=ot),this.each(function(){b.event.remove(this,e,r,n)})},bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)},trigger:function(e,t){return this.each(function(){b.event.trigger(e,t,this)})},triggerHandler:function(e,n){var r=this[0];return r?b.event.trigger(e,n,r,!0):t}}),function(e,t){var n,r,i,o,a,s,u,l,c,p,f,d,h,g,m,y,v,x="sizzle"+-new Date,w=e.document,T={},N=0,C=0,k=it(),E=it(),S=it(),A=typeof t,j=1<<31,D=[],L=D.pop,H=D.push,q=D.slice,M=D.indexOf||function(e){var t=0,n=this.length;for(;n>t;t++)if(this[t]===e)return 
t;return-1},_="[\\x20\\t\\r\\n\\f]",F="(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+",O=F.replace("w","w#"),B="([*^$|!~]?=)",P="\\["+_+"*("+F+")"+_+"*(?:"+B+_+"*(?:(['\"])((?:\\\\.|[^\\\\])*?)\\3|("+O+")|)|)"+_+"*\\]",R=":("+F+")(?:\\(((['\"])((?:\\\\.|[^\\\\])*?)\\3|((?:\\\\.|[^\\\\()[\\]]|"+P.replace(3,8)+")*)|.*)\\)|)",W=RegExp("^"+_+"+|((?:^|[^\\\\])(?:\\\\.)*)"+_+"+$","g"),$=RegExp("^"+_+"*,"+_+"*"),I=RegExp("^"+_+"*([\\x20\\t\\r\\n\\f>+~])"+_+"*"),z=RegExp(R),X=RegExp("^"+O+"$"),U={ID:RegExp("^#("+F+")"),CLASS:RegExp("^\\.("+F+")"),NAME:RegExp("^\\[name=['\"]?("+F+")['\"]?\\]"),TAG:RegExp("^("+F.replace("w","w*")+")"),ATTR:RegExp("^"+P),PSEUDO:RegExp("^"+R),CHILD:RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+_+"*(even|odd|(([+-]|)(\\d*)n|)"+_+"*(?:([+-]|)"+_+"*(\\d+)|))"+_+"*\\)|)","i"),needsContext:RegExp("^"+_+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+_+"*((?:-\\d)?\\d*)"+_+"*\\)|)(?=[^-]|$)","i")},V=/[\x20\t\r\n\f]*[+~]/,Y=/^[^{]+\{\s*\[native code/,J=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,G=/^(?:input|select|textarea|button)$/i,Q=/^h\d$/i,K=/'|\\/g,Z=/\=[\x20\t\r\n\f]*([^'"\]]*)[\x20\t\r\n\f]*\]/g,et=/\\([\da-fA-F]{1,6}[\x20\t\r\n\f]?|.)/g,tt=function(e,t){var n="0x"+t-65536;return n!==n?t:0>n?String.fromCharCode(n+65536):String.fromCharCode(55296|n>>10,56320|1023&n)};try{q.call(w.documentElement.childNodes,0)[0].nodeType}catch(nt){q=function(e){var t,n=[];while(t=this[e++])n.push(t);return n}}function rt(e){return Y.test(e+"")}function it(){var e,t=[];return e=function(n,r){return t.push(n+=" ")>i.cacheLength&&delete e[t.shift()],e[n]=r}}function ot(e){return e[x]=!0,e}function at(e){var t=p.createElement("div");try{return e(t)}catch(n){return!1}finally{t=null}}function st(e,t,n,r){var i,o,a,s,u,l,f,g,m,v;if((t?t.ownerDocument||t:w)!==p&&c(t),t=t||p,n=n||[],!e||"string"!=typeof e)return n;if(1!==(s=t.nodeType)&&9!==s)return[];if(!d&&!r){if(i=J.exec(e))if(a=i[1]){if(9===s){if(o=t.getElementById(a),!o||!o.parentNode)return n;if(o.id===a)return 
n.push(o),n}else if(t.ownerDocument&&(o=t.ownerDocument.getElementById(a))&&y(t,o)&&o.id===a)return n.push(o),n}else{if(i[2])return H.apply(n,q.call(t.getElementsByTagName(e),0)),n;if((a=i[3])&&T.getByClassName&&t.getElementsByClassName)return H.apply(n,q.call(t.getElementsByClassName(a),0)),n}if(T.qsa&&!h.test(e)){if(f=!0,g=x,m=t,v=9===s&&e,1===s&&"object"!==t.nodeName.toLowerCase()){l=ft(e),(f=t.getAttribute("id"))?g=f.replace(K,"\\$&"):t.setAttribute("id",g),g="[id='"+g+"'] ",u=l.length;while(u--)l[u]=g+dt(l[u]);m=V.test(e)&&t.parentNode||t,v=l.join(",")}if(v)try{return H.apply(n,q.call(m.querySelectorAll(v),0)),n}catch(b){}finally{f||t.removeAttribute("id")}}}return wt(e.replace(W,"$1"),t,n,r)}a=st.isXML=function(e){var t=e&&(e.ownerDocument||e).documentElement;return t?"HTML"!==t.nodeName:!1},c=st.setDocument=function(e){var n=e?e.ownerDocument||e:w;return n!==p&&9===n.nodeType&&n.documentElement?(p=n,f=n.documentElement,d=a(n),T.tagNameNoComments=at(function(e){return e.appendChild(n.createComment("")),!e.getElementsByTagName("*").length}),T.attributes=at(function(e){e.innerHTML="<select></select>";var t=typeof e.lastChild.getAttribute("multiple");return"boolean"!==t&&"string"!==t}),T.getByClassName=at(function(e){return e.innerHTML="<div class='hidden e'></div><div class='hidden'></div>",e.getElementsByClassName&&e.getElementsByClassName("e").length?(e.lastChild.className="e",2===e.getElementsByClassName("e").length):!1}),T.getByName=at(function(e){e.id=x+0,e.innerHTML="<a name='"+x+"'></a><div name='"+x+"'></div>",f.insertBefore(e,f.firstChild);var t=n.getElementsByName&&n.getElementsByName(x).length===2+n.getElementsByName(x+0).length;return T.getIdNotName=!n.getElementById(x),f.removeChild(e),t}),i.attrHandle=at(function(e){return e.innerHTML="<a href='#'></a>",e.firstChild&&typeof e.firstChild.getAttribute!==A&&"#"===e.firstChild.getAttribute("href")})?{}:{href:function(e){return e.getAttribute("href",2)},type:function(e){return 
e.getAttribute("type")}},T.getIdNotName?(i.find.ID=function(e,t){if(typeof t.getElementById!==A&&!d){var n=t.getElementById(e);return n&&n.parentNode?[n]:[]}},i.filter.ID=function(e){var t=e.replace(et,tt);return function(e){return e.getAttribute("id")===t}}):(i.find.ID=function(e,n){if(typeof n.getElementById!==A&&!d){var r=n.getElementById(e);return r?r.id===e||typeof r.getAttributeNode!==A&&r.getAttributeNode("id").value===e?[r]:t:[]}},i.filter.ID=function(e){var t=e.replace(et,tt);return function(e){var n=typeof e.getAttributeNode!==A&&e.getAttributeNode("id");return n&&n.value===t}}),i.find.TAG=T.tagNameNoComments?function(e,n){return typeof n.getElementsByTagName!==A?n.getElementsByTagName(e):t}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},i.find.NAME=T.getByName&&function(e,n){return typeof n.getElementsByName!==A?n.getElementsByName(name):t},i.find.CLASS=T.getByClassName&&function(e,n){return typeof n.getElementsByClassName===A||d?t:n.getElementsByClassName(e)},g=[],h=[":focus"],(T.qsa=rt(n.querySelectorAll))&&(at(function(e){e.innerHTML="<select><option selected=''></option></select>",e.querySelectorAll("[selected]").length||h.push("\\["+_+"*(?:checked|disabled|ismap|multiple|readonly|selected|value)"),e.querySelectorAll(":checked").length||h.push(":checked")}),at(function(e){e.innerHTML="<input type='hidden' i=''/>",e.querySelectorAll("[i^='']").length&&h.push("[*^$]="+_+"*(?:\"\"|'')"),e.querySelectorAll(":enabled").length||h.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),h.push(",.*:")})),(T.matchesSelector=rt(m=f.matchesSelector||f.mozMatchesSelector||f.webkitMatchesSelector||f.oMatchesSelector||f.msMatchesSelector))&&at(function(e){T.disconnectedMatch=m.call(e,"div"),m.call(e,"[s!='']:x"),g.push("!=",R)}),h=RegExp(h.join("|")),g=RegExp(g.join("|")),y=rt(f.contains)||f.compareDocumentPosition?function(e,t){var 
n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},v=f.compareDocumentPosition?function(e,t){var r;return e===t?(u=!0,0):(r=t.compareDocumentPosition&&e.compareDocumentPosition&&e.compareDocumentPosition(t))?1&r||e.parentNode&&11===e.parentNode.nodeType?e===n||y(w,e)?-1:t===n||y(w,t)?1:0:4&r?-1:1:e.compareDocumentPosition?-1:1}:function(e,t){var r,i=0,o=e.parentNode,a=t.parentNode,s=[e],l=[t];if(e===t)return u=!0,0;if(!o||!a)return e===n?-1:t===n?1:o?-1:a?1:0;if(o===a)return ut(e,t);r=e;while(r=r.parentNode)s.unshift(r);r=t;while(r=r.parentNode)l.unshift(r);while(s[i]===l[i])i++;return i?ut(s[i],l[i]):s[i]===w?-1:l[i]===w?1:0},u=!1,[0,0].sort(v),T.detectDuplicates=u,p):p},st.matches=function(e,t){return st(e,null,null,t)},st.matchesSelector=function(e,t){if((e.ownerDocument||e)!==p&&c(e),t=t.replace(Z,"='$1']"),!(!T.matchesSelector||d||g&&g.test(t)||h.test(t)))try{var n=m.call(e,t);if(n||T.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(r){}return st(t,p,null,[e]).length>0},st.contains=function(e,t){return(e.ownerDocument||e)!==p&&c(e),y(e,t)},st.attr=function(e,t){var n;return(e.ownerDocument||e)!==p&&c(e),d||(t=t.toLowerCase()),(n=i.attrHandle[t])?n(e):d||T.attributes?e.getAttribute(t):((n=e.getAttributeNode(t))||e.getAttribute(t))&&e[t]===!0?t:n&&n.specified?n.value:null},st.error=function(e){throw Error("Syntax error, unrecognized expression: "+e)},st.uniqueSort=function(e){var t,n=[],r=1,i=0;if(u=!T.detectDuplicates,e.sort(v),u){for(;t=e[r];r++)t===e[r-1]&&(i=n.push(r));while(i--)e.splice(n[i],1)}return e};function ut(e,t){var n=t&&e,r=n&&(~t.sourceIndex||j)-(~e.sourceIndex||j);if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function lt(e){return function(t){var 
n=t.nodeName.toLowerCase();return"input"===n&&t.type===e}}function ct(e){return function(t){var n=t.nodeName.toLowerCase();return("input"===n||"button"===n)&&t.type===e}}function pt(e){return ot(function(t){return t=+t,ot(function(n,r){var i,o=e([],n.length,t),a=o.length;while(a--)n[i=o[a]]&&(n[i]=!(r[i]=n[i]))})})}o=st.getText=function(e){var t,n="",r=0,i=e.nodeType;if(i){if(1===i||9===i||11===i){if("string"==typeof e.textContent)return e.textContent;for(e=e.firstChild;e;e=e.nextSibling)n+=o(e)}else if(3===i||4===i)return e.nodeValue}else for(;t=e[r];r++)n+=o(t);return n},i=st.selectors={cacheLength:50,createPseudo:ot,match:U,find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(et,tt),e[3]=(e[4]||e[5]||"").replace(et,tt),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||st.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&st.error(e[0]),e},PSEUDO:function(e){var t,n=!e[5]&&e[2];return U.CHILD.test(e[0])?null:(e[4]?e[2]=e[4]:n&&z.test(n)&&(t=ft(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){return"*"===e?function(){return!0}:(e=e.replace(et,tt).toLowerCase(),function(t){return t.nodeName&&t.nodeName.toLowerCase()===e})},CLASS:function(e){var t=k[e+" "];return t||(t=RegExp("(^|"+_+")"+e+"("+_+"|$)"))&&k(e,function(e){return t.test(e.className||typeof e.getAttribute!==A&&e.getAttribute("class")||"")})},ATTR:function(e,t,n){return function(r){var i=st.attr(r,e);return null==i?"!="===t:t?(i+="","="===t?i===n:"!="===t?i!==n:"^="===t?n&&0===i.indexOf(n):"*="===t?n&&i.indexOf(n)>-1:"$="===t?n&&i.slice(-n.length)===n:"~="===t?(" "+i+" 
").indexOf(n)>-1:"|="===t?i===n||i.slice(0,n.length+1)===n+"-":!1):!0}},CHILD:function(e,t,n,r,i){var o="nth"!==e.slice(0,3),a="last"!==e.slice(-4),s="of-type"===t;return 1===r&&0===i?function(e){return!!e.parentNode}:function(t,n,u){var l,c,p,f,d,h,g=o!==a?"nextSibling":"previousSibling",m=t.parentNode,y=s&&t.nodeName.toLowerCase(),v=!u&&!s;if(m){if(o){while(g){p=t;while(p=p[g])if(s?p.nodeName.toLowerCase()===y:1===p.nodeType)return!1;h=g="only"===e&&!h&&"nextSibling"}return!0}if(h=[a?m.firstChild:m.lastChild],a&&v){c=m[x]||(m[x]={}),l=c[e]||[],d=l[0]===N&&l[1],f=l[0]===N&&l[2],p=d&&m.childNodes[d];while(p=++d&&p&&p[g]||(f=d=0)||h.pop())if(1===p.nodeType&&++f&&p===t){c[e]=[N,d,f];break}}else if(v&&(l=(t[x]||(t[x]={}))[e])&&l[0]===N)f=l[1];else while(p=++d&&p&&p[g]||(f=d=0)||h.pop())if((s?p.nodeName.toLowerCase()===y:1===p.nodeType)&&++f&&(v&&((p[x]||(p[x]={}))[e]=[N,f]),p===t))break;return f-=i,f===r||0===f%r&&f/r>=0}}},PSEUDO:function(e,t){var n,r=i.pseudos[e]||i.setFilters[e.toLowerCase()]||st.error("unsupported pseudo: "+e);return r[x]?r(t):r.length>1?(n=[e,e,"",t],i.setFilters.hasOwnProperty(e.toLowerCase())?ot(function(e,n){var i,o=r(e,t),a=o.length;while(a--)i=M.call(e,o[a]),e[i]=!(n[i]=o[a])}):function(e){return r(e,0,n)}):r}},pseudos:{not:ot(function(e){var t=[],n=[],r=s(e.replace(W,"$1"));return r[x]?ot(function(e,t,n,i){var o,a=r(e,null,i,[]),s=e.length;while(s--)(o=a[s])&&(e[s]=!(t[s]=o))}):function(e,i,o){return t[0]=e,r(t,null,o,n),!n.pop()}}),has:ot(function(e){return function(t){return st(e,t).length>0}}),contains:ot(function(e){return function(t){return(t.textContent||t.innerText||o(t)).indexOf(e)>-1}}),lang:ot(function(e){return X.test(e||"")||st.error("unsupported lang: "+e),e=e.replace(et,tt).toLowerCase(),function(t){var n;do if(n=d?t.getAttribute("xml:lang")||t.getAttribute("lang"):t.lang)return n=n.toLowerCase(),n===e||0===n.indexOf(e+"-");while((t=t.parentNode)&&1===t.nodeType);return!1}}),target:function(t){var 
n=e.location&&e.location.hash;return n&&n.slice(1)===t.id},root:function(e){return e===f},focus:function(e){return e===p.activeElement&&(!p.hasFocus||p.hasFocus())&&!!(e.type||e.href||~e.tabIndex)},enabled:function(e){return e.disabled===!1},disabled:function(e){return e.disabled===!0},checked:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&!!e.checked||"option"===t&&!!e.selected},selected:function(e){return e.parentNode&&e.parentNode.selectedIndex,e.selected===!0},empty:function(e){for(e=e.firstChild;e;e=e.nextSibling)if(e.nodeName>"@"||3===e.nodeType||4===e.nodeType)return!1;return!0},parent:function(e){return!i.pseudos.empty(e)},header:function(e){return Q.test(e.nodeName)},input:function(e){return G.test(e.nodeName)},button:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&"button"===e.type||"button"===t},text:function(e){var t;return"input"===e.nodeName.toLowerCase()&&"text"===e.type&&(null==(t=e.getAttribute("type"))||t.toLowerCase()===e.type)},first:pt(function(){return[0]}),last:pt(function(e,t){return[t-1]}),eq:pt(function(e,t,n){return[0>n?n+t:n]}),even:pt(function(e,t){var n=0;for(;t>n;n+=2)e.push(n);return e}),odd:pt(function(e,t){var n=1;for(;t>n;n+=2)e.push(n);return e}),lt:pt(function(e,t,n){var r=0>n?n+t:n;for(;--r>=0;)e.push(r);return e}),gt:pt(function(e,t,n){var r=0>n?n+t:n;for(;t>++r;)e.push(r);return e})}};for(n in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})i.pseudos[n]=lt(n);for(n in{submit:!0,reset:!0})i.pseudos[n]=ct(n);function ft(e,t){var n,r,o,a,s,u,l,c=E[e+" "];if(c)return t?0:c.slice(0);s=e,u=[],l=i.preFilter;while(s){(!n||(r=$.exec(s)))&&(r&&(s=s.slice(r[0].length)||s),u.push(o=[])),n=!1,(r=I.exec(s))&&(n=r.shift(),o.push({value:n,type:r[0].replace(W," ")}),s=s.slice(n.length));for(a in i.filter)!(r=U[a].exec(s))||l[a]&&!(r=l[a](r))||(n=r.shift(),o.push({value:n,type:a,matches:r}),s=s.slice(n.length));if(!n)break}return t?s.length:s?st.error(e):E(e,u).slice(0)}function dt(e){var 
t=0,n=e.length,r="";for(;n>t;t++)r+=e[t].value;return r}function ht(e,t,n){var i=t.dir,o=n&&"parentNode"===i,a=C++;return t.first?function(t,n,r){while(t=t[i])if(1===t.nodeType||o)return e(t,n,r)}:function(t,n,s){var u,l,c,p=N+" "+a;if(s){while(t=t[i])if((1===t.nodeType||o)&&e(t,n,s))return!0}else while(t=t[i])if(1===t.nodeType||o)if(c=t[x]||(t[x]={}),(l=c[i])&&l[0]===p){if((u=l[1])===!0||u===r)return u===!0}else if(l=c[i]=[p],l[1]=e(t,n,s)||r,l[1]===!0)return!0}}function gt(e){return e.length>1?function(t,n,r){var i=e.length;while(i--)if(!e[i](t,n,r))return!1;return!0}:e[0]}function mt(e,t,n,r,i){var o,a=[],s=0,u=e.length,l=null!=t;for(;u>s;s++)(o=e[s])&&(!n||n(o,r,i))&&(a.push(o),l&&t.push(s));return a}function yt(e,t,n,r,i,o){return r&&!r[x]&&(r=yt(r)),i&&!i[x]&&(i=yt(i,o)),ot(function(o,a,s,u){var l,c,p,f=[],d=[],h=a.length,g=o||xt(t||"*",s.nodeType?[s]:s,[]),m=!e||!o&&t?g:mt(g,f,e,s,u),y=n?i||(o?e:h||r)?[]:a:m;if(n&&n(m,y,s,u),r){l=mt(y,d),r(l,[],s,u),c=l.length;while(c--)(p=l[c])&&(y[d[c]]=!(m[d[c]]=p))}if(o){if(i||e){if(i){l=[],c=y.length;while(c--)(p=y[c])&&l.push(m[c]=p);i(null,y=[],l,u)}c=y.length;while(c--)(p=y[c])&&(l=i?M.call(o,p):f[c])>-1&&(o[l]=!(a[l]=p))}}else y=mt(y===a?y.splice(h,y.length):y),i?i(null,a,y,u):H.apply(a,y)})}function vt(e){var t,n,r,o=e.length,a=i.relative[e[0].type],s=a||i.relative[" "],u=a?1:0,c=ht(function(e){return e===t},s,!0),p=ht(function(e){return M.call(t,e)>-1},s,!0),f=[function(e,n,r){return!a&&(r||n!==l)||((t=n).nodeType?c(e,n,r):p(e,n,r))}];for(;o>u;u++)if(n=i.relative[e[u].type])f=[ht(gt(f),n)];else{if(n=i.filter[e[u].type].apply(null,e[u].matches),n[x]){for(r=++u;o>r;r++)if(i.relative[e[r].type])break;return yt(u>1&&gt(f),u>1&&dt(e.slice(0,u-1)).replace(W,"$1"),n,r>u&&vt(e.slice(u,r)),o>r&&vt(e=e.slice(r)),o>r&&dt(e))}f.push(n)}return gt(f)}function bt(e,t){var n=0,o=t.length>0,a=e.length>0,s=function(s,u,c,f,d){var 
h,g,m,y=[],v=0,b="0",x=s&&[],w=null!=d,T=l,C=s||a&&i.find.TAG("*",d&&u.parentNode||u),k=N+=null==T?1:Math.random()||.1;for(w&&(l=u!==p&&u,r=n);null!=(h=C[b]);b++){if(a&&h){g=0;while(m=e[g++])if(m(h,u,c)){f.push(h);break}w&&(N=k,r=++n)}o&&((h=!m&&h)&&v--,s&&x.push(h))}if(v+=b,o&&b!==v){g=0;while(m=t[g++])m(x,y,u,c);if(s){if(v>0)while(b--)x[b]||y[b]||(y[b]=L.call(f));y=mt(y)}H.apply(f,y),w&&!s&&y.length>0&&v+t.length>1&&st.uniqueSort(f)}return w&&(N=k,l=T),x};return o?ot(s):s}s=st.compile=function(e,t){var n,r=[],i=[],o=S[e+" "];if(!o){t||(t=ft(e)),n=t.length;while(n--)o=vt(t[n]),o[x]?r.push(o):i.push(o);o=S(e,bt(i,r))}return o};function xt(e,t,n){var r=0,i=t.length;for(;i>r;r++)st(e,t[r],n);return n}function wt(e,t,n,r){var o,a,u,l,c,p=ft(e);if(!r&&1===p.length){if(a=p[0]=p[0].slice(0),a.length>2&&"ID"===(u=a[0]).type&&9===t.nodeType&&!d&&i.relative[a[1].type]){if(t=i.find.ID(u.matches[0].replace(et,tt),t)[0],!t)return n;e=e.slice(a.shift().value.length)}o=U.needsContext.test(e)?0:a.length;while(o--){if(u=a[o],i.relative[l=u.type])break;if((c=i.find[l])&&(r=c(u.matches[0].replace(et,tt),V.test(a[0].type)&&t.parentNode||t))){if(a.splice(o,1),e=r.length&&dt(a),!e)return H.apply(n,q.call(r,0)),n;break}}}return s(e,p)(r,t,d,n,V.test(e)),n}i.pseudos.nth=i.pseudos.eq;function Tt(){}i.filters=Tt.prototype=i.pseudos,i.setFilters=new Tt,c(),st.attr=b.attr,b.find=st,b.expr=st.selectors,b.expr[":"]=b.expr.pseudos,b.unique=st.uniqueSort,b.text=st.getText,b.isXMLDoc=st.isXML,b.contains=st.contains}(e);var at=/Until$/,st=/^(?:parents|prev(?:Until|All))/,ut=/^.[^:#\[\.,]*$/,lt=b.expr.match.needsContext,ct={children:!0,contents:!0,next:!0,prev:!0};b.fn.extend({find:function(e){var t,n,r,i=this.length;if("string"!=typeof e)return r=this,this.pushStack(b(e).filter(function(){for(t=0;i>t;t++)if(b.contains(r[t],this))return!0}));for(n=[],t=0;i>t;t++)b.find(e,this[t],n);return n=this.pushStack(i>1?b.unique(n):n),n.selector=(this.selector?this.selector+" ":"")+e,n},has:function(e){var 
t,n=b(e,this),r=n.length;return this.filter(function(){for(t=0;r>t;t++)if(b.contains(this,n[t]))return!0})},not:function(e){return this.pushStack(ft(this,e,!1))},filter:function(e){return this.pushStack(ft(this,e,!0))},is:function(e){return!!e&&("string"==typeof e?lt.test(e)?b(e,this.context).index(this[0])>=0:b.filter(e,this).length>0:this.filter(e).length>0)},closest:function(e,t){var n,r=0,i=this.length,o=[],a=lt.test(e)||"string"!=typeof e?b(e,t||this.context):0;for(;i>r;r++){n=this[r];while(n&&n.ownerDocument&&n!==t&&11!==n.nodeType){if(a?a.index(n)>-1:b.find.matchesSelector(n,e)){o.push(n);break}n=n.parentNode}}return this.pushStack(o.length>1?b.unique(o):o)},index:function(e){return e?"string"==typeof e?b.inArray(this[0],b(e)):b.inArray(e.jquery?e[0]:e,this):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(e,t){var n="string"==typeof e?b(e,t):b.makeArray(e&&e.nodeType?[e]:e),r=b.merge(this.get(),n);return this.pushStack(b.unique(r))},addBack:function(e){return this.add(null==e?this.prevObject:this.prevObject.filter(e))}}),b.fn.andSelf=b.fn.addBack;function pt(e,t){do e=e[t];while(e&&1!==e.nodeType);return e}b.each({parent:function(e){var t=e.parentNode;return t&&11!==t.nodeType?t:null},parents:function(e){return b.dir(e,"parentNode")},parentsUntil:function(e,t,n){return b.dir(e,"parentNode",n)},next:function(e){return pt(e,"nextSibling")},prev:function(e){return pt(e,"previousSibling")},nextAll:function(e){return b.dir(e,"nextSibling")},prevAll:function(e){return b.dir(e,"previousSibling")},nextUntil:function(e,t,n){return b.dir(e,"nextSibling",n)},prevUntil:function(e,t,n){return b.dir(e,"previousSibling",n)},siblings:function(e){return b.sibling((e.parentNode||{}).firstChild,e)},children:function(e){return b.sibling(e.firstChild)},contents:function(e){return b.nodeName(e,"iframe")?e.contentDocument||e.contentWindow.document:b.merge([],e.childNodes)}},function(e,t){b.fn[e]=function(n,r){var i=b.map(this,t,n);return 
at.test(e)||(r=n),r&&"string"==typeof r&&(i=b.filter(r,i)),i=this.length>1&&!ct[e]?b.unique(i):i,this.length>1&&st.test(e)&&(i=i.reverse()),this.pushStack(i)}}),b.extend({filter:function(e,t,n){return n&&(e=":not("+e+")"),1===t.length?b.find.matchesSelector(t[0],e)?[t[0]]:[]:b.find.matches(e,t)},dir:function(e,n,r){var i=[],o=e[n];while(o&&9!==o.nodeType&&(r===t||1!==o.nodeType||!b(o).is(r)))1===o.nodeType&&i.push(o),o=o[n];return i},sibling:function(e,t){var n=[];for(;e;e=e.nextSibling)1===e.nodeType&&e!==t&&n.push(e);return n}});function ft(e,t,n){if(t=t||0,b.isFunction(t))return b.grep(e,function(e,r){var i=!!t.call(e,r,e);return i===n});if(t.nodeType)return b.grep(e,function(e){return e===t===n});if("string"==typeof t){var r=b.grep(e,function(e){return 1===e.nodeType});if(ut.test(t))return b.filter(t,r,!n);t=b.filter(t,r)}return b.grep(e,function(e){return b.inArray(e,t)>=0===n})}function dt(e){var t=ht.split("|"),n=e.createDocumentFragment();if(n.createElement)while(t.length)n.createElement(t.pop());return n}var ht="abbr|article|aside|audio|bdi|canvas|data|datalist|details|figcaption|figure|footer|header|hgroup|mark|meter|nav|output|progress|section|summary|time|video",gt=/ jQuery\d+="(?:null|\d+)"/g,mt=RegExp("<(?:"+ht+")[\\s/>]","i"),yt=/^\s+/,vt=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,bt=/<([\w:]+)/,xt=/<tbody/i,wt=/<|&#?\w+;/,Tt=/<(?:script|style|link)/i,Nt=/^(?:checkbox|radio)$/i,Ct=/checked\s*(?:[^=]|=\s*.checked.)/i,kt=/^$|\/(?:java|ecma)script/i,Et=/^true\/(.*)/,St=/^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g,At={option:[1,"<select 
multiple='multiple'>","</select>"],legend:[1,"<fieldset>","</fieldset>"],area:[1,"<map>","</map>"],param:[1,"<object>","</object>"],thead:[1,"<table>","</table>"],tr:[2,"<table><tbody>","</tbody></table>"],col:[2,"<table><tbody></tbody><colgroup>","</colgroup></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:b.support.htmlSerialize?[0,"",""]:[1,"X<div>","</div>"]},jt=dt(o),Dt=jt.appendChild(o.createElement("div"));At.optgroup=At.option,At.tbody=At.tfoot=At.colgroup=At.caption=At.thead,At.th=At.td,b.fn.extend({text:function(e){return b.access(this,function(e){return e===t?b.text(this):this.empty().append((this[0]&&this[0].ownerDocument||o).createTextNode(e))},null,e,arguments.length)},wrapAll:function(e){if(b.isFunction(e))return this.each(function(t){b(this).wrapAll(e.call(this,t))});if(this[0]){var t=b(e,this[0].ownerDocument).eq(0).clone(!0);this[0].parentNode&&t.insertBefore(this[0]),t.map(function(){var e=this;while(e.firstChild&&1===e.firstChild.nodeType)e=e.firstChild;return e}).append(this)}return this},wrapInner:function(e){return b.isFunction(e)?this.each(function(t){b(this).wrapInner(e.call(this,t))}):this.each(function(){var t=b(this),n=t.contents();n.length?n.wrapAll(e):t.append(e)})},wrap:function(e){var t=b.isFunction(e);return this.each(function(n){b(this).wrapAll(t?e.call(this,n):e)})},unwrap:function(){return this.parent().each(function(){b.nodeName(this,"body")||b(this).replaceWith(this.childNodes)}).end()},append:function(){return this.domManip(arguments,!0,function(e){(1===this.nodeType||11===this.nodeType||9===this.nodeType)&&this.appendChild(e)})},prepend:function(){return this.domManip(arguments,!0,function(e){(1===this.nodeType||11===this.nodeType||9===this.nodeType)&&this.insertBefore(e,this.firstChild)})},before:function(){return this.domManip(arguments,!1,function(e){this.parentNode&&this.parentNode.insertBefore(e,this)})},after:function(){return 
this.domManip(arguments,!1,function(e){this.parentNode&&this.parentNode.insertBefore(e,this.nextSibling)})},remove:function(e,t){var n,r=0;for(;null!=(n=this[r]);r++)(!e||b.filter(e,[n]).length>0)&&(t||1!==n.nodeType||b.cleanData(Ot(n)),n.parentNode&&(t&&b.contains(n.ownerDocument,n)&&Mt(Ot(n,"script")),n.parentNode.removeChild(n)));return this},empty:function(){var e,t=0;for(;null!=(e=this[t]);t++){1===e.nodeType&&b.cleanData(Ot(e,!1));while(e.firstChild)e.removeChild(e.firstChild);e.options&&b.nodeName(e,"select")&&(e.options.length=0)}return this},clone:function(e,t){return e=null==e?!1:e,t=null==t?e:t,this.map(function(){return b.clone(this,e,t)})},html:function(e){return b.access(this,function(e){var n=this[0]||{},r=0,i=this.length;if(e===t)return 1===n.nodeType?n.innerHTML.replace(gt,""):t;if(!("string"!=typeof e||Tt.test(e)||!b.support.htmlSerialize&&mt.test(e)||!b.support.leadingWhitespace&&yt.test(e)||At[(bt.exec(e)||["",""])[1].toLowerCase()])){e=e.replace(vt,"<$1></$2>");try{for(;i>r;r++)n=this[r]||{},1===n.nodeType&&(b.cleanData(Ot(n,!1)),n.innerHTML=e);n=0}catch(o){}}n&&this.empty().append(e)},null,e,arguments.length)},replaceWith:function(e){var t=b.isFunction(e);return t||"string"==typeof e||(e=b(e).not(this).detach()),this.domManip([e],!0,function(e){var t=this.nextSibling,n=this.parentNode;n&&(b(this).remove(),n.insertBefore(e,t))})},detach:function(e){return this.remove(e,!0)},domManip:function(e,n,r){e=f.apply([],e);var i,o,a,s,u,l,c=0,p=this.length,d=this,h=p-1,g=e[0],m=b.isFunction(g);if(m||!(1>=p||"string"!=typeof g||b.support.checkClone)&&Ct.test(g))return this.each(function(i){var 
o=d.eq(i);m&&(e[0]=g.call(this,i,n?o.html():t)),o.domManip(e,n,r)});if(p&&(l=b.buildFragment(e,this[0].ownerDocument,!1,this),i=l.firstChild,1===l.childNodes.length&&(l=i),i)){for(n=n&&b.nodeName(i,"tr"),s=b.map(Ot(l,"script"),Ht),a=s.length;p>c;c++)o=l,c!==h&&(o=b.clone(o,!0,!0),a&&b.merge(s,Ot(o,"script"))),r.call(n&&b.nodeName(this[c],"table")?Lt(this[c],"tbody"):this[c],o,c);if(a)for(u=s[s.length-1].ownerDocument,b.map(s,qt),c=0;a>c;c++)o=s[c],kt.test(o.type||"")&&!b._data(o,"globalEval")&&b.contains(u,o)&&(o.src?b.ajax({url:o.src,type:"GET",dataType:"script",async:!1,global:!1,"throws":!0}):b.globalEval((o.text||o.textContent||o.innerHTML||"").replace(St,"")));l=i=null}return this}});function Lt(e,t){return e.getElementsByTagName(t)[0]||e.appendChild(e.ownerDocument.createElement(t))}function Ht(e){var t=e.getAttributeNode("type");return e.type=(t&&t.specified)+"/"+e.type,e}function qt(e){var t=Et.exec(e.type);return t?e.type=t[1]:e.removeAttribute("type"),e}function Mt(e,t){var n,r=0;for(;null!=(n=e[r]);r++)b._data(n,"globalEval",!t||b._data(t[r],"globalEval"))}function _t(e,t){if(1===t.nodeType&&b.hasData(e)){var n,r,i,o=b._data(e),a=b._data(t,o),s=o.events;if(s){delete a.handle,a.events={};for(n in s)for(r=0,i=s[n].length;i>r;r++)b.event.add(t,n,s[n][r])}a.data&&(a.data=b.extend({},a.data))}}function Ft(e,t){var n,r,i;if(1===t.nodeType){if(n=t.nodeName.toLowerCase(),!b.support.noCloneEvent&&t[b.expando]){i=b._data(t);for(r in 
i.events)b.removeEvent(t,r,i.handle);t.removeAttribute(b.expando)}"script"===n&&t.text!==e.text?(Ht(t).text=e.text,qt(t)):"object"===n?(t.parentNode&&(t.outerHTML=e.outerHTML),b.support.html5Clone&&e.innerHTML&&!b.trim(t.innerHTML)&&(t.innerHTML=e.innerHTML)):"input"===n&&Nt.test(e.type)?(t.defaultChecked=t.checked=e.checked,t.value!==e.value&&(t.value=e.value)):"option"===n?t.defaultSelected=t.selected=e.defaultSelected:("input"===n||"textarea"===n)&&(t.defaultValue=e.defaultValue)}}b.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(e,t){b.fn[e]=function(e){var n,r=0,i=[],o=b(e),a=o.length-1;for(;a>=r;r++)n=r===a?this:this.clone(!0),b(o[r])[t](n),d.apply(i,n.get());return this.pushStack(i)}});function Ot(e,n){var r,o,a=0,s=typeof e.getElementsByTagName!==i?e.getElementsByTagName(n||"*"):typeof e.querySelectorAll!==i?e.querySelectorAll(n||"*"):t;if(!s)for(s=[],r=e.childNodes||e;null!=(o=r[a]);a++)!n||b.nodeName(o,n)?s.push(o):b.merge(s,Ot(o,n));return n===t||n&&b.nodeName(e,n)?b.merge([e],s):s}function Bt(e){Nt.test(e.type)&&(e.defaultChecked=e.checked)}b.extend({clone:function(e,t,n){var r,i,o,a,s,u=b.contains(e.ownerDocument,e);if(b.support.html5Clone||b.isXMLDoc(e)||!mt.test("<"+e.nodeName+">")?o=e.cloneNode(!0):(Dt.innerHTML=e.outerHTML,Dt.removeChild(o=Dt.firstChild)),!(b.support.noCloneEvent&&b.support.noCloneChecked||1!==e.nodeType&&11!==e.nodeType||b.isXMLDoc(e)))for(r=Ot(o),s=Ot(e),a=0;null!=(i=s[a]);++a)r[a]&&Ft(i,r[a]);if(t)if(n)for(s=s||Ot(e),r=r||Ot(o),a=0;null!=(i=s[a]);a++)_t(i,r[a]);else _t(e,o);return r=Ot(o,"script"),r.length>0&&Mt(r,!u&&Ot(e,"script")),r=s=i=null,o},buildFragment:function(e,t,n,r){var i,o,a,s,u,l,c,p=e.length,f=dt(t),d=[],h=0;for(;p>h;h++)if(o=e[h],o||0===o)if("object"===b.type(o))b.merge(d,o.nodeType?[o]:o);else 
if(wt.test(o)){s=s||f.appendChild(t.createElement("div")),u=(bt.exec(o)||["",""])[1].toLowerCase(),c=At[u]||At._default,s.innerHTML=c[1]+o.replace(vt,"<$1></$2>")+c[2],i=c[0];while(i--)s=s.lastChild;if(!b.support.leadingWhitespace&&yt.test(o)&&d.push(t.createTextNode(yt.exec(o)[0])),!b.support.tbody){o="table"!==u||xt.test(o)?"<table>"!==c[1]||xt.test(o)?0:s:s.firstChild,i=o&&o.childNodes.length;while(i--)b.nodeName(l=o.childNodes[i],"tbody")&&!l.childNodes.length&&o.removeChild(l)
+}b.merge(d,s.childNodes),s.textContent="";while(s.firstChild)s.removeChild(s.firstChild);s=f.lastChild}else d.push(t.createTextNode(o));s&&f.removeChild(s),b.support.appendChecked||b.grep(Ot(d,"input"),Bt),h=0;while(o=d[h++])if((!r||-1===b.inArray(o,r))&&(a=b.contains(o.ownerDocument,o),s=Ot(f.appendChild(o),"script"),a&&Mt(s),n)){i=0;while(o=s[i++])kt.test(o.type||"")&&n.push(o)}return s=null,f},cleanData:function(e,t){var n,r,o,a,s=0,u=b.expando,l=b.cache,p=b.support.deleteExpando,f=b.event.special;for(;null!=(n=e[s]);s++)if((t||b.acceptData(n))&&(o=n[u],a=o&&l[o])){if(a.events)for(r in a.events)f[r]?b.event.remove(n,r):b.removeEvent(n,r,a.handle);l[o]&&(delete l[o],p?delete n[u]:typeof n.removeAttribute!==i?n.removeAttribute(u):n[u]=null,c.push(o))}}});var Pt,Rt,Wt,$t=/alpha\([^)]*\)/i,It=/opacity\s*=\s*([^)]*)/,zt=/^(top|right|bottom|left)$/,Xt=/^(none|table(?!-c[ea]).+)/,Ut=/^margin/,Vt=RegExp("^("+x+")(.*)$","i"),Yt=RegExp("^("+x+")(?!px)[a-z%]+$","i"),Jt=RegExp("^([+-])=("+x+")","i"),Gt={BODY:"block"},Qt={position:"absolute",visibility:"hidden",display:"block"},Kt={letterSpacing:0,fontWeight:400},Zt=["Top","Right","Bottom","Left"],en=["Webkit","O","Moz","ms"];function tn(e,t){if(t in e)return t;var n=t.charAt(0).toUpperCase()+t.slice(1),r=t,i=en.length;while(i--)if(t=en[i]+n,t in e)return t;return r}function nn(e,t){return e=t||e,"none"===b.css(e,"display")||!b.contains(e.ownerDocument,e)}function rn(e,t){var n,r,i,o=[],a=0,s=e.length;for(;s>a;a++)r=e[a],r.style&&(o[a]=b._data(r,"olddisplay"),n=r.style.display,t?(o[a]||"none"!==n||(r.style.display=""),""===r.style.display&&nn(r)&&(o[a]=b._data(r,"olddisplay",un(r.nodeName)))):o[a]||(i=nn(r),(n&&"none"!==n||!i)&&b._data(r,"olddisplay",i?n:b.css(r,"display"))));for(a=0;s>a;a++)r=e[a],r.style&&(t&&"none"!==r.style.display&&""!==r.style.display||(r.style.display=t?o[a]||"":"none"));return e}b.fn.extend({css:function(e,n){return b.access(this,function(e,n,r){var 
i,o,a={},s=0;if(b.isArray(n)){for(o=Rt(e),i=n.length;i>s;s++)a[n[s]]=b.css(e,n[s],!1,o);return a}return r!==t?b.style(e,n,r):b.css(e,n)},e,n,arguments.length>1)},show:function(){return rn(this,!0)},hide:function(){return rn(this)},toggle:function(e){var t="boolean"==typeof e;return this.each(function(){(t?e:nn(this))?b(this).show():b(this).hide()})}}),b.extend({cssHooks:{opacity:{get:function(e,t){if(t){var n=Wt(e,"opacity");return""===n?"1":n}}}},cssNumber:{columnCount:!0,fillOpacity:!0,fontWeight:!0,lineHeight:!0,opacity:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":b.support.cssFloat?"cssFloat":"styleFloat"},style:function(e,n,r,i){if(e&&3!==e.nodeType&&8!==e.nodeType&&e.style){var o,a,s,u=b.camelCase(n),l=e.style;if(n=b.cssProps[u]||(b.cssProps[u]=tn(l,u)),s=b.cssHooks[n]||b.cssHooks[u],r===t)return s&&"get"in s&&(o=s.get(e,!1,i))!==t?o:l[n];if(a=typeof r,"string"===a&&(o=Jt.exec(r))&&(r=(o[1]+1)*o[2]+parseFloat(b.css(e,n)),a="number"),!(null==r||"number"===a&&isNaN(r)||("number"!==a||b.cssNumber[u]||(r+="px"),b.support.clearCloneStyle||""!==r||0!==n.indexOf("background")||(l[n]="inherit"),s&&"set"in s&&(r=s.set(e,r,i))===t)))try{l[n]=r}catch(c){}}},css:function(e,n,r,i){var o,a,s,u=b.camelCase(n);return n=b.cssProps[u]||(b.cssProps[u]=tn(e.style,u)),s=b.cssHooks[n]||b.cssHooks[u],s&&"get"in s&&(a=s.get(e,!0,r)),a===t&&(a=Wt(e,n,i)),"normal"===a&&n in Kt&&(a=Kt[n]),""===r||r?(o=parseFloat(a),r===!0||b.isNumeric(o)?o||0:a):a},swap:function(e,t,n,r){var i,o,a={};for(o in t)a[o]=e.style[o],e.style[o]=t[o];i=n.apply(e,r||[]);for(o in t)e.style[o]=a[o];return i}}),e.getComputedStyle?(Rt=function(t){return e.getComputedStyle(t,null)},Wt=function(e,n,r){var i,o,a,s=r||Rt(e),u=s?s.getPropertyValue(n)||s[n]:t,l=e.style;return 
s&&(""!==u||b.contains(e.ownerDocument,e)||(u=b.style(e,n)),Yt.test(u)&&Ut.test(n)&&(i=l.width,o=l.minWidth,a=l.maxWidth,l.minWidth=l.maxWidth=l.width=u,u=s.width,l.width=i,l.minWidth=o,l.maxWidth=a)),u}):o.documentElement.currentStyle&&(Rt=function(e){return e.currentStyle},Wt=function(e,n,r){var i,o,a,s=r||Rt(e),u=s?s[n]:t,l=e.style;return null==u&&l&&l[n]&&(u=l[n]),Yt.test(u)&&!zt.test(n)&&(i=l.left,o=e.runtimeStyle,a=o&&o.left,a&&(o.left=e.currentStyle.left),l.left="fontSize"===n?"1em":u,u=l.pixelLeft+"px",l.left=i,a&&(o.left=a)),""===u?"auto":u});function on(e,t,n){var r=Vt.exec(t);return r?Math.max(0,r[1]-(n||0))+(r[2]||"px"):t}function an(e,t,n,r,i){var o=n===(r?"border":"content")?4:"width"===t?1:0,a=0;for(;4>o;o+=2)"margin"===n&&(a+=b.css(e,n+Zt[o],!0,i)),r?("content"===n&&(a-=b.css(e,"padding"+Zt[o],!0,i)),"margin"!==n&&(a-=b.css(e,"border"+Zt[o]+"Width",!0,i))):(a+=b.css(e,"padding"+Zt[o],!0,i),"padding"!==n&&(a+=b.css(e,"border"+Zt[o]+"Width",!0,i)));return a}function sn(e,t,n){var r=!0,i="width"===t?e.offsetWidth:e.offsetHeight,o=Rt(e),a=b.support.boxSizing&&"border-box"===b.css(e,"boxSizing",!1,o);if(0>=i||null==i){if(i=Wt(e,t,o),(0>i||null==i)&&(i=e.style[t]),Yt.test(i))return i;r=a&&(b.support.boxSizingReliable||i===e.style[t]),i=parseFloat(i)||0}return i+an(e,t,n||(a?"border":"content"),r,o)+"px"}function un(e){var t=o,n=Gt[e];return n||(n=ln(e,t),"none"!==n&&n||(Pt=(Pt||b("<iframe frameborder='0' width='0' height='0'/>").css("cssText","display:block !important")).appendTo(t.documentElement),t=(Pt[0].contentWindow||Pt[0].contentDocument).document,t.write("<!doctype html><html><body>"),t.close(),n=ln(e,t),Pt.detach()),Gt[e]=n),n}function ln(e,t){var n=b(t.createElement(e)).appendTo(t.body),r=b.css(n[0],"display");return n.remove(),r}b.each(["height","width"],function(e,n){b.cssHooks[n]={get:function(e,r,i){return r?0===e.offsetWidth&&Xt.test(b.css(e,"display"))?b.swap(e,Qt,function(){return sn(e,n,i)}):sn(e,n,i):t},set:function(e,t,r){var 
i=r&&Rt(e);return on(e,t,r?an(e,n,r,b.support.boxSizing&&"border-box"===b.css(e,"boxSizing",!1,i),i):0)}}}),b.support.opacity||(b.cssHooks.opacity={get:function(e,t){return It.test((t&&e.currentStyle?e.currentStyle.filter:e.style.filter)||"")?.01*parseFloat(RegExp.$1)+"":t?"1":""},set:function(e,t){var n=e.style,r=e.currentStyle,i=b.isNumeric(t)?"alpha(opacity="+100*t+")":"",o=r&&r.filter||n.filter||"";n.zoom=1,(t>=1||""===t)&&""===b.trim(o.replace($t,""))&&n.removeAttribute&&(n.removeAttribute("filter"),""===t||r&&!r.filter)||(n.filter=$t.test(o)?o.replace($t,i):o+" "+i)}}),b(function(){b.support.reliableMarginRight||(b.cssHooks.marginRight={get:function(e,n){return n?b.swap(e,{display:"inline-block"},Wt,[e,"marginRight"]):t}}),!b.support.pixelPosition&&b.fn.position&&b.each(["top","left"],function(e,n){b.cssHooks[n]={get:function(e,r){return r?(r=Wt(e,n),Yt.test(r)?b(e).position()[n]+"px":r):t}}})}),b.expr&&b.expr.filters&&(b.expr.filters.hidden=function(e){return 0>=e.offsetWidth&&0>=e.offsetHeight||!b.support.reliableHiddenOffsets&&"none"===(e.style&&e.style.display||b.css(e,"display"))},b.expr.filters.visible=function(e){return!b.expr.filters.hidden(e)}),b.each({margin:"",padding:"",border:"Width"},function(e,t){b.cssHooks[e+t]={expand:function(n){var r=0,i={},o="string"==typeof n?n.split(" "):[n];for(;4>r;r++)i[e+Zt[r]+t]=o[r]||o[r-2]||o[0];return i}},Ut.test(e)||(b.cssHooks[e+t].set=on)});var cn=/%20/g,pn=/\[\]$/,fn=/\r?\n/g,dn=/^(?:submit|button|image|reset|file)$/i,hn=/^(?:input|select|textarea|keygen)/i;b.fn.extend({serialize:function(){return b.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var e=b.prop(this,"elements");return e?b.makeArray(e):this}).filter(function(){var e=this.type;return this.name&&!b(this).is(":disabled")&&hn.test(this.nodeName)&&!dn.test(e)&&(this.checked||!Nt.test(e))}).map(function(e,t){var n=b(this).val();return 
null==n?null:b.isArray(n)?b.map(n,function(e){return{name:t.name,value:e.replace(fn,"\r\n")}}):{name:t.name,value:n.replace(fn,"\r\n")}}).get()}}),b.param=function(e,n){var r,i=[],o=function(e,t){t=b.isFunction(t)?t():null==t?"":t,i[i.length]=encodeURIComponent(e)+"="+encodeURIComponent(t)};if(n===t&&(n=b.ajaxSettings&&b.ajaxSettings.traditional),b.isArray(e)||e.jquery&&!b.isPlainObject(e))b.each(e,function(){o(this.name,this.value)});else for(r in e)gn(r,e[r],n,o);return i.join("&").replace(cn,"+")};function gn(e,t,n,r){var i;if(b.isArray(t))b.each(t,function(t,i){n||pn.test(e)?r(e,i):gn(e+"["+("object"==typeof i?t:"")+"]",i,n,r)});else if(n||"object"!==b.type(t))r(e,t);else for(i in t)gn(e+"["+i+"]",t[i],n,r)}b.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error contextmenu".split(" "),function(e,t){b.fn[t]=function(e,n){return arguments.length>0?this.on(t,null,e,n):this.trigger(t)}}),b.fn.hover=function(e,t){return this.mouseenter(e).mouseleave(t||e)};var mn,yn,vn=b.now(),bn=/\?/,xn=/#.*$/,wn=/([?&])_=[^&]*/,Tn=/^(.*?):[ \t]*([^\r\n]*)\r?$/gm,Nn=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,Cn=/^(?:GET|HEAD)$/,kn=/^\/\//,En=/^([\w.+-]+:)(?:\/\/([^\/?#:]*)(?::(\d+)|)|)/,Sn=b.fn.load,An={},jn={},Dn="*/".concat("*");try{yn=a.href}catch(Ln){yn=o.createElement("a"),yn.href="",yn=yn.href}mn=En.exec(yn.toLowerCase())||[];function Hn(e){return function(t,n){"string"!=typeof t&&(n=t,t="*");var r,i=0,o=t.toLowerCase().match(w)||[];if(b.isFunction(n))while(r=o[i++])"+"===r[0]?(r=r.slice(1)||"*",(e[r]=e[r]||[]).unshift(n)):(e[r]=e[r]||[]).push(n)}}function qn(e,n,r,i){var o={},a=e===jn;function s(u){var l;return o[u]=!0,b.each(e[u]||[],function(e,u){var c=u(n,r,i);return"string"!=typeof c||a||o[c]?a?!(l=c):t:(n.dataTypes.unshift(c),s(c),!1)}),l}return s(n.dataTypes[0])||!o["*"]&&s("*")}function Mn(e,n){var 
r,i,o=b.ajaxSettings.flatOptions||{};for(i in n)n[i]!==t&&((o[i]?e:r||(r={}))[i]=n[i]);return r&&b.extend(!0,e,r),e}b.fn.load=function(e,n,r){if("string"!=typeof e&&Sn)return Sn.apply(this,arguments);var i,o,a,s=this,u=e.indexOf(" ");return u>=0&&(i=e.slice(u,e.length),e=e.slice(0,u)),b.isFunction(n)?(r=n,n=t):n&&"object"==typeof n&&(a="POST"),s.length>0&&b.ajax({url:e,type:a,dataType:"html",data:n}).done(function(e){o=arguments,s.html(i?b("<div>").append(b.parseHTML(e)).find(i):e)}).complete(r&&function(e,t){s.each(r,o||[e.responseText,t,e])}),this},b.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){b.fn[t]=function(e){return this.on(t,e)}}),b.each(["get","post"],function(e,n){b[n]=function(e,r,i,o){return b.isFunction(r)&&(o=o||i,i=r,r=t),b.ajax({url:e,type:n,dataType:o,data:r,success:i})}}),b.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:yn,type:"GET",isLocal:Nn.test(mn[1]),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":Dn,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/xml/,html:/html/,json:/json/},responseFields:{xml:"responseXML",text:"responseText"},converters:{"* text":e.String,"text html":!0,"text json":b.parseJSON,"text xml":b.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(e,t){return t?Mn(Mn(e,b.ajaxSettings),t):Mn(b.ajaxSettings,e)},ajaxPrefilter:Hn(An),ajaxTransport:Hn(jn),ajax:function(e,n){"object"==typeof e&&(n=e,e=t),n=n||{};var r,i,o,a,s,u,l,c,p=b.ajaxSetup({},n),f=p.context||p,d=p.context&&(f.nodeType||f.jquery)?b(f):b.event,h=b.Deferred(),g=b.Callbacks("once memory"),m=p.statusCode||{},y={},v={},x=0,T="canceled",N={readyState:0,getResponseHeader:function(e){var t;if(2===x){if(!c){c={};while(t=Tn.exec(a))c[t[1].toLowerCase()]=t[2]}t=c[e.toLowerCase()]}return null==t?null:t},getAllResponseHeaders:function(){return 
2===x?a:null},setRequestHeader:function(e,t){var n=e.toLowerCase();return x||(e=v[n]=v[n]||e,y[e]=t),this},overrideMimeType:function(e){return x||(p.mimeType=e),this},statusCode:function(e){var t;if(e)if(2>x)for(t in e)m[t]=[m[t],e[t]];else N.always(e[N.status]);return this},abort:function(e){var t=e||T;return l&&l.abort(t),k(0,t),this}};if(h.promise(N).complete=g.add,N.success=N.done,N.error=N.fail,p.url=((e||p.url||yn)+"").replace(xn,"").replace(kn,mn[1]+"//"),p.type=n.method||n.type||p.method||p.type,p.dataTypes=b.trim(p.dataType||"*").toLowerCase().match(w)||[""],null==p.crossDomain&&(r=En.exec(p.url.toLowerCase()),p.crossDomain=!(!r||r[1]===mn[1]&&r[2]===mn[2]&&(r[3]||("http:"===r[1]?80:443))==(mn[3]||("http:"===mn[1]?80:443)))),p.data&&p.processData&&"string"!=typeof p.data&&(p.data=b.param(p.data,p.traditional)),qn(An,p,n,N),2===x)return N;u=p.global,u&&0===b.active++&&b.event.trigger("ajaxStart"),p.type=p.type.toUpperCase(),p.hasContent=!Cn.test(p.type),o=p.url,p.hasContent||(p.data&&(o=p.url+=(bn.test(o)?"&":"?")+p.data,delete p.data),p.cache===!1&&(p.url=wn.test(o)?o.replace(wn,"$1_="+vn++):o+(bn.test(o)?"&":"?")+"_="+vn++)),p.ifModified&&(b.lastModified[o]&&N.setRequestHeader("If-Modified-Since",b.lastModified[o]),b.etag[o]&&N.setRequestHeader("If-None-Match",b.etag[o])),(p.data&&p.hasContent&&p.contentType!==!1||n.contentType)&&N.setRequestHeader("Content-Type",p.contentType),N.setRequestHeader("Accept",p.dataTypes[0]&&p.accepts[p.dataTypes[0]]?p.accepts[p.dataTypes[0]]+("*"!==p.dataTypes[0]?", "+Dn+"; q=0.01":""):p.accepts["*"]);for(i in p.headers)N.setRequestHeader(i,p.headers[i]);if(p.beforeSend&&(p.beforeSend.call(f,N,p)===!1||2===x))return N.abort();T="abort";for(i in{success:1,error:1,complete:1})N[i](p[i]);if(l=qn(jn,p,n,N)){N.readyState=1,u&&d.trigger("ajaxSend",[N,p]),p.async&&p.timeout>0&&(s=setTimeout(function(){N.abort("timeout")},p.timeout));try{x=1,l.send(y,k)}catch(C){if(!(2>x))throw C;k(-1,C)}}else k(-1,"No Transport");function 
k(e,n,r,i){var c,y,v,w,T,C=n;2!==x&&(x=2,s&&clearTimeout(s),l=t,a=i||"",N.readyState=e>0?4:0,r&&(w=_n(p,N,r)),e>=200&&300>e||304===e?(p.ifModified&&(T=N.getResponseHeader("Last-Modified"),T&&(b.lastModified[o]=T),T=N.getResponseHeader("etag"),T&&(b.etag[o]=T)),204===e?(c=!0,C="nocontent"):304===e?(c=!0,C="notmodified"):(c=Fn(p,w),C=c.state,y=c.data,v=c.error,c=!v)):(v=C,(e||!C)&&(C="error",0>e&&(e=0))),N.status=e,N.statusText=(n||C)+"",c?h.resolveWith(f,[y,C,N]):h.rejectWith(f,[N,C,v]),N.statusCode(m),m=t,u&&d.trigger(c?"ajaxSuccess":"ajaxError",[N,p,c?y:v]),g.fireWith(f,[N,C]),u&&(d.trigger("ajaxComplete",[N,p]),--b.active||b.event.trigger("ajaxStop")))}return N},getScript:function(e,n){return b.get(e,t,n,"script")},getJSON:function(e,t,n){return b.get(e,t,n,"json")}});function _n(e,n,r){var i,o,a,s,u=e.contents,l=e.dataTypes,c=e.responseFields;for(s in c)s in r&&(n[c[s]]=r[s]);while("*"===l[0])l.shift(),o===t&&(o=e.mimeType||n.getResponseHeader("Content-Type"));if(o)for(s in u)if(u[s]&&u[s].test(o)){l.unshift(s);break}if(l[0]in r)a=l[0];else{for(s in r){if(!l[0]||e.converters[s+" "+l[0]]){a=s;break}i||(i=s)}a=a||i}return a?(a!==l[0]&&l.unshift(a),r[a]):t}function Fn(e,t){var n,r,i,o,a={},s=0,u=e.dataTypes.slice(),l=u[0];if(e.dataFilter&&(t=e.dataFilter(t,e.dataType)),u[1])for(i in e.converters)a[i.toLowerCase()]=e.converters[i];for(;r=u[++s];)if("*"!==r){if("*"!==l&&l!==r){if(i=a[l+" "+r]||a["* "+r],!i)for(n in a)if(o=n.split(" "),o[1]===r&&(i=a[l+" "+o[0]]||a["* "+o[0]])){i===!0?i=a[n]:a[n]!==!0&&(r=o[0],u.splice(s--,0,r));break}if(i!==!0)if(i&&e["throws"])t=i(t);else try{t=i(t)}catch(c){return{state:"parsererror",error:i?c:"No conversion from "+l+" to "+r}}}l=r}return{state:"success",data:t}}b.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/(?:java|ecma)script/},converters:{"text script":function(e){return 
b.globalEval(e),e}}}),b.ajaxPrefilter("script",function(e){e.cache===t&&(e.cache=!1),e.crossDomain&&(e.type="GET",e.global=!1)}),b.ajaxTransport("script",function(e){if(e.crossDomain){var n,r=o.head||b("head")[0]||o.documentElement;return{send:function(t,i){n=o.createElement("script"),n.async=!0,e.scriptCharset&&(n.charset=e.scriptCharset),n.src=e.url,n.onload=n.onreadystatechange=function(e,t){(t||!n.readyState||/loaded|complete/.test(n.readyState))&&(n.onload=n.onreadystatechange=null,n.parentNode&&n.parentNode.removeChild(n),n=null,t||i(200,"success"))},r.insertBefore(n,r.firstChild)},abort:function(){n&&n.onload(t,!0)}}}});var On=[],Bn=/(=)\?(?=&|$)|\?\?/;b.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=On.pop()||b.expando+"_"+vn++;return this[e]=!0,e}}),b.ajaxPrefilter("json jsonp",function(n,r,i){var o,a,s,u=n.jsonp!==!1&&(Bn.test(n.url)?"url":"string"==typeof n.data&&!(n.contentType||"").indexOf("application/x-www-form-urlencoded")&&Bn.test(n.data)&&"data");return u||"jsonp"===n.dataTypes[0]?(o=n.jsonpCallback=b.isFunction(n.jsonpCallback)?n.jsonpCallback():n.jsonpCallback,u?n[u]=n[u].replace(Bn,"$1"+o):n.jsonp!==!1&&(n.url+=(bn.test(n.url)?"&":"?")+n.jsonp+"="+o),n.converters["script json"]=function(){return s||b.error(o+" was not called"),s[0]},n.dataTypes[0]="json",a=e[o],e[o]=function(){s=arguments},i.always(function(){e[o]=a,n[o]&&(n.jsonpCallback=r.jsonpCallback,On.push(o)),s&&b.isFunction(a)&&a(s[0]),s=a=t}),"script"):t});var Pn,Rn,Wn=0,$n=e.ActiveXObject&&function(){var e;for(e in Pn)Pn[e](t,!0)};function In(){try{return new e.XMLHttpRequest}catch(t){}}function zn(){try{return new e.ActiveXObject("Microsoft.XMLHTTP")}catch(t){}}b.ajaxSettings.xhr=e.ActiveXObject?function(){return!this.isLocal&&In()||zn()}:In,Rn=b.ajaxSettings.xhr(),b.support.cors=!!Rn&&"withCredentials"in Rn,Rn=b.support.ajax=!!Rn,Rn&&b.ajaxTransport(function(n){if(!n.crossDomain||b.support.cors){var r;return{send:function(i,o){var 
a,s,u=n.xhr();if(n.username?u.open(n.type,n.url,n.async,n.username,n.password):u.open(n.type,n.url,n.async),n.xhrFields)for(s in n.xhrFields)u[s]=n.xhrFields[s];n.mimeType&&u.overrideMimeType&&u.overrideMimeType(n.mimeType),n.crossDomain||i["X-Requested-With"]||(i["X-Requested-With"]="XMLHttpRequest");try{for(s in i)u.setRequestHeader(s,i[s])}catch(l){}u.send(n.hasContent&&n.data||null),r=function(e,i){var s,l,c,p;try{if(r&&(i||4===u.readyState))if(r=t,a&&(u.onreadystatechange=b.noop,$n&&delete Pn[a]),i)4!==u.readyState&&u.abort();else{p={},s=u.status,l=u.getAllResponseHeaders(),"string"==typeof u.responseText&&(p.text=u.responseText);try{c=u.statusText}catch(f){c=""}s||!n.isLocal||n.crossDomain?1223===s&&(s=204):s=p.text?200:404}}catch(d){i||o(-1,d)}p&&o(s,c,p,l)},n.async?4===u.readyState?setTimeout(r):(a=++Wn,$n&&(Pn||(Pn={},b(e).unload($n)),Pn[a]=r),u.onreadystatechange=r):r()},abort:function(){r&&r(t,!0)}}}});var Xn,Un,Vn=/^(?:toggle|show|hide)$/,Yn=RegExp("^(?:([+-])=|)("+x+")([a-z%]*)$","i"),Jn=/queueHooks$/,Gn=[nr],Qn={"*":[function(e,t){var n,r,i=this.createTween(e,t),o=Yn.exec(t),a=i.cur(),s=+a||0,u=1,l=20;if(o){if(n=+o[2],r=o[3]||(b.cssNumber[e]?"":"px"),"px"!==r&&s){s=b.css(i.elem,e,!0)||n||1;do u=u||".5",s/=u,b.style(i.elem,e,s+r);while(u!==(u=i.cur()/a)&&1!==u&&--l)}i.unit=r,i.start=s,i.end=o[1]?s+(o[1]+1)*n:n}return i}]};function Kn(){return setTimeout(function(){Xn=t}),Xn=b.now()}function Zn(e,t){b.each(t,function(t,n){var r=(Qn[t]||[]).concat(Qn["*"]),i=0,o=r.length;for(;o>i;i++)if(r[i].call(e,t,n))return})}function er(e,t,n){var r,i,o=0,a=Gn.length,s=b.Deferred().always(function(){delete u.elem}),u=function(){if(i)return!1;var t=Xn||Kn(),n=Math.max(0,l.startTime+l.duration-t),r=n/l.duration||0,o=1-r,a=0,u=l.tweens.length;for(;u>a;a++)l.tweens[a].run(o);return 
s.notifyWith(e,[l,o,n]),1>o&&u?n:(s.resolveWith(e,[l]),!1)},l=s.promise({elem:e,props:b.extend({},t),opts:b.extend(!0,{specialEasing:{}},n),originalProperties:t,originalOptions:n,startTime:Xn||Kn(),duration:n.duration,tweens:[],createTween:function(t,n){var r=b.Tween(e,l.opts,t,n,l.opts.specialEasing[t]||l.opts.easing);return l.tweens.push(r),r},stop:function(t){var n=0,r=t?l.tweens.length:0;if(i)return this;for(i=!0;r>n;n++)l.tweens[n].run(1);return t?s.resolveWith(e,[l,t]):s.rejectWith(e,[l,t]),this}}),c=l.props;for(tr(c,l.opts.specialEasing);a>o;o++)if(r=Gn[o].call(l,e,c,l.opts))return r;return Zn(l,c),b.isFunction(l.opts.start)&&l.opts.start.call(e,l),b.fx.timer(b.extend(u,{elem:e,anim:l,queue:l.opts.queue})),l.progress(l.opts.progress).done(l.opts.done,l.opts.complete).fail(l.opts.fail).always(l.opts.always)}function tr(e,t){var n,r,i,o,a;for(i in e)if(r=b.camelCase(i),o=t[r],n=e[i],b.isArray(n)&&(o=n[1],n=e[i]=n[0]),i!==r&&(e[r]=n,delete e[i]),a=b.cssHooks[r],a&&"expand"in a){n=a.expand(n),delete e[r];for(i in n)i in e||(e[i]=n[i],t[i]=o)}else t[r]=o}b.Animation=b.extend(er,{tweener:function(e,t){b.isFunction(e)?(t=e,e=["*"]):e=e.split(" ");var n,r=0,i=e.length;for(;i>r;r++)n=e[r],Qn[n]=Qn[n]||[],Qn[n].unshift(t)},prefilter:function(e,t){t?Gn.unshift(e):Gn.push(e)}});function nr(e,t,n){var r,i,o,a,s,u,l,c,p,f=this,d=e.style,h={},g=[],m=e.nodeType&&nn(e);n.queue||(c=b._queueHooks(e,"fx"),null==c.unqueued&&(c.unqueued=0,p=c.empty.fire,c.empty.fire=function(){c.unqueued||p()}),c.unqueued++,f.always(function(){f.always(function(){c.unqueued--,b.queue(e,"fx").length||c.empty.fire()})})),1===e.nodeType&&("height"in t||"width"in 
t)&&(n.overflow=[d.overflow,d.overflowX,d.overflowY],"inline"===b.css(e,"display")&&"none"===b.css(e,"float")&&(b.support.inlineBlockNeedsLayout&&"inline"!==un(e.nodeName)?d.zoom=1:d.display="inline-block")),n.overflow&&(d.overflow="hidden",b.support.shrinkWrapBlocks||f.always(function(){d.overflow=n.overflow[0],d.overflowX=n.overflow[1],d.overflowY=n.overflow[2]}));for(i in t)if(a=t[i],Vn.exec(a)){if(delete t[i],u=u||"toggle"===a,a===(m?"hide":"show"))continue;g.push(i)}if(o=g.length){s=b._data(e,"fxshow")||b._data(e,"fxshow",{}),"hidden"in s&&(m=s.hidden),u&&(s.hidden=!m),m?b(e).show():f.done(function(){b(e).hide()}),f.done(function(){var t;b._removeData(e,"fxshow");for(t in h)b.style(e,t,h[t])});for(i=0;o>i;i++)r=g[i],l=f.createTween(r,m?s[r]:0),h[r]=s[r]||b.style(e,r),r in s||(s[r]=l.start,m&&(l.end=l.start,l.start="width"===r||"height"===r?1:0))}}function rr(e,t,n,r,i){return new rr.prototype.init(e,t,n,r,i)}b.Tween=rr,rr.prototype={constructor:rr,init:function(e,t,n,r,i,o){this.elem=e,this.prop=n,this.easing=i||"swing",this.options=t,this.start=this.now=this.cur(),this.end=r,this.unit=o||(b.cssNumber[n]?"":"px")},cur:function(){var e=rr.propHooks[this.prop];return e&&e.get?e.get(this):rr.propHooks._default.get(this)},run:function(e){var t,n=rr.propHooks[this.prop];return this.pos=t=this.options.duration?b.easing[this.easing](e,this.options.duration*e,0,1,this.options.duration):e,this.now=(this.end-this.start)*t+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),n&&n.set?n.set(this):rr.propHooks._default.set(this),this}},rr.prototype.init.prototype=rr.prototype,rr.propHooks={_default:{get:function(e){var t;return 
null==e.elem[e.prop]||e.elem.style&&null!=e.elem.style[e.prop]?(t=b.css(e.elem,e.prop,""),t&&"auto"!==t?t:0):e.elem[e.prop]},set:function(e){b.fx.step[e.prop]?b.fx.step[e.prop](e):e.elem.style&&(null!=e.elem.style[b.cssProps[e.prop]]||b.cssHooks[e.prop])?b.style(e.elem,e.prop,e.now+e.unit):e.elem[e.prop]=e.now}}},rr.propHooks.scrollTop=rr.propHooks.scrollLeft={set:function(e){e.elem.nodeType&&e.elem.parentNode&&(e.elem[e.prop]=e.now)}},b.each(["toggle","show","hide"],function(e,t){var n=b.fn[t];b.fn[t]=function(e,r,i){return null==e||"boolean"==typeof e?n.apply(this,arguments):this.animate(ir(t,!0),e,r,i)}}),b.fn.extend({fadeTo:function(e,t,n,r){return this.filter(nn).css("opacity",0).show().end().animate({opacity:t},e,n,r)},animate:function(e,t,n,r){var i=b.isEmptyObject(e),o=b.speed(t,n,r),a=function(){var t=er(this,b.extend({},e),o);a.finish=function(){t.stop(!0)},(i||b._data(this,"finish"))&&t.stop(!0)};return a.finish=a,i||o.queue===!1?this.each(a):this.queue(o.queue,a)},stop:function(e,n,r){var i=function(e){var t=e.stop;delete e.stop,t(r)};return"string"!=typeof e&&(r=n,n=e,e=t),n&&e!==!1&&this.queue(e||"fx",[]),this.each(function(){var t=!0,n=null!=e&&e+"queueHooks",o=b.timers,a=b._data(this);if(n)a[n]&&a[n].stop&&i(a[n]);else for(n in a)a[n]&&a[n].stop&&Jn.test(n)&&i(a[n]);for(n=o.length;n--;)o[n].elem!==this||null!=e&&o[n].queue!==e||(o[n].anim.stop(r),t=!1,o.splice(n,1));(t||!r)&&b.dequeue(this,e)})},finish:function(e){return e!==!1&&(e=e||"fx"),this.each(function(){var t,n=b._data(this),r=n[e+"queue"],i=n[e+"queueHooks"],o=b.timers,a=r?r.length:0;for(n.finish=!0,b.queue(this,e,[]),i&&i.cur&&i.cur.finish&&i.cur.finish.call(this),t=o.length;t--;)o[t].elem===this&&o[t].queue===e&&(o[t].anim.stop(!0),o.splice(t,1));for(t=0;a>t;t++)r[t]&&r[t].finish&&r[t].finish.call(this);delete n.finish})}});function ir(e,t){var n,r={height:e},i=0;for(t=t?1:0;4>i;i+=2-t)n=Zt[i],r["margin"+n]=r["padding"+n]=e;return 
t&&(r.opacity=r.width=e),r}b.each({slideDown:ir("show"),slideUp:ir("hide"),slideToggle:ir("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(e,t){b.fn[e]=function(e,n,r){return this.animate(t,e,n,r)}}),b.speed=function(e,t,n){var r=e&&"object"==typeof e?b.extend({},e):{complete:n||!n&&t||b.isFunction(e)&&e,duration:e,easing:n&&t||t&&!b.isFunction(t)&&t};return r.duration=b.fx.off?0:"number"==typeof r.duration?r.duration:r.duration in b.fx.speeds?b.fx.speeds[r.duration]:b.fx.speeds._default,(null==r.queue||r.queue===!0)&&(r.queue="fx"),r.old=r.complete,r.complete=function(){b.isFunction(r.old)&&r.old.call(this),r.queue&&b.dequeue(this,r.queue)},r},b.easing={linear:function(e){return e},swing:function(e){return.5-Math.cos(e*Math.PI)/2}},b.timers=[],b.fx=rr.prototype.init,b.fx.tick=function(){var e,n=b.timers,r=0;for(Xn=b.now();n.length>r;r++)e=n[r],e()||n[r]!==e||n.splice(r--,1);n.length||b.fx.stop(),Xn=t},b.fx.timer=function(e){e()&&b.timers.push(e)&&b.fx.start()},b.fx.interval=13,b.fx.start=function(){Un||(Un=setInterval(b.fx.tick,b.fx.interval))},b.fx.stop=function(){clearInterval(Un),Un=null},b.fx.speeds={slow:600,fast:200,_default:400},b.fx.step={},b.expr&&b.expr.filters&&(b.expr.filters.animated=function(e){return b.grep(b.timers,function(t){return e===t.elem}).length}),b.fn.offset=function(e){if(arguments.length)return e===t?this:this.each(function(t){b.offset.setOffset(this,e,t)});var n,r,o={top:0,left:0},a=this[0],s=a&&a.ownerDocument;if(s)return n=s.documentElement,b.contains(n,a)?(typeof a.getBoundingClientRect!==i&&(o=a.getBoundingClientRect()),r=or(s),{top:o.top+(r.pageYOffset||n.scrollTop)-(n.clientTop||0),left:o.left+(r.pageXOffset||n.scrollLeft)-(n.clientLeft||0)}):o},b.offset={setOffset:function(e,t,n){var r=b.css(e,"position");"static"===r&&(e.style.position="relative");var 
i=b(e),o=i.offset(),a=b.css(e,"top"),s=b.css(e,"left"),u=("absolute"===r||"fixed"===r)&&b.inArray("auto",[a,s])>-1,l={},c={},p,f;u?(c=i.position(),p=c.top,f=c.left):(p=parseFloat(a)||0,f=parseFloat(s)||0),b.isFunction(t)&&(t=t.call(e,n,o)),null!=t.top&&(l.top=t.top-o.top+p),null!=t.left&&(l.left=t.left-o.left+f),"using"in t?t.using.call(e,l):i.css(l)}},b.fn.extend({position:function(){if(this[0]){var e,t,n={top:0,left:0},r=this[0];return"fixed"===b.css(r,"position")?t=r.getBoundingClientRect():(e=this.offsetParent(),t=this.offset(),b.nodeName(e[0],"html")||(n=e.offset()),n.top+=b.css(e[0],"borderTopWidth",!0),n.left+=b.css(e[0],"borderLeftWidth",!0)),{top:t.top-n.top-b.css(r,"marginTop",!0),left:t.left-n.left-b.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent||o.documentElement;while(e&&!b.nodeName(e,"html")&&"static"===b.css(e,"position"))e=e.offsetParent;return e||o.documentElement})}}),b.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(e,n){var r=/Y/.test(n);b.fn[e]=function(i){return b.access(this,function(e,i,o){var a=or(e);return o===t?a?n in a?a[n]:a.document.documentElement[i]:e[i]:(a?a.scrollTo(r?b(a).scrollLeft():o,r?o:b(a).scrollTop()):e[i]=o,t)},e,i,arguments.length,null)}});function or(e){return b.isWindow(e)?e:9===e.nodeType?e.defaultView||e.parentWindow:!1}b.each({Height:"height",Width:"width"},function(e,n){b.each({padding:"inner"+e,content:n,"":"outer"+e},function(r,i){b.fn[i]=function(i,o){var a=arguments.length&&(r||"boolean"!=typeof i),s=r||(i===!0||o===!0?"margin":"border");return b.access(this,function(n,r,i){var o;return b.isWindow(n)?n.document.documentElement["client"+e]:9===n.nodeType?(o=n.documentElement,Math.max(n.body["scroll"+e],o["scroll"+e],n.body["offset"+e],o["offset"+e],o["client"+e])):i===t?b.css(n,r,s):b.style(n,r,i,s)},n,a?i:t,a,null)}})}),e.jQuery=e.$=b,"function"==typeof define&&define.amd&&define.amd.jQuery&&define("jquery",[],function(){return b})})(window);
\ No newline at end of file
diff --git a/doc/sdk/cli/index.html.textile.liquid b/doc/sdk/cli/index.html.textile.liquid
new file mode 100644 (file)
index 0000000..3d44250
--- /dev/null
@@ -0,0 +1,17 @@
+---
+layout: default
+navsection: sdk
+navmenu: CLI
+title: "Overview"
+
+...
+
+The @arv@ CLI tool provides a set of wrappers to make API calls. Additionally, it provides access to a number of subcommands.
+
+h3. Wrappers for API calls
+
+See the "arv reference":{{site.baseurl}}/sdk/cli/reference.html page.
+
+h3. Subcommands
+
+See the "arv subcommands":{{site.baseurl}}/sdk/cli/subcommands.html page.
diff --git a/doc/sdk/cli/install.html.textile.liquid b/doc/sdk/cli/install.html.textile.liquid
new file mode 100644 (file)
index 0000000..df55077
--- /dev/null
@@ -0,0 +1,40 @@
+---
+layout: default
+navsection: sdk
+navmenu: CLI
+title: "Installation"
+
+...
+
+To use the @arv@ command, you can either install the @arvados-cli@ gem via RubyGems or build and install the package from source.
+
+h4. Prerequisites: Ruby &gt;= 2.1.0 and curl libraries
+
+Make sure you have "Ruby and bundler":{{site.baseurl}}/install/install-manual-prerequisites-ruby.html installed.
+
+Install curl libraries with your system's package manager. For example, on Debian or Ubuntu:
+
+<notextile>
+<pre>
+$ <code class="userinput">sudo apt-get install libcurl3 libcurl3-gnutls libcurl4-openssl-dev</code>
+</pre>
+</notextile>
+
+h4. Option 1: install with RubyGems
+
+<notextile>
+<pre>
+$ <code class="userinput">sudo gem install arvados-cli</code>
+</pre>
+</notextile>
+
+h4. Option 2: build and install from source
+
+<notextile>
+<pre>
+$ <code class="userinput">git clone https://github.com/curoverse/arvados.git</code>
+$ <code class="userinput">cd arvados/sdk/cli</code>
+$ <code class="userinput">gem build arvados-cli.gemspec</code>
+$ <code class="userinput">sudo gem install arvados-cli-*.gem</code>
+</pre>
+</notextile>
diff --git a/doc/sdk/cli/reference.html.textile.liquid b/doc/sdk/cli/reference.html.textile.liquid
new file mode 100644 (file)
index 0000000..bc5cf1e
--- /dev/null
@@ -0,0 +1,80 @@
+---
+layout: default
+navsection: sdk
+navmenu: CLI
+title: "arv reference"
+...
+
+_In order to use the @arv@ command, make sure that you have a "working environment.":{{site.baseurl}}/user/getting_started/check-environment.html_
+
+h3. Usage
+
+@arv [global_options] resource_type resource_method [method_parameters]@
+
+h4. Global options
+
+- @--format=json@ := Output response as JSON. This is the default format.
+
+- @--format=yaml@ := Output response as YAML
+
+- @--format=uuid@ := Output only the UUIDs of object(s) in the API response, one per line.
+
+
+h3. Resource types and methods
+
+Get list of resource types
+@arv --resources@
+
+Get list of resource methods for the "user" resource type
+@arv user --help@
+
+
+h3. Basic examples
+
+Get record for current user
+@arv user current@
+
+Get entire record for some specific user
+@arv user get --uuid 6dnxa-tpzed-iimd25zhzh84gbk@
+
+Update user record
+@arv user update --uuid 6dnxa-tpzed-iimd25zhzh84gbk --first-name "Bob"@
+
+Get list of groups
+@arv group list@
+
+Delete a group
+@arv group delete --uuid 6dnxa-j7d0g-iw7i6n43d37jtog@
+
+
+h3. Common commands
+
+Most @arv@ resources accept the following commands:
+
+* @get@
+* @list@
+* @create@
+* @update@
+* @delete@
+
+
+h4. @list@
+
+Arguments accepted by the @list@ subcommand include:
+
+<pre>
+    --limit, -l <i>:     Maximum number of resources to return.
+   --offset, -o <i>:     Number of users to skip before first returned record.
+  --filters, -f <s>:     Conditions for filtering users.
+    --order, -r <s>:     Order in which to return matching users.
+   --select, -s <s>:     Select which fields to return
+     --distinct, -d:     Return each distinct object
+</pre>
+
+The @--filters@ option takes a string describing a JSON list of filters that the returned resources must satisfy. Each filter is a three-element list of _[field, operator, value]_, where the _operator_ may be one of @=@, @<@, @<=@, @>@, @>=@, @!=@, @like@, or @ilike@.
+
+Example:
+
+@arv collection list --filters '[["name", "=", "PGP VAR inputs"], ["created_at", ">=", "2014-10-01"]]'@
+
+will return a list of all collections visible to the current user which are named "PGP VAR inputs" and were created on or after October 1, 2014.
diff --git a/doc/sdk/cli/subcommands.html.textile.liquid b/doc/sdk/cli/subcommands.html.textile.liquid
new file mode 100644 (file)
index 0000000..5d82f7a
--- /dev/null
@@ -0,0 +1,400 @@
+---
+layout: default
+navsection: sdk
+navmenu: CLI
+title: "arv subcommands"
+
+...
+
+_In order to use the @arv@ command, make sure that you have a "working environment.":{{site.baseurl}}/user/getting_started/check-environment.html_
+
+h3(#arv-create). arv create
+
+@arv create@ can be used to create Arvados objects from the command line. Arv create opens up the editor of your choice (set the EDITOR environment variable) and allows you to type or paste a json or yaml description. When saved the object will be created on the API server, if it passes validation.
+
+<notextile>
+<pre>
+$ <code class="userinput">arv create --help</code>
+Options:
+  --project-uuid, -p &lt;s&gt;:   Project uuid in which to create the object
+              --help, -h:   Show this message
+</pre>
+</notextile>
+
+h3(#arv-edit). arv edit
+
+@arv edit@ can be used to edit Arvados objects from the command line. Arv edit opens up the editor of your choice (set the EDITOR environment variable) with the json or yaml description of the object. Saving the file will update the Arvados object on the API server, if it passes validation.
+
+<notextile>
+<pre>
+$ <code class="userinput">arv edit --help</code>
+Arvados command line client
+Usage: arv edit [uuid] [fields...]
+
+Fetch the specified Arvados object, select the specified fields,
+open an interactive text editor on a text representation (json or
+yaml, use --format) and then update the object.  Will use 'nano'
+by default, customize with the EDITOR or VISUAL environment variable.
+</pre>
+</notextile>
+
+h3(#arv-copy). arv copy
+
+@arv copy@ can be used to copy a pipeline instance, template or collection from one Arvados instance to another. It takes care of copying the object and all its dependencies.
+
+<notextile>
+<pre>
+$ <code class="userinput">arv copy --help</code>
+usage: arv-copy [-h] [-v] [--progress] [--no-progress] [-f] --src
+                SOURCE_ARVADOS --dst DESTINATION_ARVADOS [--recursive]
+                [--no-recursive] [--dst-git-repo DST_GIT_REPO]
+                [--project-uuid PROJECT_UUID] [--retries RETRIES]
+                object_uuid
+
+Copy a pipeline instance, template or collection from one Arvados instance to
+another.
+
+positional arguments:
+  object_uuid           The UUID of the object to be copied.
+
+optional arguments:
+  -h, --help            show this help message and exit
+  -v, --verbose         Verbose output.
+  --progress            Report progress on copying collections. (default)
+  --no-progress         Do not report progress on copying collections.
+  -f, --force           Perform copy even if the object appears to exist at
+                        the remote destination.
+  --src SOURCE_ARVADOS  The name of the source Arvados instance (required).
+                        May be either a pathname to a config file, or the
+                        basename of a file in
+                        $HOME/.config/arvados/instance_name.conf.
+  --dst DESTINATION_ARVADOS
+                        The name of the destination Arvados instance
+                        (required). May be either a pathname to a config file,
+                        or the basename of a file in
+                        $HOME/.config/arvados/instance_name.conf.
+  --recursive           Recursively copy any dependencies for this object.
+                        (default)
+  --no-recursive        Do not copy any dependencies. NOTE: if this option is
+                        given, the copied object will need to be updated
+                        manually in order to be functional.
+  --dst-git-repo DST_GIT_REPO
+                        The name of the destination git repository. Required
+                        when copying a pipeline recursively.
+  --project-uuid PROJECT_UUID
+                        The UUID of the project at the destination to which
+                        the pipeline should be copied.
+  --retries RETRIES     Maximum number of times to retry server requests that
+                        encounter temporary failures (e.g., server down).
+                        Default 3.
+</pre>
+</notextile>
+
+h3(#arv-tag). arv tag
+
+@arv tag@ is used to tag Arvados objects.
+
+<notextile>
+<pre>
+$ <code class="userinput">arv tag --help</code>
+
+Usage:
+arv tag add tag1 [tag2 ...] --object object_uuid1 [object_uuid2...]
+arv tag remove tag1 [tag2 ...] --object object_uuid1 [object_uuid2...]
+arv tag remove --all
+
+  --dry-run, -n:   Don't actually do anything
+  --verbose, -v:   Print some things on stderr
+     --uuid, -u:   Return the UUIDs of the objects in the response, one per
+                   line (default)
+     --json, -j:   Return the entire response received from the API server, as
+                   a JSON object
+    --human, -h:   Return the response received from the API server, as a JSON
+                   object with whitespace added for human consumption
+   --pretty, -p:   Synonym of --human
+     --yaml, -y:   Return the response received from the API server, in YAML
+                   format
+     --help, -e:   Show this message
+</pre>
+</notextile>
+
+
+h3(#arv-ws). arv ws
+
+@arv ws@ provides access to the websockets event stream.
+
+<notextile>
+<pre>
+$ <code class="userinput">arv ws --help</code>
+usage: arv-ws [-h] [-u UUID] [-f FILTERS]
+              [--poll-interval POLL_INTERVAL | --no-poll]
+              [-p PIPELINE | -j JOB]
+
+optional arguments:
+  -h, --help            show this help message and exit
+  -u UUID, --uuid UUID  Filter events on object_uuid
+  -f FILTERS, --filters FILTERS
+                        Arvados query filter to apply to log events (JSON
+                        encoded)
+  --poll-interval POLL_INTERVAL
+                        If websockets is not available, specify the polling
+                        interval, default is every 15 seconds
+  --no-poll             Do not poll if websockets are not available, just fail
+  -p PIPELINE, --pipeline PIPELINE
+                        Supply pipeline uuid, print log output from pipeline
+                        and its jobs
+  -j JOB, --job JOB     Supply job uuid, print log output from jobs
+</pre>
+</notextile>
+
+h3(#arv-keep). arv keep
+
+@arv keep@ provides access to the Keep storage service.
+
+<notextile>
+<pre>
+$ <code class="userinput">arv keep --help</code>
+Usage: arv keep [method] [--parameters]
+Use 'arv keep [method] --help' to get more information about specific methods.
+
+Available methods: ls, get, put, less, check, docker
+</pre>
+</notextile>
+
+h3(#arv-keep-ls). arv keep ls
+
+<notextile>
+<pre>
+$ <code class="userinput">arv keep ls --help</code>
+usage: arv-ls [-h] [--retries RETRIES] [-s] locator
+
+List contents of a manifest
+
+positional arguments:
+  locator            Collection UUID or locator
+
+optional arguments:
+  -h, --help         show this help message and exit
+  --retries RETRIES  Maximum number of times to retry server requests that
+                     encounter temporary failures (e.g., server down). Default
+                     3.
+  -s                 List file sizes, in KiB.
+</pre>
+</notextile>
+
+h3(#arv-keep-get). arv keep get
+
+<notextile>
+<pre>
+$ <code class="userinput">arv keep get --help</code>
+usage: arv-get [-h] [--retries RETRIES]
+               [--progress | --no-progress | --batch-progress]
+               [--hash HASH | --md5sum] [-n] [-r] [-f | --skip-existing]
+               locator [destination]
+
+Copy data from Keep to a local file or pipe.
+
+positional arguments:
+  locator            Collection locator, optionally with a file path or
+                     prefix.
+  destination        Local file or directory where the data is to be written.
+                     Default: /dev/stdout.
+
+optional arguments:
+  -h, --help         show this help message and exit
+  --retries RETRIES  Maximum number of times to retry server requests that
+                     encounter temporary failures (e.g., server down). Default
+                     3.
+  --progress         Display human-readable progress on stderr (bytes and, if
+                     possible, percentage of total data size). This is the
+                     default behavior when it is not expected to interfere
+                     with the output: specifically, stderr is a tty _and_
+                     either stdout is not a tty, or output is being written to
+                     named files rather than stdout.
+  --no-progress      Do not display human-readable progress on stderr.
+  --batch-progress   Display machine-readable progress on stderr (bytes and,
+                     if known, total data size).
+  --hash HASH        Display the hash of each file as it is read from Keep,
+                     using the given hash algorithm. Supported algorithms
+                     include md5, sha1, sha224, sha256, sha384, and sha512.
+  --md5sum           Display the MD5 hash of each file as it is read from
+                     Keep.
+  -n                 Do not write any data -- just read from Keep, and report
+                     md5sums if requested.
+  -r                 Retrieve all files in the specified collection/prefix.
+                     This is the default behavior if the "locator" argument
+                     ends with a forward slash.
+  -f                 Overwrite existing files while writing. The default
+                     behavior is to refuse to write *anything* if any of the
+                     output files already exist. As a special case, -f is not
+                     needed to write to /dev/stdout.
+  --skip-existing    Skip files that already exist. The default behavior is to
+                     refuse to write *anything* if any files exist that would
+                     have to be overwritten. This option causes even devices,
+                     sockets, and fifos to be skipped.
+</pre>
+</notextile>
+
+h3(#arv-keep-put). arv keep put
+
+<notextile>
+<pre>
+$ <code class="userinput">arv keep put --help</code>
+usage: arv-put [-h] [--max-manifest-depth N | --normalize]
+               [--as-stream | --stream | --as-manifest | --in-manifest | --manifest | --as-raw | --raw]
+               [--use-filename FILENAME] [--filename FILENAME]
+               [--portable-data-hash] [--project-uuid UUID] [--name NAME]
+               [--progress | --no-progress | --batch-progress]
+               [--resume | --no-resume] [--retries RETRIES]
+               [path [path ...]]
+
+Copy data from the local filesystem to Keep.
+
+positional arguments:
+  path                  Local file or directory. Default: read from standard
+                        input.
+
+optional arguments:
+  -h, --help            show this help message and exit
+  --max-manifest-depth N
+                        Maximum depth of directory tree to represent in the
+                        manifest structure. A directory structure deeper than
+                        this will be represented as a single stream in the
+                        manifest. If N=0, the manifest will contain a single
+                        stream. Default: -1 (unlimited), i.e., exactly one
+                        manifest stream per filesystem directory that contains
+                        files.
+  --normalize           Normalize the manifest by re-ordering files and
+                        streams after writing data.
+  --as-stream           Synonym for --stream.
+  --stream              Store the file content and display the resulting
+                        manifest on stdout. Do not write the manifest to Keep
+                        or save a Collection object in Arvados.
+  --as-manifest         Synonym for --manifest.
+  --in-manifest         Synonym for --manifest.
+  --manifest            Store the file data and resulting manifest in Keep,
+                        save a Collection object in Arvados, and display the
+                        manifest locator (Collection uuid) on stdout. This is
+                        the default behavior.
+  --as-raw              Synonym for --raw.
+  --raw                 Store the file content and display the data block
+                        locators on stdout, separated by commas, with a
+                        trailing newline. Do not store a manifest.
+  --use-filename FILENAME
+                        Synonym for --filename.
+  --filename FILENAME   Use the given filename in the manifest, instead of the
+                        name of the local file. This is useful when "-" or
+                        "/dev/stdin" is given as an input file. It can be used
+                        only if there is exactly one path given and it is not
+                        a directory. Implies --manifest.
+  --portable-data-hash  Print the portable data hash instead of the Arvados
+                        UUID for the collection created by the upload.
+  --project-uuid UUID   Store the collection in the specified project, instead
+                        of your Home project.
+  --name NAME           Save the collection with the specified name.
+  --progress            Display human-readable progress on stderr (bytes and,
+                        if possible, percentage of total data size). This is
+                        the default behavior when stderr is a tty.
+  --no-progress         Do not display human-readable progress on stderr, even
+                        if stderr is a tty.
+  --batch-progress      Display machine-readable progress on stderr (bytes
+                        and, if known, total data size).
+  --resume              Continue interrupted uploads from cached state
+                        (default).
+  --no-resume           Do not continue interrupted uploads from cached state.
+  --retries RETRIES     Maximum number of times to retry server requests that
+                        encounter temporary failures (e.g., server down).
+                        Default 3.
+</pre>
+</notextile>
+
+
+h3(#arv-pipeline-run). arv pipeline run
+
+@arv pipeline run@ can be used to start a pipeline run from the command line.
+
+The User Guide has a page with a bit more information on "using arv pipeline run":{{site.baseurl}}/user/topics/running-pipeline-command-line.html.
+
+<notextile>
+<pre>
+$ <code class="userinput">arv pipeline run --help</code>
+Options:
+        --dry-run, -n:   Do not start any new jobs or wait for existing jobs to
+                         finish. Just find out whether jobs are finished,
+                         queued, or running for each component.
+    --status-text &lt;s&gt;:   Store plain text status in given file. (Default:
+                         /dev/stdout)
+    --status-json &lt;s&gt;:   Store json-formatted pipeline in given file. (Default:
+                         /dev/null)
+            --no-wait:   Do not wait for jobs to finish. Just look up status,
+                         submit new jobs if needed, and exit.
+           --no-reuse:   Do not reuse existing jobs to satisfy pipeline
+                         components. Submit a new job for every component.
+          --debug, -d:   Print extra debugging information on stderr.
+    --debug-level &lt;i&gt;:   Set debug verbosity level.
+       --template &lt;s&gt;:   UUID of pipeline template, or path to local pipeline
+                         template file.
+       --instance &lt;s&gt;:   UUID of pipeline instance.
+             --submit:   Submit the pipeline instance to the server, and exit.
+                         Let the Crunch dispatch service satisfy the components
+                         by finding/running jobs.
+  --run-pipeline-here:   Manage the pipeline instance in-process. Submit jobs
+                         to Crunch as needed. Do not exit until the pipeline
+                         finishes (or fails).
+      --run-jobs-here:   Run jobs in the local terminal session instead of
+                         submitting them to Crunch. Implies
+                         --run-pipeline-here. Note: this results in a
+                         significantly different job execution environment, and
+                         some Crunch features are not supported. It can be
+                         necessary to modify a pipeline in order to make it run
+                         this way.
+           --run-here:   Synonym for --run-jobs-here.
+    --description &lt;s&gt;:   Description for the pipeline instance.
+        --version, -v:   Print version and exit
+           --help, -h:   Show this message
+</pre>
+</notextile>
+
+h3(#arv-run). arv run
+
+The @arv-run@ command creates Arvados pipelines at the command line that fan out to multiple concurrent tasks across Arvados compute nodes.
+
+The User Guide has a page on "using arv-run":{{site.baseurl}}/user/topics/arv-run.html.
+
+<notextile>
+<pre>
+$ <code class="userinput">arv run --help</code>
+usage: arv-run [-h] [--retries RETRIES] [--dry-run] [--local]
+               [--docker-image DOCKER_IMAGE] [--ignore-rcode] [--no-reuse]
+               [--no-wait] [--project-uuid PROJECT_UUID] [--git-dir GIT_DIR]
+               [--repository REPOSITORY] [--script-version SCRIPT_VERSION]
+               ...
+
+positional arguments:
+  args
+
+optional arguments:
+  -h, --help            show this help message and exit
+  --retries RETRIES     Maximum number of times to retry server requests that
+                        encounter temporary failures (e.g., server down).
+                        Default 3.
+  --dry-run             Print out the pipeline that would be submitted and
+                        exit
+  --local               Run locally using arv-run-pipeline-instance
+  --docker-image DOCKER_IMAGE
+                        Docker image to use, default arvados/jobs
+  --ignore-rcode        Commands that return non-zero return codes should not
+                        be considered failed.
+  --no-reuse            Do not reuse past jobs.
+  --no-wait             Do not wait and display logs after submitting command,
+                        just exit.
+  --project-uuid PROJECT_UUID
+                        Parent project of the pipeline
+  --git-dir GIT_DIR     Git repository passed to arv-crunch-job when using
+                        --local
+  --repository REPOSITORY
+                        repository field of component, default 'arvados'
+  --script-version SCRIPT_VERSION
+                        script_version field of component, default 'master'
+</pre>
+</notextile>
diff --git a/doc/sdk/go/index.html.textile.liquid b/doc/sdk/go/index.html.textile.liquid
new file mode 100644 (file)
index 0000000..58446a9
--- /dev/null
@@ -0,0 +1,25 @@
+---
+layout: default
+navsection: sdk
+navmenu: Go
+title: "Go SDK"
+
+...
+
+The Go ("Golang":http://golang.org) SDK provides a generic set of wrappers so you can make API calls easily.
+
+h3. Installation
+
+You don't need to install anything. Just import the client like this. The go tools will fetch the relevant code and dependencies for you.
+
+<notextile>{% code 'example_sdk_go_imports' as go %}</notextile>
+
+If you need pre-release client code, you can use the latest version from the repo by following "these instructions.":https://arvados.org/projects/arvados/wiki/Go#Using-Go-with-Arvados
+
+h3. Example
+
+You can save this source as a .go file and run it:
+
+<notextile>{% code 'example_sdk_go' as go %}</notextile>
+
+A few more usage examples can be found in the "services/keepproxy":https://arvados.org/projects/arvados/repository/revisions/master/show/services/keepproxy and "sdk/go/keepclient":https://arvados.org/projects/arvados/repository/revisions/master/show/sdk/go/keepclient directories in the arvados source tree.
diff --git a/doc/sdk/index.html.textile.liquid b/doc/sdk/index.html.textile.liquid
new file mode 100644 (file)
index 0000000..db5d6f1
--- /dev/null
@@ -0,0 +1,19 @@
+---
+layout: default
+navsection: sdk
+title: "Arvados SDK Reference"
+...
+
+This section documents how to access the Arvados API and Keep using various programming languages.
+
+* "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html
+* "Perl SDK":{{site.baseurl}}/sdk/perl/index.html
+* "Ruby SDK":{{site.baseurl}}/sdk/ruby/index.html
+* "Java SDK":{{site.baseurl}}/sdk/java/index.html
+* "Go SDK":{{site.baseurl}}/sdk/go/index.html
+* "Command line SDK":{{site.baseurl}}/sdk/cli/index.html ("arv")
+
+SDKs not yet implemented:
+
+* Rails SDK: Workbench uses an ActiveRecord-like interface to Arvados. This hasn't yet been extracted from Workbench and packaged as a gem.
+* R: We plan to support this, but it has not been implemented yet.
diff --git a/doc/sdk/java/index.html.textile.liquid b/doc/sdk/java/index.html.textile.liquid
new file mode 100644 (file)
index 0000000..11b1172
--- /dev/null
@@ -0,0 +1,140 @@
+---
+layout: default
+navsection: sdk
+navmenu: Java
+title: "Java SDK"
+
+...
+
+The Java SDK provides a generic set of wrappers so you can make API calls in java.
+
+h3. Introduction
+
+* The Java SDK requires Java 6 or later
+  
+* The Java SDK is implemented as a maven project. Hence, you would need a working
+maven environment to be able to build the source code. If you do not have maven setup,
+you may find the "Maven in 5 Minutes":http://maven.apache.org/guides/getting-started/maven-in-five-minutes.html link useful. 
+
+* In this document $ARVADOS_HOME is used to refer to the directory where
+arvados code is cloned in your system. For ex: $ARVADOS_HOME = $HOME/arvados
+
+
+h3. Setting up the environment
+
+* The SDK requires a running Arvados API server. The following information
+         about the API server needs to be passed to the SDK using environment
+         variables or during the construction of the Arvados instance.
+
+<notextile>
+<pre>
+ARVADOS_API_TOKEN: API client token to be used to authorize with API server.
+
+ARVADOS_API_HOST: Host name of the API server.
+
+ARVADOS_API_HOST_INSECURE: Set this to true if you are using self-signed
+    certificates and would like to bypass certificate validations.
+</pre>
+</notextile>
+
+* Please see "api-tokens":{{site.baseurl}}/user/reference/api-tokens.html for full details.
+         
+
+h3. Building the Arvados SDK
+
+<notextile>
+<pre>
+$ <code class="userinput">cd $ARVADOS_HOME/sdk/java</code>
+
+$ <code class="userinput">mvn -Dmaven.test.skip=true clean package</code>
+  This will generate arvados sdk jar file in the target directory
+</pre>
+</notextile>
+
+
+h3. Implementing your code to use SDK
+
+* The following two sample programs serve as sample implementations using the SDK.
+<code class="userinput">$ARVADOS_HOME/sdk/java/ArvadosSDKJavaExample.java</code> is a simple program
+        that makes a few calls to the API server.
+<code class="userinput">$ARVADOS_HOME/sdk/java/ArvadosSDKJavaExampleWithPrompt.java</code> can be
+        used to make calls to API server interactively.
+
+Please use these implementations to see how you would want to use the SDK from your Java program.
+
+Also, refer to <code class="userinput">$ARVADOS_HOME/arvados/sdk/java/src/test/java/org/arvados/sdk/java/ArvadosTest.java</code>
+for more sample API invocation examples.
+
+Below are the steps to compile and run these Java programs.
+
+* These programs create an instance of Arvados SDK class and use it to
+make various <code class="userinput">call</code> requests.
+
+* To compile the examples
+<notextile>
+<pre>
+$ <code class="userinput">javac -cp $ARVADOS_HOME/sdk/java/target/arvados-sdk-1.0-jar-with-dependencies.jar \
+ArvadosSDKJavaExample*.java</code>
+This results in the generation of the ArvadosSDKJavaExample*.class files
+in the same directory as the java files
+</pre>
+</notextile>
+
+* To run the samples
+<notextile>
+<pre>
+$ <code class="userinput">java -cp .:$ARVADOS_HOME/sdk/java/target/arvados-sdk-1.0-jar-with-dependencies.jar \
+ArvadosSDKJavaExample</code>
+$ <code class="userinput">java -cp .:$ARVADOS_HOME/sdk/java/target/arvados-sdk-1.0-jar-with-dependencies.jar \
+ArvadosSDKJavaExampleWithPrompt</code>
+</pre>
+</notextile>
+
+
+h3. Viewing and Managing SDK logging
+
+* SDK uses log4j logging
+
+* The default location of the log file is
+  <code class="userinput">$ARVADOS_HOME/sdk/java/log/arvados_sdk_java.log</code>
+
+* Update <code class="userinput">log4j.properties</code> file to change name and location of the log file.
+
+<notextile>
+<pre>
+$ <code class="userinput">nano $ARVADOS_HOME/sdk/java/src/main/resources/log4j.properties</code>
+and modify the <code class="userinput">log4j.appender.fileAppender.File</code> property as needed.
+
+Rebuild the SDK:
+$ <code class="userinput">mvn -Dmaven.test.skip=true clean package</code>
+</pre>
+</notextile>
+
+
+h3. Using the SDK in eclipse
+
+* To develop in eclipse, you can use the provided <code class="userinput">eclipse project</code>
+
+* Install "m2eclipse":https://www.eclipse.org/m2e/ plugin in your eclipse
+
+* Set <code class="userinput">M2_REPO</code> classpath variable in eclipse to point to your local repository.
+The local repository is usually located in your home directory at <code class="userinput">$HOME/.m2/repository</code>.
+
+<notextile>
+<pre>
+In Eclipse IDE:
+Window -> Preferences -> Java -> Build Path -> Classpath Variables
+    Click on the "New..." button and add a new 
+    M2_REPO variable and set it to your local Maven repository
+</pre>
+</notextile>
+
+
+* Open the SDK project in eclipse
+<notextile>
+<pre>
+In Eclipse IDE:
+File -> Import -> Existing Projects into Workspace -> Next -> Browse
+    and select $ARVADOS_HOME/sdk/java
+</pre>
+</notextile>
diff --git a/doc/sdk/perl/index.html.textile.liquid b/doc/sdk/perl/index.html.textile.liquid
new file mode 100644 (file)
index 0000000..448cbb1
--- /dev/null
@@ -0,0 +1,99 @@
+---
+layout: default
+navsection: sdk
+navmenu: Perl
+title: "Perl SDK"
+
+...
+
+The Perl SDK provides a generic set of wrappers so you can make API calls easily.
+
+It should be treated as alpha/experimental. Currently, limitations include:
+* Verbose syntax.
+* No native Keep client.
+* No CPAN package.
+
+h3. Installation
+
+<notextile>
+<pre>
+$ <code class="userinput">sudo apt-get install libjson-perl libio-socket-ssl-perl libwww-perl libipc-system-simple-perl</code>
+$ <code class="userinput">git clone https://github.com/curoverse/arvados.git</code>
+$ <code class="userinput">cd arvados/sdk/perl</code>
+$ <code class="userinput">perl Makefile.PL</code>
+$ <code class="userinput">sudo make install</code>
+</pre>
+</notextile>
+
+h4. Test installation
+
+If the SDK is installed, @perl -MArvados -e ''@ should produce no errors.
+
+If your @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ environment variables are set up correctly (see "api-tokens":{{site.baseurl}}/user/reference/api-tokens.html for details), the following test script should work:
+
+<notextile>
+<pre>$ <code class="userinput">perl &lt;&lt;'EOF'
+use Arvados;
+my $arv = Arvados-&gt;new('apiVersion' => 'v1');
+my $me = $arv-&gt;{'users'}-&gt;{'current'}-&gt;execute;
+print ("arvados.v1.users.current.full_name = '", $me-&gt;{'full_name'}, "'\n");
+EOF</code>
+arvados.v1.users.current.full_name = 'Your Name'
+</pre>
+</notextile>
+
+h3. Examples
+
+Set up an API client user agent:
+
+<notextile>
+<pre><code class="userinput">my $arv = Arvados->new('apiVersion' => 'v1');
+</code></pre>
+</notextile>
+
+Get the User object for the current user:
+
+<notextile>
+<pre><code class="userinput">my $current_user = $arv->{'users'}->{'current'}->execute;
+</code></pre>
+</notextile>
+
+Get the UUID of an object that was retrieved using the SDK:
+
+<notextile>
+<pre><code class="userinput">my $current_user_uuid = $current_user->{'uuid'}
+</code></pre>
+</notextile>
+
+Retrieve an object by ID:
+
+<notextile>
+<pre><code class="userinput">my $some_user = $arv->{'users'}->{'get'}->execute('uuid' => $current_user_uuid);
+</code></pre>
+</notextile>
+
+Create an object:
+
+<notextile>
+<pre><code class="userinput">my $test_link = $arv->{'links'}->{'create'}->execute('link' => { 'link_class' => 'test', 'name' => 'test' });
+</code></pre>
+</notextile>
+
+Update an object:
+
+<notextile>
+<pre><code class="userinput">my $test_link = $arv->{'links'}->{'update'}->execute(
+        'uuid' => $test_link->{'uuid'},
+        'link' => { 'properties' => { 'foo' => 'bar' } });
+</code></pre>
+</notextile>
+
+Get a list of objects:
+
+<notextile>
+<pre><code class="userinput">my $repos = $arv->{'repositories'}->{'list'}->execute;
+print ("UUID of first repo returned is ", $repos->{'items'}->[0]->{'uuid'}, "\n");
+</code></pre>
+</notextile>
+
+The SDK retrieves the list of API methods from the server at run time. Therefore, the set of available methods is determined by the server version rather than the SDK version.
diff --git a/doc/sdk/python/crunch-utility-libraries.html.textile.liquid b/doc/sdk/python/crunch-utility-libraries.html.textile.liquid
new file mode 100644 (file)
index 0000000..e7f3603
--- /dev/null
@@ -0,0 +1,223 @@
+---
+layout: default
+navsection: sdk
+navmenu: Python
+title: "Crunch utility libraries"
+
+...
+
+Several utility libraries are included with Arvados. They are intended to make it quicker and easier to write your own crunch scripts.
+
+* "Python SDK extras":#pythonsdk
+* "Toolkit wrappers":#toolkit_wrappers
+
+h2(#pythonsdk). Python SDK extras
+
+The Python SDK adds some convenience features that are particularly useful in crunch scripts, in addition to the standard set of API calls.
+
+In a crunch job, the environment variables @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ will be set up so the job has the privileges of the user who submitted the job.
+
+<pre>
+import arvados
+
+my_user = arvados.api().users().current().execute()
+my_uuid = my_user['uuid']
+</pre>
+
+h3. Get the current job and task parameters
+
+@arvados.current_job()@ and @arvados.current_task()@ are convenient ways to retrieve the current Job and Task, using the @JOB_UUID@ and @TASK_UUID@ environment variables provided to each crunch task process.
+
+<pre>
+this_job = arvados.current_job()
+this_task = arvados.current_task()
+this_job_input = this_job['script_parameters']['input']
+this_task_input = this_task['parameters']['input']
+</pre>
+
+h3(#one_task_per_input). Queue a task for each input file
+
+A common pattern for a crunch job is to run one task to scan the input, and one task per input file to do the work.
+
+The @one_task_per_input_file()@ function implements this pattern. Pseudocode:
+
+<pre>
+if this is the job's first (default) task:
+    for each file in the 'input' collection:
+        queue a new task, with parameters['input'] = file
+    exit
+else:
+    return
+</pre>
+
+Usage:
+
+<pre>
+import arvados
+arvados.job_setup.one_task_per_input_file(if_sequence=0, and_end_task=True)
+
+# Now do the work on a single file
+my_input = this_task['parameters']['input']
+</pre>
+
+h3. Set the current task's output and success flag
+
+Each task in a crunch job must make an API call to record its output and set its @success@ attribute to True. The object returned by @current_task()@ has a @set_output()@ method to make the process more succinct.
+
+<pre>
+arvados.current_task().set_output(my_output_locator)
+</pre>
+
+h3. arvados_ipc.py
+
+Manage child processes and FIFOs (pipes).
+
+
+This module makes it easier to check the exit status of every child process you start, and close the unused end of each FIFO at the appropriate time.
+
+<pre>
+from arvados_ipc import *
+
+children = {}
+pipes = {}
+
+pipe_setup(pipes, 'hellopipe')
+if 0 == named_fork(children, 'child_a'):
+    pipe_closeallbut(pipes, ('hellopipe', 'w'))
+    os.write(pipes['hellopipe', 'w'], "Hello, parent.")
+    os._exit(0)
+
+pipe_closeallbut(pipes, ('hellopipe', 'r'))
+with os.fdopen(pipes['hellopipe', 'r'], 'rb') as f:
+    message = f.read()
+    sys.stderr.write("Child says: " + message + "\n")
+
+if not waitpid_and_check_children(children):
+    raise Exception("Child process exited non-zero.")
+</pre>
+
+The "crunch scripts" included with Arvados include some more examples of using the arvados_ipc module.
+
+h2(#toolkit_wrappers). Toolkit wrappers
+
+The following *arvados-&lowast;.py* modules provide "extract, build, run" helpers to make it easy to incorporate common analysis tools in your crunch scripts.
+
+h3. arvados_bwa.py
+
+Build and run the "bwa":http://bio-bwa.sourceforge.net/bwa.shtml program.
+
+The module retrieves the bwa source code from Keep, using the job's @bwa_tbz@ parameter.
+
+<pre>
+import arvados_bwa
+arvados_bwa.run('aln', [ref_basename, '-'],
+                stdin=open(fastq_filename,'rb'),
+                stdout=open(aln_filename,'wb'))
+</pre>
+
+On qr1hi.arvadosapi.com, the source distribution @bwa-0.7.5a.tar.bz2@ is available in the collection @8b6e2c4916133e1d859c9e812861ce13+70@.
+
+<pre>
+{
+ "script_parameters":{
+  "bwa_tbz":"8b6e2c4916133e1d859c9e812861ce13+70",
+  ...
+ },
+ ...
+}
+</pre>
+
+h3. arvados_gatk2.py
+
+Extract and run the "Genome Analysis Toolkit":http://www.broadinstitute.org/gatk/ programs.
+
+The module retrieves the binary distribution tarball from Keep, using the job's @gatk_tbz@ parameter.
+
+<pre>
+arvados_gatk2.run(
+    args=[
+        '-nct', 8,
+        '-T', 'BaseRecalibrator',
+        '-R', ref_fasta_files[0],
+        '-I', input_bam_files[0],
+        '-o', recal_file,
+        ])
+</pre>
+
+On qr1hi.arvadosapi.com, the binary distribution @GenomeAnalysisTK-2.6-4.tar.bz2@ is available in the collection @5790482512cf6d5d6dfd50b7fd61e1d1+86@.
+
+The GATK data bundle is available in the collection @d237a90bae3870b3b033aea1e99de4a9+10820@.
+
+<pre>
+{
+ "script_parameters":{
+  "gatk_tbz":"5790482512cf6d5d6dfd50b7fd61e1d1+86",
+  "gatk_bundle":"d237a90bae3870b3b033aea1e99de4a9+10820",
+  ...
+ },
+ ...
+}
+</pre>
+
+h3. arvados_samtools.py
+
+Build and run the "samtools":http://samtools.sourceforge.net/samtools.shtml program.
+
+
+The module retrieves the samtools source code from Keep, using the job's @samtools_tgz@ parameter.
+
+<pre>
+import arvados_samtools
+arvados_samtools.run('view', ['-S', '-b', '-'],
+                     stdin=open(sam_filename,'rb'),
+                     stdout=open(bam_filename,'wb'))
+</pre>
+
+On qr1hi.arvadosapi.com, the source distribution @samtools-0.1.19.tar.gz@ is available in the collection @c777e23cf13e5d5906abfdc08d84bfdb+74@.
+
+<pre>
+{
+ "script_parameters":{
+  "samtools_tgz":"c777e23cf13e5d5906abfdc08d84bfdb+74",
+  ...
+ },
+ ...
+}
+</pre>
+
+
+h3. arvados_picard.py
+
+Build and run the "picard":http://picard.sourceforge.net/command-line-overview.shtml program.
+
+
+The module retrieves the picard binary distribution from Keep, using the job's @picard_zip@ parameter.
+
+<pre>
+import arvados_picard
+arvados_picard.run(
+    'FixMateInformation',
+    params={
+        'i': input_bam_path,
+        'o': '/dev/stdout',
+        'quiet': 'true',
+        'so': 'coordinate',
+        'validation_stringency': 'LENIENT',
+        'compression_level': 0
+        },
+    stdout=open('out.bam','wb'))
+</pre>
+
+On qr1hi.arvadosapi.com, the binary distribution @picard-tools-1.82.zip@ is available in the collection @687f74675c6a0e925dec619cc2bec25f+77@.
+
+<pre>
+{
+ "script_parameters":{
+  "picard_zip":"687f74675c6a0e925dec619cc2bec25f+77",
+  ...
+ },
+ ...
+}
+</pre>
+
+
diff --git a/doc/sdk/python/python.html.textile.liquid b/doc/sdk/python/python.html.textile.liquid
new file mode 100644 (file)
index 0000000..26fbc88
--- /dev/null
@@ -0,0 +1,10 @@
+---
+layout: default
+navsection: sdk
+navmenu: Python
+title: "PyDoc Reference"
+
+no_nav_left: true
+...
+
+notextile. <iframe src="arvados/" style="width:100%; height:100%; border:none"></iframe>
diff --git a/doc/sdk/python/sdk-python.html.textile.liquid b/doc/sdk/python/sdk-python.html.textile.liquid
new file mode 100644 (file)
index 0000000..ead804e
--- /dev/null
@@ -0,0 +1,163 @@
+---
+layout: default
+navsection: sdk
+navmenu: Python
+title: "Python SDK"
+
+...
+
+The Python SDK provides a generic set of wrappers so you can make API calls easily. It performs some validation before connecting to the API server: for example, it refuses to do an API call if a required parameter is missing.
+
+The library also includes some conveniences for use in Crunch scripts; see "Crunch utility libraries":crunch-utility-libraries.html for details.
+
+h3. Installation
+
+If you are logged in to an Arvados VM, the Python SDK should be installed.
+
+To use the Python SDK elsewhere, you can either install the Python SDK via PyPI or build and install the package using the arvados source tree.
+
+{% include 'notebox_begin' %}
+The Python SDK requires Python 2.7.
+{% include 'notebox_end' %}
+
+h4. Option 1: install with PyPI
+
+<notextile>
+<pre>
+$ <code class="userinput">sudo apt-get install python-pip python-dev libattr1-dev libfuse-dev pkg-config python-yaml</code>
+$ <code class="userinput">sudo pip install arvados-python-client</code>
+</pre>
+</notextile>
+
+_If your version of @pip@ is 1.4 or newer, the @pip install@ command might give an error: "Could not find a version that satisfies the requirement arvados-python-client". If this happens, fix it by adding a @--pre@ flag:_
+
+<notextile>
+<pre>
+$ <code class="userinput">sudo pip install --pre arvados-python-client</code>
+</pre>
+</notextile>
+
+h4. Option 2: install from distribution packages (Debian/Ubuntu only)
+
+First add @http://apt.arvados.org@ to your list of apt repositories:
+
+<notextile>
+<pre>
+$ <code class="userinput">echo "deb http://apt.arvados.org/ wheezy main" | sudo tee /etc/apt/sources.list.d/apt.arvados.org.list</code>
+</pre>
+</notextile>
+
+Then install the package:
+
+<notextile>
+<pre>
+$ <code class="userinput">sudo apt-get update</code>
+$ <code class="userinput">sudo apt-get install python-arvados-python-client</code>
+</pre>
+</notextile>
+
+h4. Option 3: build and install from source
+
+<notextile>
+<pre>
+~$ <code class="userinput">sudo apt-get install python-dev libattr1-dev libfuse-dev pkg-config</code>
+~$ <code class="userinput">git clone https://github.com/curoverse/arvados.git</code>
+~$ <code class="userinput">cd arvados/sdk/python</code>
+~/arvados/sdk/python$ <code class="userinput">sudo python setup.py install</code>
+</pre>
+</notextile>
+
+h4. Test installation
+
+If the SDK is installed and your @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ environment variables are set up correctly (see "api-tokens":{{site.baseurl}}/user/reference/api-tokens.html for details), @import arvados@ should produce no errors:
+
+<notextile>
+<pre>$ <code class="userinput">python</code>
+Python 2.7.4 (default, Sep 26 2013, 03:20:26) 
+[GCC 4.7.3] on linux2
+Type "help", "copyright", "credits" or "license" for more information.
+>>> <code class="userinput">import arvados</code>
+>>> <code class="userinput">arvados.api('v1')</code>
+&lt;apiclient.discovery.Resource object at 0x233bb50&gt;
+</pre>
+</notextile>
+
+h3. Examples
+
+Get the User object for the current user:
+
+<notextile>
+<pre><code class="userinput">current_user = arvados.api('v1').users().current().execute()
+</code></pre>
+</notextile>
+
+Get the UUID of an object that was retrieved using the SDK:
+
+<notextile>
+<pre><code class="userinput">my_uuid = current_user['uuid']
+</code></pre>
+</notextile>
+
+Retrieve an object by ID:
+
+<notextile>
+<pre><code class="userinput">some_user = arvados.api('v1').users().get(uuid=my_uuid).execute()
+</code></pre>
+</notextile>
+
+Create an object:
+
+<notextile>
+<pre><code class="userinput">test_link = arvados.api('v1').links().create(
+    body={'link_class':'test','name':'test'}).execute()
+</code></pre>
+</notextile>
+
+Update an object:
+
+<notextile>
+<pre><code class="userinput">arvados.api('v1').links().update(
+    uuid=test_link['uuid'],
+    body={'properties':{'foo':'bar'}}).execute()
+</code></pre>
+</notextile>
+
+Get a list of objects:
+
+<notextile>
+<pre><code class="userinput">repos = arvados.api('v1').repositories().list().execute()
+len(repos['items'])</code>
+2
+<code class="userinput">repos['items'][0]['uuid']</code>
+u'qr1hi-s0uqq-kg8cawglrf74bmw'
+</pre>
+</notextile>
+
+h3. Notes
+
+The general form of an API call is:
+
+<notextile>
+<pre><code class="userinput">arvados.api(<i>api_version</i>).<i>plural_resource_type</i>().<i>api_method</i>(<i>parameter</i>=<i>value</i>, ...).execute()
+</code></pre>
+</notextile>
+
+Many API methods accept a parameter whose name is the same as the resource type. For example, @links.create@ accepts a parameter called @link@. This parameter should be given as @body@.
+
+<notextile>
+<pre><code class="userinput">arvados.api('v1').links().create(
+    body={'link_class':'test','name':'test'}).execute()
+</code></pre>
+</notextile>
+
+One way to make API calls slightly less verbose is:
+
+<notextile>
+<pre><code class="userinput">arv = arvados.api('v1')
+j = arv.jobs().list().execute()
+</code></pre>
+</notextile>
+
+The SDK retrieves the list of API methods from the server at run time. Therefore, the set of available methods is determined by the server version rather than the SDK version.
+
diff --git a/doc/sdk/ruby/index.html.textile.liquid b/doc/sdk/ruby/index.html.textile.liquid
new file mode 100644 (file)
index 0000000..11dfcfb
--- /dev/null
@@ -0,0 +1,125 @@
+---
+layout: default
+navsection: sdk
+navmenu: Ruby
+title: "Ruby SDK"
+
+...
+
+The Ruby SDK provides a generic set of wrappers so you can make API calls easily.
+
+h3. Installation
+
+If you are logged in to an Arvados VM, the Ruby SDK should be installed.
+
+To use it elsewhere, you can either install the @arvados@ gem via RubyGems or build and install the package using the arvados source tree.
+
+h4. Prerequisites: Ruby &gt;= 2.0.0
+
+You can use "RVM":http://rvm.io/rvm/install to install and manage Ruby versions.
+
+h4. Option 1: install with RubyGems
+
+<notextile>
+<pre>
+$ <code class="userinput">sudo gem install arvados</code>
+</pre>
+</notextile>
+
+h4. Option 2: build and install from source
+
+<notextile>
+<pre>
+$ <code class="userinput">git clone https://github.com/curoverse/arvados.git</code>
+$ <code class="userinput">cd arvados/sdk/ruby</code>
+$ <code class="userinput">gem build arvados.gemspec</code>
+$ <code class="userinput">sudo gem install arvados-*.gem</code>
+</pre>
+</notextile>
+
+h4. Test installation
+
+If the SDK is installed, @ruby -r arvados -e 'puts "OK!"'@ should produce no errors.
+
+If your @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ environment variables are set up correctly (see "api-tokens":{{site.baseurl}}/user/reference/api-tokens.html for details), the following test script should work:
+
+<notextile>
+<pre>$ <code class="userinput">ruby -r arvados &lt;&lt;'EOF'
+arv = Arvados.new api_version: 'v1'
+my_full_name = arv.user.current[:full_name]
+puts "arvados.v1.users.current.full_name = '#{my_full_name}'"
+EOF</code>
+arvados.v1.users.current.full_name = 'Your Name'
+</pre>
+</notextile>
+
+h3. Examples
+
+Import the module (we skipped this step above by using "ruby -r arvados"):
+
+<notextile>
+<pre><code class="userinput">require 'arvados'
+</code></pre>
+</notextile>
+
+Set up an API client user agent:
+
+<notextile>
+<pre><code class="userinput">arv = Arvados.new(apiVersion: 'v1')
+</code></pre>
+</notextile>
+
+Get the User object for the current user:
+
+<notextile>
+<pre><code class="userinput">current_user = arv.user.current
+</code></pre>
+</notextile>
+
+Get the UUID of an object that was retrieved using the SDK:
+
+<notextile>
+<pre><code class="userinput">current_user_uuid = current_user[:uuid]
+</code></pre>
+</notextile>
+
+Retrieve an object by ID:
+
+<notextile>
+<pre><code class="userinput">some_user = arv.user.get(uuid: current_user_uuid)
+</code></pre>
+</notextile>
+
+Create an object:
+
+<notextile>
+<pre><code class="userinput">new_link = arv.link.create(link: {link_class: 'test', name: 'test'})
+</code></pre>
+</notextile>
+
+Update an object:
+
+<notextile>
+<pre><code class="userinput">updated_link = arv.link.update(uuid: new_link[:uuid],
+                               link: {properties: {foo: 'bar'}})
+</code></pre>
+</notextile>
+
+Delete an object:
+
+<notextile>
+<pre><code class="userinput">arv.link.delete(uuid: new_link[:uuid])
+</code></pre>
+</notextile>
+
+Get a list of objects:
+
+<notextile>
+<pre><code class="userinput">repos = arv.repository.list
+first_repo = repos[:items][0]
+puts "UUID of first repo returned is #{first_repo[:uuid]}"</code>
+UUID of first repo returned is qr1hi-s0uqq-b1bnybpx3u5temz
+</pre>
+</notextile>
+
+The SDK retrieves the list of API methods from the server at run time. Therefore, the set of available methods is determined by the server version rather than the SDK version.
diff --git a/doc/user/copying/LICENSE-2.0.html b/doc/user/copying/LICENSE-2.0.html
new file mode 100644 (file)
index 0000000..129916f
--- /dev/null
@@ -0,0 +1,182 @@
+---
+layout: default
+navsection: userguide
+title: "Apache License"
+...
+
+<div id="content" class="grid_16"><div class="section-content"><p>Version 2.0, January 2004<br/>
+<a href="http://www.apache.org/licenses/">http://www.apache.org/licenses/</a> </p>
+<p>TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION</p>
+<p><strong><a name="definitions">1. Definitions</a></strong>.</p>
+<p>"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.</p>
+<p>"Licensor" shall mean the copyright owner or entity authorized by the
+copyright owner that is granting the License.</p>
+<p>"Legal Entity" shall mean the union of the acting entity and all other
+entities that control, are controlled by, or are under common control with
+that entity. For the purposes of this definition, "control" means (i) the
+power, direct or indirect, to cause the direction or management of such
+entity, whether by contract or otherwise, or (ii) ownership of fifty
+percent (50%) or more of the outstanding shares, or (iii) beneficial
+ownership of such entity.</p>
+<p>"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.</p>
+<p>"Source" form shall mean the preferred form for making modifications,
+including but not limited to software source code, documentation source,
+and configuration files.</p>
+<p>"Object" form shall mean any form resulting from mechanical transformation
+or translation of a Source form, including but not limited to compiled
+object code, generated documentation, and conversions to other media types.</p>
+<p>"Work" shall mean the work of authorship, whether in Source or Object form,
+made available under the License, as indicated by a copyright notice that
+is included in or attached to the work (an example is provided in the
+Appendix below).</p>
+<p>"Derivative Works" shall mean any work, whether in Source or Object form,
+that is based on (or derived from) the Work and for which the editorial
+revisions, annotations, elaborations, or other modifications represent, as
+a whole, an original work of authorship. For the purposes of this License,
+Derivative Works shall not include works that remain separable from, or
+merely link (or bind by name) to the interfaces of, the Work and Derivative
+Works thereof.</p>
+<p>"Contribution" shall mean any work of authorship, including the original
+version of the Work and any modifications or additions to that Work or
+Derivative Works thereof, that is intentionally submitted to Licensor for
+inclusion in the Work by the copyright owner or by an individual or Legal
+Entity authorized to submit on behalf of the copyright owner. For the
+purposes of this definition, "submitted" means any form of electronic,
+verbal, or written communication sent to the Licensor or its
+representatives, including but not limited to communication on electronic
+mailing lists, source code control systems, and issue tracking systems that
+are managed by, or on behalf of, the Licensor for the purpose of discussing
+and improving the Work, but excluding communication that is conspicuously
+marked or otherwise designated in writing by the copyright owner as "Not a
+Contribution."</p>
+<p>"Contributor" shall mean Licensor and any individual or Legal Entity on
+behalf of whom a Contribution has been received by Licensor and
+subsequently incorporated within the Work.</p>
+<p><strong><a name="copyright">2. Grant of Copyright License</a></strong>. Subject to the
+terms and conditions of this License, each Contributor hereby grants to You
+a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+copyright license to reproduce, prepare Derivative Works of, publicly
+display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.</p>
+<p><strong><a name="patent">3. Grant of Patent License</a></strong>. Subject to the terms
+and conditions of this License, each Contributor hereby grants to You a
+perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+(except as stated in this section) patent license to make, have made, use,
+offer to sell, sell, import, and otherwise transfer the Work, where such
+license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by
+combination of their Contribution(s) with the Work to which such
+Contribution(s) was submitted. If You institute patent litigation against
+any entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that the Work or a Contribution incorporated within the Work constitutes
+direct or contributory patent infringement, then any patent licenses
+granted to You under this License for that Work shall terminate as of the
+date such litigation is filed.</p>
+<p><strong><a name="redistribution">4. Redistribution</a></strong>. You may reproduce and
+distribute copies of the Work or Derivative Works thereof in any medium,
+with or without modifications, and in Source or Object form, provided that
+You meet the following conditions:</p>
+<ol style="list-style: lower-latin;">
+<li>You must give any other recipients of the Work or Derivative Works a
+copy of this License; and</li>
+
+<li>You must cause any modified files to carry prominent notices stating
+that You changed the files; and</li>
+
+<li>You must retain, in the Source form of any Derivative Works that You
+distribute, all copyright, patent, trademark, and attribution notices from
+the Source form of the Work, excluding those notices that do not pertain to
+any part of the Derivative Works; and</li>
+
+<li>If the Work includes a "NOTICE" text file as part of its distribution,
+then any Derivative Works that You distribute must include a readable copy
+of the attribution notices contained within such NOTICE file, excluding
+those notices that do not pertain to any part of the Derivative Works, in
+at least one of the following places: within a NOTICE text file distributed
+as part of the Derivative Works; within the Source form or documentation,
+if provided along with the Derivative Works; or, within a display generated
+by the Derivative Works, if and wherever such third-party notices normally
+appear. The contents of the NOTICE file are for informational purposes only
+and do not modify the License. You may add Your own attribution notices
+within Derivative Works that You distribute, alongside or as an addendum to
+the NOTICE text from the Work, provided that such additional attribution
+notices cannot be construed as modifying the License.
+<br/>
+<br/>
+You may add Your own copyright statement to Your modifications and may
+provide additional or different license terms and conditions for use,
+reproduction, or distribution of Your modifications, or for any such
+Derivative Works as a whole, provided Your use, reproduction, and
+distribution of the Work otherwise complies with the conditions stated in
+this License.
+</li>
+
+</ol>
+
+<p><strong><a name="contributions">5. Submission of Contributions</a></strong>. Unless You
+explicitly state otherwise, any Contribution intentionally submitted for
+inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the
+terms of any separate license agreement you may have executed with Licensor
+regarding such Contributions.</p>
+<p><strong><a name="trademarks">6. Trademarks</a></strong>. This License does not grant
+permission to use the trade names, trademarks, service marks, or product
+names of the Licensor, except as required for reasonable and customary use
+in describing the origin of the Work and reproducing the content of the
+NOTICE file.</p>
+<p><strong><a name="no-warranty">7. Disclaimer of Warranty</a></strong>. Unless required by
+applicable law or agreed to in writing, Licensor provides the Work (and
+each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including,
+without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You
+are solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise
+of permissions under this License.</p>
+<p><strong><a name="no-liability">8. Limitation of Liability</a></strong>. In no event and
+under no legal theory, whether in tort (including negligence), contract, or
+otherwise, unless required by applicable law (such as deliberate and
+grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special,
+incidental, or consequential damages of any character arising as a result
+of this License or out of the use or inability to use the Work (including
+but not limited to damages for loss of goodwill, work stoppage, computer
+failure or malfunction, or any and all other commercial damages or losses),
+even if such Contributor has been advised of the possibility of such
+damages.</p>
+<p><strong><a name="additional">9. Accepting Warranty or Additional Liability</a></strong>.
+While redistributing the Work or Derivative Works thereof, You may choose
+to offer, and charge a fee for, acceptance of support, warranty, indemnity,
+or other liability obligations and/or rights consistent with this License.
+However, in accepting such obligations, You may act only on Your own behalf
+and on Your sole responsibility, not on behalf of any other Contributor,
+and only if You agree to indemnify, defend, and hold each Contributor
+harmless for any liability incurred by, or claims asserted against, such
+Contributor by reason of your accepting any such warranty or additional
+liability.</p>
+<p>END OF TERMS AND CONDITIONS</p>
+<h1 id="apply">APPENDIX: How to apply the Apache License to your work</h1>
+<p>To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included
+on the same "printed page" as the copyright notice for easier
+identification within third-party archives.</p>
+<div class="codehilite"><pre>Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an &quot;AS IS&quot; BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+</pre></div></div></div>
diff --git a/doc/user/copying/agpl-3.0.html b/doc/user/copying/agpl-3.0.html
new file mode 100644 (file)
index 0000000..aad493a
--- /dev/null
@@ -0,0 +1,684 @@
+---
+layout: default
+navsection: userguide
+title: "GNU Affero General Public License"
+...
+
+<p style="text-align: center;">Version 3, 19 November 2007</p>
+
+<p>Copyright &copy; 2007 Free Software Foundation,
+Inc. &lt;<a href="http://www.fsf.org/">http://fsf.org/</a>&gt;
+ <br />
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.</p>
+
+<h3><a name="preamble"></a>Preamble</h3>
+
+<p>The GNU Affero General Public License is a free, copyleft license
+for software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.</p>
+
+<p>The licenses for most software and other practical works are
+designed to take away your freedom to share and change the works.  By
+contrast, our General Public Licenses are intended to guarantee your
+freedom to share and change all versions of a program--to make sure it
+remains free software for all its users.</p>
+
+<p>When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.</p>
+
+<p>Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.</p>
+
+<p>A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate.  Many developers of free software are heartened and
+encouraged by the resulting cooperation.  However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.</p>
+
+<p>The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community.  It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server.  Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.</p>
+
+<p>An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals.  This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.</p>
+
+<p>The precise terms and conditions for copying, distribution and
+modification follow.</p>
+
+<h3><a name="terms"></a>TERMS AND CONDITIONS</h3>
+
+<h4><a name="section0"></a>0. Definitions.</h4>
+
+<p>&quot;This License&quot; refers to version 3 of the GNU Affero General Public
+License.</p>
+
+<p>&quot;Copyright&quot; also means copyright-like laws that apply to other kinds
+of works, such as semiconductor masks.</p>
+
+<p>&quot;The Program&quot; refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as &quot;you&quot;.  &quot;Licensees&quot; and
+&quot;recipients&quot; may be individuals or organizations.</p>
+
+<p>To &quot;modify&quot; a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a &quot;modified version&quot; of the
+earlier work or a work &quot;based on&quot; the earlier work.</p>
+
+<p>A &quot;covered work&quot; means either the unmodified Program or a work based
+on the Program.</p>
+
+<p>To &quot;propagate&quot; a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.</p>
+
+<p>To &quot;convey&quot; a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.</p>
+
+<p>An interactive user interface displays &quot;Appropriate Legal Notices&quot;
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.</p>
+
+<h4><a name="section1"></a>1. Source Code.</h4>
+
+<p>The &quot;source code&quot; for a work means the preferred form of the work
+for making modifications to it.  &quot;Object code&quot; means any non-source
+form of a work.</p>
+
+<p>A &quot;Standard Interface&quot; means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.</p>
+
+<p>The &quot;System Libraries&quot; of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+&quot;Major Component&quot;, in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.</p>
+
+<p>The &quot;Corresponding Source&quot; for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.</p>
+
+<p>The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.</p>
+
+<p>The Corresponding Source for a work in source code form is that
+same work.</p>
+
+<h4><a name="section2"></a>2. Basic Permissions.</h4>
+
+<p>All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.</p>
+
+<p>You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.</p>
+
+<p>Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.</p>
+
+<h4><a name="section3"></a>3. Protecting Users' Legal Rights From Anti-Circumvention Law.</h4>
+
+<p>No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.</p>
+
+<p>When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.</p>
+
+<h4><a name="section4"></a>4. Conveying Verbatim Copies.</h4>
+
+<p>You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.</p>
+
+<p>You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.</p>
+
+<h4><a name="section5"></a>5. Conveying Modified Source Versions.</h4>
+
+<p>You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:</p>
+
+<ul>
+
+<li>a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.</li>
+
+<li>b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    &quot;keep intact all notices&quot;.</li>
+
+<li>c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.</li>
+
+<li>d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.</li>
+
+</ul>
+
+<p>A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+&quot;aggregate&quot; if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.</p>
+
+<h4><a name="section6"></a>6. Conveying Non-Source Forms.</h4>
+
+<p>You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:</p>
+
+<ul>
+
+<li>a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.</li>
+
+<li>b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.</li>
+
+<li>c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.</li>
+
+<li>d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.</li>
+
+<li>e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.</li>
+
+</ul>
+
+<p>A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.</p>
+
+<p>A &quot;User Product&quot; is either (1) a &quot;consumer product&quot;, which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, &quot;normally used&quot; refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.</p>
+
+<p>&quot;Installation Information&quot; for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.</p>
+
+<p>If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).</p>
+
+<p>The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.</p>
+
+<p>Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.</p>
+
+<h4><a name="section7"></a>7. Additional Terms.</h4>
+
+<p>&quot;Additional permissions&quot; are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.</p>
+
+<p>When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.</p>
+
+<p>Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:</p>
+
+<ul>
+
+<li>a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or</li>
+
+<li>b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or</li>
+
+<li>c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or</li>
+
+<li>d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or</li>
+
+<li>e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or</li>
+
+<li>f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.</li>
+
+</ul>
+
+<p>All other non-permissive additional terms are considered &quot;further
+restrictions&quot; within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further restriction,
+you may remove that term.  If a license document contains a further
+restriction but permits relicensing or conveying under this License, you
+may add to a covered work material governed by the terms of that license
+document, provided that the further restriction does not survive such
+relicensing or conveying.</p>
+
+<p>If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.</p>
+
+<p>Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.</p>
+
+<h4><a name="section8"></a>8. Termination.</h4>
+
+<p>You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).</p>
+
+<p>However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.</p>
+
+<p>Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.</p>
+
+<p>Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License.  If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.</p>
+
+<h4><a name="section9"></a>9. Acceptance Not Required for Having Copies.</h4>
+
+<p>You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.</p>
+
+<h4><a name="section10"></a>10. Automatic Licensing of Downstream Recipients.</h4>
+
+<p>Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.</p>
+
+<p>An &quot;entity transaction&quot; is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.</p>
+
+<p>You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.</p>
+
+<h4><a name="section11"></a>11. Patents.</h4>
+
+<p>A &quot;contributor&quot; is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's &quot;contributor version&quot;.</p>
+
+<p>A contributor's &quot;essential patent claims&quot; are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, &quot;control&quot; includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.</p>
+
+<p>Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.</p>
+
+<p>In the following three paragraphs, a &quot;patent license&quot; is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To &quot;grant&quot; such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.</p>
+
+<p>If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  &quot;Knowingly relying&quot; means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.</p>
+
+<p>If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.</p>
+
+<p>A patent license is &quot;discriminatory&quot; if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.</p>
+
+<p>Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.</p>
+
+<h4><a name="section12"></a>12. No Surrender of Others' Freedom.</h4>
+
+<p>If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.</p>
+
+<h4><a name="section13"></a>13. Remote Network Interaction; Use with the GNU General Public License.</h4>
+
+<p>Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software.  This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.</p>
+
+<p>Notwithstanding any other provision of this License, you have permission
+to link or combine any covered work with a work licensed under version 3
+of the GNU General Public License into a single combined work, and to
+convey the resulting work.  The terms of this License will continue to
+apply to the part which is the covered work, but the work with which it is
+combined will remain governed by version 3 of the GNU General Public
+License.</p>
+
+<h4><a name="section14"></a>14. Revised Versions of this License.</h4>
+
+<p>The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time.  Such new
+versions will be similar in spirit to the present version, but may differ
+in detail to address new problems or concerns.</p>
+
+<p>Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU Affero
+General Public License &quot;or any later version&quot; applies to it, you have
+the option of following the terms and conditions either of that
+numbered version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number
+of the GNU Affero General Public License, you may choose any version
+ever published by the Free Software Foundation.</p>
+
+<p>If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that
+proxy's public statement of acceptance of a version permanently
+authorizes you to choose that version for the Program.</p>
+
+<p>Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.</p>
+
+<h4><a name="section15"></a>15. Disclaimer of Warranty.</h4>
+
+<p>THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM &quot;AS IS&quot; WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.</p>
+
+<h4><a name="section16"></a>16. Limitation of Liability.</h4>
+
+<p>IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.</p>
+
+<h4><a name="section17"></a>17. Interpretation of Sections 15 and 16.</h4>
+
+<p>If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.</p>
+
+<p>END OF TERMS AND CONDITIONS</p>
+
+<h3><a name="howto"></a>How to Apply These Terms to Your New Programs</h3>
+
+<p>If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.</p>
+
+<p>To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the &quot;copyright&quot; line and a pointer to where the full notice is found.</p>
+
+<pre>    &lt;one line to give the program's name and a brief idea of what it does.&gt;
+    Copyright (C) &lt;year&gt;  &lt;name of author&gt;
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as
+    published by the Free Software Foundation, either version 3 of the
+    License, or (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program.  If not, see &lt;http://www.gnu.org/licenses/&gt;.
+</pre>
+
+<p>Also add information on how to contact you by electronic and paper mail.</p>
+
+<p>If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source.  For example, if your program is a web application, its
+interface could display a &quot;Source&quot; link that leads users to an archive
+of the code.  There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.</p>
+
+<p>You should also get your employer (if you work as a programmer) or school,
+if any, to sign a &quot;copyright disclaimer&quot; for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+&lt;<a href="http://www.gnu.org/licenses/">http://www.gnu.org/licenses/</a>&gt;.</p>
+
+
diff --git a/doc/user/copying/by-sa-3.0.html b/doc/user/copying/by-sa-3.0.html
new file mode 100644 (file)
index 0000000..f88374a
--- /dev/null
@@ -0,0 +1,418 @@
+---
+layout: default
+navsection: userguide
+title: "Creative Commons"
+...
+
+<div id="deed" class="green">
+    <div id="deed-head">
+
+      <div id="deed-license">
+        <h2>Attribution-ShareAlike 3.0 United States</h2>
+      </div>
+    </div>
+
+        <h3><em>License</em></h3>
+
+        <p>THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS
+        OF THIS CREATIVE COMMONS PUBLIC LICENSE ("CCPL" OR
+        "LICENSE"). THE WORK IS PROTECTED BY COPYRIGHT AND/OR OTHER
+        APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS
+        AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS
+        PROHIBITED.</p>
+
+        <p>BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU
+        ACCEPT AND AGREE TO BE BOUND BY THE TERMS OF THIS LICENSE.
+        TO THE EXTENT THIS LICENSE MAY BE CONSIDERED TO BE A
+        CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE
+        IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND
+        CONDITIONS.</p>
+
+        <p><strong>1. Definitions</strong></p>
+
+        <ol type="a">
+          <li><strong>"Collective Work"</strong> means a work, such
+          as a periodical issue, anthology or encyclopedia, in
+          which the Work in its entirety in unmodified form, along
+          with one or more other contributions, constituting
+          separate and independent works in themselves, are
+          assembled into a collective whole. A work that
+          constitutes a Collective Work will not be considered a
+          Derivative Work (as defined below) for the purposes of
+          this License.</li>
+
+          <li><strong>"Creative Commons Compatible
+          License"</strong> means a license that is listed at
+          http://creativecommons.org/compatiblelicenses that has
+          been approved by Creative Commons as being essentially
+          equivalent to this License, including, at a minimum,
+          because that license: (i) contains terms that have the
+          same purpose, meaning and effect as the License Elements
+          of this License; and, (ii) explicitly permits the
+          relicensing of derivatives of works made available under
+          that license under this License or either a Creative
+          Commons unported license or a Creative Commons
+          jurisdiction license with the same License Elements as
+          this License.</li>
+
+          <li><strong>"Derivative Work"</strong> means a work based
+          upon the Work or upon the Work and other pre-existing
+          works, such as a translation, musical arrangement,
+          dramatization, fictionalization, motion picture version,
+          sound recording, art reproduction, abridgment,
+          condensation, or any other form in which the Work may be
+          recast, transformed, or adapted, except that a work that
+          constitutes a Collective Work will not be considered a
+          Derivative Work for the purpose of this License. For the
+          avoidance of doubt, where the Work is a musical
+          composition or sound recording, the synchronization of
+          the Work in timed-relation with a moving image
+          ("synching") will be considered a Derivative Work for the
+          purpose of this License.</li>
+
+          <li><strong>"License Elements"</strong> means the
+          following high-level license attributes as selected by
+          Licensor and indicated in the title of this License:
+          Attribution, ShareAlike.</li>
+
+          <li><strong>"Licensor"</strong> means the individual,
+          individuals, entity or entities that offers the Work
+          under the terms of this License.</li>
+
+          <li><strong>"Original Author"</strong> means the
+          individual, individuals, entity or entities who created
+          the Work.</li>
+
+          <li><strong>"Work"</strong> means the copyrightable work
+          of authorship offered under the terms of this
+          License.</li>
+
+          <li><strong>"You"</strong> means an individual or entity
+          exercising rights under this License who has not
+          previously violated the terms of this License with
+          respect to the Work, or who has received express
+          permission from the Licensor to exercise rights under
+          this License despite a previous violation.</li>
+        </ol>
+
+        <p><strong>2. Fair Use Rights.</strong> Nothing in this
+        license is intended to reduce, limit, or restrict any
+        rights arising from fair use, first sale or other
+        limitations on the exclusive rights of the copyright owner
+        under copyright law or other applicable laws.</p>
+
+        <p><strong>3. License Grant.</strong> Subject to the terms
+        and conditions of this License, Licensor hereby grants You
+        a worldwide, royalty-free, non-exclusive, perpetual (for
+        the duration of the applicable copyright) license to
+        exercise the rights in the Work as stated below:</p>
+
+        <ol type="a">
+          <li>to reproduce the Work, to incorporate the Work into
+          one or more Collective Works, and to reproduce the Work
+          as incorporated in the Collective Works;</li>
+
+          <li>to create and reproduce Derivative Works provided
+          that any such Derivative Work, including any translation
+          in any medium, takes reasonable steps to clearly label,
+          demarcate or otherwise identify that changes were made to
+          the original Work. For example, a translation could be
+          marked "The original work was translated from English to
+          Spanish," or a modification could indicate "The original
+          work has been modified.";</li>
+
+          <li>to distribute copies or phonorecords of, display
+          publicly, perform publicly, and perform publicly by means
+          of a digital audio transmission the Work including as
+          incorporated in Collective Works;</li>
+
+          <li>to distribute copies or phonorecords of, display
+          publicly, perform publicly, and perform publicly by means
+          of a digital audio transmission Derivative Works.</li>
+
+          <li>
+            <p>For the avoidance of doubt, where the Work is a
+            musical composition:</p>
+
+            <ol type="i">
+              <li><strong>Performance Royalties Under Blanket
+              Licenses</strong>. Licensor waives the exclusive
+              right to collect, whether individually or, in the
+              event that Licensor is a member of a performance
+              rights society (e.g. ASCAP, BMI, SESAC), via that
+              society, royalties for the public performance or
+              public digital performance (e.g. webcast) of the
+              Work.</li>
+
+              <li><strong>Mechanical Rights and Statutory
+              Royalties</strong>. Licensor waives the exclusive
+              right to collect, whether individually or via a music
+              rights agency or designated agent (e.g. Harry Fox
+              Agency), royalties for any phonorecord You create
+              from the Work ("cover version") and distribute,
+              subject to the compulsory license created by 17 USC
+              Section 115 of the US Copyright Act (or the
+              equivalent in other jurisdictions).</li>
+            </ol>
+          </li>
+
+          <li><strong>Webcasting Rights and Statutory
+          Royalties</strong>. For the avoidance of doubt, where the
+          Work is a sound recording, Licensor waives the exclusive
+          right to collect, whether individually or via a
+          performance-rights society (e.g. SoundExchange),
+          royalties for the public digital performance (e.g.
+          webcast) of the Work, subject to the compulsory license
+          created by 17 USC Section 114 of the US Copyright Act (or
+          the equivalent in other jurisdictions).</li>
+        </ol>
+
+        <p>The above rights may be exercised in all media and
+        formats whether now known or hereafter devised. The above
+        rights include the right to make such modifications as are
+        technically necessary to exercise the rights in other media
+        and formats. All rights not expressly granted by Licensor
+        are hereby reserved.</p>
+
+        <p><strong>4. Restrictions.</strong> The license granted in
+        Section 3 above is expressly made subject to and limited by
+        the following restrictions:</p>
+
+        <ol type="a">
+          <li>You may distribute, publicly display, publicly
+          perform, or publicly digitally perform the Work only
+          under the terms of this License, and You must include a
+          copy of, or the Uniform Resource Identifier for, this
+          License with every copy or phonorecord of the Work You
+          distribute, publicly display, publicly perform, or
+          publicly digitally perform. You may not offer or impose
+          any terms on the Work that restrict the terms of this
+          License or the ability of a recipient of the Work to
+          exercise of the rights granted to that recipient under
+          the terms of the License. You may not sublicense the
+          Work. You must keep intact all notices that refer to this
+          License and to the disclaimer of warranties. When You
+          distribute, publicly display, publicly perform, or
+          publicly digitally perform the Work, You may not impose
+          any technological measures on the Work that restrict the
+          ability of a recipient of the Work from You to exercise
+          of the rights granted to that recipient under the terms
+          of the License. This Section 4(a) applies to the Work as
+          incorporated in a Collective Work, but this does not
+          require the Collective Work apart from the Work itself to
+          be made subject to the terms of this License. If You
+          create a Collective Work, upon notice from any Licensor
+          You must, to the extent practicable, remove from the
+          Collective Work any credit as required by Section 4(c),
+          as requested. If You create a Derivative Work, upon
+          notice from any Licensor You must, to the extent
+          practicable, remove from the Derivative Work any credit
+          as required by Section 4(c), as requested.</li>
+
+          <li>You may distribute, publicly display, publicly
+          perform, or publicly digitally perform a Derivative Work
+          only under: (i) the terms of this License; (ii) a later
+          version of this License with the same License Elements as
+          this License; (iii) either the Creative Commons
+          (Unported) license or a Creative Commons jurisdiction
+          license (either this or a later license version) that
+          contains the same License Elements as this License (e.g.
+          Attribution-ShareAlike 3.0 (Unported)); (iv) a Creative
+          Commons Compatible License. If you license the Derivative
+          Work under one of the licenses mentioned in (iv), you
+          must comply with the terms of that license. If you
+          license the Derivative Work under the terms of any of the
+          licenses mentioned in (i), (ii) or (iii) (the "Applicable
+          License"), you must comply with the terms of the
+          Applicable License generally and with the following
+          provisions: (I) You must include a copy of, or the
+          Uniform Resource Identifier for, the Applicable License
+          with every copy or phonorecord of each Derivative Work
+          You distribute, publicly display, publicly perform, or
+          publicly digitally perform; (II) You may not offer or
+          impose any terms on the Derivative Works that restrict
+          the terms of the Applicable License or the ability of a
+          recipient of the Work to exercise the rights granted to
+          that recipient under the terms of the Applicable License;
+          (III) You must keep intact all notices that refer to the
+          Applicable License and to the disclaimer of warranties;
+          and, (IV) when You distribute, publicly display, publicly
+          perform, or publicly digitally perform the Work, You may
+          not impose any technological measures on the Derivative
+          Work that restrict the ability of a recipient of the
+          Derivative Work from You to exercise the rights granted
+          to that recipient under the terms of the Applicable
+          License. This Section 4(b) applies to the Derivative Work
+          as incorporated in a Collective Work, but this does not
+          require the Collective Work apart from the Derivative
+          Work itself to be made subject to the terms of the
+          Applicable License.</li>
+
+          <li>If You distribute, publicly display, publicly
+          perform, or publicly digitally perform the Work (as
+          defined in Section 1 above) or any Derivative Works (as
+          defined in Section 1 above) or Collective Works (as
+          defined in Section 1 above), You must, unless a request
+          has been made pursuant to Section 4(a), keep intact all
+          copyright notices for the Work and provide, reasonable to
+          the medium or means You are utilizing: (i) the name of
+          the Original Author (or pseudonym, if applicable) if
+          supplied, and/or (ii) if the Original Author and/or
+          Licensor designate another party or parties (e.g. a
+          sponsor institute, publishing entity, journal) for
+          attribution ("Attribution Parties") in Licensor's
+          copyright notice, terms of service or by other reasonable
+          means, the name of such party or parties; the title of
+          the Work if supplied; to the extent reasonably
+          practicable, the Uniform Resource Identifier, if any,
+          that Licensor specifies to be associated with the Work,
+          unless such URI does not refer to the copyright notice or
+          licensing information for the Work; and, consistent with
+          Section 3(b) in the case of a Derivative Work, a credit
+          identifying the use of the Work in the Derivative Work
+          (e.g., "French translation of the Work by Original
+          Author," or "Screenplay based on original Work by
+          Original Author"). The credit required by this Section
+          4(c) may be implemented in any reasonable manner;
+          provided, however, that in the case of a Derivative Work
+          or Collective Work, at a minimum such credit will appear,
+          if a credit for all contributing authors of the
+          Derivative Work or Collective Work appears, then as part
+          of these credits and in a manner at least as prominent as
+          the credits for the other contributing authors. For the
+          avoidance of doubt, You may only use the credit required
+          by this Section for the purpose of attribution in the
+          manner set out above and, by exercising Your rights under
+          this License, You may not implicitly or explicitly assert
+          or imply any connection with, sponsorship or endorsement
+          by the Original Author, Licensor and/or Attribution
+          Parties, as appropriate, of You or Your use of the Work,
+          without the separate, express prior written permission of
+          the Original Author, Licensor and/or Attribution
+          Parties.</li>
+        </ol>
+
+        <p><strong>5. Representations, Warranties and
+        Disclaimer</strong></p>
+
+        <p>UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN
+        WRITING, LICENSOR OFFERS THE WORK AS-IS AND ONLY TO THE
+        EXTENT OF ANY RIGHTS HELD IN THE LICENSED WORK BY THE
+        LICENSOR. THE LICENSOR MAKES NO REPRESENTATIONS OR
+        WARRANTIES OF ANY KIND CONCERNING THE WORK, EXPRESS,
+        IMPLIED, STATUTORY OR OTHERWISE, INCLUDING, WITHOUT
+        LIMITATION, WARRANTIES OF TITLE, MARKETABILITY,
+        MERCHANTIBILITY, FITNESS FOR A PARTICULAR PURPOSE,
+        NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS,
+        ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, WHETHER OR
+        NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE
+        EXCLUSION OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT
+        APPLY TO YOU.</p>
+
+        <p><strong>6. Limitation on Liability.</strong> EXCEPT TO
+        THE EXTENT REQUIRED BY APPLICABLE LAW, IN NO EVENT WILL
+        LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY
+        SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY
+        DAMAGES ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK,
+        EVEN IF LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF
+        SUCH DAMAGES.</p>
+
+        <p><strong>7. Termination</strong></p>
+
+        <ol type="a">
+          <li>This License and the rights granted hereunder will
+          terminate automatically upon any breach by You of the
+          terms of this License. Individuals or entities who have
+          received Derivative Works or Collective Works from You
+          under this License, however, will not have their licenses
+          terminated provided such individuals or entities remain
+          in full compliance with those licenses. Sections 1, 2, 5,
+          6, 7, and 8 will survive any termination of this
+          License.</li>
+
+          <li>Subject to the above terms and conditions, the
+          license granted here is perpetual (for the duration of
+          the applicable copyright in the Work). Notwithstanding
+          the above, Licensor reserves the right to release the
+          Work under different license terms or to stop
+          distributing the Work at any time; provided, however that
+          any such election will not serve to withdraw this License
+          (or any other license that has been, or is required to
+          be, granted under the terms of this License), and this
+          License will continue in full force and effect unless
+          terminated as stated above.</li>
+        </ol>
+
+        <p><strong>8. Miscellaneous</strong></p>
+
+        <ol type="a">
+          <li>Each time You distribute or publicly digitally
+          perform the Work (as defined in Section 1 above) or a
+          Collective Work (as defined in Section 1 above), the
+          Licensor offers to the recipient a license to the Work on
+          the same terms and conditions as the license granted to
+          You under this License.</li>
+
+          <li>Each time You distribute or publicly digitally
+          perform a Derivative Work, Licensor offers to the
+          recipient a license to the original Work on the same
+          terms and conditions as the license granted to You under
+          this License.</li>
+
+          <li>If any provision of this License is invalid or
+          unenforceable under applicable law, it shall not affect
+          the validity or enforceability of the remainder of the
+          terms of this License, and without further action by the
+          parties to this agreement, such provision shall be
+          reformed to the minimum extent necessary to make such
+          provision valid and enforceable.</li>
+
+          <li>No term or provision of this License shall be deemed
+          waived and no breach consented to unless such waiver or
+          consent shall be in writing and signed by the party to be
+          charged with such waiver or consent.</li>
+
+          <li>This License constitutes the entire agreement between
+          the parties with respect to the Work licensed here. There
+          are no understandings, agreements or representations with
+          respect to the Work not specified here. Licensor shall
+          not be bound by any additional provisions that may appear
+          in any communication from You. This License may not be
+          modified without the mutual written agreement of the
+          Licensor and You.</li>
+        </ol>
+        <!-- BREAKOUT FOR CC NOTICE.  NOT A PART OF THE LICENSE -->
+
+        <blockquote>
+          <h3>Creative Commons Notice</h3>
+
+          <p>Creative Commons is not a party to this License, and
+          makes no warranty whatsoever in connection with the Work.
+          Creative Commons will not be liable to You or any party
+          on any legal theory for any damages whatsoever, including
+          without limitation any general, special, incidental or
+          consequential damages arising in connection to this
+          license. Notwithstanding the foregoing two (2) sentences,
+          if Creative Commons has expressly identified itself as
+          the Licensor hereunder, it shall have all rights and
+          obligations of Licensor.</p>
+
+          <p>Except for the limited purpose of indicating to the
+          public that the Work is licensed under the CCPL, Creative
+          Commons does not authorize the use by either party of the
+          trademark "Creative Commons" or any related trademark or
+          logo of Creative Commons without the prior written
+          consent of Creative Commons. Any permitted use will be in
+          compliance with Creative Commons' then-current trademark
+          usage guidelines, as may be published on its website or
+          otherwise made available upon request from time to time.
+          For the avoidance of doubt, this trademark restriction
+          does not form part of this License.</p>
+
+          <p>Creative Commons may be contacted at <a href=
+          "http://creativecommons.org/">http://creativecommons.org/</a>.</p>
+        </blockquote>
+      </div>
+    </div>
+
+  </div>
diff --git a/doc/user/copying/copying.html.textile.liquid b/doc/user/copying/copying.html.textile.liquid
new file mode 100644 (file)
index 0000000..1ad928c
--- /dev/null
@@ -0,0 +1,11 @@
+---
+layout: default
+navsection: userguide
+title: "Arvados Free Software Licenses"
+...
+
+Server-side components of Arvados contained in the apps/ and services/ directories, including the API Server, Workbench, Keep, and Crunch, are licensed under the "GNU Affero General Public License version 3":agpl-3.0.html.
+
+The Arvados client Software Development Kits contained in the sdk/ directory, example scripts in the crunch_scripts/ directory, and code samples in the Arvados documentation are licensed under the "Apache License, Version 2.0":LICENSE-2.0.html
+
+The Arvados Documentation located in the doc/ directory is licensed under the "Creative Commons Attribution-Share Alike 3.0 United States":by-sa-3.0.html
diff --git a/doc/user/examples/crunch-examples.html.textile.liquid b/doc/user/examples/crunch-examples.html.textile.liquid
new file mode 100644 (file)
index 0000000..03e9d7c
--- /dev/null
@@ -0,0 +1,95 @@
+---
+layout: default
+navsection: userguide
+title: "Scripts provided by Arvados"
+...
+
+Several crunch scripts are included with Arvados in the "/crunch_scripts directory":https://arvados.org/projects/arvados/repository/revisions/master/show/crunch_scripts. They are intended to provide examples and starting points for writing your own scripts.
+
+h4. bwa-aln
+
+Run the bwa aligner on a set of paired-end fastq files, producing a BAM file for each pair. "View source.":https://arvados.org/projects/arvados/repository/revisions/master/entry/crunch_scripts/bwa-aln
+
+<div class="offset1">
+table(table table-bordered table-condensed).
+|_Parameter_|_Description_|_Example_|
+|bwa_tbz|Collection with the bwa source distribution.|@8b6e2c4916133e1d859c9e812861ce13+70@|
+|samtools_tgz|Collection with the samtools source distribution.|@c777e23cf13e5d5906abfdc08d84bfdb+74@|
+|input|Collection with fastq reads (pairs of *_1.fastq.gz and *_2.fastq.gz).|@d0136bc494c21f79fc1b6a390561e6cb+2778@|
+</div>
+
+h4. bwa-index
+
+Generate an index of a fasta reference genome suitable for use by bwa-aln. "View source.":https://arvados.org/projects/arvados/repository/revisions/master/entry/crunch_scripts/bwa-index
+
+<div class="offset1">
+table(table table-bordered table-condensed).
+|_Parameter_|_Description_|_Example_|
+|bwa_tbz|Collection with the bwa source distribution.|@8b6e2c4916133e1d859c9e812861ce13+70@|
+|input|Collection with reference data (*.fasta.gz, *.fasta.fai.gz, *.dict.gz).|@c361dbf46ee3397b0958802b346e9b5a+925@|
+</div>
+
+h4. picard-gatk2-prep
+
+Using the FixMateInformation, SortSam, ReorderSam, AddOrReplaceReadGroups, and BuildBamIndex modules from picard, prepare a BAM file for use with the GATK2 tools. Additionally, run picard's CollectAlignmentSummaryMetrics module to produce a @*.casm.tsv@ statistics file for each BAM file. "View source.":https://arvados.org/projects/arvados/repository/revisions/master/entry/crunch_scripts/picard-gatk2-prep
+
+<div class="offset1">
+table(table table-bordered table-condensed).
+|_Parameter_|_Description_|_Example_|
+|input|Collection containing aligned bam files.||
+|picard_zip|Collection with the picard binary distribution.|@687f74675c6a0e925dec619cc2bec25f+77@|
+|reference|Collection with reference data (*.fasta.gz, *.fasta.fai.gz, *.dict.gz).|@c361dbf46ee3397b0958802b346e9b5a+925@|
+</div>
+
+h4. GATK2-realign
+
+Run GATK's RealignerTargetCreator and IndelRealigner modules on a set of BAM files. "View source.":https://arvados.org/projects/arvados/repository/revisions/master/entry/crunch_scripts/GATK2-realign
+
+<div class="offset1">
+table(table table-bordered table-condensed).
+|_Parameter_|_Description_|_Example_|
+|input|Collection containing aligned bam files.||
+|picard_zip|Collection with the picard binary distribution.|@687f74675c6a0e925dec619cc2bec25f+77@|
+|gatk_tbz|Collection with the GATK2 binary distribution.|@7e0a277d6d2353678a11f56bab3b13f2+87@|
+|gatk_bundle|Collection with the GATK data bundle.|@d237a90bae3870b3b033aea1e99de4a9+10820@|
+|known_sites|List of files in the data bundle to use as GATK @-known@ arguments. Optional. |@["dbsnp_137.b37.vcf","Mills_and_1000G_gold_standard.indels.b37.vcf"]@ (this is the default value)|
+|regions|Collection with .bed files indicating sequencing target regions. Optional.||
+|region_padding|Corresponds to GATK @--interval_padding@ argument. Required if a regions parameter is given.|10|
+</div>
+
+h4. GATK2-bqsr
+
+Run GATK's BaseQualityScoreRecalibration module on a set of BAM files. "View source.":https://arvados.org/projects/arvados/repository/revisions/master/entry/crunch_scripts/GATK2-bqsr
+
+<div class="offset1">
+table(table table-bordered table-condensed).
+|_Parameter_|_Description_|_Example_|
+|input|Collection containing bam files.||
+|gatk_tbz|Collection with the GATK2 binary distribution.|@7e0a277d6d2353678a11f56bab3b13f2+87@|
+|gatk_bundle|Collection with the GATK data bundle.|@d237a90bae3870b3b033aea1e99de4a9+10820@|
+</div>
+
+h4. GATK2-merge-call
+
+Merge a set of BAM files using picard, and run GATK's UnifiedGenotyper module on the merged set to produce a VCF file. "View source.":https://arvados.org/projects/arvados/repository/revisions/master/entry/crunch_scripts/GATK2-merge-call
+
+<div class="offset1">
+table(table table-bordered table-condensed).
+|_Parameter_|_Description_|_Example_|
+|input|Collection containing bam files.||
+|picard_zip|Collection with the picard binary distribution.|@687f74675c6a0e925dec619cc2bec25f+77@|
+|gatk_tbz|Collection with the GATK2 binary distribution.|@7e0a277d6d2353678a11f56bab3b13f2+87@|
+|gatk_bundle|Collection with the GATK data bundle.|@d237a90bae3870b3b033aea1e99de4a9+10820@|
+|regions|Collection with .bed files indicating sequencing target regions. Optional.||
+|region_padding|Corresponds to GATK @--interval_padding@ argument. Required if a regions parameter is given.|10|
+</div>
+
+h4. file-select
+
+Pass through the named files from input to output collection, and ignore the rest. "View source.":https://arvados.org/projects/arvados/repository/revisions/master/entry/crunch_scripts/file-select
+
+<div class="offset1">
+table(table table-bordered table-condensed).
+|_Parameter_|_Description_|_Example_|
+|names|List of filenames to include in the output.|@["human_g1k_v37.fasta.gz","human_g1k_v37.fasta.fai.gz"]@|
+</div>
diff --git a/doc/user/getting_started/check-environment.html.textile.liquid b/doc/user/getting_started/check-environment.html.textile.liquid
new file mode 100644 (file)
index 0000000..46156b7
--- /dev/null
@@ -0,0 +1,40 @@
+---
+layout: default
+navsection: userguide
+title: "Checking your environment"
+...
+
+First, log into an Arvados VM instance (instructions for "Unix":{{site.baseurl}}/user/getting_started/ssh-access-unix.html#login or "Windows":{{site.baseurl}}/user/getting_started/ssh-access-windows.html#login) or install the Arvados "Command line SDK":{{site.baseurl}}/sdk/cli/install.html and "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html on your workstation.
+
+Check that you are able to access the Arvados API server using @arv user current@.  If it is able to access the API server, it will print out information about your account:
+
+<notextile>
+<pre><code>$ <span class="userinput">arv user current</span>
+{
+ "href":"https://qr1hi.arvadosapi.com/arvados/v1/users/qr1hi-xioed-9z2p3pn12yqdaem",
+ "kind":"arvados#user",
+ "etag":"8u0xwb9f3otb2xx9hto4wyo03",
+ "uuid":"qr1hi-tpzed-92d3kxnimy3d4e8",
+ "owner_uuid":"qr1hi-tpqed-23iddeohxta2r59",
+ "created_at":"2013-12-02T17:05:47Z",
+ "modified_by_client_uuid":"qr1hi-xxfg8-owxa2oa2s33jyej",
+ "modified_by_user_uuid":"qr1hi-tpqed-23iddeohxta2r59",
+ "modified_at":"2013-12-02T17:07:08Z",
+ "updated_at":"2013-12-05T19:51:08Z",
+ "email":"you@example.com",
+ "full_name":"Example User",
+ "first_name":"Example",
+ "last_name":"User",
+ "identity_url":"https://www.google.com/accounts/o8/id?id=AItOawnhlZr-pQ_Ic2f2W22XaO02oL3avJ322k1",
+ "is_active": true,
+ "is_admin": false,
+ "prefs":{}
+}
+</code></pre>
+</notextile>
+
+However, if you receive the following message:
+
+bc. ARVADOS_API_HOST and ARVADOS_API_TOKEN need to be defined as environment variables
+
+follow the instructions for "getting an API token,":{{site.baseurl}}/user/reference/api-tokens.html and try @arv user current@ again.
diff --git a/doc/user/getting_started/community.html.textile.liquid b/doc/user/getting_started/community.html.textile.liquid
new file mode 100644 (file)
index 0000000..8b6e22d
--- /dev/null
@@ -0,0 +1,21 @@
+---
+layout: default
+navsection: userguide
+title: Arvados Community and Getting Help
+...
+
+h2. On the web
+
+The Arvados Free Software project page is located at "http://arvados.org":http://arvados.org .  The "Arvados Wiki":https://arvados.org/projects/arvados/wiki is a collaborative site for documenting Arvados and has an overview of the Arvados Platform and Components.  The "Arvados blog":https://arvados.org/projects/arvados/blogs posts articles of interest about Arvados.
+
+h2. Mailing lists
+
+The "Arvados user mailing list":http://lists.arvados.org/mailman/listinfo/arvados is a forum for general discussion, questions, and news about Arvados development.  The "Arvados developer mailing list":http://lists.arvados.org/mailman/listinfo/arvados-dev is a forum for more technical discussion, intended for developers and contributors to Arvados.
+
+h2. IRC
+
+The "#arvados":irc://irc.oftc.net:6667/#arvados IRC (Internet Relay Chat) channel on the "Open and Free Technology Community (irc.oftc.net)":http://www.oftc.net/oftc/ is available for live discussion and support.  You can use a traditional IRC client or "join OFTC over the web.":https://webchat.oftc.net/?channels=arvados
+
+h2. Bug tracking
+
+If you think you have found a bug, or would like to make a feature request, check the "Arvados issue tracker":https://arvados.org/projects/arvados/issues to see if it has already been reported or "add a new issue.":https://arvados.org/projects/arvados/issues/new
diff --git a/doc/user/getting_started/ssh-access-unix.html.textile.liquid b/doc/user/getting_started/ssh-access-unix.html.textile.liquid
new file mode 100644 (file)
index 0000000..83513b8
--- /dev/null
@@ -0,0 +1,115 @@
+---
+layout: default
+navsection: userguide
+title: Accessing an Arvados VM with SSH - Unix Environments
+...
+
+This document is for Unix environments (Linux, OS X, Cygwin). If you are using a Windows environment, please visit the "Accessing an Arvados VM with SSH - Windows Environments":ssh-access-windows.html page.
+
+{% include 'ssh_intro' %}
+
+h1(#gettingkey). Getting your SSH key
+
+h3(#unix). Generate a key using ssh-keygen
+
+Start by opening a terminal window.  Check if you have an existing public key:
+
+notextile. <pre><code>$ <span class="userinput">ls ~/.ssh/id_rsa.pub</span></code></pre>
+
+If the file @id_rsa.pub@ exists, then you may use your existing key.  Copy the contents of @~/.ssh/id_rsa.pub@ onto the clipboard (this is your public key).  You can skip the rest of this section and proceed by "adding your key to the Arvados Workbench.":#workbench
+
+If there is no file @~/.ssh/id_rsa.pub@, you must generate a new key.  Use @ssh-keygen@ to do this:
+
+<notextile>
+<pre><code>$ <span class="userinput">ssh-keygen -t rsa -C "you@example.com"</span>
+Generating public/private rsa key pair.
+Enter file in which to save the key (/home/example/.ssh/id_rsa):
+Enter passphrase (empty for no passphrase):
+Enter same passphrase again:
+</code></pre>
+</notextile>
+
+* @-t@ specifies the key type (must be "rsa")
+* @-C@ specifies a comment (to remember which account the key is associated with)
+
+We strongly recommend that you protect your key with a passphrase.  This means that when the key is used, you will be required to enter the passphrase.  However, unlike logging into remote system using a password, the passphrase is never sent over the network, it is only used to decrypt your private key.
+
+Display the contents of @~/.ssh/id_rsa.pub@ (this is your public key) using @cat@ and then copy it onto the clipboard:
+
+<notextile>
+<pre><code>$ <span class="userinput">cat ~/.ssh/id_rsa.pub</span>
+ssh-rsa AAAAB3NzaC1ycEDoNotUseExampleKeyDoNotUseExampleKeyDoNotUseExampleKeyDoNotUse9lmzkpBq983bQradKGT3LuKda9QOGe8MatI6wzSrJLSGhHm3hk6D8OWWUG4SneuCtKIk2bH0pgBj1G29+uzDIez90WzfCTZKbz4RcVQmPkowSSUAQDwb0ffwvRDhCgcJ1loT1wQAJzqJmljQ7xEYaCOIMqnfYE0lX7B3MSvCV6Ie2rWL33YecLp48LVtqiCOZU4XRyO8RSDFRFLVW+mjkLirwtDHZCRtORScaIEN0jw51p+T+9X5iA9QH/Mn+xlgk7fCgH+JtpBj808N/Qds2Gpff+Kb6ulUrVVfMK6L you@example.com
+</code></pre>
+</notextile>
+
+Now you can set up @ssh-agent@ (next) or proceed with "adding your key to the Arvados Workbench.":#workbench
+
+h3. Set up ssh-agent (recommended)
+
+If you find you are entering your passphrase frequently, you can use @ssh-agent@ to manage your credentials.  Use @ssh-add -l@ to test if you already have ssh-agent running:
+
+notextile. <pre><code>$ <span class="userinput">ssh-add -l</span></code></pre>
+
+If you get the error "Could not open a connection to your authentication agent" you will need to run @ssh-agent@ with the following command:
+
+notextile. <pre><code>$ <span class="userinput">eval $(ssh-agent -s)</span></code></pre>
+
+@ssh-agent -s@ prints out values for environment variables SSH_AUTH_SOCK and SSH_AGENT_PID and then runs in the background.  Using "eval" on the output as shown here causes those variables to be set in the current shell environment so that subsequent calls to SSH can discover how to access the agent process.
+
+After running @ssh-agent@, or if @ssh-add -l@ prints "The agent has no identities", add your key using the following command.  The passphrase to decrypt the key is the same used to protect the key when it was created with @ssh-keygen@:
+
+<notextile>
+<pre><code>$ <span class="userinput">ssh-add</span>
+Enter passphrase for /home/example/.ssh/id_rsa:
+Identity added: /home/example/.ssh/id_rsa (/home/example/.ssh/id_rsa)
+</code></pre>
+</notextile>
+
+When everything is set up, @ssh-add -l@ should yield output that looks something like this:
+
+<notextile>
+<pre><code>$ <span class="userinput">ssh-add -l</span>
+2048 eb:fa:15:f2:44:26:95:58:37:37:f4:aa:ff:ee:c2:85 you@example.com (RSA)
+</code></pre>
+</notextile>
+
+{% include 'ssh_addkey' %}
+
+h3. Connecting to the virtual machine
+
+Use the following command to connect to the _shell_ VM instance as _you_.  Replace *<code>you@shell</code>* at the end of the following command with your *login* and *hostname* from Workbench:
+
+notextile. <pre><code>$ <span class="userinput">ssh -o "ProxyCommand ssh -a -x -p2222 turnout@switchyard.{{ site.arvados_api_host }} <b>shell</b>" -A -x <b>you@shell</b></span></code></pre>
+
+This command does several things at once. You usually cannot log in directly to virtual machines over the public Internet.  Instead, you log into a "switchyard" server and then tell the switchyard which virtual machine you want to connect to.
+
+* @-o "ProxyCommand ..."@ configures SSH to run the specified command to create a proxy and route your connection through it.
+* @-a@ tells SSH not to forward your ssh-agent credentials to the switchyard.
+* @-x@ tells SSH not to forward your X session to the switchyard.
+* @-p2222@ specifies that the switchyard is running on non-standard port 2222.
+* <code>turnout@switchyard.{{ site.arvados_api_host }}</code> specifies the user (@turnout@) and hostname (@switchyard.{{ site.arvados_api_host }}@) of the switchyard server that will proxy our connection to the VM.
+* *@shell@* is the name of the VM that we want to connect to.  This is sent to the switchyard server as if it were an SSH command, and the switchyard server connects to the VM on our behalf.
+* After the ProxyCommand section, we repeat @-x@ to disable X session forwarding to the virtual machine.
+* @-A@ specifies that we want to forward access to @ssh-agent@ to the VM.
+* Finally, *<code>you@shell</code>* specifies your login name and repeats the hostname of the VM.  The username can be found in the *logins* column in the VMs Workbench page, discussed in the previous section.
+
+You should now be able to log into the Arvados VM and "check your environment.":check-environment.html
+
+h3. Configuration (recommended)
+
+The command line above is cumbersome, but you can configure SSH to remember many of these settings.  Add this text to the file @.ssh/config@ in your home directory (create a new file if @.ssh/config@ doesn't exist):
+
+<notextile>
+<pre><code class="userinput">Host *.arvados
+  ProxyCommand ssh -a -x -p2222 turnout@switchyard.{{ site.arvados_api_host }} $SSH_PROXY_FLAGS %h
+  User <b>you</b>
+  ForwardAgent yes
+  ForwardX11 no
+</code></pre>
+</notextile>
+
+This will recognize any host ending in ".arvados" and automatically apply the proxy, user and forwarding settings from the configuration file, allowing you to log in with a much simpler command:
+
+notextile. <pre><code>$ <span class="userinput">ssh <b>shell</b>.arvados</span></code></pre>
+
+You should now be able to log into the Arvados VM and "check your environment.":check-environment.html
diff --git a/doc/user/getting_started/ssh-access-windows.html.textile.liquid b/doc/user/getting_started/ssh-access-windows.html.textile.liquid
new file mode 100644 (file)
index 0000000..7a9ab27
--- /dev/null
@@ -0,0 +1,76 @@
+---
+layout: default
+navsection: userguide
+title: Accessing an Arvados VM with SSH - Windows Environments
+...
+
+This document is for Windows environments. If you are using a Unix environment (Linux, OS X, Cygwin), please visit the "Accessing an Arvados VM with SSH - Unix Environments":ssh-access-unix.html page.
+
+{% include 'ssh_intro' %}
+
+h1(#gettingkey). Getting your SSH key
+
+(Note: if you are using the SSH client that comes with "Cygwin":http://cygwin.com, please use instructions found in the "Accessing an Arvados VM with SSH - Unix Environments":ssh-access-unix.html page.)
+
+We will be using PuTTY to connect to Arvados. "PuTTY":http://www.chiark.greenend.org.uk/~sgtatham/putty/ is a free (MIT-licensed) Win32 Telnet and SSH client. PuTTY includes all the tools a Windows user needs to create private keys and make SSH connections to your virtual machines in the Arvados Cloud.
+
+You can "download PuTTY from its Web site":http://www.chiark.greenend.org.uk/~sgtatham/putty/.  Note that you should download the installer or .zip file with all of the PuTTY tools (PuTTYtel is not required).
+
+If you downloaded the zip file, extract it to the location you wish to install the PuTTY applications. This document assumes that you installed PuTTY in the default directory under @C:\Program Files\@ or @C:\Program Files (x86)\@ (if you are using a 64 bit operating system).
+
+h3. Step 1 - Adding PuTTY to the PATH
+
+# After downloading PuTTY and installing it, you should have a PuTTY folder in @C:\Program Files\@ or @C:\Program Files (x86)\@ (if you are using a 64 bit operating system).
+# Open the Control Panel.
+# Select _Advanced System Settings_, and choose _Environment Variables_.
+If you are using newer systems like Windows 7, you may use the following to open _Advanced System Settings_. Open Control Panel. Click on _System and Security_. Click on _System_. Click on _Advanced system settings_ and choose _Environment Variables..._
+# Under system variables, find and edit @PATH@.
+# If you installed PuTTY in @C:\Program Files\PuTTY\@, add the following to the end of PATH:
+<code>;C:\Program Files\PuTTY</code>
+If you installed PuTTY in @C:\Program Files (x86)\PuTTY\@, add the following to the end of PATH:
+<code>;C:\Program Files (x86)\PuTTY</code>
+# Click through the OKs to close all the dialogs you’ve opened.
+
+h3. Step 2 - Creating a Public Key
+
+# Start PuTTYgen from the Start Menu or the folder where it was installed.
+# At the bottom of the window, make sure the ‘Number of bits in a generated key’ field is set to 4096.
+# Click Generate and follow the instructions to generate a key.
+# Click the _Save public key_ button.
+# Click the _Save private key_ button (we recommend using a strong passphrase).
+# Select the text of the Public Key and copy it to the clipboard.
+
+h3. Step 3 - Set up Pageant
+
+Pageant is a PuTTY utility that manages your private keys so it is not necessary to enter your private key passphrase every time you make a new SSH connection.
+
+# Start Pageant from the Start Menu or the folder where it was installed.
+# Pageant will now be running in the system tray. Click the Pageant icon to configure.
+# Choose _Add Key_ and add the private key which you created in the previous step.
+
+{% include 'ssh_addkey' %}
+
+h3. Initial configuration
+
+# Open PuTTY from the Start Menu.
+# On the Session screen set the Host Name (or IP address) to “shell”, which is the hostname listed in the _Virtual Machines_ page.
+# On the Session screen set the Port to “22”.
+# On the Connection %(rarr)&rarr;% Data screen set the Auto-login username to the username listed in the *logins* column on the Arvados Workbench _Settings %(rarr)&rarr;% Virtual machines_ page.
+# On the Connection %(rarr)&rarr;% Proxy screen set the Proxy Type to “Local”.
+# On the Connection %(rarr)&rarr;% Proxy screen in the “Telnet command, or local proxy command” box enter:
+<code>plink -P 2222 turnout@switchyard.{{ site.arvados_api_host }} %host</code>
+Make sure there is no newline at the end of the text entry.
+# Return to the Session screen. In the Saved Sessions box, enter a name for this configuration and click Save.
+
+_Note: We recommend you do not delete the “Default” Saved Session._
+
+h3. Connecting to the VM
+
+# Open PuTTY from the Start Menu.
+# Click on the Saved Session name you created in the previous section.
+# Click Load to load those saved session settings.
+# Click Open to open the SSH window at the command prompt. You will now be logged into your virtual machine.
+
+_Note_: If you see a hung PuTTY terminal window with no further action: open a new _Command Prompt_ window using the Windows -> Start menu and type <code>plink -P 2222 turnout@switchyard.{{ site.arvados_api_host }} shell</code> in it. Please make sure to replace *shell* with the hostname listed in the _Virtual Machines_ page. Hit enter and type _y_ when prompted to cache the session state. Go back and start PuTTY session using the start menu button.
+
+You should now be able to log into the Arvados VM and "check your environment.":check-environment.html
diff --git a/doc/user/getting_started/workbench.html.textile.liquid b/doc/user/getting_started/workbench.html.textile.liquid
new file mode 100644 (file)
index 0000000..54ab71b
--- /dev/null
@@ -0,0 +1,17 @@
+---
+layout: default
+navsection: userguide
+title: Accessing Arvados Workbench
+...
+
+If you are using the default Arvados instance for this guide, you can Access Arvados Workbench using this link:
+
+<a href="https://{{ site.arvados_workbench_host }}/" target="_blank">https://{{ site.arvados_workbench_host }}/</a>
+
+(If you are using a different Arvados instance than the default for this guide, replace *{{ site.arvados_workbench_host }}* with your private instance in all of the examples in this guide.)
+
+You may be asked to log in using a Google account.  Arvados uses only your name and email address from Google services for identification, and will never access any personal information.  If you are accessing Arvados for the first time, the Workbench may indicate your account status is *New / inactive*.  If this is the case, contact the administrator of the Arvados instance to request activation of your account.
+
+Once your account is active, logging in to the Workbench will present you with the Dashboard. This gives a summary of your projects and recent activity in the Arvados instance.  "You are now ready to run your first pipeline.":{{ site.baseurl }}/user/tutorials/tutorial-pipeline-workbench.html
+
+!{{ site.baseurl }}/images/workbench-dashboard.png!
diff --git a/doc/user/index.html.textile.liquid b/doc/user/index.html.textile.liquid
new file mode 100644 (file)
index 0000000..46a55ae
--- /dev/null
@@ -0,0 +1,37 @@
+---
+layout: default
+navsection: userguide
+title: Welcome to Arvados!
+...
+
+_If you are new to Arvados and want to get started quickly, go to "Accessing Arvados Workbench.":{{site.baseurl}}/user/getting_started/workbench.html_
+
+This guide provides an introduction to using Arvados to solve big data bioinformatics problems, including:
+
+* Robust storage of very large files, such as whole genome sequences, using the "Arvados Keep":{{site.baseurl}}/user/tutorials/tutorial-keep.html content-addressable cluster file system.
+* Running compute-intensive genomic analysis pipelines, such as alignment and variant calls using the "Arvados Crunch":{{site.baseurl}}/user/tutorials/intro-crunch.html cluster compute engine.
+* Storing and querying metadata about genome sequence files, such as human subjects and their phenotypic traits using the "Arvados Metadata Database.":{{site.baseurl}}/user/topics/tutorial-trait-search.html
+* Accessing, organizing, and sharing data, pipelines and results using the "Arvados Workbench":{{site.baseurl}}/user/getting_started/workbench.html web application.
+
+The examples in this guide use the Arvados instance located at <a href="https://{{ site.arvados_workbench_host }}/" target="_blank">https://{{ site.arvados_workbench_host }}</a>.  If you are using a different Arvados instance replace @{{ site.arvados_workbench_host }}@ with your private instance in all of the examples in this guide.
+
+Curoverse maintains a public Arvados instance located at <a href="https://workbench.qr1hi.arvadosapi.com/" target="_blank">https://workbench.qr1hi.arvadosapi.com/</a>.  You must have an account in order to use this service.  If you would like to request an account, please send an email to "arvados@curoverse.com":mailto:arvados@curoverse.com.
+
+h2. Typographic conventions
+
+This manual uses the following typographic conventions:
+
+<notextile>
+<ul>
+<li>Code blocks which are set aside from the text indicate user input to the system.  Commands that should be entered into a Unix shell are indicated by the directory where you should  enter the command ('~' indicates your home directory) followed by '$', followed by the highlighted <span class="userinput">command to enter</span> (do not enter the '$'), and possibly followed by example command output in black.  For example, the following block indicates that you should type <code>ls foo.*</code> while in your home directory and the expected output will be "foo.input" and "foo.output".
+<pre><code>~$ <span class="userinput">ls foo.*</span>
+foo.input foo.output
+</code></pre>
+</li>
+
+<li>Code blocks inline with text emphasize specific <code>programs</code>, <code>files</code>, or <code>options</code> that are being discussed.</li>
+<li>Bold text emphasizes <b>specific items</b> to review on Arvados Workbench pages.</li>
+<li>A sequence of steps separated by right arrows (<span class="rarr">&rarr;</span>) indicate a path the user should follow through the Arvados Workbench.  The steps indicate a menu, hyperlink, column name, field name, or other label on the page that guide the user where to look or click.
+</li>
+</ul>
+</notextile>
diff --git a/doc/user/reference/api-tokens.html.textile.liquid b/doc/user/reference/api-tokens.html.textile.liquid
new file mode 100644 (file)
index 0000000..768c7d1
--- /dev/null
@@ -0,0 +1,44 @@
+---
+layout: default
+navsection: userguide
+title: "Getting an API token"
+...
+
+The Arvados API token is a secret key that enables the @arv@ command line client to access Arvados with the proper permissions.
+
+Access the Arvados Workbench using this link: "https://{{ site.arvados_workbench_host }}/":https://{{ site.arvados_workbench_host }}/  (Replace @{{ site.arvados_workbench_host }}@ with the hostname of your local Arvados Workbench instance if necessary.)
+
+Open a shell on the system where you want to use the Arvados client. This may be your local workstation, or an Arvados virtual machine accessed with SSH (instructions for "Unix":{{site.baseurl}}/user/getting_started/ssh-access-unix.html#login or "Windows":{{site.baseurl}}/user/getting_started/ssh-access-windows.html#login).
+
+Click on the link with your _email address_ in the upper right corner to access your account menu, then click on the menu item *Manage account* to go to the account management page. On the *Manage account* page, you will see the *Current Token* panel, which lists your current token and instructions to set up your environment.
+
+h2. Setting environment variables
+
+For your convenience, the *Manage account* page on Workbench provides the *Current Token* panel that includes a command you may copy and paste directly into the shell.  It will look something like the following.
+
+bc. HISTIGNORE=$HISTIGNORE:'export ARVADOS_API_TOKEN=*'
+export ARVADOS_API_TOKEN=2jv9346o396exampledonotuseexampledonotuseexes7j1ld
+export ARVADOS_API_HOST={{ site.arvados_api_host }}
+unset ARVADOS_API_HOST_INSECURE
+
+* The @export@ command puts a local shell variable into the environment that will be inherited by child processes such as the @arv@ client.
+
+h2. settings.conf
+
+Arvados tools will also look for the authentication information in @~/.config/arvados/settings.conf@. If you have already put the variables into the environment following the instructions above, you can use these commands to create an Arvados configuration file:
+
+<notextile>
+<pre><code>$ <span class="userinput">echo "ARVADOS_API_HOST=$ARVADOS_API_HOST" > ~/.config/arvados/settings.conf</span>
+$ <span class="userinput">echo "ARVADOS_API_TOKEN=$ARVADOS_API_TOKEN" >> ~/.config/arvados/settings.conf</span>
+</code></pre>
+</notextile>
+
+h2. .bashrc
+
+Alternately, you may add the declarations of @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ to the @~/.bashrc@ file on the system on which you intend to use the Arvados client.  If you have already put the variables into the environment following the instructions above, you can use these commands to append the environment variables to your @~/.bashrc@:
+
+<notextile>
+<pre><code>$ <span class="userinput">echo "export ARVADOS_API_HOST=$ARVADOS_API_HOST" >> ~/.bashrc</span>
+$ <span class="userinput">echo "export ARVADOS_API_TOKEN=$ARVADOS_API_TOKEN" >> ~/.bashrc</span>
+</code></pre>
+</notextile>
diff --git a/doc/user/reference/job-pipeline-ref.html.textile.liquid b/doc/user/reference/job-pipeline-ref.html.textile.liquid
new file mode 100644 (file)
index 0000000..f8f749c
--- /dev/null
@@ -0,0 +1,7 @@
+---
+layout: default
+navsection: userguide
+title: "Pipeline template reference"
+...
+
+Pipeline template options are described on the "pipeline template schema page.":{{site.baseurl}}/api/schema/PipelineTemplate.html
diff --git a/doc/user/topics/arv-docker.html.textile.liquid b/doc/user/topics/arv-docker.html.textile.liquid
new file mode 100644 (file)
index 0000000..0a0693f
--- /dev/null
@@ -0,0 +1,202 @@
+---
+layout: default
+navsection: userguide
+title: "Customizing Crunch environment using Docker"
+...
+
+This page describes how to customize the runtime environment (e.g. the programs, libraries, and other dependencies needed to run a job) that a crunch script will be run in using "Docker.":https://www.docker.com/  Docker is a tool for building and running containers that isolate applications from other applications running on the same node.  For detailed information about Docker, see the "Docker User Guide.":https://docs.docker.com/userguide/
+
+This page will demonstrate how to:
+
+# Fetch the arvados/jobs Docker image
+# Manually install additional software into the container
+# Create a new custom image
+# Upload that image to Arvados for use by Crunch jobs
+# Share your image with others
+
+{% include 'tutorial_expectations' %}
+
+You also need to ensure that "Docker is installed,":https://docs.docker.com/installation/ the Docker daemon is running, and you have permission to access Docker.  You can test this by running @docker version@.  If you receive a permission denied error, your user account may need to be added to the @docker@ group.  If you have root access, you can add yourself to the @docker@ group using @$ sudo addgroup $USER docker@ then log out and log back in again; otherwise consult your local sysadmin.
+
+h2. Fetch a starting image
+
+The easiest way to begin is to start from the "arvados/jobs" image which already has the Arvados SDK installed along with other configuration required for use with Crunch.
+
+Download the latest "arvados/jobs" image from the Docker registry:
+
+<notextile>
+<pre><code>$ <span class="userinput">docker pull arvados/jobs</span>
+Pulling repository arvados/jobs
+3132168f2acb: Download complete
+a42b7f2c59b6: Download complete
+e5afdf26a7ae: Download complete
+5cae48636278: Download complete
+7a4f91b70558: Download complete
+a04a275c1fd6: Download complete
+c433ff206a22: Download complete
+b2e539b45f96: Download complete
+073b2581c6be: Download complete
+593915af19dc: Download complete
+32260b35005e: Download complete
+6e5b860c1cde: Download complete
+95f0bfb43d4d: Download complete
+c7fd77eedb96: Download complete
+0d7685aafd00: Download complete
+</code></pre>
+</notextile>
+
+h2. Install new packages
+
+Next, enter the container using @docker run@, providing the arvados/jobs image and the program you want to run (in this case the bash shell).
+
+<notextile>
+<pre><code>$ <span class="userinput">docker run --interactive --tty --user root arvados/jobs /bin/bash</span>
+root@a0e8299b59aa:/#
+</code></pre>
+</notextile>
+
+Next, update the package list using @apt-get update@.
+
+<notextile>
+<pre><code>root@a0e8299b59aa:/# <span class="userinput">apt-get update</span>
+Get:1 http://apt.arvados.org wheezy Release.gpg [490 B]
+Get:2 http://apt.arvados.org wheezy Release [1568 B]
+Get:3 http://apt.arvados.org wheezy/main amd64 Packages [34.6 kB]
+Get:4 http://ftp.us.debian.org wheezy Release.gpg [1655 B]
+Get:5 http://ftp.us.debian.org wheezy-updates Release.gpg [836 B]
+Get:6 http://ftp.us.debian.org wheezy Release [168 kB]
+Ign http://apt.arvados.org wheezy/main Translation-en
+Get:7 http://security.debian.org wheezy/updates Release.gpg [836 B]
+Get:8 http://security.debian.org wheezy/updates Release [102 kB]
+Get:9 http://ftp.us.debian.org wheezy-updates Release [124 kB]
+Get:10 http://ftp.us.debian.org wheezy/main amd64 Packages [5841 kB]
+Get:11 http://security.debian.org wheezy/updates/main amd64 Packages [218 kB]
+Get:12 http://security.debian.org wheezy/updates/main Translation-en [123 kB]
+Hit http://ftp.us.debian.org wheezy/main Translation-en
+Hit http://ftp.us.debian.org wheezy-updates/main amd64 Packages/DiffIndex
+Hit http://ftp.us.debian.org wheezy-updates/main Translation-en/DiffIndex
+Fetched 6617 kB in 5s (1209 kB/s)
+Reading package lists... Done
+</code></pre>
+</notextile>
+
+In this example, we will install the "R" statistical language Debian package "r-base-core".  Use @apt-get install@:
+
+<notextile>
+<pre><code>root@a0e8299b59aa:/# <span class="userinput">apt-get install r-base-core</span>
+Reading package lists... Done
+Building dependency tree
+Reading state information... Done
+The following extra packages will be installed:
+  [...]
+libxv1 libxxf86dga1 libxxf86vm1 r-base-core r-base-dev r-base-html r-cran-boot r-cran-class r-cran-cluster r-cran-codetools
+  [...]
+Suggested packages:
+  [...]
+The following NEW packages will be installed:
+  [...]
+  libxv1 libxxf86dga1 libxxf86vm1 r-base r-base-core r-base-dev r-base-html r-cran-boot r-cran-class r-cran-cluster
+  [...]
+0 upgraded, 107 newly installed, 0 to remove and 9 not upgraded.
+Need to get 88.2 MB of archives.
+After this operation, 219 MB of additional disk space will be used.
+Do you want to continue [Y/n]? y
+[...]
+Get:85 http://ftp.us.debian.org/debian/ wheezy/main r-base-core amd64 2.15.1-4 [20.6 MB]
+Get:86 http://ftp.us.debian.org/debian/ wheezy/main r-base-dev all 2.15.1-4 [3882 B]
+Get:87 http://ftp.us.debian.org/debian/ wheezy/main r-cran-boot all 1.3-5-1 [472 kB]
+[...]
+Fetched 88.2 MB in 2min 17s (642 kB/s)
+Extracting templates from packages: 100%
+Preconfiguring packages ...
+[...]
+Unpacking r-base-core (from .../r-base-core_2.15.1-4_amd64.deb) ...
+Selecting previously unselected package r-base-dev.
+Unpacking r-base-dev (from .../r-base-dev_2.15.1-4_all.deb) ...
+Selecting previously unselected package r-cran-boot.
+Unpacking r-cran-boot (from .../r-cran-boot_1.3-5-1_all.deb) ...
+[...]
+Setting up r-base-core (2.15.1-4) ...
+Setting R_PAPERSIZE_USER default to 'a4'
+
+Creating config file /etc/R/Renviron with new version
+Setting up r-base-dev (2.15.1-4) ...
+Setting up r-cran-boot (1.3-5-1) ...
+[...]
+</code></pre>
+</notextile>
+
+Now we can verify that "R" is installed:
+
+<notextile>
+<pre><code>root@a0e8299b59aa:/# <span class="userinput">R</span>
+
+R version 2.15.1 (2012-06-22) -- "Roasted Marshmallows"
+Copyright (C) 2012 The R Foundation for Statistical Computing
+ISBN 3-900051-07-0
+Platform: x86_64-pc-linux-gnu (64-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+>
+</code></pre>
+</notextile>
+
+Note that you are not limited to installing Debian packages.  You may compile programs or libraries from source and install them, edit systemwide configuration files, use other package managers such as @pip@ or @gem@, and perform any other customization necessary to run your program.
+
+h2. Create a new image
+
+We're now ready to create a new Docker image.  First, quit the container, then use @docker commit@ to create a new image from the stopped container.  The container id can be found in the default hostname of the container displayed in the prompt, in this case @a0e8299b59aa@:
+
+<notextile>
+<pre><code>root@a0e8299b59aa:/# <span class="userinput">exit</span>
+$ <span class="userinput">docker commit a0e8299b59aa arvados/jobs-with-r</span>
+33ea6b87792364cb9989a149c36a31e5a9c8cf96694ba05f66545ad7b842522e
+$ <span class="userinput">docker images</span>
+REPOSITORY            TAG                 IMAGE ID            CREATED              VIRTUAL SIZE
+arvados/jobs-with-r   latest              33ea6b877923        43 seconds ago       1.607 GB
+arvados/jobs          latest              3132168f2acb        22 hours ago         1.314 GB
+</code></pre>
+</notextile>
+
+h2. Upload your image
+
+Finally, we are ready to upload the new Docker image to Arvados.  Use @arv keep docker@ with the image repository name to upload the image.  Without arguments, @arv keep docker@ will print out the list of Docker images in Arvados that are available to you.
+
+<notextile>
+<pre><code>$ <span class="userinput">arv keep docker arvados/jobs-with-r</span>
+1591M / 1591M 100.0%
+Collection saved as 'Docker image arvados/jobs-with-r:latest 33ea6b877923'
+qr1hi-4zz18-3fk2px2ji25nst2
+$ <span class="userinput">arv keep docker</span>
+REPOSITORY                      TAG         IMAGE ID      COLLECTION                     CREATED
+arvados/jobs-with-r             latest      33ea6b877923  qr1hi-4zz18-3fk2px2ji25nst2    Thu Oct 16 13:58:53 2014
+</code></pre>
+</notextile>
+
+You are now able to specify the runtime environment for your program using the @docker_image@ field of the @runtime_constraints@ section of your pipeline components:
+
+<notextile>
+{% code 'example_docker' as javascript %}
+</notextile>
+
+* The @docker_image@ field can be one of: the Docker repository name (as shown above), the Docker image hash, the Arvados collection UUID, or the Arvados collection portable data hash.
+
+h2. Share Docker images
+
+Docker images are subject to normal Arvados permissions.  If wish to share your Docker image with others (or wish to share a pipeline template that uses your Docker image) you will need to use @arv keep docker@ with the @--project-uuid@ option to upload the image to a shared project.
+
+<notextile>
+<pre><code>$ <span class="userinput">arv keep docker --project-uuid zzzzz-j7d0g-u7zg1qdaowykd8d arvados/jobs-with-r</span>
+</code></pre>
+</notextile>
diff --git a/doc/user/topics/arv-run.html.textile.liquid b/doc/user/topics/arv-run.html.textile.liquid
new file mode 100644 (file)
index 0000000..300ff2f
--- /dev/null
@@ -0,0 +1,166 @@
+---
+layout: default
+navsection: userguide
+title: "Using arv-run"
+...
+
+The @arv-run@ command enables you to create Arvados pipelines at the command line that fan out to multiple concurrent tasks across Arvados compute nodes.
+
+{% include 'tutorial_expectations' %}
+
+h1. Usage
+
+Using @arv-run@ you can write and test command lines interactively, then insert @arv-run@ at the beginning of the command line to run the command on Arvados.  For example:
+
+<notextile>
+<pre>
+$ <span class="userinput">cd ~/keep/by_id/3229739b505d2b878b62aed09895a55a+142</span>
+$ <span class="userinput">ls *.fastq</span>
+HWI-ST1027_129_D0THKACXX.1_1.fastq  HWI-ST1027_129_D0THKACXX.1_2.fastq
+$ <span class="userinput">grep -H -n ATTGGAGGAAAGATGAGTGAC HWI-ST1027_129_D0THKACXX.1_1.fastq</span>
+HWI-ST1027_129_D0THKACXX.1_1.fastq:14:TCTGGCCCCTGTTGTCTGCATGTAACTTAATACCACAACCAGGCATAGGGGAAAGATTGGAGGAAAGATGAGTGACAGCATCAACTTCTCTCCCAACCTA
+HWI-ST1027_129_D0THKACXX.1_1.fastq:18:AACCAGGCATAGGGGAAAGATTGGAGGAAAGATGAGTGACAGCATCAACTTCTCTCACAACCTAGGCCAGTAAGTAGTGCTTGTGCTCATCTCCTTGGCT
+HWI-ST1027_129_D0THKACXX.1_1.fastq:30:ATAGGGGAAAGATTGGAGGAAAGATGAGTGACAGCATCAACTTCTCTCACAACCTAGGCCAGTAAGTAGTGCTTGTGCTCATCTCCTTGGCTGTGATACG
+$ <span class="userinput">arv-run grep -H -n ATTGGAGGAAAGATGAGTGAC HWI-ST1027_129_D0THKACXX.1_1.fastq</span>
+Running pipeline qr1hi-d1hrv-mg3bju0u7r6w241
+[...]
+ 0 stderr run-command: grep -H -n ATTGGAGGAAAGATGAGTGAC /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq
+ 0 stderr /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq:14:TCTGGCCCCTGTTGTCTGCATGTAACTTAATACCACAACCAGGCATAGGGGAAAGATTGGAGGAAAGATGAGTGACAGCATCAACTTCTCTCCCAACCTA
+ 0 stderr /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq:18:AACCAGGCATAGGGGAAAGATTGGAGGAAAGATGAGTGACAGCATCAACTTCTCTCACAACCTAGGCCAGTAAGTAGTGCTTGTGCTCATCTCCTTGGCT
+ 0 stderr /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq:30:ATAGGGGAAAGATTGGAGGAAAGATGAGTGACAGCATCAACTTCTCTCACAACCTAGGCCAGTAAGTAGTGCTTGTGCTCATCTCCTTGGCTGTGATACG
+ 0 stderr run-command: completed with exit code 0 (success)
+[...]
+</pre>
+</notextile>
+
+A key feature of @arv-run@ is the ability to introspect the command line to determine which arguments are file inputs, and transform those paths so they are usable inside the Arvados container.  In the above example, @HWI-ST1027_129_D0THKACXX.1_1.fastq@ is transformed into @/keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq@.  @arv-run@ also works together with @arv-mount@ to identify that the file is already part of an Arvados collection.  In this case, it will use the existing collection without any upload step.  If you specify a file that is only available on the local filesystem, @arv-run@ will upload a new collection.
+
+If you find that @arv-run@ is incorrectly rewriting one of your command line arguments, place a backslash @\@ at the beginning of the affected argument to quote it (suppress rewriting).
+
+h2. Parallel tasks
+
+@arv-run@ will parallelize over files listed on the command line after @--@.
+
+<notextile>
+<pre>
+HWI-ST1027_129_D0THKACXX.1_1.fastq  HWI-ST1027_129_D0THKACXX.1_2.fastq
+$ <span class="userinput">arv-run grep -H -n ATTGGAGGAAAGATGAGTGAC -- *.fastq</span>
+Running pipeline qr1hi-d1hrv-mg3bju0u7r6w241
+[...]
+ 0 stderr run-command: parallelizing on input0 with items [u'/keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq', u'/keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_2.fastq']
+[...]
+ 1 stderr run-command: grep -H -n ATTGGAGGAAAGATGAGTGAC /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq
+ 2 stderr run-command: grep -H -n ATTGGAGGAAAGATGAGTGAC /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_2.fastq
+[...]
+ 1 stderr /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq:14:TCTGGCCCCTGTTGTCTGCATGTAACTTAATACCACAACCAGGCATAGGGGAAAGATTGGAGGAAAGATGAGTGACAGCATCAACTTCTCTCCCAACCTA
+ 1 stderr /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq:18:AACCAGGCATAGGGGAAAGATTGGAGGAAAGATGAGTGACAGCATCAACTTCTCTCACAACCTAGGCCAGTAAGTAGTGCTTGTGCTCATCTCCTTGGCT
+ 1 stderr /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq:30:ATAGGGGAAAGATTGGAGGAAAGATGAGTGACAGCATCAACTTCTCTCACAACCTAGGCCAGTAAGTAGTGCTTGTGCTCATCTCCTTGGCTGTGATACG
+ 1 stderr run-command: completed with exit code 0 (success)
+ 2 stderr /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_2.fastq:34:CTGGCCCCTGTTGTCTGCATGTAACTTAATACCACAACCAGGCATAGGGGAAAGATTGGAGGAAAGATGAGTGACAGCATCAACTTCTCTCACAACCTAG
+ 2 stderr run-command: completed with exit code 0 (success)
+</pre>
+</notextile>
+
+You may specify @--batch-size N@ (or the short form @-bN@) after the @--@ but before listing any files to specify how many files to put on the command line for each task.  See "Putting it all together" below for an example.
+
+h2. Redirection
+
+You may use standard input (@<@) and standard output (@>@) redirection.  This will create a separate task for each file listed in standard input.  You are only permitted to supply a single file name for stdout @>@ redirection.  If there are multiple tasks with their output sent to the same file, the output will be collated at the end of the pipeline.
+
+(Note: because the syntax is designed to mimic standard shell syntax, it is necessary to quote the metacharacters @<@, @>@ and @|@ as either @\<@, @\>@ and @\|@ or @'<'@, @'>'@ and @'|'@.)
+
+<notextile>
+<pre>
+$ <span class="userinput">arv-run grep -H -n ATTGGAGGAAAGATGAGTGAC \< *.fastq \> output.txt</span>
+[...]
+ 1 stderr run-command: grep -H -n ATTGGAGGAAAGATGAGTGAC < /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq > output.txt
+ 2 stderr run-command: grep -H -n ATTGGAGGAAAGATGAGTGAC < /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_2.fastq > output.txt
+ 2 stderr run-command: completed with exit code 0 (success)
+ 2 stderr run-command: the following output files will be saved to keep:
+ 2 stderr run-command: 121 ./output.txt
+ 2 stderr run-command: start writing output to keep
+ 1 stderr run-command: completed with exit code 0 (success)
+ 1 stderr run-command: the following output files will be saved to keep:
+ 1 stderr run-command: 363 ./output.txt
+ 1 stderr run-command: start writing output to keep
+ 2 stderr upload wrote 121 total 121
+ 1 stderr upload wrote 363 total 363
+[..]
+</pre>
+</notextile>
+
+You may use "run-command":run-command.html parameter substitution in the output file name to generate different filenames for each task:
+
+<notextile>
+<pre>
+$ <span class="userinput">arv-run grep -H -n ATTGGAGGAAAGATGAGTGAC \< *.fastq \> '$(task.uuid).txt'</span>
+[...]
+ 1 stderr run-command: grep -H -n ATTGGAGGAAAGATGAGTGAC < /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq > qr1hi-ot0gb-hmmxf2zubfpmhfk.txt
+ 2 stderr run-command: grep -H -n ATTGGAGGAAAGATGAGTGAC < /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_2.fastq > qr1hi-ot0gb-iu2xgy4hkx4mmri.txt
+ 1 stderr run-command: completed with exit code 0 (success)
+ 1 stderr run-command: the following output files will be saved to keep:
+ 1 stderr run-command:          363 ./qr1hi-ot0gb-hmmxf2zubfpmhfk.txt
+ 1 stderr run-command: start writing output to keep
+ 1 stderr upload wrote 363 total 363
+ 2 stderr run-command: completed with exit code 0 (success)
+ 2 stderr run-command: the following output files will be saved to keep:
+ 2 stderr run-command:          121 ./qr1hi-ot0gb-iu2xgy4hkx4mmri.txt
+ 2 stderr run-command: start writing output to keep
+ 2 stderr upload wrote 121 total 121
+[...]
+</pre>
+</notextile>
+
+h2. Pipes
+
+Multiple commands may be connected by pipes and execute in the same container:
+
+<notextile>
+<pre>
+$ <span class="userinput">arv-run cat -- *.fastq \| grep -H -n ATTGGAGGAAAGATGAGTGAC \> output.txt</span>
+[...]
+ 1 stderr run-command: cat /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq | grep -H -n ATTGGAGGAAAGATGAGTGAC > output.txt
+ 2 stderr run-command: cat /keep/3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_2.fastq | grep -H -n ATTGGAGGAAAGATGAGTGAC > output.txt
+[...]
+</pre>
+</notextile>
+
+If you need to capture intermediate results of a pipe, use the @tee@ command.
+
+h2. Running a shell script
+
+<notextile>
+<pre>
+$ <span class="userinput">echo 'echo hello world' > hello.sh</span>
+$ <span class="userinput">arv-run /bin/sh hello.sh</span>
+Upload local files: "hello.sh"
+Uploaded to qr1hi-4zz18-23u3hxugbm71qmn
+Running pipeline qr1hi-d1hrv-slcnhq5czo764b1
+[...]
+ 0 stderr run-command: /bin/sh /keep/5d3a4131b7d8f233f2a917d8a5c3c2b2+52/hello.sh
+ 0 stderr hello world
+ 0 stderr run-command: completed with exit code 0 (success)
+[...]
+</pre>
+</notextile>
+
+h2. Additional options
+
+* @--docker-image IMG@ : By default, commands run inside a Docker container created from the latest "arvados/jobs" Docker image.  Use this option to specify a different image to use.  Note: the Docker image must be uploaded to Arvados using @arv keep docker@.
+* @--dry-run@ : Print out the final Arvados pipeline generated by @arv-run@ without submitting it.
+* @--local@ : By default, the pipeline will be submitted to your configured Arvados instance.  Use this option to run the command locally using @arv-run-pipeline-instance --run-jobs-here@.
+* @--ignore-rcode@ : Some commands use non-zero exit codes to indicate nonfatal conditions (e.g. @grep@ returns 1 when no match is found).  Set this to indicate that commands that return non-zero return codes should not be considered failed.
+* @--no-wait@ : Do not wait and display logs after submitting command, just exit.
+
+h2. Putting it all together: bwa mem
+
+<notextile>
+<pre>
+$ <span class="userinput">cd ~/keep/by_id/d0136bc494c21f79fc1b6a390561e6cb+2778</span>
+$ <span class="userinput">arv-run --docker-image arvados/jobs-java-bwa-samtools bwa mem ../3514b8e5da0e8d109946bc809b20a78a+5698/human_g1k_v37.fasta -- --batch-size 2 *.fastq.gz \> '$(task.uuid).sam'</span>
+ 0 stderr run-command: parallelizing on input0 with items [[u'/keep/d0136bc494c21f79fc1b6a390561e6cb+2778/HWI-ST1027_129_D0THKACXX.1_1.fastq.gz', u'/keep/d0136bc494c21f79fc1b6a390561e6cb+2778/HWI-ST1027_129_D0THKACXX.1_2.fastq.gz'], [u'/keep/d0136bc494c21f79fc1b6a390561e6cb+2778/HWI-ST1027_129_D0THKACXX.2_1.fastq.gz', u'/keep/d0136bc494c21f79fc1b6a390561e6cb+2778/HWI-ST1027_129_D0THKACXX.2_2.fastq.gz']]
+[...]
+ 1 stderr run-command: bwa mem /keep/3514b8e5da0e8d109946bc809b20a78a+5698/human_g1k_v37.fasta /keep/d0136bc494c21f79fc1b6a390561e6cb+2778/HWI-ST1027_129_D0THKACXX.1_1.fastq.gz /keep/d0136bc494c21f79fc1b6a390561e6cb+2778/HWI-ST1027_129_D0THKACXX.1_2.fastq.gz > qr1hi-ot0gb-a4bzzyqqz4ubair.sam
+ 2 stderr run-command: bwa mem /keep/3514b8e5da0e8d109946bc809b20a78a+5698/human_g1k_v37.fasta /keep/d0136bc494c21f79fc1b6a390561e6cb+2778/HWI-ST1027_129_D0THKACXX.2_1.fastq.gz /keep/d0136bc494c21f79fc1b6a390561e6cb+2778/HWI-ST1027_129_D0THKACXX.2_2.fastq.gz > qr1hi-ot0gb-14j9ncw0ymkxq0v.sam
+</pre>
+</notextile>
diff --git a/doc/user/topics/keep.html.textile.liquid b/doc/user/topics/keep.html.textile.liquid
new file mode 100644 (file)
index 0000000..9d5cae5
--- /dev/null
@@ -0,0 +1,54 @@
+---
+layout: default
+navsection: userguide
+title: "How Keep works"
+...
+
+The Arvados distributed file system is called *Keep*.  Keep is a content-addressable file system.  This means that files are managed using special unique identifiers derived from the _contents_ of the file (specifically, the MD5 hash), rather than human-assigned file names.  This has a number of advantages:
+* Files can be stored and replicated across a cluster of servers without requiring a central name server.
+* Both the server and client systematically validate data integrity because the checksum is built into the identifier.
+* Data duplication is minimized—two files with the same contents will have the same identifier, and will not be stored twice.
+* It avoids data race conditions, since an identifier always points to the same data.
+
+In Keep, information is stored in *data blocks*.  Data blocks are normally between 1 byte and 64 megabytes in size.  If a file exceeds the maximum size of a single data block, the file will be split across multiple data blocks until the entire file can be stored.  These data blocks may be stored and replicated across multiple disks, servers, or clusters.  Each data block has its own identifier for the contents of that specific data block.
+
+In order to reassemble the file, Keep stores a *collection* data block which lists in sequence the data blocks that make up the original file.  A collection data block may store the information for multiple files, including a directory structure.
+
+In this example we will use @c1bad4b39ca5a924e481008009d94e32+210@, which we added to Keep in "how to upload data":{{ site.baseurl }}/user/tutorials/tutorial-keep.html.  First let us examine the contents of this collection using @arv keep get@:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv keep get c1bad4b39ca5a924e481008009d94e32+210</span>
+. 204e43b8a1185621ca55a94839582e6f+67108864 b9677abbac956bd3e86b1deb28dfac03+67108864 fc15aff2a762b13f521baf042140acec+67108864 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:227212247:var-GS000016015-ASM.tsv.bz2
+</code></pre>
+</notextile>
+
+The command @arv keep get@ fetches the contents of the collection @c1bad4b39ca5a924e481008009d94e32+210@.  In this example, this collection includes a single file @var-GS000016015-ASM.tsv.bz2@ which is 227212247 bytes long, and is stored using four sequential data blocks, @204e43b8a1185621ca55a94839582e6f+67108864@, @b9677abbac956bd3e86b1deb28dfac03+67108864@, @fc15aff2a762b13f521baf042140acec+67108864@, and @323d2a3ce20370c4ca1d3462a344f8fd+25885655@.
+
+Let's use @arv keep get@ to download the first data block:
+
+notextile. <pre><code>~$ <span class="userinput">cd /scratch/<b>you</b></span>
+/scratch/<b>you</b>$ <span class="userinput">arv keep get 204e43b8a1185621ca55a94839582e6f+67108864 &gt; block1</span></code></pre>
+
+{% include 'notebox_begin' %}
+
+When you run this command, you may get this API warning:
+
+notextile. <pre><code>WARNING:root:API lookup failed for collection 204e43b8a1185621ca55a94839582e6f+67108864 (&lt;class 'apiclient.errors.HttpError'&gt;: &lt;HttpError 404 when requesting https://qr1hi.arvadosapi.com/arvados/v1/collections/204e43b8a1185621ca55a94839582e6f%2B67108864?alt=json returned "Not Found"&gt;)</code></pre>
+
+This happens because @arv keep get@ tries to find a collection with this identifier.  When that fails, it emits this warning, then looks for a data block instead, which succeeds.
+
+{% include 'notebox_end' %}
+
+Let's look at the size and compute the MD5 hash of @block1@:
+
+<notextile>
+<pre><code>/scratch/<b>you</b>$ <span class="userinput">ls -l block1</span>
+-rw-r--r-- 1 you group 67108864 Dec  9 20:14 block1
+/scratch/<b>you</b>$ <span class="userinput">md5sum block1</span>
+204e43b8a1185621ca55a94839582e6f  block1
+</code></pre>
+</notextile>
+
+Notice that the block identifier <code>204e43b8a1185621ca55a94839582e6f+67108864</code> consists of:
+* the MD5 hash of @block1@, @204e43b8a1185621ca55a94839582e6f@, plus
+* the size of @block1@, @67108864@.
diff --git a/doc/user/topics/run-command.html.textile.liquid b/doc/user/topics/run-command.html.textile.liquid
new file mode 100644 (file)
index 0000000..ca0045b
--- /dev/null
@@ -0,0 +1,281 @@
+---
+layout: default
+navsection: userguide
+title: "run-command reference"
+...
+
+The @run-command@ crunch script enables you to run command line programs.
+
+h1. Using run-command
+
+The basic @run-command@ process evaluates its inputs and builds a command line, executes the command, and saves the contents of the output directory back to Keep.  For large datasets, @run-command@ can schedule concurrent tasks to execute the wrapped program over a range of inputs (see @task.foreach@ below.)
+
+@run-command@ is controlled through the @script_parameters@ section of a pipeline component.  @script_parameters@ is a JSON object consisting of key-value pairs.  There are three categories of keys that are meaningful to run-command:
+* The @command@ section defining the template to build the command line of the task
+* Special processing directives such as @task.foreach@ @task.cwd@ @task.vwd@ @task.stdin@ @task.stdout@
+* User-defined parameters (everything else)
+
+In the following examples, you can use "dry run mode" to determine the command line that @run-command@ will use without actually running the command.  For example:
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd $HOME/arvados/crunch_scripts</span>
+~$ <span class="userinput">./run-command --dry-run --script-parameters '{
+  "command": ["echo", "hello world"]
+}'</span>
+run-command: echo hello world
+</code></pre>
+</notextile>
+
+h2. Command template
+
+The value of the "command" key is a list.  The first parameter of the list is the actual program to invoke, followed by the command arguments.  The simplest @run-command@ invocation simply runs a program with static parameters.  In this example, run "echo" with the first argument "hello world":
+
+<pre>
+{
+  "command": ["echo", "hello world"]
+}
+</pre>
+
+Running this job will print "hello world" to the job log.
+
+By default, the command will start with the current working directory set to the output directory.  Anything written to the output directory will be saved to Keep when the command is finished.  You can change the default working directory using @task.cwd@ and get the path to the output directory using @$(task.outdir)@ as explained below.
+
+Items in the "command" list may include lists and objects in addition to strings.  Lists are flattened to produce the final command line.  JSON objects are evaluated as list item functions (see below).  For example, the following evaluates to @["echo", "hello", "world"]@:
+
+<pre>
+{
+  "command": ["echo", ["hello", "world"]]
+}
+</pre>
+
+Finally, if "command" is a list of lists, it specifies a Unix pipeline where the standard output of the previous command is piped into the standard input of the next command.  The following example describes the Unix pipeline @cat foo | grep bar@:
+
+<pre>
+{
+  "command": [["cat", "foo"], ["grep", "bar"]]
+}
+</pre>
+
+h2. Parameter substitution
+
+The "command" list can include parameter substitutions.  Substitutions are enclosed in "$(...)" and may contain the name of a user-defined parameter.  In the following example, the value of "a" is "hello world"; so when "command" is evaluated, it will substitute "hello world" for "$(a)":
+
+<pre>
+{
+  "a": "c1bad4b39ca5a924e481008009d94e32+210/var-GS000016015-ASM.tsv.bz2",
+  "command": ["echo", "$(file $(a))"]
+}
+</pre>
+
+table(table table-bordered table-condensed).
+|_. Function|_. Action|
+|$(file ...)       | Takes a reference to a file within an Arvados collection and evaluates to a file path on the local file system where that file can be accessed by your command.  Will raise an error if the file is not accessible.|
+|$(dir ...)        | Takes a reference to an Arvados collection or directory within an Arvados collection and evaluates to a directory path on the local file system where that directory can be accessed by your command.  The path may include a file name, in which case it will evaluate to the parent directory of the file.  Uses Python's os.path.dirname(), so "/foo/bar" will evaluate to "/foo" but "/foo/bar/" will evaluate to "/foo/bar".  Will raise an error if the directory is not accessible. |
+|$(basename&nbsp;...)   | Strip leading directory and trailing file extension from the path provided.  For example, $(basename /foo/bar.baz.txt) will evaluate to "bar.baz".|
+|$(glob ...)       | Take a Unix shell path pattern (supports @*@ @?@ and @[]@) and search the local filesystem, returning the first match found.  Use together with $(dir ...) to get a local filesystem path for Arvados collections.  For example: $(glob $(dir $(mycollection)/*.bam)) will find the first .bam file in the collection specified by the user parameter "mycollection".  If there is more than one match, which one is returned is undefined.  Will raise an error if no matches are found.|
+
+h2. List context
+
+Where specified by the documentation, parameters may be evaluated in a "list context".  That means the value will evaluate to a list instead of a string.  Parameter values can be a static list, a path to a file, a path to a directory, or a JSON object describing a list context function.
+
+If the value is a string, it is interpreted as a path.  If the path specifies a regular file, that file will be opened as a text file and produce a list with one item for each line in the file (end-of-line characters will be stripped).  If the path specifies a directory, produce a list containing all of the entries in the directory.  Note that parameter expansion is not performed on list items produced this way.
+
+If the value is a static list, it will evaluate each item and return the expanded list.  Each item may be a string (evaluated for parameter substitution), a list (recursively evaluated), or a JSON object (indicating a list function, described below).
+
+If the value is a JSON object, it is evaluated as a list function described below.
+
+h2. List functions
+
+When @run-command@ is evaluating a list (such as "command"), in addition to string parameter substitution, you can use list item functions.  In the following functions, you specify the name of a user parameter to act on (@"$(a)"@ in the first example); the value of that user parameter will be evaluated in a list context (as described above) to get the list value. Alternately, you can provide the list value directly inline.  As an example, the following two fragments yield the same result:
+
+<pre>
+{
+  "a": ["alice", "bob"],
+  "command": ["echo", {"foreach": "$(a)",
+                       "var": "a_var",
+                       "command": ["--something", "$(a_var)"]}]
+}
+</pre>
+
+<pre>
+{
+  "command": ["echo", {"foreach": ["alice", "bob"],
+                       "var": "a_var",
+                       "command": ["--something", "$(a_var)"]}]
+}
+</pre>
+
+Note: when you provide the list inline with "foreach" or "index", you must include the "var" parameter to specify the substitution variable name to use when evaluating the command fragment.
+
+You can also nest functions.  This filters @["alice", "bob", "betty"]@ on the regular expression @"b.*"@ to get the list @["bob", "betty"]@, assigns @a_var@ to each value of the list, then expands @"command"@ to get @["--something", "bob", "--something", "betty"]@.
+
+<pre>
+{
+  "command": ["echo", {"foreach": {"filter": ["alice", "bob", "betty"],
+                                   "regex": "b.*"},
+                       "var": "a_var",
+                       "command": ["--something", "$(a_var)"]}]
+}
+</pre>
+
+h3. foreach
+
+The @foreach@ list item function (not to be confused with the @task.foreach@ directive) expands a command template for each item in the specified user parameter (the value of the user parameter is evaluated in a list context, as described above).  The following example will evaluate "command" to @["echo", "--something", "alice", "--something", "bob"]@:
+
+<pre>
+{
+  "a": ["alice", "bob"],
+  "command": ["echo", {"foreach": "$(a)",
+                       "var": "a_var",
+                       "command": ["--something", "$(a_var)"]}]
+}
+</pre>
+
+h3. index
+
+This function extracts a single item from a list.  The value of @index@ is zero-based (i.e. the first item is at index 0, the second item index 1, etc).  The following example will evaluate "command" to @["echo", "--something", "bob"]@:
+
+<pre>
+{
+  "a": ["alice", "bob"],
+  "command": ["echo", {"list": "$(a)",
+                       "var": "a_var",
+                       "index": 1,
+                       "command": ["--something", "$(a_var)"]}]
+}
+</pre>
+
+h3. filter
+
+Filter the list so that it only includes items that match a regular expression.  The following example will evaluate to @["echo", "bob"]@
+
+<pre>
+{
+  "a": ["alice", "bob"],
+  "command": ["echo", {"filter": "$(a)",
+                       "regex": "b.*"}]
+}
+</pre>
+
+h3. group
+
+Generate a list of lists, where items are grouped on common subexpression match.  Items which don't match the regular expression are excluded.  In the following example, the subexpression is @(a?)@, resulting in two groups, strings that contain the letter 'a' and strings that do not.  The following example evaluates to @["echo", "--group", "alice", "carol", "dave", "--group", "bob", "betty"]@:
+
+<pre>
+{
+  "a": ["alice", "bob", "betty", "carol", "dave"],
+  "b": {"group": "$(a)",
+        "regex": "[^a]*(a?).*"},
+  "command": ["echo", {"foreach": "$(b)",
+                       "var": "b_var",
+                       "command": ["--group", "$(b_var)"]}]
+}
+</pre>
+
+h3. extract
+
+Generate a list of lists, where items are split by subexpression match.  Items which don't match the regular expression are excluded.  The following example evaluates to @["echo", "--something", "c", "a", "rol", "--something", "d", "a", "ve"]@:
+
+<pre>
+{
+  "a": ["alice", "bob", "carol", "dave"],
+  "b": {"extract": "$(a)",
+        "regex": "(.+)(a)(.*)"},
+  "command": ["echo", {"foreach": "$(b)",
+                       "var": "b_var",
+                       "command": ["--something", "$(b_var)"]}]
+}
+</pre>
+
+h3. batch
+
+Generate a list of lists, where items are split into a batch size.  If the list does not divide evenly into batch sizes, the last batch will be short.  The following example evaluates to @["echo", "--something", "alice", "bob", "--something", "carol", "dave"]@
+
+<pre>
+{
+  "a": ["alice", "bob", "carol", "dave"],
+  "command": ["echo", {"foreach":{"batch": "$(a)",
+                                  "size": 2},
+                       "var": "a_var",
+                       "command": ["--something", "$(a_var)"]}]
+}
+</pre>
+
+h2. Directives
+
+Directives alter the behavior of run-command.  All directives are optional.
+
+h3. task.cwd
+
+This directive sets the initial current working directory in which your command will run.  If @task.cwd@ is not specified, the default current working directory is @task.outdir@.
+
+h3. task.ignore_rcode
+
+By Unix convention a task which exits with a non-zero return code is considered failed.  However, some programs (such as @grep@) return non-zero codes for conditions that should not be considered fatal errors.  Set @"task.ignore_rcode": true@ to indicate the task should always be considered a success regardless of the return code.
+
+h3. task.stdin and task.stdout
+
+Provide standard input and standard output redirection.
+
+@task.stdin@ must evaluate to a path to a file to be bound to the standard input stream of the command.  When command describes a Unix pipeline, this goes into the first command.
+
+@task.stdout@ specifies the desired file name in the output directory to save the content of standard output.  When command describes a Unix pipeline, this captures the output of the last command.
+
+h3. task.vwd
+
+Background: because Keep collections are read-only, this does not play well with certain tools that expect to be able to write their outputs alongside their inputs (such as tools that generate indexes that are closely associated with the original file.)  The run-command's solution to this is the "virtual working directory".
+
+@task.vwd@ specifies a Keep collection with the starting contents of the directory.  @run-command@ will then populate @task.outdir@ with directories and symlinks to mirror the contents of the @task.vwd@ collection.  Your command will then be able to both access its input files and write its output files in @task.outdir@.  When the command completes, the output collection will merge the output of your command with the contents of the starting collection.  Note that files in the starting collection remain read-only and cannot be altered or deleted.
+
+h3. task.foreach
+
+Using @task.foreach@, you can run your command concurrently over large datasets.
+
+@task.foreach@ takes the names of one or more user-defined parameters.  The value of these parameters are evaluated in a list context.  @run-command@ then generates tasks based on the Cartesian product (i.e. all combinations) of the input lists.  The outputs of all tasks are merged to create the final output collection.  Note that if two tasks output a file in the same directory with the same name, that file will be concatenated in the final output.  In the following example, three tasks will be created for the "echo" command, based on the contents of user parameter "a":
+
+<pre>
+{
+  "command": ["echo", "$(a)"],
+  "task.foreach": "a",
+  "a": ["alice", "bob", "carol"]
+}
+</pre>
+
+This evaluates to the commands:
+<notextile>
+<pre>
+["echo", "alice"]
+["echo", "bob"]
+["echo", "carol"]
+</pre>
+</notextile>
+
+You can also specify multiple parameters:
+
+<pre>
+{
+  "a": ["alice", "bob"],
+  "b": ["carol", "dave"],
+  "task.foreach": ["a", "b"],
+  "command": ["echo", "$(a)", "$(b)"]
+}
+</pre>
+
+This evaluates to the commands:
+
+<pre>
+["echo", "alice", "carol"]
+["echo", "alice", "dave"]
+["echo", "bob", "carol"]
+["echo", "bob", "dave"]
+</pre>
+
+h1. Examples
+
+The following is a single task pipeline using @run-command@ to run the bwa alignment tool to align a single paired-end read fastq sample.  The input to this pipeline is the reference genome and a collection consisting of two fastq files for the read pair.
+
+<notextile>{% code 'run_command_simple_example' as javascript %}</notextile>
+
+The following is a concurrent task pipeline using @run-command@ to run the bwa alignment tool to align a set of fastq reads over multiple samples.  The input to this pipeline is the reference genome and a collection consisting of subdirectories for each sample, with each subdirectory containing pairs of fastq files for each set of reads.
+
+<notextile>{% code 'run_command_foreach_example' as javascript %}</notextile>
diff --git a/doc/user/topics/running-pipeline-command-line.html.textile.liquid b/doc/user/topics/running-pipeline-command-line.html.textile.liquid
new file mode 100644 (file)
index 0000000..147fbf0
--- /dev/null
@@ -0,0 +1,47 @@
+---
+layout: default
+navsection: userguide
+title: "Running a pipeline on the command line"
+...
+
+This tutorial demonstrates how to use the command line to run the same pipeline as described in "running a pipeline using Workbench.":{{site.baseurl}}/user/tutorials/tutorial-pipeline-workbench.html
+
+{% include 'tutorial_expectations' %}
+
+When you use the command line, you must use Arvados unique identifiers to refer to objects.  The identifiers in this example correspond to the following Arvados objects:
+
+* <i class="fa fa-fw fa-gear"></i> "Tutorial align using bwa mem (qr1hi-p5p6p-itzkwxblfermlwv)":https://{{ site.arvados_workbench_host }}/pipeline_templates/qr1hi-p5p6p-itzkwxblfermlwv
+* <i class="fa fa-fw fa-archive"></i> "Tutorial chromosome 19 reference (2463fa9efeb75e099685528b3b9071e0+438)":https://{{ site.arvados_workbench_host }}/collections/2463fa9efeb75e099685528b3b9071e0+438
+* <i class="fa fa-fw fa-archive"></i> "Tutorial sample exome (3229739b505d2b878b62aed09895a55a+142)":https://{{ site.arvados_workbench_host }}/collections/3229739b505d2b878b62aed09895a55a+142
+
+Use @arv pipeline run@ to run the pipeline, supplying the inputs to the bwa-mem component on the command line:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv pipeline run --run-pipeline-here --template qr1hi-p5p6p-itzkwxblfermlwv bwa-mem::reference_collection=2463fa9efeb75e099685528b3b9071e0+438 bwa-mem::sample=3229739b505d2b878b62aed09895a55a+142</span>
+
+2014-07-25 18:05:26 +0000 -- pipeline_instance qr1hi-d1hrv-d14trje19pna7f2
+bwa-mem qr1hi-8i9sb-67n1qvsronmd2z6 queued 2014-07-25T18:05:25Z
+
+2014-07-25 18:05:36 +0000 -- pipeline_instance qr1hi-d1hrv-d14trje19pna7f2
+bwa-mem qr1hi-8i9sb-67n1qvsronmd2z6 {:done=>0, :running=>1, :failed=>0, :todo=>0}
+
+2014-07-25 18:05:46 +0000 -- pipeline_instance qr1hi-d1hrv-d14trje19pna7f2
+bwa-mem qr1hi-8i9sb-67n1qvsronmd2z6 49bae1066f4ebce72e2587a3efa61c7d+88
+</code></pre>
+</notextile>
+
+This instantiates your pipeline and displays periodic status reports in your terminal window. The new pipeline instance will also show up on the Workbench Dashboard.
+
+@arv pipeline run@ submits a job for each pipeline component as soon as the component's inputs are known (i.e., any dependencies are satisfied). It terminates when there is no work left to do: this means either all components are satisfied and all jobs have completed successfully, _or_ one or more jobs have failed and it is therefore unproductive to submit any further jobs.
+
+The Keep locators of the output of the @bwa-mem@ components are available from the last status report shown above:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv keep ls -s 49bae1066f4ebce72e2587a3efa61c7d+88</span>
+     29226 ./HWI-ST1027_129_D0THKACXX.1_1.sam
+</code></pre>
+</notextile>
+
+h2. Re-using existing jobs and outputs
+
+When satisfying a pipeline component that is not marked as nondeterministic in the pipeline template, @arv pipeline run@ checks for a previously submitted job that satisfies the component's requirements. If such a job is found, @arv pipeline run@ uses the existing job rather than submitting a new one. Usually this is a safe way to conserve time and compute resources. In some cases it's desirable to re-run jobs with identical specifications (e.g., to demonstrate that a job or entire pipeline thought to be repeatable is in fact repeatable). For such cases, job re-use features can be disabled entirely by passing the @--no-reuse@ flag to the @arv pipeline run@ command.
diff --git a/doc/user/topics/tutorial-gatk-variantfiltration.html.textile.liquid b/doc/user/topics/tutorial-gatk-variantfiltration.html.textile.liquid
new file mode 100644 (file)
index 0000000..fa33f67
--- /dev/null
@@ -0,0 +1,173 @@
+---
+layout: default
+navsection: userguide
+title: "Using GATK with Arvados"
+...
+
+This tutorial demonstrates how to use the Genome Analysis Toolkit (GATK) with Arvados. In this example we will install GATK and then create a VariantFiltration job to assign pass/fail scores to variants in a VCF file.
+
+{% include 'tutorial_expectations' %}
+
+h2. Installing GATK
+
+Download the GATK binary tarball[1] -- e.g., @GenomeAnalysisTK-2.6-4.tar.bz2@ -- and "copy it to your Arvados VM":{{site.baseurl}}/user/tutorials/tutorial-keep.html.
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv keep put GenomeAnalysisTK-2.6-4.tar.bz2</span>
+c905c8d8443a9c44274d98b7c6cfaa32+94
+</code></pre>
+</notextile>
+
+Next, you need the GATK Resource Bundle[2].  This may already be available in Arvados.  If not, you will need to download the files listed below and put them into Keep.
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv keep ls -s d237a90bae3870b3b033aea1e99de4a9+10820</span>
+  50342 1000G_omni2.5.b37.vcf.gz
+      1 1000G_omni2.5.b37.vcf.gz.md5
+    464 1000G_omni2.5.b37.vcf.idx.gz
+      1 1000G_omni2.5.b37.vcf.idx.gz.md5
+  43981 1000G_phase1.indels.b37.vcf.gz
+      1 1000G_phase1.indels.b37.vcf.gz.md5
+    326 1000G_phase1.indels.b37.vcf.idx.gz
+      1 1000G_phase1.indels.b37.vcf.idx.gz.md5
+ 537210 CEUTrio.HiSeq.WGS.b37.bestPractices.phased.b37.vcf.gz
+      1 CEUTrio.HiSeq.WGS.b37.bestPractices.phased.b37.vcf.gz.md5
+   3473 CEUTrio.HiSeq.WGS.b37.bestPractices.phased.b37.vcf.idx.gz
+      1 CEUTrio.HiSeq.WGS.b37.bestPractices.phased.b37.vcf.idx.gz.md5
+  19403 Mills_and_1000G_gold_standard.indels.b37.vcf.gz
+      1 Mills_and_1000G_gold_standard.indels.b37.vcf.gz.md5
+    536 Mills_and_1000G_gold_standard.indels.b37.vcf.idx.gz
+      1 Mills_and_1000G_gold_standard.indels.b37.vcf.idx.gz.md5
+  29291 NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.sites.vcf.gz
+      1 NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.sites.vcf.gz.md5
+    565 NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.sites.vcf.idx.gz
+      1 NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.sites.vcf.idx.gz.md5
+  37930 NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.vcf.gz
+      1 NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.vcf.gz.md5
+    592 NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.vcf.idx.gz
+      1 NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.vcf.idx.gz.md5
+5898484 NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.bam
+    112 NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.bam.bai.gz
+      1 NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.bam.bai.gz.md5
+      1 NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.bam.md5
+   3837 NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.vcf.gz
+      1 NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.vcf.gz.md5
+     65 NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.vcf.idx.gz
+      1 NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.vcf.idx.gz.md5
+ 275757 dbsnp_137.b37.excluding_sites_after_129.vcf.gz
+      1 dbsnp_137.b37.excluding_sites_after_129.vcf.gz.md5
+   3735 dbsnp_137.b37.excluding_sites_after_129.vcf.idx.gz
+      1 dbsnp_137.b37.excluding_sites_after_129.vcf.idx.gz.md5
+ 998153 dbsnp_137.b37.vcf.gz
+      1 dbsnp_137.b37.vcf.gz.md5
+   3890 dbsnp_137.b37.vcf.idx.gz
+      1 dbsnp_137.b37.vcf.idx.gz.md5
+  58418 hapmap_3.3.b37.vcf.gz
+      1 hapmap_3.3.b37.vcf.gz.md5
+    999 hapmap_3.3.b37.vcf.idx.gz
+      1 hapmap_3.3.b37.vcf.idx.gz.md5
+      3 human_g1k_v37.dict.gz
+      1 human_g1k_v37.dict.gz.md5
+      2 human_g1k_v37.fasta.fai.gz
+      1 human_g1k_v37.fasta.fai.gz.md5
+ 849537 human_g1k_v37.fasta.gz
+      1 human_g1k_v37.fasta.gz.md5
+      1 human_g1k_v37.stats.gz
+      1 human_g1k_v37.stats.gz.md5
+      3 human_g1k_v37_decoy.dict.gz
+      1 human_g1k_v37_decoy.dict.gz.md5
+      2 human_g1k_v37_decoy.fasta.fai.gz
+      1 human_g1k_v37_decoy.fasta.fai.gz.md5
+ 858592 human_g1k_v37_decoy.fasta.gz
+      1 human_g1k_v37_decoy.fasta.gz.md5
+      1 human_g1k_v37_decoy.stats.gz
+      1 human_g1k_v37_decoy.stats.gz.md5
+</code></pre>
+</notextile>
+
+h2. Submit a GATK job
+
+The Arvados distribution includes an example crunch script ("crunch_scripts/GATK2-VariantFiltration":https://arvados.org/projects/arvados/repository/revisions/master/entry/crunch_scripts/GATK2-VariantFiltration) that runs the GATK VariantFiltration tool with some default settings.
+
+<notextile>
+<pre><code>~$ <span class="userinput">src_version=76588bfc57f33ea1b36b82ca7187f465b73b4ca4</span>
+~$ <span class="userinput">vcf_input=5ee633fe2569d2a42dd81b07490d5d13+82</span>
+~$ <span class="userinput">gatk_binary=c905c8d8443a9c44274d98b7c6cfaa32+94</span>
+~$ <span class="userinput">gatk_bundle=d237a90bae3870b3b033aea1e99de4a9+10820</span>
+~$ <span class="userinput">cat &gt;the_job &lt;&lt;EOF
+{
+ "script":"GATK2-VariantFiltration",
+ "repository":"arvados",
+ "script_version":"$src_version",
+ "script_parameters":
+ {
+  "input":"$vcf_input",
+  "gatk_binary_tarball":"$gatk_binary",
+  "gatk_bundle":"$gatk_bundle"
+ }
+}
+EOF</span>
+</code></pre>
+</notextile>
+
+* @"input"@ is a collection containing the source VCF data. Here we are using an exome report from PGP participant hu34D5B9.
+* @"gatk_binary_tarball"@ is a Keep collection containing the GATK 2 binary distribution[1] tar file.
+* @"gatk_bundle"@ is a Keep collection containing the GATK resource bundle[2].
+
+Now start a job:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv job create --job "$(cat the_job)"</span>
+{
+ "href":"https://qr1hi.arvadosapi.com/arvados/v1/jobs/qr1hi-8i9sb-n9k7qyp7bs5b9d4",
+ "kind":"arvados#job",
+ "etag":"9j99n1feoxw3az448f8ises12",
+ "uuid":"qr1hi-8i9sb-n9k7qyp7bs5b9d4",
+ "owner_uuid":"qr1hi-tpzed-9zdpkpni2yddge6",
+ "created_at":"2013-12-17T19:02:15Z",
+ "modified_by_client_uuid":"qr1hi-ozdt8-obw7foaks3qjyej",
+ "modified_by_user_uuid":"qr1hi-tpzed-9zdpkpni2yddge6",
+ "modified_at":"2013-12-17T19:02:15Z",
+ "updated_at":"2013-12-17T19:02:15Z",
+ "submit_id":null,
+ "priority":null,
+ "script":"GATK2-VariantFiltration",
+ "script_parameters":{
+  "input":"5ee633fe2569d2a42dd81b07490d5d13+82",
+  "gatk_binary_tarball":"c905c8d8443a9c44274d98b7c6cfaa32+94",
+  "gatk_bundle":"d237a90bae3870b3b033aea1e99de4a9+10820"
+ },
+ "script_version":"76588bfc57f33ea1b36b82ca7187f465b73b4ca4",
+ "cancelled_at":null,
+ "cancelled_by_client_uuid":null,
+ "cancelled_by_user_uuid":null,
+ "started_at":null,
+ "finished_at":null,
+ "output":null,
+ "success":null,
+ "running":null,
+ "is_locked_by_uuid":null,
+ "log":null,
+ "runtime_constraints":{},
+ "tasks_summary":{},
+ "dependencies":[
+  "5ee633fe2569d2a42dd81b07490d5d13+82",
+  "c905c8d8443a9c44274d98b7c6cfaa32+94",
+  "d237a90bae3870b3b033aea1e99de4a9+10820"
+ ]
+}
+</code></pre>
+</notextile>
+
+Once the job completes, the output can be found in hu34D5B9-exome-filtered.vcf:
+
+<notextile><pre><code>~$ <span class="userinput">arv keep ls bedd6ff56b3ae9f90d873b1fcb72f9a3+91</span>
+hu34D5B9-exome-filtered.vcf
+</code></pre>
+</notextile>
+
+h2. Notes
+
+fn1. "Download the GATK tools":http://www.broadinstitute.org/gatk/download
+
+fn2. "Information about the GATK resource bundle":http://gatkforums.broadinstitute.org/discussion/1213/whats-in-the-resource-bundle-and-how-can-i-get-it and "direct download link":ftp://gsapubftp-anonymous@ftp.broadinstitute.org/bundle/2.5/b37/ (if prompted, submit an empty password)
diff --git a/doc/user/topics/tutorial-job1.html.textile.liquid b/doc/user/topics/tutorial-job1.html.textile.liquid
new file mode 100644 (file)
index 0000000..e231c9b
--- /dev/null
@@ -0,0 +1,215 @@
+---
+layout: default
+navsection: userguide
+title: "Running a Crunch job on the command line"
+...
+
+This tutorial introduces how to run individual Crunch jobs using the @arv@ command line tool.
+
+{% include 'tutorial_expectations' %}
+
+You will create a job to run the "hash" Crunch script.  The "hash" script computes the MD5 hash of each file in a collection.
+
+h2. Jobs
+
+Crunch pipelines consist of one or more jobs.  A "job" is a single run of a specific version of a Crunch script with a specific input.  You can also run jobs individually.
+
+A request to run a Crunch job is described using a JSON object.  For example:
+
+<notextile>
+<pre><code>~$ <span class="userinput">cat &gt;~/the_job &lt;&lt;EOF
+{
+ "script": "hash",
+ "repository": "arvados",
+ "script_version": "master",
+ "script_parameters": {
+  "input": "c1bad4b39ca5a924e481008009d94e32+210"
+ },
+ "no_reuse": "true"
+}
+EOF
+</code></pre>
+</notextile>
+
+* @cat@ is a standard Unix utility that writes a sequence of input to standard output.
+* @<<EOF@ tells the shell to direct the following lines into the standard input for @cat@ up until it sees the line @EOF@.
+* @>~/the_job@ redirects standard output to a file called @~/the_job@.
+* @"repository"@ is the name of a Git repository to search for the script version.  You can access a list of available git repositories on the Arvados Workbench under "*Code repositories*":https://{{site.arvados_workbench_host}}/repositories.
+* @"script_version"@ specifies the version of the script that you wish to run.  This can be in the form of an explicit Git revision hash, a tag, or a branch.  Arvados logs the script version that was used in the run, enabling you to go back and re-run any past job with the guarantee that the exact same code will be used as was used in the previous run.
+* @"script"@ specifies the name of the script to run.  The script must be given relative to the @crunch_scripts/@ subdirectory of the Git repository.
+* @"script_parameters"@ are provided to the script.  In this case, the input is the PGP data Collection that we "put in Keep earlier":{{site.baseurl}}/user/tutorials/tutorial-keep.html.
+* Setting the @"no_reuse"@ flag tells Crunch not to reuse work from past jobs.  This helps ensure that you can watch a new Job process for the rest of this tutorial, without reusing output from a past run that you made, or somebody else marked as public.  (If you want to experiment, after the first run below finishes, feel free to edit this job to remove the @"no_reuse"@ line and resubmit it.  See what happens!)
+
+Use @arv job create@ to actually submit the job.  It should print out a JSON object which describes the newly created job:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv job create --job "$(cat ~/the_job)"</span>
+{
+ "href":"https://qr1hi.arvadosapi.com/arvados/v1/jobs/qr1hi-8i9sb-1pm1t02dezhupss",
+ "kind":"arvados#job",
+ "etag":"ax3cn7w9whq2hdh983yxvq09p",
+ "uuid":"qr1hi-8i9sb-1pm1t02dezhupss",
+ "owner_uuid":"qr1hi-tpzed-9zdpkpni2yddge6",
+ "created_at":"2013-12-16T20:44:32Z",
+ "modified_by_client_uuid":"qr1hi-ozdt8-obw7foaks3qjyej",
+ "modified_by_user_uuid":"qr1hi-tpzed-9zdpkpni2yddge6",
+ "modified_at":"2013-12-16T20:44:32Z",
+ "updated_at":"2013-12-16T20:44:33Z",
+ "submit_id":null,
+ "priority":null,
+ "script":"hash",
+ "script_parameters":{
+  "input":"c1bad4b39ca5a924e481008009d94e32+210"
+ },
+ "script_version":"d9cd657b733d578ac0d2167dd75967aa4f22e0ac",
+ "cancelled_at":null,
+ "cancelled_by_client_uuid":null,
+ "cancelled_by_user_uuid":null,
+ "started_at":null,
+ "finished_at":null,
+ "output":null,
+ "success":null,
+ "running":null,
+ "is_locked_by_uuid":null,
+ "log":null,
+ "runtime_constraints":{},
+ "tasks_summary":{},
+ "dependencies":[
+  "c1bad4b39ca5a924e481008009d94e32+210"
+ ]
+}
+</code></pre>
+</notextile>
+
+The job is now queued and will start running as soon as it reaches the front of the queue.  Fields to pay attention to include:
+
+ * @"uuid"@ is the unique identifier for this specific job.
+ * @"script_version"@ is the actual revision of the script used.  This is useful if the version was described using the "repository:branch" format.
+
+h2. Monitor job progress
+
+Go to "*Recent jobs*":https://{{site.arvados_workbench_host}}/jobs in Workbench.  Your job should be near the top of the table.  This table refreshes automatically.  When the job has completed successfully, it will show <span class="label label-success">finished</span> in the *Status* column.
+
+h2. Inspect the job output
+
+On the "Workbench Dashboard":https://{{site.arvados_workbench_host}}, look for the *Output* column of the *Recent jobs* table.  Click on the link under *Output* for your job to go to the files page with the job output.  The files page lists all the files that were output by the job.  Click on the link under the *file* column to view a file, or click on the download button <span class="glyphicon glyphicon-download-alt"></span> to download the output file.
+
+On the command line, you can use @arv job get@ to access a JSON object describing the output:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv job get --uuid qr1hi-8i9sb-xxxxxxxxxxxxxxx</span>
+{
+ "href":"https://qr1hi.arvadosapi.com/arvados/v1/jobs/qr1hi-8i9sb-1pm1t02dezhupss",
+ "kind":"arvados#job",
+ "etag":"1bk98tdj0qipjy0rvrj03ta5r",
+ "uuid":"qr1hi-8i9sb-1pm1t02dezhupss",
+ "owner_uuid":"qr1hi-tpzed-9zdpkpni2yddge6",
+ "created_at":"2013-12-16T20:44:32Z",
+ "modified_by_client_uuid":null,
+ "modified_by_user_uuid":"qr1hi-tpzed-9zdpkpni2yddge6",
+ "modified_at":"2013-12-16T20:44:55Z",
+ "updated_at":"2013-12-16T20:44:55Z",
+ "submit_id":null,
+ "priority":null,
+ "script":"hash",
+ "script_parameters":{
+  "input":"c1bad4b39ca5a924e481008009d94e32+210"
+ },
+ "script_version":"d9cd657b733d578ac0d2167dd75967aa4f22e0ac",
+ "cancelled_at":null,
+ "cancelled_by_client_uuid":null,
+ "cancelled_by_user_uuid":null,
+ "started_at":"2013-12-16T20:44:36Z",
+ "finished_at":"2013-12-16T20:44:53Z",
+ "output":"dd755dbc8d49a67f4fe7dc843e4f10a6+54",
+ "success":true,
+ "running":false,
+ "is_locked_by_uuid":"qr1hi-tpzed-9zdpkpni2yddge6",
+ "log":"2afdc6c8b67372ffd22d8ce89d35411f+91",
+ "runtime_constraints":{},
+ "tasks_summary":{
+  "done":2,
+  "running":0,
+  "failed":0,
+  "todo":0
+ },
+ "dependencies":[
+  "c1bad4b39ca5a924e481008009d94e32+210"
+ ]
+}
+</code></pre>
+</notextile>
+
+* @"output"@ is the unique identifier for this specific job's output.  This is a Keep collection.  Because the output of Arvados jobs should be deterministic, the known expected output is <code>dd755dbc8d49a67f4fe7dc843e4f10a6+54</code>.
+
+Now you can list the files in the collection:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv keep ls dd755dbc8d49a67f4fe7dc843e4f10a6+54</span>
+./md5sum.txt
+</code></pre>
+</notextile>
+
+This collection consists of the @md5sum.txt@ file.  Use @arv keep get@ to show the contents of the @md5sum.txt@ file:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv keep get dd755dbc8d49a67f4fe7dc843e4f10a6+54/md5sum.txt</span>
+44b8ae3fde7a8a88d2f7ebd237625b4f ./var-GS000016015-ASM.tsv.bz2
+</code></pre>
+</notextile>
+
+This MD5 hash matches the MD5 hash which we "computed earlier":{{site.baseurl}}/user/tutorials/tutorial-keep.html.
+
+h2. The job log
+
+When the job completes, you can access the job log.  On the Workbench, visit "*Recent jobs*":https://{{site.arvados_workbench_host}}/jobs %(rarr)&rarr;% your job's UUID under the *uuid* column %(rarr)&rarr;% the collection link on the *log* row.
+
+On the command line, the Keep identifier listed in the @"log"@ field from @arv job get@ specifies a collection.  You can list the files in the collection:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv keep ls xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx+91</span>
+./qr1hi-8i9sb-xxxxxxxxxxxxxxx.log.txt
+</code></pre>
+</notextile>
+
+The log collection consists of one log file named with the job's UUID.  You can access it using @arv keep get@:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv keep get xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx+91/qr1hi-8i9sb-xxxxxxxxxxxxxxx.log.txt</span>
+2013-12-16_20:44:35 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  check slurm allocation
+2013-12-16_20:44:35 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  node compute13 - 8 slots
+2013-12-16_20:44:36 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  start
+2013-12-16_20:44:36 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  Install revision d9cd657b733d578ac0d2167dd75967aa4f22e0ac
+2013-12-16_20:44:37 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  Clean-work-dir exited 0
+2013-12-16_20:44:37 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  Install exited 0
+2013-12-16_20:44:37 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  script hash
+2013-12-16_20:44:37 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  script_version d9cd657b733d578ac0d2167dd75967aa4f22e0ac
+2013-12-16_20:44:37 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  script_parameters {"input":"c1bad4b39ca5a924e481008009d94e32+210"}
+2013-12-16_20:44:37 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  runtime_constraints {"max_tasks_per_node":0}
+2013-12-16_20:44:37 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  start level 0
+2013-12-16_20:44:37 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  status: 0 done, 0 running, 1 todo
+2013-12-16_20:44:38 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 0 job_task qr1hi-ot0gb-23c1k3kwrf8da62
+2013-12-16_20:44:38 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 0 child 7681 started on compute13.1
+2013-12-16_20:44:38 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  status: 0 done, 1 running, 0 todo
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 0 child 7681 on compute13.1 exit 0 signal 0 success=true
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 0 success in 1 seconds
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 0 output
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  wait for last 0 children to finish
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  status: 1 done, 0 running, 1 todo
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  start level 1
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  status: 1 done, 0 running, 1 todo
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 1 job_task qr1hi-ot0gb-iwr0o3unqothg28
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 1 child 7716 started on compute13.1
+2013-12-16_20:44:39 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  status: 1 done, 1 running, 0 todo
+2013-12-16_20:44:52 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 1 child 7716 on compute13.1 exit 0 signal 0 success=true
+2013-12-16_20:44:52 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 1 success in 13 seconds
+2013-12-16_20:44:52 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575 1 output dd755dbc8d49a67f4fe7dc843e4f10a6+54
+2013-12-16_20:44:52 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  wait for last 0 children to finish
+2013-12-16_20:44:52 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  status: 2 done, 0 running, 0 todo
+2013-12-16_20:44:52 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  release job allocation
+2013-12-16_20:44:52 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  Freeze not implemented
+2013-12-16_20:44:52 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  collate
+2013-12-16_20:44:53 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  output dd755dbc8d49a67f4fe7dc843e4f10a6+54+K@qr1hi
+2013-12-16_20:44:53 qr1hi-8i9sb-xxxxxxxxxxxxxxx 7575  finish
+</code></pre>
+</notextile>
diff --git a/doc/user/topics/tutorial-parallel.html.textile.liquid b/doc/user/topics/tutorial-parallel.html.textile.liquid
new file mode 100644 (file)
index 0000000..9be6103
--- /dev/null
@@ -0,0 +1,78 @@
+---
+layout: default
+navsection: userguide
+title: "Concurrent Crunch tasks"
+...
+
+In the previous tutorials, we used @arvados.job_setup.one_task_per_input_file()@ to automatically create concurrent jobs by creating a separate task per file.  For some types of jobs, you may need to split the work up differently, for example creating tasks to process different segments of a single large file.  This tutorial will demonstrate how to create Crunch tasks directly.
+
+Start by entering the @crunch_scripts@ directory of your Git repository:
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd $USER/crunch_scripts</span>
+</code></pre>
+</notextile>
+
+Next, using @nano@ or your favorite Unix text editor, create a new file called @concurrent-hash.py@ in the @crunch_scripts@ directory.
+
+notextile. <pre>~/$USER/crunch_scripts$ <code class="userinput">nano concurrent-hash.py</code></pre>
+
+Add the following code to compute the MD5 hash of each file in a collection:
+
+<notextile> {% code 'concurrent_hash_script_py' as python %} </notextile>
+
+Make the file executable:
+
+notextile. <pre><code>~/$USER/crunch_scripts$ <span class="userinput">chmod +x concurrent-hash.py</span></code></pre>
+
+Add the file to the Git staging area, commit, and push:
+
+<notextile>
+<pre><code>~/$USER/crunch_scripts$ <span class="userinput">git add concurrent-hash.py</span>
+~/$USER/crunch_scripts$ <span class="userinput">git commit -m"concurrent hash"</span>
+~/$USER/crunch_scripts$ <span class="userinput">git push origin master</span>
+</code></pre>
+</notextile>
+
+You should now be able to run your new script using Crunch, with "script" referring to our new "concurrent-hash.py" script.  We will use a different input from our previous examples.  We will use @887cd41e9c613463eab2f0d885c6dd96+83@ which consists of three files, "alice.txt", "bob.txt" and "carol.txt" (the example collection used previously in "fetching data from Arvados using Keep":{{site.baseurl}}/user/tutorials/tutorial-keep.html#dir).
+
+<notextile>
+<pre><code>~/$USER/crunch_scripts$ <span class="userinput">cat &gt;~/the_job &lt;&lt;EOF
+{
+ "script": "concurrent-hash.py",
+ "repository": "$USER",
+ "script_version": "master",
+ "script_parameters":
+ {
+  "input": "887cd41e9c613463eab2f0d885c6dd96+83"
+ }
+}
+EOF</span>
+~/$USER/crunch_scripts$ <span class="userinput">arv job create --job "$(cat ~/the_job)"</span>
+{
+ ...
+ "uuid":"qr1hi-xxxxx-xxxxxxxxxxxxxxx"
+ ...
+}
+~/$USER/crunch_scripts$ <span class="userinput">arv job get --uuid qr1hi-xxxxx-xxxxxxxxxxxxxxx</span>
+{
+ ...
+ "output":"e2ccd204bca37c77c0ba59fc470cd0f7+162",
+ ...
+}
+</code></pre>
+</notextile>
+
+(Your shell should automatically fill in @$USER@ with your login name.  The job JSON that gets saved should have @"repository"@ pointed at your personal Git repository.)
+
+Because the job ran concurrently, each instance of concurrent-hash creates a separate @md5sum.txt@ as output.  Arvados automatically collates these files into a single collection, which is the output of the job:
+
+<notextile>
+<pre><code>~/$USER/crunch_scripts$ <span class="userinput">arv keep ls e2ccd204bca37c77c0ba59fc470cd0f7+162</span>
+./md5sum.txt
+~/$USER/crunch_scripts$ <span class="userinput">arv keep get e2ccd204bca37c77c0ba59fc470cd0f7+162/md5sum.txt</span>
+0f1d6bcf55c34bed7f92a805d2d89bbf alice.txt
+504938460ef369cd275e4ef58994cffe bob.txt
+8f3b36aff310e06f3c5b9e95678ff77a carol.txt
+</code></pre>
+</notextile>
diff --git a/doc/user/topics/tutorial-trait-search.html.textile.liquid b/doc/user/topics/tutorial-trait-search.html.textile.liquid
new file mode 100644 (file)
index 0000000..d1a0e24
--- /dev/null
@@ -0,0 +1,269 @@
+---
+layout: default
+navsection: userguide
+title: "Querying the Metadata Database"
+...
+
+This tutorial introduces the Arvados Metadata Database.  The Metadata Database stores information about files in Keep.  This example will use the Python SDK to find public WGS (Whole Genome Sequencing) data for people who have reported a certain medical condition.
+
+{% include 'tutorial_expectations' %}
+
+In the tutorial examples, three angle brackets (&gt;&gt;&gt;) will be used to denote code to enter at the interactive Python prompt.
+
+Start by running Python.  
+
+<notextile>
+<pre><code>~$ <span class="userinput">python</span>
+Python 2.7.3 (default, Jan  2 2013, 13:56:14) 
+[GCC 4.7.2] on linux2
+Type "help", "copyright", "credits" or "license" for more information.
+&gt;&gt;&gt;
+</code></pre>
+</notextile>
+      
+If everything is set up correctly, you will be able to import the arvados SDK.
+
+notextile. <pre><code>&gt;&gt;&gt; <span class="userinput">import arvados</span></code></pre>
+
+This tutorial will also use the regular expression (re) python module:
+
+<notextile>
+<pre><code>&gt;&gt;&gt; <span class="userinput">import re</span>
+</code></pre>
+</notextile>
+
+h2. Finding traits
+
+notextile. <pre><code>&gt;&gt;&gt; <span class="userinput">all_traits = arvados.api().traits().list(limit=1000).execute()</span></code></pre>
+
+* @arvados.api()@ gets an object that provides access to the Arvados API server
+* @.traits()@ gets an object that provides access to the "traits" resource on the Arvados API server
+* @.list(limit=1000)@ constructs a query to list all elements of the "traits" resource, with a limit of 1000 entries returned
+* @.execute()@ executes the query and returns the result, which we assign to "all_traits"
+
+notextile. <pre><code>&gt;&gt;&gt; <span class="userinput">cancer_traits = filter(lambda t: re.search('cancer', t['name']), all_traits['items'])</span></code></pre>
+
+* @lambda t: re.search('cancer', t['name'])@ is an inline function that takes a parameter @t@ and uses a simple regular expression to test if @t['name']@ contains the substring 'cancer'
+* @all_traits['items']@ is the input sequence of traits
+* @filter@ tests each element @t@ and constructs a new sequence consisting only of the elements that pass the filter
+* @cancer_traits@ gets the result of @filter@
+
+<notextile>
+<pre><code>&gt;&gt;&gt; <span class="userinput">for t in cancer_traits: print(t['uuid'], t['name'])</span>
+...
+qr1hi-q1cn2-8q57g2diohwnzm0 Cervical cancer
+qr1hi-q1cn2-vqp4243janpjbyj Breast cancer
+qr1hi-q1cn2-v6usijujcpwqrn1 Non-melanoma skin cancer
+...
+</code></pre>
+</notextile>
+
+In this tutorial we will use the "Non-melanoma skin cancer" trait with uuid @qr1hi-q1cn2-v6usijujcpwqrn1@.
+
+notextile. <pre><code>&gt;&gt;&gt; <span class="userinput">non_melanoma_cancer = 'qr1hi-q1cn2-v6usijujcpwqrn1'</span></code></pre>
+
+h2. Finding humans with the selected trait
+
+We query the "links" resource to find humans that report the selected trait.  Links are directional connections between Arvados data items, for example, from a human to their reported traits.
+
+<notextile>
+<pre><code>&gt;&gt;&gt; <span class="userinput">trait_filter = [
+    ['link_class', '=', 'human_trait'],
+    ['tail_uuid', 'is_a', 'arvados#human'],
+    ['head_uuid', '=', non_melanoma_cancer],
+  ]
+</code></pre>
+</notextile>
+
+* @['link_class', '=', 'human_trait']@ filters on links that connect phenotype traits to individuals in the database.
+* @['tail_uuid', 'is_a', 'arvados#human']@ requires that the "tail" of the link be a "human" database object.
+* @['head_uuid', '=', non_melanoma_cancer]@ requires that the "head" of the link connect to the "trait" database object non_melanoma_cancer.
+
+The query will return links that match all three conditions.
+
+<notextile>
+<pre><code>&gt;&gt;&gt; <span class="userinput">trait_links = arvados.api().links().list(limit=1000, filters=trait_filter).execute()</span>
+</code></pre>
+</notextile>
+
+* @arvados.api()@ gets an object that provides access to the Arvados API server
+* @.links()@ gets an object that provides access to the "links" resource on the Arvados API server
+* @.list(limit=1000, filters=trait_filter)@ constructs a query to list elements of the "links" resource that match the criteria discussed above, with a limit of 1000 entries returned
+* @.execute()@ executes the query and returns the result, which we assign to "trait_links"
+
+<notextile>
+<pre><code>&gt;&gt;&gt; <span class="userinput">human_uuids = map(lambda l: l['tail_uuid'], trait_links['items'])</span>
+&gt;&gt;&gt; <span class="userinput">human_uuids</span>
+[u'1h9kt-7a9it-c0uqa4kcdh29wdf', u'1h9kt-7a9it-x4tru6mn40hc6ah',
+u'1h9kt-7a9it-yqb8m5s9cpy88i8', u'1h9kt-7a9it-46sm75w200ngwny',
+u'1h9kt-7a9it-gx85a4tdkpzsg3w', u'1h9kt-7a9it-8cvlaa8909lgeo9',
+u'1h9kt-7a9it-as37qum2pq8vizb', u'1h9kt-7a9it-14fph66z2baqxb9',
+u'1h9kt-7a9it-e9zc7i4crmw3v69', u'1h9kt-7a9it-np7f35hlijlxdmt',
+u'1h9kt-7a9it-j9hqyjwbvo9cojn', u'1h9kt-7a9it-lqxdtm1gynmsv13',
+u'1h9kt-7a9it-zkhhxjfg2o22ywq', u'1h9kt-7a9it-nsjoxqd33lzldw9',
+u'1h9kt-7a9it-ytect4smzcgd4kg', u'1h9kt-7a9it-y6tl353b3jc4tos',
+u'1h9kt-7a9it-98f8qave4f8vbs5', u'1h9kt-7a9it-gd72sh15q0p4wq3',
+u'1h9kt-7a9it-zlx25dscak94q9h', u'1h9kt-7a9it-8gronw4rbgmim01',
+u'1h9kt-7a9it-wclfkjcb23tr5es', u'1h9kt-7a9it-rvp2qe7szfz4dy6',
+u'1h9kt-7a9it-50iffhmpzsktwjm', u'1h9kt-7a9it-ul412id5y31a5o8',
+u'1h9kt-7a9it-732kwkfzylmt4ik', u'1h9kt-7a9it-v9zqxegpblsbtai',
+u'1h9kt-7a9it-kmaraqduit1v5wd', u'1h9kt-7a9it-t1nwtlo1hru5vvq',
+u'1h9kt-7a9it-q3w6j9od4ibpoyl', u'1h9kt-7a9it-qz8vzkuuz97ezwv',
+u'1h9kt-7a9it-t1v8sjz6dm9jmjf', u'1h9kt-7a9it-qe8wrbyvuqs5jew']
+</code></pre>
+</notextile>
+
+* @lambda l: l['tail_uuid']@ is an inline function that returns the 'tail_uuid' attribute of 'l'
+* @trait_links['items']@ is the input set from the query
+* @map@ converts each item in a sequence into a different item using the embedded function, in this case to produce a sequence of uuids which refer to humans that have the specified trait.
+
+h2. Find Personal Genome Project identifiers from Arvados UUIDs
+
+<notextile>
+<pre><code>&gt;&gt;&gt; <span class="userinput">human_filters = [
+    ["link_class", "=", "identifier"],
+    ["head_uuid", "in", human_uuids]
+  ]</span>
+&gt;&gt;&gt; <span class="userinput">pgpid_links = arvados.api('v1').links().list(limit=1000, filters=human_filters).execute()</span>
+&gt;&gt;&gt; <span class="userinput">map(lambda l: l['name'], pgpid_links['items'])</span>
+[u'hu01024B', u'hu11603C', u'hu15402B', u'hu174334', u'hu1BD549', u'hu237A50',
+ u'hu34A921', u'hu397733', u'hu414115', u'hu43860C', u'hu474789', u'hu553620',
+ u'hu56B3B6', u'hu5917F3', u'hu599905', u'hu5E55F5', u'hu602487', u'hu633787',
+ u'hu68F245', u'hu6C3F34', u'hu7260DD', u'hu7A2F1D', u'hu94040B', u'hu9E356F',
+ u'huAB8707', u'huB1FD55', u'huB4883B', u'huD09050', u'huD09534', u'huD3A569',
+ u'huDF04CC', u'huE2E371']
+</code></pre>
+</notextile>
+
+These PGP IDs let us find public profiles, for example:
+
+* "https://my.pgp-hms.org/profile/huE2E371":https://my.pgp-hms.org/profile/huE2E371
+* "https://my.pgp-hms.org/profile/huDF04CC":https://my.pgp-hms.org/profile/huDF04CC
+* ...
+
+h2. Find genomic data from specific humans
+
+Now we want to find collections in Keep that were provided by these humans.  We search the "links" resource for "provenance" links that point to entries in the list of humans with the non-melanoma skin cancer trait:
+
+<notextile>
+<pre><code>&gt;&gt;&gt; <span class="userinput">provenance_links = arvados.api().links().list(limit=1000, filters=[
+    ["link_class", "=", "provenance"],
+    ["name", "=", "provided"],
+    ["tail_uuid", "in", human_uuids]
+  ]).execute()
+collection_uuids = map(lambda l: l['head_uuid'], provenance_links['items'])
+
+# build map of human uuid -> PGP ID
+pgpid = {}
+for pgpid_link in pgpid_links['items']:
+  pgpid[pgpid_link['head_uuid']] = pgpid_link['name']
+
+# build map of collection uuid -> PGP ID
+for p_link in provenance_links['items']:
+  pgpid[p_link['head_uuid']] = pgpid[p_link['tail_uuid']]
+
+# get details (e.g., list of files) of each collection
+collections = arvados.api('v1').collections().list(filters=[
+    ["uuid", "in", collection_uuids]
+  ]).execute()
+
+# print PGP public profile links with file locators
+for c in collections['items']:
+  for f in c['files']:
+    print "https://my.pgp-hms.org/profile/%s %s %s%s" % (pgpid[c['uuid']], c['uuid'], ('' if f[0] == '.' else f[0]+'/'), f[1])
+</span>
+https://my.pgp-hms.org/profile/hu43860C a58dca7609fa84c8c38a7e926a97b2fc var-GS00253-DNA_A01_200_37-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/huB1FD55 ea30eb9e46eedf7f05ed6e348c2baf5d var-GS000010320-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/huDF04CC 4ab0df8f22f595d1747a22c476c05873 var-GS000010427-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/hu7A2F1D 756d0ada29b376140f64e7abfe6aa0e7 var-GS000014566-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/hu553620 7ed4e425bb1c7cc18387cbd9388181df var-GS000015272-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/huD09534 542112e210daff30dd3cfea4801a9f2f var-GS000016374-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/hu599905 33a9f3842b01ea3fdf27cc582f5ea2af var-GS000016015-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/hu43860C a58dca7609fa84c8c38a7e926a97b2fc+302 var-GS00253-DNA_A01_200_37-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/huB1FD55 ea30eb9e46eedf7f05ed6e348c2baf5d+291 var-GS000010320-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/huDF04CC 4ab0df8f22f595d1747a22c476c05873+242 var-GS000010427-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/hu7A2F1D 756d0ada29b376140f64e7abfe6aa0e7+242 var-GS000014566-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/hu553620 7ed4e425bb1c7cc18387cbd9388181df+242 var-GS000015272-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/huD09534 542112e210daff30dd3cfea4801a9f2f+242 var-GS000016374-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/hu599905 33a9f3842b01ea3fdf27cc582f5ea2af+242 var-GS000016015-ASM.tsv.bz2
+https://my.pgp-hms.org/profile/hu599905 d6e2e57cd60ba5979006d0b03e45e726+81 Witch_results.zip
+https://my.pgp-hms.org/profile/hu553620 ea4f2d325592a1272f989d141a917fdd+85 Devenwood_results.zip
+https://my.pgp-hms.org/profile/hu7A2F1D 4580f6620bb15b25b18373766e14e4a7+85 Innkeeper_results.zip
+https://my.pgp-hms.org/profile/huD09534 fee37be9440b912eb90f5e779f272416+82 Hallet_results.zip
+</code></pre>
+</notextile>
+
+h3. Search for a variant
+
+Now we will use crunch to issue a 'grep' job to look for variant rs1126809 in each of the "var-" files (these contain variant calls from WGS data).
+
+<notextile>
+<pre><code>&gt;&gt;&gt; <span class="userinput">job = {}
+for c in collections['items']:
+  if [] != filter(lambda f: re.search('^var-.*\.tsv\.bz2', f[1]), c['files']):
+    job[c['uuid']] = arvados.api('v1').jobs().create(body={
+      'script': 'grep',
+      'script_parameters': {'input': c['uuid'], 'pattern': "rs1126809\\b"},
+      'script_version': 'e7aeb42'
+    }).execute()
+    print "%s %s" % (pgpid[c['uuid']], job[c['uuid']]['uuid'])
+</span>
+hu43860C qr1hi-8i9sb-wbf3uthbhkcy8ji
+huB1FD55 qr1hi-8i9sb-scklkiy8dc27dab
+huDF04CC qr1hi-8i9sb-pg0w4rfrwfd9srg
+hu7A2F1D qr1hi-8i9sb-n7u0u0rj8b47168
+hu553620 qr1hi-8i9sb-k7gst7vyhg20pt1
+huD09534 qr1hi-8i9sb-4w65pm48123fte5
+hu599905 qr1hi-8i9sb-wmwa5b5r3eghnev
+hu43860C qr1hi-8i9sb-j1mngmakdh8iv9o
+huB1FD55 qr1hi-8i9sb-4j6ehiatcolaoxb
+huDF04CC qr1hi-8i9sb-n6lcmcr3lowqr5u
+hu7A2F1D qr1hi-8i9sb-0hwsdtojfcxjo40
+hu553620 qr1hi-8i9sb-cvvqzqea7jhwb0i
+huD09534 qr1hi-8i9sb-d0y0qtzuwzbrjj0
+hu599905 qr1hi-8i9sb-i9ec9g8d7rt70xg
+</code></pre>
+</notextile>
+
+
+Monitor job progress by refreshing the Jobs page in Workbench, or by using the API:
+
+<notextile>
+<pre><code>&gt;&gt;&gt; <span class="userinput">map(lambda j: arvados.api('v1').jobs().get(uuid=j['uuid']).execute()['success'], job.values())
+[None, True, None, None, None, None, None, None, None, None, None, None, None, None]
+</code></pre>
+</notextile>
+
+Unfinished jobs will appear as None, failed jobs as False, and completed jobs as True.
+
+After the jobs have completed, check output file sizes.
+
+<notextile>
+<pre><code>&gt;&gt;&gt; <span class="userinput">for collection_uuid in job:
+  job_uuid = job[collection_uuid]['uuid']
+  job_output = arvados.api('v1').jobs().get(uuid=job_uuid).execute()['output']
+  output_files = arvados.api('v1').collections().get(uuid=job_output).execute()['files']
+  # Test the output size.  If greater than zero, that means 'grep' found the variant 
+  if output_files[0][2] > 0:
+    print("%s has variant rs1126809" % (pgpid[collection_uuid]))
+  else:
+    print("%s does not have variant rs1126809" % (pgpid[collection_uuid]))
+</span>
+hu553620 does not have variant rs1126809
+hu43860C does not have variant rs1126809
+hu599905 has variant rs1126809
+huD09534 has variant rs1126809
+hu553620 does not have variant rs1126809
+huB1FD55 does not have variant rs1126809
+huDF04CC has variant rs1126809
+hu7A2F1D has variant rs1126809
+hu7A2F1D has variant rs1126809
+hu599905 has variant rs1126809
+huDF04CC has variant rs1126809
+huB1FD55 does not have variant rs1126809
+huD09534 has variant rs1126809
+hu43860C does not have variant rs1126809
+</code></pre>
+</notextile>
+
+Thus, of the 14 WGS results available for PGP participants reporting non-melanoma skin cancer, 8 include the rs1126809 variant.
diff --git a/doc/user/tutorials/intro-crunch.html.textile.liquid b/doc/user/tutorials/intro-crunch.html.textile.liquid
new file mode 100644 (file)
index 0000000..a678083
--- /dev/null
@@ -0,0 +1,24 @@
+---
+layout: default
+navsection: userguide
+title: Introduction to Crunch
+...
+
+The Arvados "Crunch" framework is designed to support processing very large data batches (gigabytes to terabytes) efficiently, and provides the following benefits:
+* Increase concurrency by running tasks asynchronously, using many CPUs and network interfaces at once (especially beneficial for CPU-bound and I/O-bound tasks respectively).
+* Track inputs, outputs, and settings so you can verify that the inputs, settings, and sequence of programs you used to arrive at an output is really what you think it was.
+* Ensure that your programs and workflows are repeatable with different versions of your code, OS updates, etc.
+* Interrupt and resume long-running jobs consisting of many short tasks.
+* Maintain timing statistics automatically, so they're there when you want them.
+
+h2. Prerequisites
+
+To get the most value out of this section, you should be comfortable with the following:
+
+# Using a secure shell client such as SSH or PuTTY to log on to a remote server
+# Using the Unix command line shell, Bash
+# Viewing and editing files using a unix text editor such as vi, Emacs, or nano
+# Programming in Python
+# Revision control using Git
+
+We also recommend you read the "Arvados Platform Overview":https://arvados.org/projects/arvados/wiki#Platform-Overview for an introduction and background information about Arvados.
diff --git a/doc/user/tutorials/running-external-program.html.textile.liquid b/doc/user/tutorials/running-external-program.html.textile.liquid
new file mode 100644 (file)
index 0000000..18f5f7d
--- /dev/null
@@ -0,0 +1,52 @@
+---
+layout: default
+navsection: userguide
+title: "Writing a pipeline template"
+...
+
+This tutorial demonstrates how to construct a two stage pipeline template that uses the "bwa mem":http://bio-bwa.sourceforge.net/ tool to produce a "Sequence Alignment/Map (SAM)":https://samtools.github.io/ file, then uses the "Picard SortSam tool":http://picard.sourceforge.net/command-line-overview.shtml#SortSam to produce a BAM (Binary Alignment/Map) file.
+
+{% include 'tutorial_expectations' %}
+
+Use the following command to create an empty template using @arv create pipeline_template@:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv create pipeline_template</span></code></pre>
+</notextile>
+
+This will open the template record in an interactive text editor (as specified by $EDITOR or $VISUAL, otherwise defaults to @nano@).  Now, update the contents of the editor with the following content:
+
+<notextile>{% code 'tutorial_bwa_sortsam_pipeline' as javascript %}</notextile>
+
+* @"name"@ is a human-readable name for the pipeline.
+* @"components"@ is a set of scripts or commands that make up the pipeline.  Each component is given an identifier (@"bwa-mem"@ and @"SortSam"@ in this example).
+** Each entry in @"components"@ is an Arvados job submission.  For more information about individual jobs, see the "job object reference":{{site.baseurl}}/api/schema/Job.html and "job create method.":{{site.baseurl}}/api/methods/jobs.html#create
+* @"repository"@, @"script_version"@, and @"script"@ indicate that we intend to use the external @"run-command"@ tool wrapper that is part of Arvados.  These parameters are described in more detail in "Writing a script":tutorial-firstscript.html
+* @"runtime_constraints"@ describes runtime resource requirements for the component.
+** @"docker_image"@ specifies the "Docker":https://www.docker.com/ runtime environment in which to run the job.  The Docker image @"arvados/jobs-java-bwa-samtools"@ supplied here has the Arvados SDK, Java runtime environment, bwa, and samtools installed.
+* @"script_parameters"@ describes the component parameters.
+** @"command"@ is the actual command line to invoke the @bwa@ and then @SortSam@.  The notation @$()@ denotes macro substitution commands evaluated by the run-command tool wrapper.
+** @"stdout"@ indicates that the output of this command should be captured to a file.
+** @$(node.cores)@ evaluates to the number of cores available on the compute node at time the command is run.
+** @$(tmpdir)@ evaluates to the local path for temporary directory the command should use for scratch data.
+** @$(reference_collection)@ evaluates to the script_parameter @"reference_collection"@
+** @$(dir $(...))@ constructs a local path to a directory representing the supplied Arvados collection.
+** @$(file $(...))@ constructs a local path to a given file within the supplied Arvados collection.
+** @$(glob $(...))@ searches the specified path based on a file glob pattern and evaluates to the first result.
+** @$(basename $(...))@ evaluates to the supplied path with leading path portion and trailing filename extensions stripped
+** @"output_of"@ indicates that the @output@ of the @bwa-mem@ component should be used as the @"input"@ of @SortSam@.  Arvados uses these dependencies between components to automatically determine the correct order to run them.
+
+When using @run-command@, the tool should write its output to the current working directory.  The output will be automatically uploaded to Keep when the job completes.
+
+See the "run-command reference":{{site.baseurl}}/user/topics/run-command.html for more information about using @run-command@.
+
+h2. Running your pipeline
+
+Your new pipeline template should appear at the top of the Workbench "pipeline&nbsp;templates":https://{{ site.arvados_workbench_host }}/pipeline_templates page.  You can run your pipeline "using Workbench":tutorial-pipeline-workbench.html or the "command line.":{{site.baseurl}}/user/topics/running-pipeline-command-line.html
+
+Test data is available in the "Arvados Tutorial":https://{{ site.arvados_workbench_host }}/projects/qr1hi-j7d0g-u7zg1qdaowykd8d project:
+
+* Choose <i class="fa fa-fw fa-archive"></i> "Tutorial chromosome 19 reference (2463fa9efeb75e099685528b3b9071e0+438)":https://{{ site.arvados_workbench_host }}/collections/2463fa9efeb75e099685528b3b9071e0+438 for the "reference_collection" parameter
+* Choose <i class="fa fa-fw fa-archive"></i> "Tutorial sample exome (3229739b505d2b878b62aed09895a55a+142)":https://{{ site.arvados_workbench_host }}/collections/3229739b505d2b878b62aed09895a55a+142 for the "sample" parameter
+
+For more information and examples for writing pipelines, see the "pipeline template reference":{{site.baseurl}}/api/schema/PipelineTemplate.html
diff --git a/doc/user/tutorials/tutorial-firstscript.html.textile.liquid b/doc/user/tutorials/tutorial-firstscript.html.textile.liquid
new file mode 100644 (file)
index 0000000..6fe88fe
--- /dev/null
@@ -0,0 +1,104 @@
+---
+layout: default
+navsection: userguide
+navmenu: Tutorials
+title: "Writing a Crunch script"
+...
+
+This tutorial demonstrates how to write a script using Arvados Python SDK.  The Arvados SDK supports access to advanced features not available using the @run-command@ wrapper, such as scheduling concurrent tasks across nodes.
+
+{% include 'tutorial_expectations' %}
+
+This tutorial uses @$USER@ to denote your username.  Replace @$USER@ with your user name in all the following examples.
+
+Start by creating a directory called @tutorial@.  Next, create a subdirectory called @crunch_scripts@ and change to that directory:
+
+<notextile>
+<pre><code>~$ <span class="userinput">mkdir -p tutorial/crunch_scripts</span>
+~$ <span class="userinput">cd tutorial/crunch_scripts</span></code></pre>
+</notextile>
+
+Next, using @nano@ or your favorite Unix text editor, create a new file called @hash.py@ in the @crunch_scripts@ directory.
+
+notextile. <pre>~/tutorial/crunch_scripts$ <code class="userinput">nano hash.py</code></pre>
+
+Add the following code to compute the MD5 hash of each file in a collection:
+
+<notextile> {% code 'tutorial_hash_script_py' as python %} </notextile>
+
+Make the file executable:
+
+notextile. <pre><code>~/tutorial/crunch_scripts$ <span class="userinput">chmod +x hash.py</span></code></pre>
+
+Next, create a submission job record.  This describes a specific invocation of your script:
+
+<notextile>
+<pre><code>~/tutorial/crunch_scripts$ <span class="userinput">cat &gt;~/the_job &lt;&lt;EOF
+{
+ "repository":"",
+ "script":"hash.py",
+ "script_version":"$HOME/tutorial",
+ "script_parameters":{
+   "input":"c1bad4b39ca5a924e481008009d94e32+210"
+ }
+}
+EOF</span>
+</code></pre>
+</notextile>
+
+You can now run your script on your local workstation or VM using @arv-crunch-job@:
+
+<notextile>
+<pre><code>~/tutorial/crunch_scripts$ <span class="userinput">arv-crunch-job --job "$(cat ~/the_job)"</span>
+2014-08-06_15:16:22 qr1hi-8i9sb-qyrat80ef927lam 14473  check slurm allocation
+2014-08-06_15:16:22 qr1hi-8i9sb-qyrat80ef927lam 14473  node localhost - 1 slots
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473  start
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473  script hash.py
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473  script_version $HOME/tutorial
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473  script_parameters {"input":"c1bad4b39ca5a924e481008009d94e32+210"}
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473  runtime_constraints {"max_tasks_per_node":0}
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473  start level 0
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473  status: 0 done, 0 running, 1 todo
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473 0 job_task qr1hi-ot0gb-lptn85mwkrn9pqo
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473 0 child 14478 started on localhost.1
+2014-08-06_15:16:23 qr1hi-8i9sb-qyrat80ef927lam 14473  status: 0 done, 1 running, 0 todo
+2014-08-06_15:16:24 qr1hi-8i9sb-qyrat80ef927lam 14473 0 stderr crunchstat: Running [stdbuf --output=0 --error=0 /home/$USER/tutorial/crunch_scripts/hash.py]
+2014-08-06_15:16:24 qr1hi-8i9sb-qyrat80ef927lam 14473 0 child 14478 on localhost.1 exit 0 signal 0 success=true
+2014-08-06_15:16:24 qr1hi-8i9sb-qyrat80ef927lam 14473 0 success in 1 seconds
+2014-08-06_15:16:24 qr1hi-8i9sb-qyrat80ef927lam 14473 0 output
+2014-08-06_15:16:25 qr1hi-8i9sb-qyrat80ef927lam 14473  wait for last 0 children to finish
+2014-08-06_15:16:25 qr1hi-8i9sb-qyrat80ef927lam 14473  status: 1 done, 0 running, 1 todo
+2014-08-06_15:16:25 qr1hi-8i9sb-qyrat80ef927lam 14473  start level 1
+2014-08-06_15:16:25 qr1hi-8i9sb-qyrat80ef927lam 14473  status: 1 done, 0 running, 1 todo
+2014-08-06_15:16:25 qr1hi-8i9sb-qyrat80ef927lam 14473 1 job_task qr1hi-ot0gb-e3obm0lv6k6p56a
+2014-08-06_15:16:25 qr1hi-8i9sb-qyrat80ef927lam 14473 1 child 14504 started on localhost.1
+2014-08-06_15:16:25 qr1hi-8i9sb-qyrat80ef927lam 14473  status: 1 done, 1 running, 0 todo
+2014-08-06_15:16:26 qr1hi-8i9sb-qyrat80ef927lam 14473 1 stderr crunchstat: Running [stdbuf --output=0 --error=0 /home/$USER/tutorial/crunch_scripts/hash.py]
+2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473 1 child 14504 on localhost.1 exit 0 signal 0 success=true
+2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473 1 success in 10 seconds
+2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473 1 output 8c20281b9840f624a486e4f1a78a1da8+105+A234be74ceb5ea31db6e11b6be26f3eb76d288ad0@54987018
+2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473  wait for last 0 children to finish
+2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473  status: 2 done, 0 running, 0 todo
+2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473  release job allocation
+2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473  Freeze not implemented
+2014-08-06_15:16:35 qr1hi-8i9sb-qyrat80ef927lam 14473  collate
+2014-08-06_15:16:36 qr1hi-8i9sb-qyrat80ef927lam 14473  output uuid qr1hi-4zz18-n91qrqfp3zivexo
+2014-08-06_15:16:36 qr1hi-8i9sb-qyrat80ef927lam 14473  output hash c1b44b6dc41ef334cf1136033ca950e6+54
+2014-08-06_15:16:37 qr1hi-8i9sb-qyrat80ef927lam 14473  finish
+2014-08-06_15:16:38 qr1hi-8i9sb-qyrat80ef927lam 14473  log manifest is 7fe8cf1d45d438a3ca3ac4a184b7aff4+83
+</code></pre>
+</notextile>
+
+Although the job runs locally, the output of the job has been saved to Keep, the Arvados file store.  The "output uuid" line (fourth from the bottom) provides the UUID of the Arvados collection where the script's output has been saved.  Copy the output identifier and use @arv-ls@ to list the contents of your output collection, and @arv-get@ to download it to the current directory:
+
+<notextile>
+<pre><code>~/tutorial/crunch_scripts$ <span class="userinput">arv-ls qr1hi-4zz18-n91qrqfp3zivexo</span>
+./md5sum.txt
+~/tutorial/crunch_scripts$ <span class="userinput">arv-get qr1hi-4zz18-n91qrqfp3zivexo/ .</span>
+0 MiB / 0 MiB 100.0%
+~/tutorial/crunch_scripts$ <span class="userinput">cat md5sum.txt</span>
+44b8ae3fde7a8a88d2f7ebd237625b4f c1bad4b39ca5a924e481008009d94e32+210/var-GS000016015-ASM.tsv.bz2
+</code></pre>
+</notextile>
+
+Running locally is convenient for development and debugging, as it permits a fast iterative development cycle.  Your job run is also recorded by Arvados, and will appear in the *Recent jobs and pipelines* panel on the "Workbench Dashboard":https://{{site.arvados_workbench_host}}.  This provides limited provenance, by recording the input parameters, the execution log, and the output.  However, running locally does not allow you to scale out to multiple nodes, and does not store the complete system snapshot required to achieve reproducibility; to do that you need to "submit a job to the Arvados cluster":{{site.baseurl}}/user/tutorials/tutorial-submit-job.html.
diff --git a/doc/user/tutorials/tutorial-keep-get.html.textile.liquid b/doc/user/tutorials/tutorial-keep-get.html.textile.liquid
new file mode 100644 (file)
index 0000000..8f991d9
--- /dev/null
@@ -0,0 +1,47 @@
+---
+layout: default
+navsection: userguide
+title: "Downloading data"
+...
+
+This tutorial describes how to list and download Arvados data collections using the command line tools @arv-ls@ and @arv-get@.  It is also possible to download files from a collection from the Workbench page for the collection, covered in "running a pipeline using Workbench":{{site.baseurl}}/user/tutorials/tutorial-pipeline-workbench.html
+
+{% include 'tutorial_expectations' %}
+
+You can view the contents of a collection using @arv-ls@:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv-ls c1bad4b39ca5a924e481008009d94e32+210</span>
+var-GS000016015-ASM.tsv.bz2
+</code></pre>
+
+<pre><code>~$ <span class="userinput">arv-ls 887cd41e9c613463eab2f0d885c6dd96+83</span>
+alice.txt
+bob.txt
+carol.txt
+</code></pre>
+</notextile>
+
+Use @-s@ to print file sizes rounded up to the nearest kilobyte:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv-ls -s c1bad4b39ca5a924e481008009d94e32+210</span>
+221887 var-GS000016015-ASM.tsv.bz2
+</code></pre>
+</notextile>
+
+Use @arv-get@ to download the contents of a collection and place it in the directory specified in the second argument (in this example, @.@ for the current directory):
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv-get c1bad4b39ca5a924e481008009d94e32+210/ .</span>
+~$ <span class="userinput">ls var-GS000016015-ASM.tsv.bz2</span>
+var-GS000016015-ASM.tsv.bz2
+</code></pre>
+</notextile>
+
+You can also download individual files:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv-get 887cd41e9c613463eab2f0d885c6dd96+83/alice.txt .</span>
+</code></pre>
+</notextile>
diff --git a/doc/user/tutorials/tutorial-keep-mount.html.textile.liquid b/doc/user/tutorials/tutorial-keep-mount.html.textile.liquid
new file mode 100644 (file)
index 0000000..a41fede
--- /dev/null
@@ -0,0 +1,36 @@
+---
+layout: default
+navsection: userguide
+title: "Mounting Keep as a filesystem"
+...
+
+This tutorial describes how to access Arvados collections using traditional filesystem tools by mounting Keep as a read-only file system using @arv-mount@.
+
+{% include 'tutorial_expectations' %}
+
+h2. Arv-mount
+
+@arv-mount@ provides several features:
+
+* You can browse, open and read Keep entries as if they are regular files.
+* It is easy for existing tools to access files in Keep.
+* Data is downloaded on demand.  It is not necessary to download an entire file or collection to start processing.
+
+The default mode permits browsing any collection in Arvados as a subdirectory under the mount directory.  To avoid having to fetch a potentially large list of all collections, collection directories only come into existence when explicitly accessed by their Keep locator. For instance, a collection may be found by its content hash in the @keep/by_id@ directory.
+
+<notextile>
+<pre><code>~$ <span class="userinput">mkdir -p keep</span>
+~$ <span class="userinput">arv-mount keep</span>
+~$ <span class="userinput">cd keep/by_id/c1bad4b39ca5a924e481008009d94e32+210</span>
+~/keep/by_id/c1bad4b39ca5a924e481008009d94e32+210$ <span class="userinput">ls</span>
+var-GS000016015-ASM.tsv.bz2
+~/keep/by_id/c1bad4b39ca5a924e481008009d94e32+210$ <span class="userinput">md5sum var-GS000016015-ASM.tsv.bz2</span>
+44b8ae3fde7a8a88d2f7ebd237625b4f  var-GS000016015-ASM.tsv.bz2
+~/keep/by_id/c1bad4b39ca5a924e481008009d94e32+210$ <span class="userinput">cd ../..</span>
+~$ <span class="userinput">fusermount -u keep</span>
+</code></pre>
+</notextile>
+
+The last line unmounts Keep.  Subdirectories will no longer be accessible.
+
+Within each directory on Keep, there is a @.arvados#collection@ file that does not show up with @ls@. Its contents include, for instance, the @portable_data_hash@, which is the same as the Keep locator.
diff --git a/doc/user/tutorials/tutorial-keep.html.textile.liquid b/doc/user/tutorials/tutorial-keep.html.textile.liquid
new file mode 100644 (file)
index 0000000..ada6d1f
--- /dev/null
@@ -0,0 +1,56 @@
+---
+layout: default
+navsection: userguide
+title: "Uploading data"
+...
+
+This tutorial describes how to upload new Arvados data collections using the command line tool @arv keep put@.
+
+notextile. <div class="spaced-out">
+
+{% include 'tutorial_expectations' %}
+
+h3. Upload
+
+To upload a file to Keep using @arv keep put@:
+<notextile>
+<pre><code>~$ <span class="userinput">arv keep put var-GS000016015-ASM.tsv.bz2</span>
+216M / 216M 100.0%
+Collection saved as ...
+qr1hi-4zz18-xxxxxxxxxxxxxxx
+</code></pre>
+</notextile>
+
+The output value @qr1hi-4zz18-xxxxxxxxxxxxxxx@ is the uuid of the Arvados collection created.
+
+The file used in this example is a freely available TSV file containing variant annotations from "Personal Genome Project (PGP)":http://www.pgp-hms.org participant "hu599905":https://my.pgp-hms.org/profile/hu599905, downloadable "here":https://warehouse.pgp-hms.org/warehouse/f815ec01d5d2f11cb12874ab2ed50daa+234+K@ant/var-GS000016015-ASM.tsv.bz2.
+
+<notextile><a name="dir"></a></notextile>It is also possible to upload an entire directory with @arv keep put@:
+
+<notextile>
+<pre><code>~$ <span class="userinput">mkdir tmp</span>
+~$ <span class="userinput">echo "hello alice" > tmp/alice.txt</span>
+~$ <span class="userinput">echo "hello bob" > tmp/bob.txt</span>
+~$ <span class="userinput">echo "hello carol" > tmp/carol.txt</span>
+~$ <span class="userinput">arv keep put tmp</span>
+0M / 0M 100.0%
+Collection saved as ...
+qr1hi-4zz18-yyyyyyyyyyyyyyy
+</code></pre>
+</notextile>
+
+In both examples, the @arv keep put@ command created a collection. The first collection contains the single uploaded file. The second collection contains the entire uploaded directory.
+
+@arv keep put@ accepts quite a few optional command line arguments, which are described "on the arv subcommands":{{site.baseurl}}/sdk/cli/subcommands.html#arv-keep-put page.
+
+h3. Locate your collection in Workbench
+
+Visit the Workbench *Dashboard*.  Click on *Projects*<span class="caret"></span> dropdown menu in the top navigation menu, select your *Home* project.  Your newly uploaded collection should appear near the top of the *Data collections* tab.  The collection locator printed by @arv keep put@ will appear under the *name* column.
+
+To move the collection to a different project, check the box at the left of the collection row.  Pull down the *Selection...*<span class="caret"></span> menu near the top of the page tab, and select *Move selected*. This will open a dialog box where you can select a destination project for the collection.  Click a project, then finally the <span class="btn btn-sm btn-primary">Move</span> button.
+
+!{{ site.baseurl }}/images/workbench-move-selected.png!
+
+Click on the *<i class="fa fa-fw fa-archive"></i> Show* button next to the collection's listing on a project page to go to the Workbench page for your collection.  On this page, you can see the collection's contents, download individual files, and set sharing options.
+
+notextile. </div>
diff --git a/doc/user/tutorials/tutorial-pipeline-workbench.html.textile.liquid b/doc/user/tutorials/tutorial-pipeline-workbench.html.textile.liquid
new file mode 100644 (file)
index 0000000..8dad6ab
--- /dev/null
@@ -0,0 +1,29 @@
+---
+layout: default
+navsection: userguide
+title: "Running a pipeline using Workbench"
+...
+
+A "pipeline" (sometimes called a "workflow" in other systems) is a sequence of steps that apply various programs or tools to transform input data to output data.  Pipelines are the principal means of performing computation with Arvados.  This tutorial demonstrates how to run a single-stage pipeline to take a small data set of paired-end reads from a sample "exome":https://en.wikipedia.org/wiki/Exome in "FASTQ":https://en.wikipedia.org/wiki/FASTQ_format format and align them to "Chromosome 19":https://en.wikipedia.org/wiki/Chromosome_19_%28human%29 using the "bwa mem":http://bio-bwa.sourceforge.net/ tool, producing a "Sequence Alignment/Map (SAM)":https://samtools.github.io/ file.  This tutorial will introduce the following Arvados features:
+
+<div class="inside-list">
+* How to create a new pipeline from an existing template.
+* How to browse and select input data for the pipeline and submit the pipeline to run on the Arvados cluster.
+* How to access your pipeline results.
+</div>
+
+notextile. <div class="spaced-out">
+
+# Start from the *Workbench Dashboard*.  You can access the Dashboard by clicking on *<i class="fa fa-lg fa-fw fa-dashboard"></i> Dashboard* in the upper left corner of any Workbench page.
+# Click on the <span class="btn btn-sm btn-primary"><i class="fa fa-fw fa-gear"></i> Run a pipeline...</span> button.  This will open a dialog box titled *Choose a pipeline to run*.
+# Click to open the *All projects <span class="caret"></span>* menu.  Under the *Projects shared with me* header, select *<i class="fa fa-fw fa-share-alt"></i> Arvados Tutorial*.
+# Select *<i class="fa fa-fw fa-gear"></i> Tutorial align using bwa mem* and click the <span class="btn btn-sm btn-primary" >Next: choose inputs <i class="fa fa-fw fa-arrow-circle-right"></i></span> button.  This will create a new pipeline in your *Home* project and will open it. You can now supply the inputs for the pipeline.
+# The first input parameter to the pipeline is *Reference genome (fasta)*.  Click the <span class="btn btn-sm btn-primary">Choose</span> button beneath that header.  This will open a dialog box titled *Choose a dataset for Reference genome (fasta)*.
+# Once again, open the *All projects <span class="caret"></span>* menu and select *<i class="fa fa-fw fa-share-alt"></i> Arvados Tutorial*.  Select *<i class="fa fa-fw fa-archive"></i> Tutorial chromosome 19 reference* and click the <span class="btn btn-sm btn-primary" >OK</span> button.
+# Repeat the previous two steps to set the *Input genome (fastq)* parameter to *<i class="fa fa-fw fa-archive"></i> Tutorial sample exome*.
+# Click on the <span class="btn btn-sm btn-primary" >Run <i class="fa fa-fw fa-play"></i></span> button.  The page updates to show you that the pipeline has been submitted to run on the Arvados cluster.
+# After the pipeline starts running, you can track the progress by watching log messages from jobs.  This page refreshes automatically.  You will see a <span class="label label-success">complete</span> label under the *job* column when the pipeline completes successfully.
+# Click on the *Output* link to see the results of the job.  This will load a new page listing the output files from this pipeline.  You'll see the output SAM file from the alignment tool under the *Files* tab.
+# Click on the <span class="btn btn-sm btn-info"><i class="fa fa-download"></i></span> download button to the right of the SAM file to download your results.
+
+notextile. </div>
diff --git a/doc/user/tutorials/tutorial-submit-job.html.textile.liquid b/doc/user/tutorials/tutorial-submit-job.html.textile.liquid
new file mode 100644 (file)
index 0000000..fc77e5c
--- /dev/null
@@ -0,0 +1,112 @@
+---
+layout: default
+navsection: userguide
+navmenu: Tutorials
+title: "Running on an Arvados cluster"
+...
+
+This tutorial demonstrates how to create a pipeline to run your crunch script on an Arvados cluster.  Cluster jobs can scale out to multiple nodes, and use @git@ and @docker@ to store the complete system snapshot required to achieve reproducibility.
+
+{% include 'tutorial_expectations' %}
+
+This tutorial uses @$USER@ to denote your username.  Replace @$USER@ with your user name in all the following examples.
+
+h2. Setting up Git
+
+All Crunch scripts are managed through the Git revision control system.  Before you start using Git, you should do some basic configuration (you only need to do this the first time):
+
+<notextile>
+<pre><code>~$ <span class="userinput">git config --global user.name "Your Name"</span>
+~$ <span class="userinput">git config --global user.email $USER@example.com</span></code></pre>
+</notextile>
+
+On the Arvados Workbench, navigate to "Code repositories":https://{{site.arvados_workbench_host}}/repositories.  You should see a repository with your user name listed in the *name* column.  Next to *name* is the column *push_url*.  Copy the *push_url* value associated with your repository.  This should look like <notextile><code>git@git.{{ site.arvados_api_host }}:$USER.git</code></notextile>.
+
+Next, on the Arvados virtual machine, clone your Git repository:
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd $HOME</span> # (or wherever you want to install)
+~$ <span class="userinput">git clone git@git.{{ site.arvados_api_host }}:$USER.git</span>
+Cloning into '$USER'...</code></pre>
+</notextile>
+
+This will create a Git repository in the directory called @$USER@ in your home directory. Say yes when prompted to continue with connection.
+Ignore any warning that you are cloning an empty repository.
+
+{% include 'notebox_begin' %}
+For more information about using Git, try
+
+notextile. <pre><code>$ <span class="userinput">man gittutorial</span></code></pre>
+
+or *"search Google for Git tutorials":http://google.com/#q=git+tutorial*.
+{% include 'notebox_end' %}
+
+h2. Creating a Crunch script
+
+Start by entering the @$USER@ directory created by @git clone@.  Next create a subdirectory called @crunch_scripts@ and change to that directory:
+
+<notextile>
+<pre><code>~$ <span class="userinput">cd $USER</span>
+~/$USER$ <span class="userinput">mkdir crunch_scripts</span>
+~/$USER$ <span class="userinput">cd crunch_scripts</span></code></pre>
+</notextile>
+
+Next, using @nano@ or your favorite Unix text editor, create a new file called @hash.py@ in the @crunch_scripts@ directory.
+
+notextile. <pre>~/$USER/crunch_scripts$ <code class="userinput">nano hash.py</code></pre>
+
+Add the following code to compute the MD5 hash of each file in a collection (if you already completed "Writing a Crunch script":tutorial-firstscript.html you can just copy the @hash.py@ file you created previously.)
+
+<notextile> {% code 'tutorial_hash_script_py' as python %} </notextile>
+
+Make the file executable:
+
+notextile. <pre><code>~/$USER/crunch_scripts$ <span class="userinput">chmod +x hash.py</span></code></pre>
+
+Next, add the file to the staging area.  This tells @git@ that the file should be included on the next commit.
+
+notextile. <pre><code>~/$USER/crunch_scripts$ <span class="userinput">git add hash.py</span></code></pre>
+
+Next, commit your changes.  All staged changes are recorded into the local git repository:
+
+<notextile>
+<pre><code>~/$USER/crunch_scripts$ <span class="userinput">git commit -m"my first script"</span>
+[master (root-commit) 27fd88b] my first script
+ 1 file changed, 45 insertions(+)
+ create mode 100755 crunch_scripts/hash.py</code></pre>
+</notextile>
+
+Finally, upload your changes to the Arvados server:
+
+<notextile>
+<pre><code>~/$USER/crunch_scripts$ <span class="userinput">git push origin master</span>
+Counting objects: 4, done.
+Compressing objects: 100% (2/2), done.
+Writing objects: 100% (4/4), 682 bytes, done.
+Total 4 (delta 0), reused 0 (delta 0)
+To git@git.qr1hi.arvadosapi.com:$USER.git
+ * [new branch]      master -> master</code></pre>
+</notextile>
+
+h2. Create a pipeline template
+
+Next, create a new template using @arv create pipeline_template@:
+
+<notextile>
+<pre><code>~$ <span class="userinput">arv create pipeline_template</span></code></pre>
+</notextile>
+
+In the editor, enter the following template:
+
+<notextile> {% code 'tutorial_submit_job' as javascript %} </notextile>
+
+* @"repository"@ is the name of a git repository to search for the script version.  You can access a list of available git repositories on the Arvados Workbench under "Code repositories":https://{{site.arvados_workbench_host}}/repositories.
+* @"script_version"@ specifies the version of the script that you wish to run.  This can be in the form of an explicit Git revision hash, a tag, or a branch (in which case it will use the HEAD of the specified branch).  Arvados logs the script version that was used in the run, enabling you to go back and re-run any past job with the guarantee that the exact same code will be used as was used in the previous run.
+* @"script"@ specifies the filename of the script to run.  Crunch expects to find this in the @crunch_scripts/@ subdirectory of the Git repository.
+* @"runtime_constraints"@ describes the runtime environment required to run the job.  These are described in the "job record schema":{{site.baseurl}}/api/schema/Job.html
+
+h2. Running your pipeline
+
+Your new pipeline template should appear at the top of the Workbench "pipeline&nbsp;templates":https://{{ site.arvados_workbench_host }}/pipeline_templates page.  You can run your pipeline "using Workbench":tutorial-pipeline-workbench.html or the "command line.":{{site.baseurl}}/user/topics/running-pipeline-command-line.html
+
+For more information and examples for writing pipelines, see the "pipeline template reference":{{site.baseurl}}/api/schema/PipelineTemplate.html
diff --git a/doc/zenweb-liquid.rb b/doc/zenweb-liquid.rb
new file mode 100644 (file)
index 0000000..0be882a
--- /dev/null
@@ -0,0 +1,72 @@
+require 'zenweb'
+require 'liquid'
+
+module ZenwebLiquid
+  VERSION = '0.0.1'
+end
+
+module Zenweb
+
+  class Page
+
+    def render_liquid page, content
+      liquid self.body, content, page, binding
+    end
+    
+    ##
+    # Render a page's liquid and return the intermediate result
+    def liquid template, content, page, binding = TOPLEVEL_BINDING
+      Liquid::Template.file_system = Liquid::LocalFileSystem.new(File.join(File.dirname(Rake.application().rakefile), "_includes"))
+      unless defined? @liquid_template
+        @liquid_template = Liquid::Template.parse(template)
+      end
+      
+      vars = {}
+      vars["content"] = content
+
+      vars["site"] = site.config.h.clone
+      pages = {}
+      site.pages.each do |f, p|
+        pages[f] = p.config.h.clone
+        pages[f]["url"] = p.url
+      end
+      vars["site"]["pages"] = pages
+
+      vars["page"] = page.config.h.clone
+      vars["page"]["url"] = page.url
+      
+      @liquid_template.render(vars)
+    end
+  end
+
+  class LiquidCode < Liquid::Include
+    Syntax = /(#{Liquid::QuotedFragment}+)(\s+(?:as)\s+(#{Liquid::QuotedFragment}+))?/o
+
+    def initialize(tag_name, markup, tokens)
+      Liquid::Tag.instance_method(:initialize).bind(self).call(tag_name, markup, tokens)
+
+      if markup =~ Syntax
+        @template_name = $1
+        @language = $3
+        @attributes    = {}
+      else
+        raise SyntaxError.new("Error in tag 'code' - Valid syntax: include '[code_file]' as '[language]'")
+      end
+    end
+    
+    def render(context)
+      require 'coderay'
+
+      partial = load_cached_partial(context)
+      html = ''
+
+      context.stack do
+        html = CodeRay.scan(partial.root.nodelist.join, @language).div
+      end
+
+      html
+    end
+
+    Liquid::Template.register_tag('code', LiquidCode)    
+  end
+end
diff --git a/doc/zenweb-textile.rb b/doc/zenweb-textile.rb
new file mode 100644 (file)
index 0000000..0a4bb6f
--- /dev/null
@@ -0,0 +1,17 @@
+require 'zenweb'
+
+module ZenwebTextile
+  VERSION = '0.0.1'
+end
+
+module Zenweb
+  class Page
+    
+    ##
+    # Render a page's textile and return the resulting html
+    def render_textile page, content
+      require 'RedCloth'
+      RedCloth.new(content ? content : self.body).to_html
+    end
+  end
+end
diff --git a/docker/.gitignore b/docker/.gitignore
new file mode 100644 (file)
index 0000000..ff626a3
--- /dev/null
@@ -0,0 +1,2 @@
+*-image
+build/
diff --git a/docker/README.md b/docker/README.md
new file mode 100644 (file)
index 0000000..f521b8c
--- /dev/null
@@ -0,0 +1,81 @@
+Deploying Arvados in Docker Containers
+======================================
+
+This file explains how to build and deploy Arvados servers in Docker
+containers, so that they can be run easily in different environments
+(a dedicated server, a developer's laptop, a virtual machine,
+etc).
+
+This is a work in progress; instructions will almost certainly be
+incomplete and possibly out of date.
+
+Prerequisites
+-------------
+
+* Docker
+
+  Docker is a Linux container management system based on LXC. It is a
+  very young system but is being developed rapidly.
+  [Installation packages](http://www.docker.io/gettingstarted/)
+  are available for several platforms.
+  
+  If a prebuilt docker package is not available for your platform, the
+  short instructions for installing it are:
+  
+  1. Create a `docker` group and add yourself to it.
+
+     <pre>
+     $ sudo addgroup docker
+     $ sudo adduser `whoami` docker
+     </pre>
+
+     Log out and back in.
+        
+  2. Add a `cgroup` filesystem and mount it:
+
+     <pre>
+     $ mkdir -p /cgroup
+     $ grep cgroup /etc/fstab
+     none   /cgroup    cgroup    defaults    0    0
+     $ sudo mount /cgroup
+        </pre>
+        
+  3. [Download and run a docker binary from docker.io.](http://docs.docker.io/en/latest/installation/binaries/)
+
+* Ruby (version 1.9.3 or greater)
+
+* sudo privileges to run `debootstrap`
+
+Building
+--------
+
+Type `./build.sh` to configure and build the following Docker images:
+
+   * arvados/api       - the Arvados API server
+   * arvados/doc       - Arvados documentation
+   * arvados/warehouse - Keep, the Arvados content-addressable filesystem
+   * arvados/workbench - the Arvados console
+   * arvados/sso       - the Arvados single-signon authentication server
+
+`build.sh` will generate reasonable defaults for all configuration
+settings.  If you want more control over the way Arvados is
+configured, first copy `config.yml.example` to `config.yml` and edit
+it with appropriate configuration settings, and then run `./build.sh`.
+
+Running
+-------
+
+The `arvdock` script in this directory is used to start, stop and
+restart Arvados servers on your machine.  The simplest and easiest way
+to use it is `./arvdock start` to start the full complement of Arvados
+servers, and `./arvdock stop` and `./arvdock restart` to stop and
+restart all servers, respectively.
+
+Developers who are working on individual servers can start, stop or
+restart just those containers, e.g.:
+
+* `./arvdock start --api --sso` to start just the API and SSO services.
+* `./arvdock stop --keep` to stop just the Keep services.
+* `./arvdock restart --workbench=8000` restarts just the Workbench service on port 8000.
+
+For a full set of arguments, use `./arvdock --help`.
diff --git a/docker/api/Dockerfile b/docker/api/Dockerfile
new file mode 100644 (file)
index 0000000..abd2114
--- /dev/null
@@ -0,0 +1,80 @@
+# Arvados API server Docker container.
+
+FROM arvados/passenger
+MAINTAINER Tim Pierce <twp@curoverse.com>
+
+# Install postgres and apache.
+RUN apt-get update -qq
+RUN apt-get install -qqy \
+    procps postgresql postgresql-server-dev-9.1 apache2 slurm-llnl munge \
+    supervisor sudo libwww-perl libio-socket-ssl-perl libcrypt-ssleay-perl \
+    libjson-perl cron openssh-server
+
+ADD munge.key /etc/munge/
+RUN chown munge:munge /etc/munge/munge.key && chmod 600 /etc/munge/munge.key
+ADD generated/slurm.conf /etc/slurm-llnl/
+
+RUN /usr/local/rvm/bin/rvm-exec default gem install arvados-cli arvados
+# /for crunch-dispatch
+
+RUN /bin/mkdir -p /usr/src/arvados/services
+ADD generated/api.tar.gz /usr/src/arvados/services/
+
+# Install generated config files
+ADD generated/database.yml /usr/src/arvados/services/api/config/database.yml
+ADD generated/omniauth.rb /usr/src/arvados/services/api/config/initializers/omniauth.rb
+RUN /bin/cp /usr/src/arvados/services/api/config/environments/production.rb.example /usr/src/arvados/services/api/config/environments/production.rb
+ADD generated/application.yml /usr/src/arvados/services/api/config/application.yml
+ADD generated/apache2_vhost /etc/apache2/sites-available/arvados
+
+# Configure Rails databases.
+ENV RAILS_ENV production
+ADD generated/config_databases.sh /tmp/config_databases.sh
+ADD generated/superuser_token /tmp/superuser_token
+RUN /usr/local/rvm/bin/rvm-exec default bundle install --gemfile=/usr/src/arvados/services/api/Gemfile && \
+    sh /tmp/config_databases.sh && \
+    rm /tmp/config_databases.sh && \
+    /etc/init.d/postgresql start && \
+    cd /usr/src/arvados/services/api && \
+    /usr/local/rvm/bin/rvm-exec default bundle exec rake db:structure:load && \
+    /usr/local/rvm/bin/rvm-exec default bundle exec rake db:seed && \
+    /usr/local/rvm/bin/rvm-exec default bundle exec rake assets:precompile && \
+    /usr/local/rvm/bin/rvm-exec default ./script/create_superuser_token.rb $(cat /tmp/superuser_token) && \
+    chown www-data:www-data config.ru && \
+    chown www-data:www-data log -R && \
+    mkdir -p tmp && \
+    chown www-data:www-data tmp -R
+
+# Configure Apache and Passenger.
+RUN a2dissite default && \
+    a2ensite arvados && \
+    a2enmod rewrite && \
+    a2enmod ssl && \
+    /bin/mkdir /var/run/apache2
+
+# Install a token for root
+RUN mkdir -p /root/.config/arvados; echo "ARVADOS_API_HOST=api" >> /root/.config/arvados/settings.conf && echo "ARVADOS_API_HOST_INSECURE=yes" >> /root/.config/arvados/settings.conf && echo "ARVADOS_API_TOKEN=$(cat /tmp/superuser_token)" >> /root/.config/arvados/settings.conf && chmod 600 /root/.config/arvados/settings.conf
+
+# Set up directory for job commit repo
+RUN mkdir -p /var/lib/arvados
+# Add crunch user
+RUN addgroup --gid 4005 crunch && mkdir /home/crunch && useradd --uid 4005 --gid 4005 crunch && chown crunch:crunch /home/crunch
+
+# Create keep and compute node objects
+ADD keep_server_0.json /root/
+ADD keep_server_1.json /root/
+
+# Set up update-gitolite.rb
+RUN mkdir /usr/local/arvados/config -p
+ADD generated/arvados-clients.yml /usr/local/arvados/config/
+ADD update-gitolite.rb /usr/local/arvados/
+
+# Supervisor.
+ADD supervisor.conf /etc/supervisor/conf.d/arvados.conf
+ADD generated/setup.sh /usr/local/bin/setup.sh
+ADD generated/setup-gitolite.sh /usr/local/bin/setup-gitolite.sh
+ADD crunch-dispatch-run.sh /usr/local/bin/crunch-dispatch-run.sh
+ADD apache2_foreground.sh /etc/apache2/foreground.sh
+
+# Start the supervisor.
+CMD ["/usr/bin/supervisord", "-n"]
diff --git a/docker/api/apache2_foreground.sh b/docker/api/apache2_foreground.sh
new file mode 100755 (executable)
index 0000000..fc6028e
--- /dev/null
@@ -0,0 +1,7 @@
+#! /bin/bash
+
+read pid cmd state ppid pgrp session tty_nr tpgid rest < /proc/self/stat
+trap "kill -TERM -$pgrp; exit" EXIT TERM KILL SIGKILL SIGTERM SIGQUIT
+
+source /etc/apache2/envvars
+/usr/sbin/apache2 -D FOREGROUND
diff --git a/docker/api/apache2_vhost.in b/docker/api/apache2_vhost.in
new file mode 100644 (file)
index 0000000..fdbb2f9
--- /dev/null
@@ -0,0 +1,52 @@
+# VirtualHost definition for the Arvados API server
+
+<VirtualHost *:80>
+  ServerName @@API_HOSTNAME@@.@@ARVADOS_DOMAIN@@
+  ServerAdmin sysadmin@curoverse.com
+
+  RedirectPermanent / https://@@API_HOSTNAME@@.@@ARVADOS_DOMAIN@@/
+
+  LogLevel warn
+  ErrorLog  ${APACHE_LOG_DIR}/error.log
+  CustomLog ${APACHE_LOG_DIR}/access.log combined
+
+</VirtualHost>
+
+<VirtualHost *:443>
+  ServerName @@API_HOSTNAME@@.@@ARVADOS_DOMAIN@@
+  ServerAdmin sysadmin@curoverse.com
+
+  RailsEnv production
+  RackBaseURI /
+  RailsAppSpawnerIdleTime 1200
+
+  # Enable streaming
+  PassengerBufferResponse off
+
+  # Index file and Document Root (where the public files are located)
+  DirectoryIndex index.html
+  DocumentRoot /usr/src/arvados/services/api/public
+
+  LogLevel warn
+  ErrorLog  ${APACHE_LOG_DIR}/ssl_error.log
+  CustomLog ${APACHE_LOG_DIR}/ssl_access.log combined
+
+  <Directory /usr/src/arvados/services/api/public>
+    Options Indexes FollowSymLinks MultiViews IncludesNoExec
+    AllowOverride None
+    Order allow,deny
+    allow from all
+  </Directory>
+
+  <IfModule mod_ssl.c>
+    SSLEngine on
+    # SSLCertificateChainFile /etc/ssl/certs/startcom.sub.class1.server.ca.pem
+    # SSLCACertificateFile    /etc/ssl/certs/startcom.ca.pem
+    # SSLCertificateFile      /etc/ssl/certs/qr1hi.arvadosapi.com.crt.pem
+    # SSLCertificateKeyFile   /etc/ssl/private/qr1hi.arvadosapi.com.key.pem
+    SSLCertificateFile    /etc/ssl/certs/ssl-cert-snakeoil.pem
+    SSLCertificateKeyFile /etc/ssl/private/ssl-cert-snakeoil.key
+    SetEnvIf User-Agent ".*MSIE.*" nokeepalive ssl-unclean-shutdown
+  </IfModule>
+
+</VirtualHost>
diff --git a/docker/api/application.yml.in b/docker/api/application.yml.in
new file mode 100644 (file)
index 0000000..7f41985
--- /dev/null
@@ -0,0 +1,72 @@
+# Copy this file to application.yml and edit to suit.
+#
+# Consult application.default.yml for the full list of configuration
+# settings.
+#
+# The order of precedence is:
+# 1. config/environments/{RAILS_ENV}.rb (deprecated)
+# 2. Section in application.yml corresponding to RAILS_ENV (e.g., development)
+# 3. Section in application.yml called "common"
+# 4. Section in application.default.yml corresponding to RAILS_ENV
+# 5. Section in application.default.yml called "common"
+
+development:
+  # The blob_signing_key is a string of alphanumeric characters used
+  # to sign permission hints for Keep locators. It must be identical
+  # to the permission key given to Keep.  If you run both apiserver
+  # and Keep in development, change this to a hardcoded string and
+  # make sure both systems use the same value.
+  blob_signing_key: ~
+
+production:
+  # At minimum, you need a nice long randomly generated secret_token here.
+  # Use a long string of alphanumeric characters (at least 36).
+  secret_token: @@API_SECRET@@
+
+  # blob_signing_key is required and must be identical to the
+  # permission secret provisioned to Keep.
+  # Use a long string of alphanumeric characters (at least 36).
+  blob_signing_key: @@KEEP_SIGNING_SECRET@@
+
+  uuid_prefix: @@API_HOSTNAME@@
+
+  # The e-mail address of the user you would like to become marked as an admin
+  # user on their first login.
+  # In the default configuration, authentication happens through the Arvados SSO
+  # server, which uses openid against Google's servers, so in that case this
+  # should be an address associated with a Google account.
+  auto_admin_user: @@API_AUTO_ADMIN_USER@@
+
+  # compute_node_domain: example.org
+  # compute_node_nameservers:
+  #   - 127.0.0.1
+  #   - 192.168.1.1
+  #
+  # The version below is suitable for AWS.
+  # Uncomment and change <%# to <%= to use it.
+  # compute_node_nameservers: <%#
+  #   require 'net/http'
+  #   ['local', 'public'].collect do |iface|
+  #     Net::HTTP.get(URI("http://169.254.169.254/latest/meta-data/#{iface}-ipv4")).match(/^[\d\.]+$/)[0]
+  #   end << '172.16.0.23'
+  # %>
+  permit_create_collection_with_unsigned_manifest: true
+  git_repositories_dir: /home/git/repositories
+  crunch_job_wrapper: :slurm_immediate
+  action_mailer.raise_delivery_errors: false
+  action_mailer.perform_deliveries: false
+
+  workbench_address: @@API_WORKBENCH_ADDRESS@@
+
+  auto_setup_new_users: true
+
+  auto_admin_first_user: true
+
+test:
+  uuid_prefix: zzzzz
+  secret_token: <%= rand(2**512).to_s(36) %>
+
+common:
+  #git_repositories_dir: /var/cache/git
+  #git_internal_dir: /var/cache/arvados/internal.git
+
diff --git a/docker/api/apt.arvados.org.list b/docker/api/apt.arvados.org.list
new file mode 100644 (file)
index 0000000..7eb8716
--- /dev/null
@@ -0,0 +1,2 @@
+# apt.arvados.org
+deb http://apt.arvados.org/ wheezy main
diff --git a/docker/api/arvados-clients.yml.in b/docker/api/arvados-clients.yml.in
new file mode 100644 (file)
index 0000000..59ff352
--- /dev/null
@@ -0,0 +1,6 @@
+production:
+  gitolite_url: 'git@api:gitolite-admin.git'
+  gitolite_tmp: 'gitolite-tmp'
+  arvados_api_host: 'api'
+  arvados_api_token: '@@API_SUPERUSER_SECRET@@'
+  arvados_api_host_insecure: true
diff --git a/docker/api/config_databases.sh.in b/docker/api/config_databases.sh.in
new file mode 100755 (executable)
index 0000000..b548c21
--- /dev/null
@@ -0,0 +1,15 @@
+#! /bin/sh
+
+# Configure postgresql in a docker instance.
+
+/bin/su postgres -c '/usr/lib/postgresql/9.1/bin/postgres --single -D /var/lib/postgresql/9.1/main -c config_file=/etc/postgresql/9.1/main/postgresql.conf' <<EOF
+alter role postgres with encrypted password '@@POSTGRES_ROOT_PW@@';
+
+create user @@ARVADOS_DEV_USER@@ with encrypted password '@@ARVADOS_DEV_PW@@';
+create database @@ARVADOS_DEV_DB@@ with owner @@ARVADOS_DEV_USER@@;
+
+create user @@ARVADOS_TEST_USER@@ with createdb encrypted password '@@ARVADOS_TEST_PW@@';
+
+create user @@ARVADOS_PROD_USER@@ with encrypted password '@@ARVADOS_PROD_PW@@';
+create database @@ARVADOS_PROD_DB@@ with owner @@ARVADOS_PROD_USER@@;
+EOF
diff --git a/docker/api/crunch-dispatch-run.sh b/docker/api/crunch-dispatch-run.sh
new file mode 100755 (executable)
index 0000000..5103b1d
--- /dev/null
@@ -0,0 +1,24 @@
+#!/bin/bash
+set -e
+export PATH="$PATH":/usr/src/arvados/services/crunch
+export PERLLIB=/usr/src/arvados/sdk/perl/lib
+export ARVADOS_API_HOST=api
+export ARVADOS_API_HOST_INSECURE=yes
+export CRUNCH_DISPATCH_LOCKFILE=/var/lock/crunch-dispatch
+
+if [[ ! -e $CRUNCH_DISPATCH_LOCKFILE ]]; then
+  touch $CRUNCH_DISPATCH_LOCKFILE
+fi
+
+export CRUNCH_JOB_BIN=/usr/src/arvados/services/crunch/crunch-job
+export HOME=`pwd`
+fuser -TERM -k $CRUNCH_DISPATCH_LOCKFILE || true
+
+# Give the compute nodes some time to start up
+sleep 5
+
+cd /usr/src/arvados/services/api
+export RAILS_ENV=production
+/usr/local/rvm/bin/rvm-exec default bundle install
+exec /usr/local/rvm/bin/rvm-exec default bundle exec ./script/crunch-dispatch.rb 2>&1
+
diff --git a/docker/api/database.yml.in b/docker/api/database.yml.in
new file mode 100644 (file)
index 0000000..5990319
--- /dev/null
@@ -0,0 +1,25 @@
+development:
+  adapter: postgresql
+  encoding: utf8
+  database: @@ARVADOS_DEV_DB@@
+  username: @@ARVADOS_DEV_USER@@
+  password: @@ARVADOS_DEV_PW@@
+  host: localhost
+
+test:
+  adapter: postgresql
+  encoding: utf8
+  template: template0
+  database: @@ARVADOS_TEST_DB@@
+  username: @@ARVADOS_TEST_USER@@
+  password: @@ARVADOS_TEST_PW@@
+  host: localhost
+
+production:
+  adapter: postgresql
+  encoding: utf8
+  database: @@ARVADOS_PROD_DB@@
+  username: @@ARVADOS_PROD_USER@@
+  password: @@ARVADOS_PROD_PW@@
+  host: localhost
+
diff --git a/docker/api/keep_server_0.json b/docker/api/keep_server_0.json
new file mode 100644 (file)
index 0000000..ce02f50
--- /dev/null
@@ -0,0 +1,6 @@
+{
+  "service_host": "keep_server_0.keep.dev.arvados",
+  "service_port": 25107,
+  "service_ssl_flag": "false",
+  "service_type": "disk"
+}
diff --git a/docker/api/keep_server_1.json b/docker/api/keep_server_1.json
new file mode 100644 (file)
index 0000000..dbbdd1c
--- /dev/null
@@ -0,0 +1,7 @@
+{
+  "service_host": "keep_server_1.keep.dev.arvados",
+  "service_port": 25107,
+  "service_ssl_flag": "false",
+  "service_type": "disk"
+}
+
diff --git a/docker/api/munge.key b/docker/api/munge.key
new file mode 100644 (file)
index 0000000..34036a0
Binary files /dev/null and b/docker/api/munge.key differ
diff --git a/docker/api/omniauth.rb.in b/docker/api/omniauth.rb.in
new file mode 100644 (file)
index 0000000..198668e
--- /dev/null
@@ -0,0 +1,18 @@
+# Change this omniauth configuration to point to your registered provider
+# Since this is a registered application, add the app id and secret here
+APP_ID = '@@SSO_CLIENT_APP_ID@@'
+APP_SECRET = '@@SSO_CLIENT_SECRET@@'
+
+# Update your custom Omniauth provider URL here
+if '@@OMNIAUTH_URL@@' != ''
+  CUSTOM_PROVIDER_URL = '@@OMNIAUTH_URL@@'
+else
+  CUSTOM_PROVIDER_URL = 'https://' + ENV['SSO_PORT_443_TCP_ADDR'].to_s
+end
+
+# This is a development sandbox, we use self-signed certificates
+OpenSSL::SSL::VERIFY_PEER = OpenSSL::SSL::VERIFY_NONE
+
+Rails.application.config.middleware.use OmniAuth::Builder do
+  provider :josh_id, APP_ID, APP_SECRET, CUSTOM_PROVIDER_URL
+end
diff --git a/docker/api/setup-gitolite.sh.in b/docker/api/setup-gitolite.sh.in
new file mode 100755 (executable)
index 0000000..92014f9
--- /dev/null
@@ -0,0 +1,77 @@
+#!/bin/bash
+
+ssh-keygen -q -N '' -t rsa -f /root/.ssh/id_rsa
+
+useradd git
+mkdir /home/git
+
+# Set up gitolite repository
+cp ~root/.ssh/id_rsa.pub ~git/root-authorized_keys.pub
+chown git:git /home/git -R
+su - git -c "mkdir -p ~/bin"
+
+su - git -c "git clone git://github.com/sitaramc/gitolite"
+su - git -c "gitolite/install -ln ~/bin"
+su - git -c "PATH=/home/git/bin:$PATH gitolite setup -pk ~git/root-authorized_keys.pub"
+
+# Make sure the repositories are created in such a way that they are readable
+# by the api server
+sed -i 's/0077/0022/g' /home/git/.gitolite.rc
+
+# And make sure that the existing repos are equally readable, or the API server commit model will freak out...
+chmod 755 /home/git/repositories
+chmod +rx /home/git/repositories/*git -R
+
+# Now set up the gitolite repo(s) we use
+mkdir -p /usr/local/arvados/gitolite-tmp/
+# Make ssh store the host key
+ssh -o "StrictHostKeyChecking no" git@api info
+# Now check out the tree
+git clone git@api:gitolite-admin.git /usr/local/arvados/gitolite-tmp/gitolite-admin/
+cd /usr/local/arvados/gitolite-tmp/gitolite-admin
+mkdir keydir/arvados
+mkdir conf/admin
+mkdir conf/auto
+echo "
+
+@arvados_git_user = arvados_git_user
+
+repo @all
+     RW+                 = @arvados_git_user
+
+" > conf/admin/arvados.conf
+echo '
+include "auto/*.conf"
+include "admin/*.conf"
+' >> conf/gitolite.conf
+
+#su - git -c "ssh-keygen -t rsa"
+cp /root/.ssh/id_rsa.pub keydir/arvados/arvados_git_user.pub
+# Replace the 'root' key with the user key, just in case
+cp /root/.ssh/authorized_keys keydir/root-authorized_keys.pub
+# But also make sure we have the root key installed so it can access all keys
+git add keydir/root-authorized_keys.pub
+git add keydir/arvados/arvados_git_user.pub
+git add conf/admin/arvados.conf
+git add keydir/arvados/
+git add conf/gitolite.conf
+git commit -a -m 'git server setup'
+git push
+
+# Prepopulate the arvados.git repo with our source. Silly, but until we can check out from remote trees,
+# we need this to make the tutorials work.
+su - git -c "git clone --bare git://github.com/curoverse/arvados.git /home/git/repositories/arvados.git"
+
+echo "ARVADOS_API_HOST_INSECURE=yes" > /etc/cron.d/gitolite-update
+echo "*/2 * * * * root /bin/bash -c 'source /etc/profile.d/rvm.sh && /usr/local/arvados/update-gitolite.rb production'" >> /etc/cron.d/gitolite-update
+
+# Create/update the repos now
+. /etc/profile.d/rvm.sh
+export ARVADOS_API_HOST=api
+export ARVADOS_API_HOST_INSECURE=yes
+export ARVADOS_API_TOKEN=@@API_SUPERUSER_SECRET@@
+/usr/local/arvados/update-gitolite.rb production
+
+echo "PATH=/usr/bin:/bin:/sbin" > /etc/cron.d/arvados-repo-update
+echo "*/5 * * * * git cd ~git/repositories/arvados.git; git fetch https://github.com/curoverse/arvados.git master:master" >> /etc/cron.d/arvados-repo-update
+
diff --git a/docker/api/setup.sh.in b/docker/api/setup.sh.in
new file mode 100755 (executable)
index 0000000..7af6afb
--- /dev/null
@@ -0,0 +1,53 @@
+#!/bin/bash
+
+set -x
+
+. /etc/profile.d/rvm.sh
+
+export ARVADOS_API_HOST=api
+export ARVADOS_API_HOST_INSECURE=yes
+export ARVADOS_API_TOKEN=@@API_SUPERUSER_SECRET@@
+
+# Arvados repository object
+all_users_group_uuid="$prefix-j7d0g-fffffffffffffff"
+repo_uuid=`arv --format=uuid repository create --repository '{"name":"arvados","fetch_url":"git@api:arvados.git","push_url":"git@api:arvados.git"}'`
+echo "Arvados repository uuid is $repo_uuid"
+
+read -rd $'\000' newlink <<EOF; arv link create --link "$newlink"
+{
+ "tail_uuid":"$all_users_group_uuid",
+ "head_uuid":"$repo_uuid",
+ "link_class":"permission",
+ "name":"can_read"
+}
+EOF
+
+# Make sure the necessary keep_service objects exist
+arv keep_service list > /tmp/keep_service.list
+
+grep -q keep_server_0 /tmp/keep_service.list
+if [[ "$?" != "0" ]]; then
+  arv keep_service create --keep-service "$(cat /root/keep_server_0.json)"
+fi
+
+grep -q keep_server_1 /tmp/keep_service.list
+if [[ "$?" != "0" ]]; then
+  arv keep_service create --keep-service "$(cat /root/keep_server_1.json)"
+fi
+
+# User repository object
+user_uuid=`arv --format=uuid user current`
+repo_uuid=`arv --format=uuid repository create --repository '{"name":"@@ARVADOS_USER_NAME@@","fetch_url":"git@api:@@ARVADOS_USER_NAME@@.git","push_url":"git@api:@@ARVADOS_USER_NAME@@.git"}'`
+echo "User repository uuid is $repo_uuid"
+
+read -rd $'\000' newlink <<EOF; arv link create --link "$newlink"
+{
+ "tail_uuid":"$user_uuid",
+ "head_uuid":"$repo_uuid",
+ "link_class":"permission",
+ "name":"can_write"
+}
+EOF
+
+# Shell machine object
+arv virtual_machine create --virtual-machine '{"hostname":"shell"}'
diff --git a/docker/api/slurm.conf.in b/docker/api/slurm.conf.in
new file mode 100644 (file)
index 0000000..7312a0e
--- /dev/null
@@ -0,0 +1,60 @@
+
+ControlMachine=api
+#SlurmUser=slurmd
+SlurmctldPort=6817
+SlurmdPort=6818
+AuthType=auth/munge
+#JobCredentialPrivateKey=/etc/slurm-llnl/slurm-key.pem
+#JobCredentialPublicCertificate=/etc/slurm-llnl/slurm-cert.pem
+StateSaveLocation=/tmp
+SlurmdSpoolDir=/tmp/slurmd
+SwitchType=switch/none
+MpiDefault=none
+SlurmctldPidFile=/var/run/slurmctld.pid
+SlurmdPidFile=/var/run/slurmd.pid
+ProctrackType=proctrack/pgid
+CacheGroups=0
+ReturnToService=2
+TaskPlugin=task/affinity
+#
+# TIMERS
+SlurmctldTimeout=300
+SlurmdTimeout=300
+InactiveLimit=0
+MinJobAge=300
+KillWait=30
+Waittime=0
+#
+# SCHEDULING
+SchedulerType=sched/backfill
+#SchedulerType=sched/builtin
+SchedulerPort=7321
+#SchedulerRootFilter=
+#SelectType=select/linear
+SelectType=select/cons_res
+SelectTypeParameters=CR_CPU_Memory
+FastSchedule=1
+#
+# LOGGING
+SlurmctldDebug=3
+#SlurmctldLogFile=
+SlurmdDebug=3
+#SlurmdLogFile=
+JobCompType=jobcomp/none
+#JobCompLoc=
+JobAcctGatherType=jobacct_gather/none
+#JobAcctLogfile=
+#JobAcctFrequency=
+#
+# COMPUTE NODES
+NodeName=DEFAULT
+# CPUs=8 State=UNKNOWN RealMemory=6967 Weight=6967
+PartitionName=DEFAULT MaxTime=INFINITE State=UP
+PartitionName=compute Default=YES Shared=yes
+#PartitionName=sysadmin Hidden=YES Shared=yes
+
+NodeName=compute[0-1]
+#NodeName=compute0 RealMemory=6967 Weight=6967
+
+PartitionName=compute Nodes=compute[0-1]
+PartitionName=crypto Nodes=compute[0-1]
diff --git a/docker/api/superuser_token.in b/docker/api/superuser_token.in
new file mode 100644 (file)
index 0000000..49bb34e
--- /dev/null
@@ -0,0 +1 @@
+@@API_SUPERUSER_SECRET@@
diff --git a/docker/api/supervisor.conf b/docker/api/supervisor.conf
new file mode 100644 (file)
index 0000000..b01dc1c
--- /dev/null
@@ -0,0 +1,44 @@
+[program:ssh]
+user=root
+command=/etc/init.d/ssh start
+startsecs=0
+
+[program:postgres]
+user=postgres
+command=/usr/lib/postgresql/9.1/bin/postgres -D /var/lib/postgresql/9.1/main -c config_file=/etc/postgresql/9.1/main/postgresql.conf
+autorestart=true
+
+[program:apache2]
+command=/etc/apache2/foreground.sh
+stopsignal=6
+autorestart=true
+
+[program:munge]
+user=root
+command=/etc/init.d/munge start
+startsecs=0
+
+[program:slurm]
+user=root
+command=/etc/init.d/slurm-llnl start
+startsecs=0
+
+[program:cron]
+user=root
+command=/etc/init.d/cron start
+startsecs=0
+
+[program:setup]
+user=root
+command=/usr/local/bin/setup.sh
+startsecs=0
+
+[program:setup-gitolite]
+user=root
+command=/usr/local/bin/setup-gitolite.sh
+startsecs=0
+
+[program:crunch-dispatch]
+user=root
+command=/usr/local/bin/crunch-dispatch-run.sh
+autorestart=true
diff --git a/docker/api/update-gitolite.rb b/docker/api/update-gitolite.rb
new file mode 100755 (executable)
index 0000000..2c46a0d
--- /dev/null
@@ -0,0 +1,168 @@
+#!/usr/bin/env ruby
+
+require 'rubygems'
+require 'pp'
+require 'arvados'
+require 'active_support/all'
+require 'yaml'
+
+# This script does the actual gitolite config management on disk.
+#
+# Ward Vandewege <ward@curoverse.com>
+
+# Default is development
+production = ARGV[0] == "production"
+
+ENV["RAILS_ENV"] = "development"
+ENV["RAILS_ENV"] = "production" if production
+
+DEBUG = 1
+
+# load and merge in the environment-specific application config info
+# if present, overriding base config parameters as specified
+path = File.dirname(__FILE__) + '/config/arvados-clients.yml'
+if File.exists?(path) then
+  cp_config = YAML.load_file(path)[ENV['RAILS_ENV']]
+else
+  puts "Please create a\n " + File.dirname(__FILE__) + "/config/arvados-clients.yml\n file"
+  exit 1
+end
+
+gitolite_url = cp_config['gitolite_url']
+gitolite_tmp = cp_config['gitolite_tmp']
+
+gitolite_admin = File.join(File.expand_path(File.dirname(__FILE__)) + '/' + gitolite_tmp + '/gitolite-admin')
+
+ENV['ARVADOS_API_HOST'] = cp_config['arvados_api_host']
+ENV['ARVADOS_API_TOKEN'] = cp_config['arvados_api_token']
+if cp_config['arvados_api_host_insecure']
+  ENV['ARVADOS_API_HOST_INSECURE'] = 'true'
+else
+  ENV.delete('ARVADOS_API_HOST_INSECURE')
+end
+
+keys = ''
+
+seen = Hash.new
+
+def ensure_repo(name,permissions,user_keys,gitolite_admin)
+  tmp = ''
+  # Just in case...
+  name.gsub!(/[^a-z0-9]/i,'')
+
+  keys = Hash.new()
+
+  user_keys.each do |uuid,p|
+    p.each do |k|
+      next if k[:public_key].nil?
+      keys[uuid] = Array.new() if not keys.key?(uuid)
+
+      key = k[:public_key]
+      # Handle putty-style ssh public keys
+      key.sub!(/^(Comment: "r[^\n]*\n)(.*)$/m,'ssh-rsa \2 \1')
+      key.sub!(/^(Comment: "d[^\n]*\n)(.*)$/m,'ssh-dss \2 \1')
+      key.gsub!(/\n/,'')
+      key.strip
+
+      keys[uuid].push(key)
+    end
+  end
+
+  cf = gitolite_admin + '/conf/auto/' + name + '.conf'
+
+  conf = "\nrepo #{name}\n"
+
+  commit = false
+
+  seen = {}
+  permissions.sort.each do |uuid,v|
+    conf += "\t#{v[:gitolite_permissions]}\t= #{uuid.to_s}\n"
+
+    count = 0
+    keys.include?(uuid) and keys[uuid].each do |v|
+      kf = gitolite_admin + '/keydir/arvados/' + uuid.to_s + "@#{count}.pub"
+      seen[kf] = true
+      if !File.exists?(kf) or IO::read(kf) != v then
+        commit = true
+        f = File.new(kf + ".tmp",'w')
+        f.write(v)
+        f.close()
+        # File.rename will overwrite the destination file if it exists
+        File.rename(kf + ".tmp",kf);
+      end
+      count += 1
+    end
+  end
+
+  if !File.exists?(cf) or IO::read(cf) != conf then
+    commit = true
+    f = File.new(cf + ".tmp",'w')
+    f.write(conf)
+    f.close()
+    # this is about as atomic as we can make the replacement of the file...
+    File.unlink(cf) if File.exists?(cf)
+    File.rename(cf + ".tmp",cf);
+  end
+
+  return commit,seen
+end
+
+begin
+
+  pwd = Dir.pwd
+  # Get our local gitolite-admin repo up to snuff
+  if not File.exists?(File.dirname(__FILE__) + '/' + gitolite_tmp) then
+    Dir.mkdir(File.join(File.dirname(__FILE__) + '/' + gitolite_tmp), 0700)
+  end
+  if not File.exists?(gitolite_admin) then
+    Dir.chdir(File.join(File.dirname(__FILE__) + '/' + gitolite_tmp))
+    `git clone #{gitolite_url}`
+  else
+    Dir.chdir(gitolite_admin)
+    `git pull`
+  end
+  Dir.chdir(pwd)
+
+  arv = Arvados.new( { :suppress_ssl_warnings => false } )
+
+  permissions = arv.repository.get_all_permissions
+
+  repos = permissions[:repositories]
+  user_keys = permissions[:user_keys]
+
+  @commit = false
+
+  @seen = {}
+
+  repos.each do |r|
+    next if r[:name].nil?
+    (@c,@s) = ensure_repo(r[:name],r[:user_permissions],user_keys,gitolite_admin)
+    @seen.merge!(@s)
+    @commit = true if @c
+  end
+
+  # Clean up public key files that should not be present
+  Dir.glob(gitolite_admin + '/keydir/arvados/*.pub') do |key_file|
+    next if key_file =~ /arvados_git_user.pub$/
+    next if @seen.has_key?(key_file)
+    puts "Extra file #{key_file}"
+    @commit = true
+    Dir.chdir(gitolite_admin)
+    key_file.gsub!(/^#{gitolite_admin}\//,'')
+    `git rm #{key_file}`
+  end
+
+  if @commit then
+    message = "#{Time.now().to_s}: update from API"
+    Dir.chdir(gitolite_admin)
+    `git add --all`
+    `git commit -m '#{message}'`
+    `git push`
+  end
+
+rescue Exception => bang
+  puts "Error: " + bang.to_s
+  puts bang.backtrace.join("\n")
+  exit 1
+end
+
diff --git a/docker/arvdock b/docker/arvdock
new file mode 100755 (executable)
index 0000000..142ba27
--- /dev/null
@@ -0,0 +1,447 @@
+#!/bin/bash
+
+DOCKER=`which docker.io`
+
+if [[ "$DOCKER" == "" ]]; then
+    DOCKER=`which docker`
+fi
+
+COMPUTE_COUNTER=0
+
+function usage {
+    echo >&2
+    echo >&2 "usage: $0 (start|stop|restart|test) [options]"
+    echo >&2
+    echo >&2 "$0 start/stop/restart options:"
+    echo >&2 "  -d[port], --doc[=port]        Documentation server (default port 9898)"
+    echo >&2 "  -w[port], --workbench[=port]  Workbench server (default port 9899)"
+    echo >&2 "  -s[port], --sso[=port]        SSO server (default port 9901)"
+    echo >&2 "  -a[port], --api[=port]        API server (default port 9900)"
+    echo >&2 "  -c, --compute                 Compute nodes (starts 2)"
+    echo >&2 "  -v, --vm                      Shell server"
+    echo >&2 "  -n, --nameserver              Nameserver"
+    echo >&2 "  -k, --keep                    Keep servers"
+    echo >&2 "  -h, --help                    Display this help and exit"
+    echo >&2
+    echo >&2 "  If no options are given, the action is applied to all servers."
+    echo >&2
+    echo >&2 "$0 test [testname] [testname] ..."
+    echo >&2 "  By default, all tests are run."
+}
+
+function ip_address {
+    local container=$1
+    echo `$DOCKER inspect $container  |grep IPAddress |cut -f4 -d\"`
+}
+
+function start_container {
+    local args="-d -i -t"
+    if [[ "$1" != '' ]]; then
+      local port="$1"
+      args="$args -p $port"
+    fi
+    if [[ "$2" != '' ]]; then
+      local name="$2"
+      if [[ "$name" == "api_server" ]]; then
+        args="$args --dns=172.17.42.1 --dns-search=compute.dev.arvados --hostname api -P --name $name"
+      elif [[ "$name" == "compute" ]]; then
+        name=$name$COMPUTE_COUNTER
+        # We need --privileged because we run docker-inside-docker on the compute nodes
+        args="$args --dns=172.17.42.1 --dns-search=compute.dev.arvados --hostname compute$COMPUTE_COUNTER -P --privileged --name $name"
+        let COMPUTE_COUNTER=$(($COMPUTE_COUNTER + 1))
+      else
+        args="$args --dns=172.17.42.1 --dns-search=dev.arvados --hostname ${name#_server} --name $name"
+      fi
+    fi
+    if [[ "$3" != '' ]]; then
+      local volume="$3"
+      args="$args -v $volume"
+    fi
+    if [[ "$4" != '' ]]; then
+      local link="$4"
+      args="$args --link $link"
+    fi
+    local image=$5
+
+    `$DOCKER ps |grep -P "$name[^/]" -q`
+    if [[ "$?" == "0" ]]; then
+      echo "You have a running container with name $name -- skipping."
+      return
+    fi
+
+    # Remove any existing container by this name.
+    $DOCKER rm "$name" 2>/dev/null
+
+    echo "Starting container:"
+    #echo "  $DOCKER run --dns=127.0.0.1 $args $image"
+    echo "  $DOCKER run $args $image"
+    container=`$DOCKER run $args $image`
+    if [ "$?" != "0" -o "$container" = "" ]; then
+      echo "Unable to start container"
+      exit 1
+    else
+      echo "Started container: $container"
+    fi
+
+    if [[ "$name" == "doc_server" ]]; then
+      echo
+      echo "*****************************************************************"
+      echo "You can access the Arvados documentation at http://localhost:${port%:*}"
+      echo "*****************************************************************"
+      echo
+    fi
+
+    if [[ "$name" == "workbench_server" ]]; then
+      echo
+      echo "*****************************************************************"
+      echo "You can access the Arvados workbench at http://localhost:${port%:*}"
+      echo "*****************************************************************"
+      echo
+   fi
+
+
+}
+
+declare -a keep_volumes
+
+# Initialize the global `keep_volumes' array. If any keep volumes
+# already appear to exist (mounted volumes with a top-level "keep"
+# directory), use them; create temporary volumes if necessary.
+#
+function make_keep_volumes () {
+    # Mount a keep volume if we don't already have one
+    for mountpoint in $(cut -d ' ' -f 2 /proc/mounts); do
+      if [[ -d "$mountpoint/keep" && "$mountpoint" != "/" ]]; then
+        keep_volumes+=($mountpoint)
+      fi
+    done
+
+    # Create any keep volumes that do not yet exist.
+    while [ ${#keep_volumes[*]} -lt 2 ]
+    do
+        new_keep=$(mktemp -d)
+        echo >&2 "mounting 2G tmpfs keep volume in $new_keep"
+        sudo mount -t tmpfs -o size=2G tmpfs $new_keep
+        mkdir $new_keep/keep
+        keep_volumes+=($new_keep)
+    done
+}
+
+function do_start {
+    local start_doc=false
+    local start_sso=false
+    local start_api=false
+    local start_compute=false
+    local start_workbench=false
+    local start_vm=false
+    local start_nameserver=false
+    local start_keep=false
+
+    # NOTE: This requires GNU getopt (part of the util-linux package on Debian-based distros).
+    local TEMP=`getopt -o d::s::a::cw::nkvh \
+                  --long doc::,sso::,api::,compute,workbench::,nameserver,keep,vm,help \
+                  -n "$0" -- "$@"`
+
+    if [ $? != 0 ] ; then echo "Use -h for help"; exit 1 ; fi
+
+    # Note the quotes around `$TEMP': they are essential!
+    eval set -- "$TEMP"
+
+    while [ $# -ge 1 ]
+    do
+        case $1 in
+            -d | --doc)
+                case "$2" in
+                    "") start_doc=9898; shift 2 ;;
+                    *)  start_doc=$2; shift 2 ;;
+                esac
+                ;;
+            -s | --sso)
+                case "$2" in
+                    "") start_sso=9901; shift 2 ;;
+                    *)  start_sso=$2; shift 2 ;;
+                esac
+                ;;
+            -a | --api)
+                case "$2" in
+                    "") start_api=9900; shift 2 ;;
+                    *)  start_api=$2; shift 2 ;;
+                esac
+                ;;
+            -c | --compute)
+                start_compute=2
+                shift
+                ;;
+            -w | --workbench)
+                case "$2" in
+                    "") start_workbench=9899; shift 2 ;;
+                    *)  start_workbench=$2; shift 2 ;;
+                esac
+                ;;
+            -v | --vm)
+                start_vm=true
+                shift
+                ;;
+            -n | --nameserver)
+                start_nameserver=true
+                shift
+                ;;
+            -k | --keep)
+                start_keep=true
+                shift
+                ;;
+            --)
+                shift
+                break
+                ;;
+            *)
+                usage
+                exit 1
+                ;;
+        esac
+    done
+
+    # If no options were selected, then start all servers.
+    if [[ $start_doc == false &&
+          $start_sso == false &&
+          $start_api == false &&
+          $start_compute == false &&
+          $start_workbench == false &&
+          $start_vm == false &&
+          $start_nameserver == false &&
+          $start_keep == false ]]
+    then
+        start_doc=9898
+        #the sso server is currently not used by default so don't start it unless explicitly requested
+        #start_sso=9901
+        start_api=9900
+        start_compute=2
+        start_workbench=9899
+        start_vm=true
+        start_nameserver=true
+        start_keep=true
+    fi
+
+    if [[ $start_sso != false ]]
+    then
+        start_container "$start_sso:443" "sso_server" '' '' "arvados/sso"
+    fi
+
+    if [[ $start_api != false ]]
+    then
+      if [[ $start_sso != false ]]; then
+        start_container "$start_api:443" "api_server" '' "sso_server:sso" "arvados/api"
+      else
+        start_container "$start_api:443" "api_server" '' '' "arvados/api"
+      fi
+    fi
+
+    if [[ $start_nameserver != false ]]
+    then
+      # We rely on skydock and skydns for dns discovery between the slurm controller and compute nodes,
+      # so make sure they are running
+      $DOCKER ps | grep skydns >/dev/null
+      if [[ "$?" != "0" ]]; then
+        echo "Starting crosbymichael/skydns container..."
+        $DOCKER rm "skydns" 2>/dev/null
+        $DOCKER run -d -p 172.17.42.1:53:53/udp --name skydns crosbymichael/skydns -nameserver 8.8.8.8:53 -domain arvados
+      fi
+      $DOCKER ps | grep skydock >/dev/null
+      if [[ "$?" != "0" ]]; then
+        echo "Starting crosbymichael/skydock container..."
+        $DOCKER rm "skydock" 2>/dev/null
+        $DOCKER run -d -v /var/run/docker.sock:/docker.sock --name skydock crosbymichael/skydock -ttl 30 -environment dev -s /docker.sock -domain arvados -name skydns
+      fi
+    fi
+
+    if [[ $start_compute != false ]]
+    then
+        for i in `seq 0 $(($start_compute - 1))`; do
+          start_container "" "compute" '' "api_server:api" "arvados/compute"
+        done
+    fi
+
+    if [[ $start_keep != false ]]
+    then
+        # create `keep_volumes' array with a list of keep mount points
+        # remove any stale metadata from those volumes before starting them
+        make_keep_volumes
+        for v in ${keep_volumes[*]}
+        do
+            [ -f $v/keep/.metadata.yml ] && sudo rm $v/keep/.metadata.yml
+        done
+        start_container "25107:25107" "keep_server_0" \
+            "${keep_volumes[0]}:/keep-data" \
+            "api_server:api" \
+            "arvados/keep"
+        start_container "25108:25107" "keep_server_1" \
+            "${keep_volumes[1]}:/keep-data" \
+            "api_server:api" \
+            "arvados/keep"
+    fi
+
+    if [[ $start_doc != false ]]
+    then
+        start_container "$start_doc:80" "doc_server" '' '' "arvados/doc"
+    fi
+
+    if [[ $start_vm != false ]]
+    then
+        start_container "" "shell" '' "api_server:api" "arvados/shell"
+    fi
+
+    if [[ $start_workbench != false ]]
+    then
+        start_container "$start_workbench:80" "workbench_server" '' "api_server:api" "arvados/workbench"
+    fi
+
+    if [[ $start_api != false ]]
+    then
+        if [[ -f "api/generated/superuser_token" ]]
+        then
+          if [ -d $HOME/.config/arvados ] || mkdir -p $HOME/.config/arvados
+          then
+            cat >$HOME/.config/arvados/settings.conf <<EOF
+ARVADOS_API_HOST=$(ip_address "api_server")
+ARVADOS_API_HOST_INSECURE=yes
+ARVADOS_API_TOKEN=$(cat api/generated/superuser_token)
+EOF
+          fi
+        fi
+    fi
+}
+
+function do_stop {
+    local stop_doc=""
+    local stop_sso=""
+    local stop_api=""
+    local stop_compute=""
+    local stop_workbench=""
+    local stop_nameserver=""
+    local stop_vm=""
+    local stop_keep=""
+
+    # NOTE: This requires GNU getopt (part of the util-linux package on Debian-based distros).
+    local TEMP=`getopt -o dsacwnkvh \
+                  --long doc,sso,api,compute,workbench,nameserver,keep,vm,help \
+                  -n "$0" -- "$@"`
+
+    if [ $? != 0 ] ; then echo "Use -h for help"; exit 1 ; fi
+
+    # Note the quotes around `$TEMP': they are essential!
+    eval set -- "$TEMP"
+
+    while [ $# -ge 1 ]
+    do
+        case $1 in
+            -d | --doc)
+                stop_doc=doc_server ; shift ;;
+            -s | --sso)
+                stop_sso=sso_server ; shift ;;
+            -a | --api)
+                stop_api=api_server ; shift ;;
+            -c | --compute)
+                stop_compute=`$DOCKER ps |grep -P "compute\d+" |grep -v api_server |cut -f1 -d ' '` ; shift ;;
+            -w | --workbench)
+                stop_workbench=workbench_server ; shift ;;
+            -n | --nameserver )
+                stop_nameserver="skydock skydns" ; shift ;;
+            -v | --vm )
+                stop_vm="shell" ; shift ;;
+            -k | --keep )
+                stop_keep="keep_server_0 keep_server_1" ; shift ;;
+            --)
+                shift
+                break
+                ;;
+            *)
+                usage
+                exit 1
+                ;;
+        esac
+    done
+
+    # If no options were selected, then stop all servers.
+    if [[ $stop_doc == "" &&
+          $stop_sso == "" &&
+          $stop_api == "" &&
+          $stop_compute == "" &&
+          $stop_workbench == "" &&
+          $stop_vm == "" &&
+          $stop_nameserver == "" &&
+          $stop_keep == "" ]]
+    then
+        stop_doc=doc_server
+        stop_sso=sso_server
+        stop_api=api_server
+        stop_compute=`$DOCKER ps |grep -P "compute\d+" |grep -v api_server |cut -f1 -d ' '`
+        stop_workbench=workbench_server
+        stop_vm=shell
+        stop_nameserver="skydock skydns"
+        stop_keep="keep_server_0 keep_server_1"
+    fi
+
+    $DOCKER stop $stop_doc $stop_sso $stop_api $stop_compute $stop_workbench $stop_nameserver $stop_keep $stop_vm \
+        2>/dev/null
+}
+
+function do_test {
+    local alltests
+    if [ $# -lt 1 ]
+    then
+        alltests="python-sdk api"
+    else
+        alltests="$@"
+    fi
+
+    for testname in $alltests
+    do
+        echo "testing $testname..."
+        case $testname in
+            python-sdk)
+                do_start --api --keep --sso
+                export ARVADOS_API_HOST=$(ip_address "api_server")
+                export ARVADOS_API_HOST_INSECURE=yes
+                export ARVADOS_API_TOKEN=$(cat api/generated/superuser_token)
+                python -m unittest discover ../sdk/python
+                ;;
+            api)
+                $DOCKER run -t -i arvados/api \
+                    /usr/src/arvados/services/api/script/rake_test.sh
+                ;;
+            *)
+                echo >&2 "unknown test $testname"
+                ;;
+        esac
+    done
+}
+
+if [ $# -lt 1 ]
+then
+  usage
+  exit 1
+fi
+
+case $1 in
+    start)
+        shift
+        do_start $@
+        ;;
+    stop)
+        shift
+        do_stop $@
+        ;;
+    restart)
+        shift
+        do_stop $@
+        do_start $@
+        ;;
+    test)
+        shift
+        do_test $@
+        ;;
+    *)
+        usage
+        exit 1
+        ;;
+esac
diff --git a/docker/base/Dockerfile b/docker/base/Dockerfile
new file mode 100644 (file)
index 0000000..c4b744b
--- /dev/null
@@ -0,0 +1,37 @@
+# Arvados base image (wheezy+rvm+Arvados source) in Docker
+
+# Based on Debian Wheezy
+FROM arvados/debian:wheezy
+MAINTAINER Tim Pierce <twp@curoverse.com>
+
+ENV DEBIAN_FRONTEND noninteractive
+
+# Install prerequisite packages for Arvados
+#   * git, curl, rvm
+#   * Arvados source code in /usr/src/arvados, for preseeding gem installation
+
+ADD apt.arvados.org.list /etc/apt/sources.list.d/
+RUN apt-key adv --keyserver pool.sks-keyservers.net --recv 1078ECD7
+RUN apt-get update -qq
+
+RUN apt-get install -qqy apt-utils git curl \
+             libcurl3 libcurl3-gnutls libcurl4-openssl-dev locales \
+             postgresql-server-dev-9.1 python-arvados-python-client
+
+RUN gpg --keyserver pool.sks-keyservers.net --recv-keys D39DC0E3 && \
+    /bin/sed -ri 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \
+    /usr/sbin/locale-gen && \
+    curl -L https://get.rvm.io | bash -s stable && \
+    /usr/local/rvm/bin/rvm install 2.1 && \
+    /usr/local/rvm/bin/rvm alias create default ruby-2.1 && \
+    /bin/mkdir -p /usr/src/arvados
+
+ADD generated/arvados.tar.gz /usr/src/arvados/
+
+# Update gem. This (hopefully) fixes
+# https://github.com/rubygems/rubygems.org/issues/613.
+RUN /usr/local/rvm/bin/rvm-exec default gem update --system && \
+    /usr/local/rvm/bin/rvm-exec default gem install bundler && \
+    /usr/local/rvm/bin/rvm-exec default bundle install --gemfile=/usr/src/arvados/apps/workbench/Gemfile && \
+    /usr/local/rvm/bin/rvm-exec default bundle install --gemfile=/usr/src/arvados/services/api/Gemfile && \
+    /usr/local/rvm/bin/rvm-exec default bundle install --gemfile=/usr/src/arvados/doc/Gemfile
diff --git a/docker/base/apt.arvados.org.list b/docker/base/apt.arvados.org.list
new file mode 100644 (file)
index 0000000..7eb8716
--- /dev/null
@@ -0,0 +1,2 @@
+# apt.arvados.org
+deb http://apt.arvados.org/ wheezy main
diff --git a/docker/bcbio-nextgen/Dockerfile b/docker/bcbio-nextgen/Dockerfile
new file mode 100644 (file)
index 0000000..8f6e774
--- /dev/null
@@ -0,0 +1,47 @@
+# Install Arvados SDK into bcbio-nextgen Docker image.
+#
+# To build bcbio-nextgen:
+#
+# $ git clone https://github.com/chapmanb/bcbio-nextgen.git
+# $ cd bcbio-nextgen
+# $ docker build
+# $ docker tag <image> bcbio-nextgen
+#
+
+FROM bcbio-nextgen
+MAINTAINER Peter Amstutz <peter.amstutz@curoverse.com>
+
+USER root
+
+# Install Ruby 2.1.0
+RUN apt-get remove --quiet --assume-yes ruby && \
+    curl -L https://get.rvm.io | bash -s stable && \
+    /usr/local/rvm/bin/rvm install 2.1.0 && \
+    /bin/mkdir -p /usr/src/arvados
+
+ADD generated/arvados.tar.gz /usr/src/arvados/
+ENV GEM_HOME /usr/local/rvm/gems/ruby-2.1.0
+ENV GEM_PATH /usr/local/rvm/gems/ruby-2.1.0:/usr/local/rvm/gems/ruby-2.1.0@global
+ENV PATH /usr/local/rvm/gems/ruby-2.1.0/bin:/usr/local/rvm/gems/ruby-2.1.0@global/bin:/usr/local/rvm/rubies/ruby-2.1.0/bin:/usr/local/rvm/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+
+# Install dependencies and set up system.
+# The FUSE packages help ensure that we can install the Python SDK (arv-mount).
+RUN /usr/bin/apt-get update && \
+    /usr/bin/apt-get install --quiet --assume-yes python-dev python-llfuse python-pip \
+      libio-socket-ssl-perl libjson-perl liburi-perl libwww-perl \
+      fuse libattr1-dev libfuse-dev && \
+    /usr/sbin/adduser --disabled-password \
+      --gecos 'Crunch execution user' crunch && \
+    /usr/bin/install --directory --owner=crunch --group=crunch --mode=0700 /keep /tmp/crunch-src /tmp/crunch-job && \
+    /bin/ln -s /usr/src/arvados /usr/local/src/arvados
+
+# Install Arvados packages.
+RUN gem update --system && \
+    find /usr/src/arvados/sdk -name '*.gem' -print0 | \
+      xargs -0rn 1 gem install && \
+    cd /usr/src/arvados/services/fuse && \
+    python setup.py install && \
+    cd /usr/src/arvados/sdk/python && \
+    python setup.py install
+
+USER crunch
diff --git a/docker/build.sh b/docker/build.sh
new file mode 100755 (executable)
index 0000000..77aeb1f
--- /dev/null
@@ -0,0 +1,41 @@
+#! /bin/bash
+
+# make sure a Ruby version greater than or equal to 1.9.3 is installed before proceeding
+if ! ruby -e 'exit RUBY_VERSION >= "1.9.3"' 2>/dev/null
+then
+    echo "Building the Arvados docker containers requires at least Ruby 1.9.3."
+    echo "Please install ruby 1.9.3 or higher before executing this script."
+    exit 1
+fi
+
+function usage {
+    echo >&2
+    echo >&2 "usage: $0 [options]"
+    echo >&2
+    echo >&2 "Calling $0 without arguments will build all Arvados docker images"
+    echo >&2
+    echo >&2 "$0 options:"
+    echo >&2 "  -h, --help   Print this help text"
+    echo >&2 "  clean        Clear all build information"
+    echo >&2 "  realclean    clean and remove all Arvados Docker images except arvados/debian"
+    echo >&2 "  deepclean    realclean and remove arvados/debian, crosbymichael/skydock and "
+    echo >&2 "               crosbymichael/skydns Docker images"
+    echo >&2
+}
+
+if [ "$1" = '-h' ] || [ "$1" = '--help' ]; then
+  usage
+  exit 1
+fi
+
+build_tools/build.rb
+
+if [[ "$?" == "0" ]]; then
+    DOCKER=`which docker.io`
+
+    if [[ "$DOCKER" == "" ]]; then
+      DOCKER=`which docker`
+    fi
+
+    DOCKER=$DOCKER /usr/bin/make -f build_tools/Makefile $*
+fi
diff --git a/docker/build_tools/Makefile b/docker/build_tools/Makefile
new file mode 100644 (file)
index 0000000..d92349c
--- /dev/null
@@ -0,0 +1,244 @@
+# This is the 'shell hack'. Call make with DUMP=1 to see the effect.
+ifdef DUMP
+OLD_SHELL := $(SHELL)
+SHELL = $(warning [$@])$(OLD_SHELL) -x
+endif
+
+all: skydns-image skydock-image api-image compute-image doc-image workbench-image keep-image sso-image shell-image
+
+IMAGE_FILES := $(shell ls *-image 2>/dev/null |grep -v -E 'debian-arvados-image|skydns-image|skydock-image')
+GENERATED_DIRS := $(shell ls */generated 2>/dev/null)
+
+# `make clean' removes the files generated in the build directory
+# but does not remove any docker images generated in previous builds
+clean:
+       @echo "make clean"
+       -@rm -rf build
+       +@[ "$(IMAGE_FILES)" = "" ] || rm -f $(IMAGE_FILES) 2>/dev/null
+       +@[ "$(GENERATED_DIRS)" = "" ] || rm -rf */generated 2>/dev/null
+
+DEBIAN_IMAGE := $(shell $(DOCKER) images -q arvados/debian |head -n1)
+
+REALCLEAN_CONTAINERS := $(shell $(DOCKER) ps -a |grep -e arvados -e api_server -e keep_server -e doc_server -e workbench_server |cut -f 1 -d' ')
+REALCLEAN_IMAGES := $(shell $(DOCKER) images -q arvados/* |grep -v $(DEBIAN_IMAGE) 2>/dev/null)
+DEEPCLEAN_IMAGES := $(shell $(DOCKER) images -q arvados/*)
+SKYDNS_CONTAINERS := $(shell $(DOCKER) ps -a |grep -e crosbymichael/skydns -e crosbymichael/skydock |cut -f 1 -d' ')
+SKYDNS_IMAGES := $(shell $(DOCKER) images -q crosbymichael/skyd*)
+
+# `make realclean' will also remove the Arvados docker images (but not the
+# arvados/debian image) and force subsequent makes to build the entire chain
+# from the ground up
+realclean: clean
+       @echo "make realclean"
+       +@[ "`$(DOCKER) ps -q`" = '' ] || $(DOCKER) stop `$(DOCKER) ps -q`
+       +@[ "$(REALCLEAN_CONTAINERS)" = '' ] || $(DOCKER) rm $(REALCLEAN_CONTAINERS)
+       +@[ "$(REALCLEAN_IMAGES)" = '' ] || $(DOCKER) rmi $(REALCLEAN_IMAGES)
+
+# `make deepclean' will remove all Arvados docker images and the skydns/skydock
+# images and force subsequent makes to build the entire chain from the ground up
+deepclean: clean
+       @echo "make deepclean"
+       -@rm -f debian-arvados-image 2>/dev/null
+       -@rm -f skydns-image skydock-image 2>/dev/null
+       +@[ "`$(DOCKER) ps -q`" = '' ] || $(DOCKER) stop `$(DOCKER) ps -q`
+       +@[ "$(REALCLEAN_CONTAINERS)" = '' ] || $(DOCKER) rm $(REALCLEAN_CONTAINERS)
+       +@[ "$(DEEPCLEAN_IMAGES)" = '' ] || $(DOCKER) rmi $(DEEPCLEAN_IMAGES)
+       +@[ "$(SKYDNS_CONTAINERS)" = '' ] || $(DOCKER) rm $(SKYDNS_CONTAINERS)
+       +@[ "$(SKYDNS_IMAGES)" = '' ] || $(DOCKER) rmi $(SKYDNS_IMAGES)
+
+# ============================================================
+# Dependencies for */generated files which are prerequisites
+# for building docker images.
+
+CONFIG_RB = build_tools/config.rb
+
+BUILD = build/.buildstamp
+
+BASE_DEPS = base/Dockerfile config.yml $(BASE_GENERATED)
+
+SLURM_DEPS = slurm/Dockerfile config.yml $(SLURM_GENERATED)
+
+JOBS_DEPS = jobs/Dockerfile
+
+JAVA_BWA_SAMTOOLS_DEPS = java-bwa-samtools/Dockerfile
+
+API_DEPS = api/* config.yml $(API_GENERATED)
+
+SHELL_DEPS = shell/* config.yml $(SHELL_GENERATED)
+
+COMPUTE_DEPS = compute/* config.yml $(COMPUTE_GENERATED)
+
+DOC_DEPS = doc/Dockerfile doc/apache2_vhost
+
+WORKBENCH_DEPS = workbench/Dockerfile \
+                 config.yml \
+                 $(WORKBENCH_GENERATED)
+
+KEEP_DEPS = keep/Dockerfile config.yml $(KEEP_GENERATED)
+
+SSO_DEPS = config.yml $(SSO_GENERATED)
+
+BCBIO_NEXTGEN_DEPS = bcbio-nextgen/Dockerfile
+
+BASE_GENERATED = base/generated/arvados.tar.gz
+
+COMPUTE_GENERATED_IN   = compute/*.in
+COMPUTE_GENERATED      = compute/generated/*
+
+KEEP_GENERATED_IN      = keep/*.in
+KEEP_GENERATED         = keep/generated/*
+
+API_GENERATED_IN       = api/*.in
+API_GENERATED          = api/generated/*
+
+SHELL_GENERATED_IN     = shell/*.in
+SHELL_GENERATED        = shell/generated/*
+
+SLURM_GENERATED_IN     = slurm/*.in
+SLURM_GENERATED        = slurm/generated/*
+
+WORKBENCH_GENERATED_IN = workbench/*.in
+WORKBENCH_GENERATED    = workbench/generated/*
+
+SSO_GENERATED_IN       = sso/*.in
+SSO_GENERATED          = sso/generated/*
+
+KEEP_DEPS += keep/generated/bin/keepproxy
+KEEP_DEPS += keep/generated/bin/keepstore
+keep/generated/bin/%: $(wildcard build/services/%/*.go)
+       mkdir -p keep/generated/src/git.curoverse.com
+       ln -sfn ../../../../.. keep/generated/src/git.curoverse.com/arvados.git
+       GOPATH=$(shell pwd)/keep/generated go get $(@:keep/generated/bin/%=git.curoverse.com/arvados.git/services/%)
+
+$(BUILD):
+       mkdir -p build
+       rsync -rlp --exclude=docker/ --exclude='**/log/*' --exclude='**/tmp/*' \
+               --chmod=Da+rx,Fa+rX ../ build/
+       find build/ -name \*.gem -delete
+       cd build/services/fuse/ && python setup.py build
+       cd build/sdk/python/ && python setup.py build
+       cd build/sdk/cli && gem build arvados-cli.gemspec
+       cd build/sdk/ruby && gem build arvados.gemspec
+       touch build/.buildstamp
+
+$(SLURM_GENERATED): $(BUILD)
+       $(CONFIG_RB) slurm
+       mkdir -p slurm/generated
+
+$(BASE_GENERATED): $(BUILD)
+       $(CONFIG_RB) base
+       mkdir -p base/generated
+       tar -czf base/generated/arvados.tar.gz -C build .
+
+$(API_GENERATED): $(API_GENERATED_IN)
+       $(CONFIG_RB) api
+
+$(SHELL_GENERATED): $(SHELL_GENERATED_IN)
+       $(CONFIG_RB) shell
+
+$(WORKBENCH_GENERATED): $(WORKBENCH_GENERATED_IN)
+       $(CONFIG_RB) workbench
+
+$(COMPUTE_GENERATED): $(COMPUTE_GENERATED_IN)
+       $(CONFIG_RB) compute
+
+$(SSO_GENERATED): $(SSO_GENERATED_IN)
+       $(CONFIG_RB) sso
+
+$(KEEP_GENERATED): $(KEEP_GENERATED_IN)
+       $(CONFIG_RB) keep
+
+DOCKER_BUILD = $(DOCKER) build --rm=true
+
+# ============================================================
+# The main Arvados servers: api, doc, workbench, compute
+
+api-image: passenger-image $(BUILD) $(API_DEPS)
+       @echo "Building api-image"
+       mkdir -p api/generated
+       tar -czf api/generated/api.tar.gz -C build/services api
+       $(DOCKER_BUILD) -t arvados/api api
+       date >api-image
+
+shell-image: base-image $(BUILD) $(SHELL_DEPS)
+       @echo "Building shell-image"
+       mkdir -p shell/generated
+       $(DOCKER_BUILD) -t arvados/shell shell
+       date >shell-image
+
+compute-image: slurm-image $(BUILD) $(COMPUTE_DEPS)
+       @echo "Building compute-image"
+       $(DOCKER_BUILD) -t arvados/compute compute
+       date >compute-image
+
+doc-image: base-image $(BUILD) $(DOC_DEPS)
+       @echo "Building doc-image"
+       mkdir -p doc/generated
+       tar -czf doc/generated/doc.tar.gz -C build doc
+       $(DOCKER_BUILD) -t arvados/doc doc
+       date >doc-image
+
+keep-image: debian-arvados-image $(BUILD) $(KEEP_DEPS)
+       @echo "Building keep-image"
+       $(DOCKER_BUILD) -t arvados/keep keep
+       date >keep-image
+
+jobs-image: base-image $(BUILD) $(JOBS_DEPS)
+       $(DOCKER_BUILD) -t arvados/jobs jobs
+       date >jobs-image
+
+java-bwa-samtools-image: jobs-image $(BUILD) $(JAVA_BWA_SAMTOOLS_DEPS)
+       $(DOCKER_BUILD) -t arvados/jobs-java-bwa-samtools java-bwa-samtools
+       date >java-bwa-samtools-image
+
+bcbio-nextgen-image: $(BUILD) $(BASE_GENERATED) $(BCBIO_NEXTGEN_DEPS)
+       rm -rf bcbio-nextgen/generated
+       cp -r base/generated bcbio-nextgen
+       $(DOCKER_BUILD) -t arvados/bcbio-nextgen bcbio-nextgen
+       date >bcbio-nextgen-image
+
+workbench-image: passenger-image $(BUILD) $(WORKBENCH_DEPS)
+       @echo "Building workbench-image"
+       mkdir -p workbench/generated
+       tar -czf workbench/generated/workbench.tar.gz -C build/apps workbench
+       $(DOCKER_BUILD) -t arvados/workbench workbench
+       date >workbench-image
+
+sso-image: passenger-image $(SSO_DEPS)
+       @echo "Building sso-image"
+       $(DOCKER_BUILD) -t arvados/sso sso
+       date >sso-image
+
+# ============================================================
+# The arvados/base image is the base Debian image plus packages
+# that are dependencies for every Arvados service.
+
+passenger-image: base-image
+       @echo "Building passenger-image"
+       $(DOCKER_BUILD) -t arvados/passenger passenger
+       date >passenger-image
+
+slurm-image: base-image $(SLURM_DEPS)
+       @echo "Building slurm-image"
+       $(DOCKER_BUILD) -t arvados/slurm slurm
+       date >slurm-image
+
+base-image: debian-arvados-image $(BASE_DEPS)
+       @echo "Building base-image"
+       $(DOCKER_BUILD) -t arvados/base base
+       date >base-image
+
+debian-arvados-image:
+       @echo "Building debian-arvados-image"
+       ./mkimage-debootstrap.sh arvados/debian wheezy ftp://ftp.us.debian.org/debian/
+       date >debian-arvados-image
+
+skydns-image:
+       @echo "Downloading skydns-image"
+       $(DOCKER) pull crosbymichael/skydns
+       date >skydns-image
+
+skydock-image:
+       @echo "Downloading skydock-image"
+       $(DOCKER) pull crosbymichael/skydock
+       date >skydock-image
diff --git a/docker/build_tools/build.rb b/docker/build_tools/build.rb
new file mode 100755 (executable)
index 0000000..e8f5809
--- /dev/null
@@ -0,0 +1,232 @@
+#! /usr/bin/env ruby
+
+require 'optparse'
+require 'tempfile'
+require 'yaml'
+
+def main options
+  if not ip_forwarding_enabled?
+    warn "NOTE: IP forwarding must be enabled in the kernel."
+    warn "Turning IP forwarding on now."
+    sudo %w(/sbin/sysctl net.ipv4.ip_forward=1)
+  end
+
+  # Check that:
+  #   * Docker is installed and can be found in the user's path
+  #   * Docker can be run as a non-root user
+  #      - TODO: put the user in the docker group if necessary
+  #      - TODO: mount cgroup automatically
+  #      - TODO: start the docker service if not started
+
+  docker_path = %x(which docker.io).chomp
+
+  if docker_path.empty?
+    docker_path = %x(which docker).chomp
+  end
+
+  if docker_path.empty?
+    warn "Docker not found."
+    warn ""
+    warn "Please make sure that Docker has been installed and"
+    warn "can be found in your PATH."
+    warn ""
+    warn "Installation instructions for a variety of platforms can be found at"
+    warn "http://docs.docker.io/en/latest/installation/"
+    exit 1
+  elsif not docker_ok? docker_path
+    warn "WARNING: docker could not be run."
+    warn "Please make sure that:"
+    warn "  * You have permission to read and write /var/run/docker.sock"
+    warn "  * a 'cgroup' volume is mounted on your machine"
+    warn "  * the docker daemon is running"
+    exit 2
+  end
+
+  # Check that debootstrap is installed.
+  if not debootstrap_ok?
+    warn "Installing debootstrap."
+    sudo '/usr/bin/apt-get', 'install', 'debootstrap'
+  end
+
+  # Generate a config.yml if it does not exist or is empty
+  if not File.size? 'config.yml'
+    print "Generating config.yml.\n"
+    print "Arvados needs to know the email address of the administrative user,\n"
+    print "so that when that user logs in they are automatically made an admin.\n"
+    print "This should be an email address associated with a Google account.\n"
+    print "\n"
+    admin_email_address = ""
+    until is_valid_email? admin_email_address
+      print "Enter your Google ID email address here: "
+      admin_email_address = gets.strip
+      if not is_valid_email? admin_email_address
+        print "That doesn't look like a valid email address. Please try again.\n"
+      end
+    end
+
+    print "Arvados needs to know the shell login name for the administrative user.\n"
+    print "This will also be used as the name for your git repository.\n"
+    print "\n"
+    user_name = ""
+    until is_valid_user_name? user_name
+      print "Enter a shell login name here: "
+      user_name = gets.strip
+      if not is_valid_user_name? user_name
+        print "That doesn't look like a valid shell login name. Please try again.\n"
+      end
+    end
+
+    File.open 'config.yml', 'w' do |config_out|
+      config_out.write "# If a _PW or _SECRET variable is set to an empty string, a password\n"
+      config_out.write "# will be chosen randomly at build time. This is the\n"
+      config_out.write "# recommended setting.\n\n"
+      config = YAML.load_file 'config.yml.example'
+      config['API_AUTO_ADMIN_USER'] = admin_email_address
+      config['ARVADOS_USER_NAME'] = user_name
+      config['API_HOSTNAME'] = generate_api_hostname
+      config['API_WORKBENCH_ADDRESS'] = 'false'
+      config.each_key do |var|
+        config_out.write "#{var}: #{config[var]}\n"
+      end
+    end
+  end
+
+  # If all prerequisites are met, go ahead and build.
+  if ip_forwarding_enabled? and
+      docker_ok? docker_path and
+      debootstrap_ok? and
+      File.exists? 'config.yml'
+    exit 0
+  else
+    exit 6
+  end
+end
+
+# sudo
+#   Execute the arg list 'cmd' under sudo.
+#   cmd can be passed either as a series of arguments or as a
+#   single argument consisting of a list, e.g.:
+#     sudo 'apt-get', 'update'
+#     sudo(['/usr/bin/gpasswd', '-a', ENV['USER'], 'docker'])
+#     sudo %w(/usr/bin/apt-get install lxc-docker)
+#
+def sudo(*cmd)
+  # user can pass a single list in as an argument
+  # to allow usage like: sudo %w(apt-get install foo)
+  warn "You may need to enter your password here."
+  if cmd.length == 1 and cmd[0].class == Array
+    cmd = cmd[0]
+  end
+  system '/usr/bin/sudo', *cmd
+end
+
+# is_valid_email?
+#   Returns true if its arg looks like a valid email address.
+#   This is a very very loose sanity check.
+#
+def is_valid_email? str
+  str.match /^\S+@\S+\.\S+$/
+end
+
+# is_valid_user_name?
+#   Returns true if its arg looks like a valid unix username.
+#   This is a very very loose sanity check.
+#
+def is_valid_user_name? str
+  # borrowed from Debian's adduser (version 3.110)
+  str.match /^[_.A-Za-z0-9][-\@_.A-Za-z0-9]*\$?$/
+end
+
+# generate_api_hostname
+#   Generates a 5-character randomly chosen API hostname.
+#
+def generate_api_hostname
+  rand(2**256).to_s(36)[0...5]
+end
+
+# ip_forwarding_enabled?
+#   Returns 'true' if IP forwarding is enabled in the kernel
+#
+def ip_forwarding_enabled?
+  %x(/sbin/sysctl -n net.ipv4.ip_forward) == "1\n"
+end
+
+# debootstrap_ok?
+#   Returns 'true' if debootstrap is installed and working.
+#
+def debootstrap_ok?
+  return system '/usr/sbin/debootstrap --version > /dev/null 2>&1'
+end
+
+# docker_ok?
+#   Returns 'true' if docker can be run as the current user.
+#
+def docker_ok?(docker_path)
+  return system "#{docker_path} images > /dev/null 2>&1"
+end
+
+# install_docker
+#   Determines which Docker package is suitable for this Linux distro
+#   and installs it, resolving any dependencies.
+#   NOTE: not in use yet.
+
+def install_docker
+  linux_distro = %x(lsb_release --id).split.last
+  linux_release = %x(lsb_release --release).split.last
+  linux_version = linux_distro + " " + linux_release
+  kernel_release = `uname -r`
+
+  case linux_distro
+  when 'Ubuntu'
+    if not linux_release.match '^1[234]\.'
+      warn "Arvados requires at least Ubuntu 12.04 (Precise Pangolin)."
+      warn "Your system is Ubuntu #{linux_release}."
+      exit 3
+    end
+    if linux_release.match '^12' and kernel_release.start_with? '3.2'
+      # Ubuntu Precise ships with a 3.2 kernel and must be upgraded.
+      warn "Your kernel #{kernel_release} must be upgraded to run Docker."
+      warn "To do this:"
+      warn "  sudo apt-get update"
+      warn "  sudo apt-get install linux-image-generic-lts-raring linux-headers-generic-lts-raring"
+      warn "  sudo reboot"
+      exit 4
+    else
+      # install AUFS
+      sudo 'apt-get', 'update'
+      sudo 'apt-get', 'install', "linux-image-extra-#{kernel_release}"
+    end
+
+    # add Docker repository
+    sudo %w(/usr/bin/apt-key adv
+              --keyserver keyserver.ubuntu.com
+              --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9)
+    source_file = Tempfile.new('arv')
+    source_file.write("deb http://get.docker.io/ubuntu docker main\n")
+    source_file.close
+    sudo '/bin/mv', source_file.path, '/etc/apt/sources.list.d/docker.list'
+    sudo %w(/usr/bin/apt-get update)
+    sudo %w(/usr/bin/apt-get install lxc-docker)
+
+    # Set up for non-root access
+    sudo %w(/usr/sbin/groupadd docker)
+    sudo '/usr/bin/gpasswd', '-a', ENV['USER'], 'docker'
+    sudo %w(/usr/sbin/service docker restart)
+  when 'Debian'
+  else
+    warn "Must be running a Debian or Ubuntu release in order to run Docker."
+    exit 5
+  end
+end
+
+
+if __FILE__ == $PROGRAM_NAME
+  options = { :makefile => File.join(File.dirname(__FILE__), 'Makefile') }
+  OptionParser.new do |opts|
+    opts.on('-m', '--makefile MAKEFILE-PATH',
+            'Path to the Makefile used to build Arvados Docker images') do |mk|
+      options[:makefile] = mk
+    end
+  end
+  main options
+end
diff --git a/docker/build_tools/config.rb b/docker/build_tools/config.rb
new file mode 100755 (executable)
index 0000000..296bc20
--- /dev/null
@@ -0,0 +1,74 @@
+#! /usr/bin/env ruby
+
+require 'yaml'
+require 'fileutils'
+require 'digest'
+
+abort 'Error: Ruby >= 1.9.3 required.' if RUBY_VERSION < '1.9.3'
+
+# Initialize config settings from config.yml
+config = YAML.load_file('config.yml')
+
+# ============================================================
+# Add dynamically chosen config settings. These settings should
+# be suitable for any installation.
+
+# Any _PW/_SECRET config settings represent passwords/secrets. If they
+# are blank, choose a password. Make sure the generated password
+# doesn't change if config.yml doesn't change. Otherwise, keys won't
+# match any more if (say) keep's files get regenerated but apiserver's
+# don't.
+config.sort.map do |var,val|
+  if (var.end_with?('_PW') || var.end_with?('_SECRET')) && (config[var].nil? || config[var].empty?)
+    config[var] = Digest::SHA1.hexdigest(`hostname` + var + config.to_yaml)
+  end
+end
+
+# ============================================================
+# For each *.in file in the docker directories, substitute any
+# @@variables@@ found in the file with the appropriate config
+# variable. Support up to 10 levels of nesting.
+#
+# TODO(twp): add the *.in files directory to the source tree, and
+# when expanding them, add them to the "generated" directory with
+# the same tree structure as in the original source. Then all
+# the files can be added to the docker container with a single ADD.
+
+if ARGV[0] and ARGV[0].length > 0
+  globdir = ARGV[0]
+else
+  globdir = '*'
+end
+
+FileUtils.rm_r Dir.glob(globdir + '/generated/*')
+
+File.umask(022)
+Dir.glob(globdir + '/*.in') do |template_file|
+  generated_dir = File.join(File.dirname(template_file), 'generated')
+  Dir.mkdir(generated_dir) unless Dir.exists? generated_dir
+  output_path = File.join(generated_dir, File.basename(template_file, '.in'))
+  output_mode = (File.stat(template_file).mode & 0100) ? 0755 : 0644
+  File.open(output_path, "w", output_mode) do |output|
+    File.open(template_file) do |input|
+      input.each_line do |line|
+
+        # This count is used to short-circuit potential
+        # infinite loops of variable substitution.
+        @count = 0
+        while @count < 10
+          @out = line.gsub!(/@@(.*?)@@/) do |var|
+            if config.key?(Regexp.last_match[1])
+              config[Regexp.last_match[1]]
+            else
+              var.gsub!(/@@/, '@_NOT_FOUND_@')
+            end
+          end
+          break if @out.nil?
+          @count += 1
+        end
+
+        output.write(line)
+      end
+    end
+  end
+end
diff --git a/docker/compute/Dockerfile b/docker/compute/Dockerfile
new file mode 100644 (file)
index 0000000..462115c
--- /dev/null
@@ -0,0 +1,26 @@
+# Arvados compute node Docker container.
+
+FROM arvados/slurm
+MAINTAINER Ward Vandewege <ward@curoverse.com>
+
+RUN apt-get update -qq
+RUN apt-get install -qqy supervisor python-pip python-pyvcf python-gflags python-google-api-python-client python-virtualenv libattr1-dev libfuse-dev python-dev python-llfuse fuse crunchstat python-arvados-fuse cron dnsmasq
+
+ADD fuse.conf /etc/fuse.conf
+RUN chmod 644 /etc/fuse.conf
+
+RUN /usr/local/rvm/bin/rvm-exec default gem install arvados-cli arvados
+
+# Install Docker from the Arvados package repository (cf. arvados/base)
+RUN apt-get install -qqy iptables ca-certificates lxc apt-transport-https docker.io
+
+RUN addgroup --gid 4005 crunch && mkdir /home/crunch && useradd --uid 4005 --gid 4005 crunch && usermod crunch -G fuse,docker && chown crunch:crunch /home/crunch
+
+# Supervisor.
+ADD supervisor.conf /etc/supervisor/conf.d/arvados.conf
+ADD generated/setup.sh /usr/local/bin/setup.sh
+ADD wrapdocker /usr/local/bin/wrapdocker.sh
+
+VOLUME /var/lib/docker
+# Start the supervisor.
+CMD ["/usr/bin/supervisord", "-n"]
diff --git a/docker/compute/fuse.conf b/docker/compute/fuse.conf
new file mode 100644 (file)
index 0000000..4ed21ba
--- /dev/null
@@ -0,0 +1,10 @@
+# Set the maximum number of FUSE mounts allowed to non-root users.
+# The default is 1000.
+#
+#mount_max = 1000
+
+# Allow non-root users to specify the 'allow_other' or 'allow_root'
+# mount options.
+#
+user_allow_other
+
diff --git a/docker/compute/setup.sh.in b/docker/compute/setup.sh.in
new file mode 100755 (executable)
index 0000000..e107d80
--- /dev/null
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+. /etc/profile.d/rvm.sh
+
+export ARVADOS_API_HOST=api
+export ARVADOS_API_HOST_INSECURE=yes
+export ARVADOS_API_TOKEN=@@API_SUPERUSER_SECRET@@
+
+arv node create --node {} > /tmp/node.json
+
+UUID=`grep \"uuid\" /tmp//node.json  |cut -f4 -d\"`
+PING_SECRET=`grep \"ping_secret\" /tmp//node.json  |cut -f4 -d\"`
+
+echo "*/5 * * * * root /usr/bin/curl -k -d ping_secret=$PING_SECRET https://api/arvados/v1/nodes/$UUID/ping" > /etc/cron.d/node_ping
+
+# Send a ping now
+/usr/bin/curl -k -d ping_secret=$PING_SECRET https://api/arvados/v1/nodes/$UUID/ping?ping_secret=$PING_SECRET
+
+# Just make sure /dev/fuse permissions are correct (the device appears after fuse is loaded)
+chmod 1660 /dev/fuse && chgrp fuse /dev/fuse
diff --git a/docker/compute/supervisor.conf b/docker/compute/supervisor.conf
new file mode 100644 (file)
index 0000000..615e55a
--- /dev/null
@@ -0,0 +1,29 @@
+[program:munge]
+user=root
+command=/etc/init.d/munge start
+startsecs=0
+
+[program:slurm]
+user=root
+command=/etc/init.d/slurm-llnl start
+startsecs=0
+
+[program:cron]
+user=root
+command=/etc/init.d/cron start
+startsecs=0
+
+[program:setup]
+user=root
+command=/usr/local/bin/setup.sh
+startsecs=0
+
+[program:docker]
+user=root
+command=/usr/local/bin/wrapdocker.sh
+
+[program:dnsmasq]
+user=root
+command=/etc/init.d/dnsmasq start
+startsecs=0
+
diff --git a/docker/compute/wrapdocker b/docker/compute/wrapdocker
new file mode 100755 (executable)
index 0000000..cee1302
--- /dev/null
@@ -0,0 +1,90 @@
+#!/bin/bash
+
+# Borrowed from https://github.com/jpetazzo/dind under Apache2
+# and slightly modified.
+
+# First, make sure that cgroups are mounted correctly.
+CGROUP=/sys/fs/cgroup
+: ${LOG:=stdio}
+
+[ -d $CGROUP ] ||
+       mkdir $CGROUP
+
+mountpoint -q $CGROUP ||
+       mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP || {
+               echo "Could not make a tmpfs mount. Did you use -privileged?"
+               exit 1
+       }
+
+if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security
+then
+    mount -t securityfs none /sys/kernel/security || {
+        echo "Could not mount /sys/kernel/security."
+        echo "AppArmor detection and -privileged mode might break."
+    }
+fi
+
+# Mount the cgroup hierarchies exactly as they are in the parent system.
+for SUBSYS in $(cut -d: -f2 /proc/1/cgroup)
+do
+        [ -d $CGROUP/$SUBSYS ] || mkdir $CGROUP/$SUBSYS
+        mountpoint -q $CGROUP/$SUBSYS ||
+                mount -n -t cgroup -o $SUBSYS cgroup $CGROUP/$SUBSYS
+
+        # The two following sections address a bug which manifests itself
+        # by a cryptic "lxc-start: no ns_cgroup option specified" when
+        # trying to start containers within a container.
+        # The bug seems to appear when the cgroup hierarchies are not
+        # mounted on the exact same directories in the host, and in the
+        # container.
+
+        # Named, control-less cgroups are mounted with "-o name=foo"
+        # (and appear as such under /proc/<pid>/cgroup) but are usually
+        # mounted on a directory named "foo" (without the "name=" prefix).
+        # Systemd and OpenRC (and possibly others) both create such a
+        # cgroup. To avoid the aforementioned bug, we symlink "foo" to
+        # "name=foo". This shouldn't have any adverse effect.
+        echo $SUBSYS | grep -q ^name= && {
+                NAME=$(echo $SUBSYS | sed s/^name=//)
+                ln -s $SUBSYS $CGROUP/$NAME
+        }
+
+        # Likewise, on at least one system, it has been reported that
+        # systemd would mount the CPU and CPU accounting controllers
+        # (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu"
+        # but on a directory called "cpu,cpuacct" (note the inversion
+        # in the order of the groups). This tries to work around it.
+        [ $SUBSYS = cpuacct,cpu ] && ln -s $SUBSYS $CGROUP/cpu,cpuacct
+done
+
+# Note: as I write those lines, the LXC userland tools cannot setup
+# a "sub-container" properly if the "devices" cgroup is not in its
+# own hierarchy. Let's detect this and issue a warning.
+grep -q :devices: /proc/1/cgroup ||
+       echo "WARNING: the 'devices' cgroup should be in its own hierarchy."
+grep -qw devices /proc/1/cgroup ||
+       echo "WARNING: it looks like the 'devices' cgroup is not mounted."
+
+# Now, close extraneous file descriptors.
+pushd /proc/self/fd >/dev/null
+for FD in *
+do
+       case "$FD" in
+       # Keep stdin/stdout/stderr
+       [012])
+               ;;
+       # Nuke everything else
+       *)
+               eval exec "$FD>&-"
+               ;;
+       esac
+done
+popd >/dev/null
+
+
+# If a pidfile is still around (for example after a container restart),
+# delete it so that docker can start.
+rm -rf /var/run/docker.pid
+
+exec docker -d
+
diff --git a/docker/config.yml.example b/docker/config.yml.example
new file mode 100644 (file)
index 0000000..4210ec3
--- /dev/null
@@ -0,0 +1,99 @@
+# Configuration for the Rails databases (database names,
+# usernames and passwords).
+
+# Username for your Arvados user. This will be used as your shell login name
+# as well as the name for your git repository.
+ARVADOS_USER_NAME:
+
+# ARVADOS_DOMAIN: the Internet domain of this installation.
+# ARVADOS_DNS_SERVER: the authoritative nameserver for ARVADOS_DOMAIN.
+ARVADOS_DOMAIN:         # e.g. arvados.internal
+ARVADOS_DNS_SERVER:     # e.g. 192.168.0.1
+
+# ==============================
+# API server settings
+# ==============================
+
+# The API server hostname. Must be a 5-character
+# string unique within this installation. This string
+# will also be used as config.uuid_prefix.
+API_HOSTNAME:           # e.g. qr1hi
+
+# The e-mail address of the user you would like to become marked as an admin
+# user on their first login.
+# In the default configuration, authentication happens through the Arvados SSO
+# server, which uses openid against Google's servers, so in that case this
+# should be an address associated with a Google account.
+API_AUTO_ADMIN_USER:
+
+# The location of the Workbench application where users should be
+# redirected if they point their browsers at the API server, e.g.,
+# https://localhost:9899
+API_WORKBENCH_ADDRESS:
+
+# If a _PW variable is set to an empty string, a password
+# will be chosen randomly at build time. This is the
+# recommended setting.
+ARVADOS_DEV_DB: arvados_development
+ARVADOS_DEV_USER: arvados_dev
+ARVADOS_DEV_PW:
+ARVADOS_TEST_DB: arvados_test
+ARVADOS_TEST_USER: arvados_test
+ARVADOS_TEST_PW:
+ARVADOS_PROD_DB: arvados_production
+ARVADOS_PROD_USER: arvados_prod
+ARVADOS_PROD_PW:
+
+# If a _SECRET variable is set to an empty string, a password
+# will be chosen randomly at build time. This is the
+# recommended setting.
+
+# The signing key shared by Keep at the API server to verify
+# blob permission signatures.
+KEEP_SIGNING_SECRET:
+
+# The value for the Rails config.secret_token setting.
+API_SECRET:
+
+# A "superuser" token with which servers can authenticate to
+# the API server, before an administrative user has been created.
+# Leave this blank to generate a secret randomly at build time (recommended).
+API_SUPERUSER_SECRET:
+
+# More than anything this should be auto-generated, but
+# we don't presently have a good place to store it. So just
+# change it and don't be dumb.
+POSTGRES_ROOT_PW: dummy_pw
+
+# The URL of the SSO server that you want your API server to use. If
+# blank, use the sso docker container.
+OMNIAUTH_URL:
+
+# ==============================
+# Workbench settings
+# ==============================
+WORKBENCH_RAILS_MODE: production
+WORKBENCH_DATA_IMPORT_DIR: /data/arvados-workbench-upload/data
+WORKBENCH_DATA_EXPORT_DIR: /data/arvados-workbench-download/data
+WORKBENCH_VCF_PIPELINE_UUID:
+WORKBENCH_SITE_NAME: Arvados Workbench
+WORKBENCH_INSECURE_HTTPS: true
+WORKBENCH_ACTIVATION_CONTACT_LINK: mailto:arvados@curoverse.com
+WORKBENCH_ARVADOS_LOGIN_BASE: https://@@API_HOSTNAME@@.@@ARVADOS_DOMAIN@@/login
+WORKBENCH_ARVADOS_V1_BASE: https://@@API_HOSTNAME@@.@@ARVADOS_DOMAIN@@/arvados/v1
+WORKBENCH_SECRET:
+
+# ==============================
+# SSO settings
+# ==============================
+SSO_HOSTNAME: sso
+SSO_SECRET:
+SSO_CLIENT_NAME: devsandbox
+# ==============================
+# Default to using auth.curoverse.com as SSO server
+# To use a local Docker SSO server, set OMNIAUTH_URL and SSO_CLIENT_SECRET
+# to the empty string
+# ==============================
+OMNIAUTH_URL: https://auth.curoverse.com
+SSO_CLIENT_APP_ID: local_docker_installation
+SSO_CLIENT_SECRET: yohbai4eecohshoo1Yoot7tea9zoca9Eiz3Tajahweo9eePaeshaegh9meiye2ph
diff --git a/docker/doc/Dockerfile b/docker/doc/Dockerfile
new file mode 100644 (file)
index 0000000..aa51a38
--- /dev/null
@@ -0,0 +1,28 @@
+# Arvados Documentation Docker container.
+
+FROM arvados/base
+maintainer Ward Vandewege <ward@curoverse.com>
+
+# Install packages
+RUN /bin/mkdir -p /usr/src/arvados && \
+    apt-get update -qq && \
+    apt-get install -qqy curl procps apache2-mpm-worker
+
+ADD generated/doc.tar.gz /usr/src/arvados/
+
+# Build static site
+RUN /usr/local/rvm/bin/rvm-exec default bundle install --gemfile=/usr/src/arvados/doc/Gemfile && \
+    /bin/sed -ri 's/^baseurl: .*$/baseurl: /' /usr/src/arvados/doc/_config.yml && \
+    cd /usr/src/arvados/doc && \
+    LANG="en_US.UTF-8" LC_ALL="en_US.UTF-8" /usr/local/rvm/bin/rvm-exec default bundle exec rake
+
+# Configure Apache
+ADD apache2_vhost /etc/apache2/sites-available/doc
+RUN \
+  a2dissite default && \
+  a2ensite doc
+
+ADD apache2_foreground.sh /etc/apache2/foreground.sh
+
+# Start Apache
+CMD ["/etc/apache2/foreground.sh"]
diff --git a/docker/doc/apache2_foreground.sh b/docker/doc/apache2_foreground.sh
new file mode 100755 (executable)
index 0000000..fc6028e
--- /dev/null
@@ -0,0 +1,7 @@
+#! /bin/bash
+
+read pid cmd state ppid pgrp session tty_nr tpgid rest < /proc/self/stat
+trap "kill -TERM -$pgrp; exit" EXIT TERM KILL SIGKILL SIGTERM SIGQUIT
+
+source /etc/apache2/envvars
+/usr/sbin/apache2 -D FOREGROUND
diff --git a/docker/doc/apache2_vhost b/docker/doc/apache2_vhost
new file mode 100644 (file)
index 0000000..3a07776
--- /dev/null
@@ -0,0 +1,12 @@
+
+ServerName doc.arvados.org
+
+<VirtualHost *:80>
+  ServerAdmin sysadmin@curoverse.com
+
+  ServerName doc.arvados.org
+
+  DocumentRoot /usr/src/arvados/doc/.site/
+
+</VirtualHost>
+
diff --git a/docker/install_sdk.sh b/docker/install_sdk.sh
new file mode 100755 (executable)
index 0000000..1c07c9d
--- /dev/null
@@ -0,0 +1,13 @@
+#! /bin/sh
+
+# Install prerequisites.
+sudo apt-get install curl libcurl3 libcurl3-gnutls libcurl4-openssl-dev python-pip
+
+# Install RVM.
+curl -sSL https://get.rvm.io | bash -s stable
+source ~/.rvm/scripts/rvm
+rvm install 2.1.0
+
+# Install arvados-cli.
+gem install arvados-cli
+sudo pip install --upgrade httplib2
diff --git a/docker/java-bwa-samtools/Dockerfile b/docker/java-bwa-samtools/Dockerfile
new file mode 100644 (file)
index 0000000..713ef21
--- /dev/null
@@ -0,0 +1,23 @@
+FROM arvados/jobs
+MAINTAINER Peter Amstutz <peter.amstutz@curoverse.com>
+
+USER root
+
+RUN apt-get update -qq
+RUN apt-get install -qqy openjdk-7-jre-headless && \
+    cd /tmp && \
+    curl --location http://cache.arvados.org/sourceforge.net/project/bio-bwa/bwa-0.7.9a.tar.bz2 -o bwa-0.7.9a.tar.bz2 && \
+    tar xjf bwa-0.7.9a.tar.bz2 && \
+    cd bwa-0.7.9a && \
+    make && \
+    (find . -executable -type f -print0 | xargs -0 -I {} mv {} /usr/local/bin) && \
+    rm -r /tmp/bwa-0.7.9a* && \
+    cd /tmp && \
+    curl --location http://cache.arvados.org/sourceforge.net/project/samtools/samtools/0.1.19/samtools-0.1.19.tar.bz2 -o samtools-0.1.19.tar.bz2 && \
+    tar xjf samtools-0.1.19.tar.bz2 && \
+    cd samtools-0.1.19 && \
+    make && \
+    (find . -executable -type f -print0 | xargs -0 -I {} mv {} /usr/local/bin) && \
+    rm -r /tmp/samtools-0.1.19*
+
+USER crunch
diff --git a/docker/jobs/Dockerfile b/docker/jobs/Dockerfile
new file mode 100644 (file)
index 0000000..313dd36
--- /dev/null
@@ -0,0 +1,20 @@
+FROM arvados/base
+MAINTAINER Brett Smith <brett@curoverse.com>
+
+# Install dependencies and set up system.
+# The FUSE packages help ensure that we can install the Python SDK (arv-mount).
+RUN /usr/bin/apt-get install -q -y \
+      python-dev python-llfuse python-pip python-virtualenv \
+      libio-socket-ssl-perl libjson-perl liburi-perl libwww-perl dtrx \
+      fuse libattr1-dev libfuse-dev && \
+    /usr/sbin/adduser --disabled-password \
+      --gecos 'Crunch execution user' crunch && \
+    /usr/bin/install --directory --owner=crunch --group=crunch --mode=0700 /keep /tmp/crunch-src /tmp/crunch-job && \
+    /bin/ln -s /usr/src/arvados /usr/local/src/arvados
+
+# Install Arvados packages.
+RUN (find /usr/src/arvados/sdk -name '*.gem' -print0 | \
+      xargs -0rn 1 /usr/local/rvm/bin/rvm-exec default gem install) && \
+     apt-get -qqy install python-arvados-fuse
+
+USER crunch
diff --git a/docker/keep/Dockerfile b/docker/keep/Dockerfile
new file mode 100644 (file)
index 0000000..cd40a72
--- /dev/null
@@ -0,0 +1,12 @@
+# Based on Debian Wheezy
+FROM arvados/debian:wheezy
+MAINTAINER Ward Vandewege <ward@curoverse.com>
+
+ADD generated/bin/keepstore /usr/local/bin/
+ADD generated/bin/keepproxy /usr/local/bin/
+ADD generated/run-keep /usr/local/bin/
+
+ADD generated/keep_signing_secret /etc/
+
+# Start keep
+CMD ["/usr/local/bin/run-keep"]
diff --git a/docker/keep/keep_signing_secret.in b/docker/keep/keep_signing_secret.in
new file mode 100644 (file)
index 0000000..e5b39c8
--- /dev/null
@@ -0,0 +1 @@
+@@KEEP_SIGNING_SECRET@@
\ No newline at end of file
diff --git a/docker/keep/run-keep.in b/docker/keep/run-keep.in
new file mode 100755 (executable)
index 0000000..a0b4cb0
--- /dev/null
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+pkf="/etc/keep_signing_secret"
+if [ -s "$pkf" ]
+then
+    permission_args="-permission-key-file=$pkf -enforce-permissions"
+else
+    permission_args=""
+fi
+
+exec keepstore $permission_args -listen=":25107" -volumes="/keep-data"
diff --git a/docker/mkimage-debootstrap.sh b/docker/mkimage-debootstrap.sh
new file mode 100755 (executable)
index 0000000..b4010ef
--- /dev/null
@@ -0,0 +1,239 @@
+#!/bin/bash
+set -e
+
+variant='minbase'
+include='iproute,iputils-ping'
+arch='amd64' # intentionally undocumented for now
+skipDetection=
+strictDebootstrap=
+justTar=
+
+usage() {
+       echo >&2
+       
+       echo >&2 "usage: $0 [options] repo suite [mirror]"
+       
+       echo >&2
+       echo >&2 'options: (not recommended)'
+       echo >&2 "  -p set an http_proxy for debootstrap"
+       echo >&2 "  -v $variant # change default debootstrap variant"
+       echo >&2 "  -i $include # change default package includes"
+       echo >&2 "  -d # strict debootstrap (do not apply any docker-specific tweaks)"
+       echo >&2 "  -s # skip version detection and tagging (ie, precise also tagged as 12.04)"
+       echo >&2 "     # note that this will also skip adding universe and/or security/updates to sources.list"
+       echo >&2 "  -t # just create a tarball, especially for dockerbrew (uses repo as tarball name)"
+       
+       echo >&2
+       echo >&2 "   ie: $0 username/debian squeeze"
+       echo >&2 "       $0 username/debian squeeze http://ftp.uk.debian.org/debian/"
+       
+       echo >&2
+       echo >&2 "   ie: $0 username/ubuntu precise"
+       echo >&2 "       $0 username/ubuntu precise http://mirrors.melbourne.co.uk/ubuntu/"
+       
+       echo >&2
+       echo >&2 "   ie: $0 -t precise.tar.bz2 precise"
+       echo >&2 "       $0 -t wheezy.tgz wheezy"
+       echo >&2 "       $0 -t wheezy-uk.tar.xz wheezy http://ftp.uk.debian.org/debian/"
+       
+       echo >&2
+}
+
+# these should match the names found at http://www.debian.org/releases/
+debianStable=wheezy
+debianUnstable=sid
+# this should match the name found at http://releases.ubuntu.com/
+ubuntuLatestLTS=precise
+
+while getopts v:i:a:p:dst name; do
+       case "$name" in
+               p)
+                       http_proxy="$OPTARG"
+                       ;;
+               v)
+                       variant="$OPTARG"
+                       ;;
+               i)
+                       include="$OPTARG"
+                       ;;
+               a)
+                       arch="$OPTARG"
+                       ;;
+               d)
+                       strictDebootstrap=1
+                       ;;
+               s)
+                       skipDetection=1
+                       ;;
+               t)
+                       justTar=1
+                       ;;
+               ?)
+                       usage
+                       exit 0
+                       ;;
+       esac
+done
+shift $(($OPTIND - 1))
+
+repo="$1"
+suite="$2"
+mirror="${3:-}" # stick to the default debootstrap mirror if one is not provided
+
+if [ ! "$repo" ] || [ ! "$suite" ]; then
+       usage
+       exit 1
+fi
+
+# some rudimentary detection for whether we need to "sudo" our docker calls
+set +e
+docker=`which docker.io`
+if [[ "$docker" == "" ]]; then
+       docker=`which docker`
+fi
+set -e
+
+if $docker version > /dev/null 2>&1; then
+       docker="$docker"
+elif sudo $docker version > /dev/null 2>&1; then
+       docker="sudo $docker"
+elif command -v $docker > /dev/null 2>&1; then
+       docker="$docker"
+else
+       echo >&2 "warning: either docker isn't installed, or your current user cannot run it;"
+       echo >&2 "         this script is not likely to work as expected"
+       sleep 3
+       docker='docker' # give us a command-not-found later
+fi
+
+# make sure we have an absolute path to our final tarball so we can still reference it properly after we change directory
+if [ "$justTar" ]; then
+       if [ ! -d "$(dirname "$repo")" ]; then
+               echo >&2 "error: $(dirname "$repo") does not exist"
+               exit 1
+       fi
+       repo="$(cd "$(dirname "$repo")" && pwd -P)/$(basename "$repo")"
+fi
+
+# will be filled in later, if [ -z "$skipDetection" ]
+lsbDist=''
+
+target="/tmp/docker-rootfs-debootstrap-$suite-$$-$RANDOM"
+
+cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
+returnTo="$(pwd -P)"
+
+set -x
+
+# bootstrap
+mkdir -p "$target"
+sudo http_proxy=$http_proxy debootstrap --verbose --variant="$variant" --include="$include" --arch="$arch" "$suite" "$target" "$mirror"
+
+cd "$target"
+
+if [ -z "$strictDebootstrap" ]; then
+       # prevent init scripts from running during install/update
+       #  policy-rc.d (for most scripts)
+       echo $'#!/bin/sh\nexit 101' | sudo tee usr/sbin/policy-rc.d > /dev/null
+       sudo chmod +x usr/sbin/policy-rc.d
+       #  initctl (for some pesky upstart scripts)
+       sudo chroot . dpkg-divert --local --rename --add /sbin/initctl
+       sudo ln -sf /bin/true sbin/initctl
+       # see https://github.com/dotcloud/docker/issues/446#issuecomment-16953173
+       
+       # shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB)
+       sudo chroot . apt-get clean
+       
+       # while we're at it, apt is unnecessarily slow inside containers
+       #  this forces dpkg not to call sync() after package extraction and speeds up install
+       #    the benefit is huge on spinning disks, and the penalty is nonexistent on SSD or decent server virtualization
+       echo 'force-unsafe-io' | sudo tee etc/dpkg/dpkg.cfg.d/02apt-speedup > /dev/null
+       #  we want to effectively run "apt-get clean" after every install to keep images small
+       echo 'DPkg::Post-Invoke {"/bin/rm -f /var/cache/apt/archives/*.deb || true";};' | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null
+       
+       # helpful undo lines for each the above tweaks (for lack of a better home to keep track of them):
+       #  rm /usr/sbin/policy-rc.d
+       #  rm /sbin/initctl; dpkg-divert --rename --remove /sbin/initctl
+       #  rm /etc/dpkg/dpkg.cfg.d/02apt-speedup
+       #  rm /etc/apt/apt.conf.d/no-cache
+       
+       if [ -z "$skipDetection" ]; then
+               # see also rudimentary platform detection in hack/install.sh
+               lsbDist=''
+               if [ -r etc/lsb-release ]; then
+                       lsbDist="$(. etc/lsb-release && echo "$DISTRIB_ID")"
+               fi
+               if [ -z "$lsbDist" ] && [ -r etc/debian_version ]; then
+                       lsbDist='Debian'
+               fi
+               
+               case "$lsbDist" in
+                       Debian)
+                               # add the updates and security repositories
+                               if [ "$suite" != "$debianUnstable" -a "$suite" != 'unstable' ]; then
+                                       # ${suite}-updates only applies to non-unstable
+                                       sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list
+                                       
+                                       # same for security updates
+                                       echo "deb http://security.debian.org/ $suite/updates main" | sudo tee -a etc/apt/sources.list > /dev/null
+                               fi
+                               ;;
+                       Ubuntu)
+                               # add the universe, updates, and security repositories
+                               sudo sed -i "
+                                       s/ $suite main$/ $suite main universe/; p;
+                                       s/ $suite main/ ${suite}-updates main/; p;
+                                       s/ $suite-updates main/ ${suite}-security main/
+                               " etc/apt/sources.list
+                               ;;
+               esac
+       fi
+fi
+
+if [ "$justTar" ]; then
+       # create the tarball file so it has the right permissions (ie, not root)
+       touch "$repo"
+       
+       # fill the tarball
+       sudo tar --numeric-owner -caf "$repo" .
+else
+       # create the image (and tag $repo:$suite)
+       sudo tar --numeric-owner -c . | $docker import - $repo:$suite
+       
+       # test the image
+       $docker run -i -t $repo:$suite echo success
+       
+       if [ -z "$skipDetection" ]; then
+               case "$lsbDist" in
+                       Debian)
+                               if [ "$suite" = "$debianStable" -o "$suite" = 'stable' ] && [ -r etc/debian_version ]; then
+                                       # tag latest
+                                       $docker tag $repo:$suite $repo:latest
+                                       
+                                       if [ -r etc/debian_version ]; then
+                                               # tag the specific debian release version (which is only reasonable to tag on debian stable)
+                                               ver=$(cat etc/debian_version)
+                                               $docker tag $repo:$suite $repo:$ver
+                                       fi
+                               fi
+                               ;;
+                       Ubuntu)
+                               if [ "$suite" = "$ubuntuLatestLTS" ]; then
+                                       # tag latest
+                                       $docker tag $repo:$suite $repo:latest
+                               fi
+                               if [ -r etc/lsb-release ]; then
+                                       lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")"
+                                       if [ "$lsbRelease" ]; then
+                                               # tag specific Ubuntu version number, if available (12.04, etc.)
+                                               $docker tag $repo:$suite $repo:$lsbRelease
+                                       fi
+                               fi
+                               ;;
+               esac
+       fi
+fi
+
+# cleanup
+cd "$returnTo"
+sudo rm -rf "$target"
diff --git a/docker/passenger/Dockerfile b/docker/passenger/Dockerfile
new file mode 100644 (file)
index 0000000..5e0fd76
--- /dev/null
@@ -0,0 +1,19 @@
+# Arvados passenger image
+
+FROM arvados/base
+MAINTAINER Ward Vandewege <ward@curoverse.com>
+
+# Install packages and build the passenger apache module
+
+RUN apt-get update -qq
+RUN apt-get install -qqy \
+        apt-utils git curl procps apache2-mpm-worker \
+        libcurl4-openssl-dev apache2-threaded-dev \
+        libapr1-dev libaprutil1-dev
+
+RUN cd /usr/src/arvados/services/api && \
+    /usr/local/rvm/bin/rvm-exec default bundle exec passenger-install-apache2-module --auto --languages ruby
+
+RUN cd /usr/src/arvados/services/api && \
+    /usr/local/rvm/bin/rvm-exec default bundle exec passenger-install-apache2-module --snippet > /etc/apache2/conf.d/passenger
+
diff --git a/docker/postgresql/Dockerfile b/docker/postgresql/Dockerfile
new file mode 100644 (file)
index 0000000..b6fed4e
--- /dev/null
@@ -0,0 +1,35 @@
+# PostgreSQL Docker container for Arvados.
+
+FROM arvados/debian:wheezy
+MAINTAINER Tim Pierce <twp@curoverse.com>
+
+# TODO(twp): parameterize variables via autoconf or similar.
+ENV POSTGRES_ROOT_PW   dummy_pw
+
+ENV ARVADOS_DEV_DB     arvados_development
+ENV ARVADOS_DEV_USER   arvados
+ENV ARVADOS_DEV_PW     dummy_pw
+
+ENV ARVADOS_TEST_DB    arvados_test
+ENV ARVADOS_TEST_USER  arvados
+ENV ARVADOS_TEST_PW    dummy_pw
+
+ENV ARVADOS_PROD_DB    arvados_production
+ENV ARVADOS_PROD_USER  arvados
+ENV ARVADOS_PROD_PW    dummy_pw
+
+# Install postgres and apache
+RUN apt-get -q -y install procps postgresql postgresql-server-dev-9.1
+
+# Configure databases and users.
+ADD postgresql.conf /etc/postgresql/9.1/main/
+ADD pg_hba.conf     /etc/postgresql/9.1/main/
+
+ADD postgresql_config.sh /tmp/postgresql_config.sh
+RUN /tmp/postgresql_config.sh
+RUN rm /tmp/postgresql_config.sh
+
+# Accept database connections on port 5432 from outside the container.
+EXPOSE 5432
+
+CMD ["/bin/su", "postgres", "-c", "/usr/lib/postgresql/9.1/bin/postgres -D /var/lib/postgresql/9.1/main -c config_file=/etc/postgresql/9.1/main/postgresql.conf"]
diff --git a/docker/postgresql/pg_hba.conf b/docker/postgresql/pg_hba.conf
new file mode 100644 (file)
index 0000000..c5486ad
--- /dev/null
@@ -0,0 +1,15 @@
+# For full documentation see
+# http://www.postgresql.org/docs/9.1/static/auth-pg-hba-conf.html
+
+# Database administrative login by Unix domain socket
+local   all             postgres                                peer
+
+# TYPE  DATABASE        USER            ADDRESS                 METHOD
+host    all             all             0.0.0.0/0               md5
+
+# "local" is for Unix domain socket connections only
+local   all             all                                     peer
+# IPv4 local connections:
+host    all             all             127.0.0.1/32            md5
+# IPv6 local connections:
+host    all             all             ::1/128                 md5
diff --git a/docker/postgresql/postgresql.conf b/docker/postgresql/postgresql.conf
new file mode 100644 (file)
index 0000000..3da7c1b
--- /dev/null
@@ -0,0 +1,20 @@
+# For full documentation on run-time settings see
+# http://www.postgresql.org/docs/9.2/static/runtime-config.html
+
+listen_addresses = '*'
+data_directory = '/var/lib/postgresql/9.1/main'                # use data in another directory
+hba_file = '/etc/postgresql/9.1/main/pg_hba.conf'      # host-based authentication file
+ident_file = '/etc/postgresql/9.1/main/pg_ident.conf'  # ident configuration file
+external_pid_file = '/var/run/postgresql/9.1-main.pid'         # write an extra PID file
+port = 5432                            # (change requires restart)
+max_connections = 100                  # (change requires restart)
+unix_socket_directory = '/var/run/postgresql'          # (change requires restart)
+ssl = true                             # (change requires restart)
+shared_buffers = 24MB                  # min 128kB
+log_line_prefix = '%t '                        # special values:
+datestyle = 'iso, mdy'
+lc_messages = 'C'                      # locale for system error message
+lc_monetary = 'C'                      # locale for monetary formatting
+lc_numeric = 'C'                       # locale for number formatting
+lc_time = 'C'                          # locale for time formatting
+default_text_search_config = 'pg_catalog.english'
diff --git a/docker/shell/Dockerfile b/docker/shell/Dockerfile
new file mode 100644 (file)
index 0000000..8235159
--- /dev/null
@@ -0,0 +1,24 @@
+# Slurm node Docker container.
+
+FROM arvados/base
+MAINTAINER Ward Vandewege <ward@curoverse.com>
+
+RUN apt-get update -qq
+RUN apt-get install -qqy \
+    python-pip python-pyvcf python-gflags python-google-api-python-client \
+    python-virtualenv libattr1-dev libfuse-dev python-dev python-llfuse fuse \
+    crunchstat python-arvados-fuse cron vim supervisor openssh-server
+
+ADD fuse.conf /etc/fuse.conf
+RUN chmod 644 /etc/fuse.conf
+
+ADD generated/superuser_token /tmp/superuser_token
+
+RUN /usr/local/rvm/bin/rvm-exec default gem install arvados-cli arvados
+
+# Supervisor.
+ADD supervisor.conf /etc/supervisor/conf.d/arvados.conf
+ADD generated/setup.sh /usr/local/bin/setup.sh
+
+# Start the supervisor.
+CMD ["/usr/bin/supervisord", "-n"]
diff --git a/docker/shell/fuse.conf b/docker/shell/fuse.conf
new file mode 100644 (file)
index 0000000..4ed21ba
--- /dev/null
@@ -0,0 +1,10 @@
+# Set the maximum number of FUSE mounts allowed to non-root users.
+# The default is 1000.
+#
+#mount_max = 1000
+
+# Allow non-root users to specify the 'allow_other' or 'allow_root'
+# mount options.
+#
+user_allow_other
+
diff --git a/docker/shell/setup.sh.in b/docker/shell/setup.sh.in
new file mode 100755 (executable)
index 0000000..03beb4b
--- /dev/null
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+USER_NAME="@@ARVADOS_USER_NAME@@"
+
+useradd $USER_NAME -s /bin/bash
+mkdir /home/$USER_NAME/.ssh -p
+
+# Install our token
+mkdir -p /home/$USER_NAME/.config/arvados;
+echo "ARVADOS_API_HOST=api" >> /home/$USER_NAME/.config/arvados/settings.conf
+echo "ARVADOS_API_HOST_INSECURE=yes" >> /home/$USER_NAME/.config/arvados/settings.conf
+echo "ARVADOS_API_TOKEN=$(cat /tmp/superuser_token)" >> /home/$USER_NAME/.config/arvados/settings.conf
+chmod 600 /home/$USER_NAME/.config/arvados/settings.conf
+
+chown $USER_NAME:$USER_NAME /home/$USER_NAME -R
+
+rm -f /tmp/superuser_token
+
+
diff --git a/docker/shell/superuser_token.in b/docker/shell/superuser_token.in
new file mode 100644 (file)
index 0000000..49bb34e
--- /dev/null
@@ -0,0 +1 @@
+@@API_SUPERUSER_SECRET@@
diff --git a/docker/shell/supervisor.conf b/docker/shell/supervisor.conf
new file mode 100644 (file)
index 0000000..97ad540
--- /dev/null
@@ -0,0 +1,15 @@
+[program:ssh]
+user=root
+command=/etc/init.d/ssh start
+startsecs=0
+
+[program:cron]
+user=root
+command=/etc/init.d/cron start
+startsecs=0
+
+[program:setup]
+user=root
+command=/usr/local/bin/setup.sh
+startsecs=0
+
diff --git a/docker/slurm/Dockerfile b/docker/slurm/Dockerfile
new file mode 100644 (file)
index 0000000..7e4284f
--- /dev/null
@@ -0,0 +1,12 @@
+# Slurm node Docker container.
+
+FROM arvados/base
+MAINTAINER Ward Vandewege <ward@curoverse.com>
+
+RUN apt-get update -qq
+RUN apt-get install -qqy slurm-llnl munge
+
+ADD munge.key /etc/munge/
+RUN chown munge:munge /etc/munge/munge.key && chmod 600 /etc/munge/munge.key
+ADD generated/slurm.conf /etc/slurm-llnl/
+
diff --git a/docker/slurm/munge.key b/docker/slurm/munge.key
new file mode 100644 (file)
index 0000000..34036a0
Binary files /dev/null and b/docker/slurm/munge.key differ
diff --git a/docker/slurm/slurm.conf.in b/docker/slurm/slurm.conf.in
new file mode 100644 (file)
index 0000000..7312a0e
--- /dev/null
@@ -0,0 +1,60 @@
+
+ControlMachine=api
+#SlurmUser=slurmd
+SlurmctldPort=6817
+SlurmdPort=6818
+AuthType=auth/munge
+#JobCredentialPrivateKey=/etc/slurm-llnl/slurm-key.pem
+#JobCredentialPublicCertificate=/etc/slurm-llnl/slurm-cert.pem
+StateSaveLocation=/tmp
+SlurmdSpoolDir=/tmp/slurmd
+SwitchType=switch/none
+MpiDefault=none
+SlurmctldPidFile=/var/run/slurmctld.pid
+SlurmdPidFile=/var/run/slurmd.pid
+ProctrackType=proctrack/pgid
+CacheGroups=0
+ReturnToService=2
+TaskPlugin=task/affinity
+#
+# TIMERS
+SlurmctldTimeout=300
+SlurmdTimeout=300
+InactiveLimit=0
+MinJobAge=300
+KillWait=30
+Waittime=0
+#
+# SCHEDULING
+SchedulerType=sched/backfill
+#SchedulerType=sched/builtin
+SchedulerPort=7321
+#SchedulerRootFilter=
+#SelectType=select/linear
+SelectType=select/cons_res
+SelectTypeParameters=CR_CPU_Memory
+FastSchedule=1
+#
+# LOGGING
+SlurmctldDebug=3
+#SlurmctldLogFile=
+SlurmdDebug=3
+#SlurmdLogFile=
+JobCompType=jobcomp/none
+#JobCompLoc=
+JobAcctGatherType=jobacct_gather/none
+#JobAcctLogfile=
+#JobAcctFrequency=
+#
+# COMPUTE NODES
+NodeName=DEFAULT
+# CPUs=8 State=UNKNOWN RealMemory=6967 Weight=6967
+PartitionName=DEFAULT MaxTime=INFINITE State=UP
+PartitionName=compute Default=YES Shared=yes
+#PartitionName=sysadmin Hidden=YES Shared=yes
+
+NodeName=compute[0-1]
+#NodeName=compute0 RealMemory=6967 Weight=6967
+
+PartitionName=compute Nodes=compute[0-1]
+PartitionName=crypto Nodes=compute[0-1]
diff --git a/docker/slurm/supervisor.conf b/docker/slurm/supervisor.conf
new file mode 100644 (file)
index 0000000..64f86b1
--- /dev/null
@@ -0,0 +1,7 @@
+[program:munge]
+user=root
+command=/etc/init.d/munge start
+
+[program:slurm]
+user=root
+command=/etc/init.d/slurm-llnl start
diff --git a/docker/sso/Dockerfile b/docker/sso/Dockerfile
new file mode 100644 (file)
index 0000000..99e3f4e
--- /dev/null
@@ -0,0 +1,30 @@
+# Arvados API server Docker container.
+
+FROM arvados/passenger
+MAINTAINER Ward Vandewege <ward@curoverse.com>
+
+RUN git clone git://github.com/curoverse/sso-devise-omniauth-provider.git /usr/src/sso-provider && \
+    /usr/local/rvm/bin/rvm-exec default bundle install --gemfile=/usr/src/sso-provider/Gemfile
+
+# Install generated config files
+ADD generated/secret_token.rb /usr/src/sso-provider/config/initializers/secret_token.rb
+ADD generated/seeds.rb /usr/src/sso-provider/db/seeds.rb
+ADD generated/apache2_vhost /etc/apache2/sites-available/sso-provider
+ADD generated/apache2_vhost /etc/apache2/sites-available/sso-provider
+
+# Configure Apache and Passenger.
+RUN a2dissite default && \
+    a2ensite sso-provider && \
+    a2enmod rewrite && \
+    a2enmod ssl && \
+    cd /usr/src/sso-provider && \
+    RAILS_ENV=production /usr/local/rvm/bin/rvm-exec default bundle exec rake db:setup && \
+    /usr/local/rvm/bin/rvm-exec default bundle exec rake assets:precompile && \
+    chown www-data:www-data tmp_omniauth log config.ru -R && \
+    chown www-data:www-data db db/production.sqlite3 && \
+    /bin/mkdir /var/run/apache2
+
+ADD apache2_foreground.sh /etc/apache2/foreground.sh
+
+# Start the supervisor.
+CMD ["/etc/apache2/foreground.sh"]
diff --git a/docker/sso/apache2_foreground.sh b/docker/sso/apache2_foreground.sh
new file mode 100755 (executable)
index 0000000..fc6028e
--- /dev/null
@@ -0,0 +1,7 @@
+#! /bin/bash
+
+read pid cmd state ppid pgrp session tty_nr tpgid rest < /proc/self/stat
+trap "kill -TERM -$pgrp; exit" EXIT TERM KILL SIGKILL SIGTERM SIGQUIT
+
+source /etc/apache2/envvars
+/usr/sbin/apache2 -D FOREGROUND
diff --git a/docker/sso/apache2_vhost.in b/docker/sso/apache2_vhost.in
new file mode 100644 (file)
index 0000000..554a86d
--- /dev/null
@@ -0,0 +1,52 @@
+# VirtualHost definition for the Arvados API server
+
+<VirtualHost *:80>
+  ServerName @@SSO_HOSTNAME@@.@@ARVADOS_DOMAIN@@
+  ServerAdmin sysadmin@curoverse.com
+
+  RedirectPermanent / https://@@SSO_HOSTNAME@@.@@ARVADOS_DOMAIN@@/
+
+  LogLevel warn
+  ErrorLog  ${APACHE_LOG_DIR}/error.log
+  CustomLog ${APACHE_LOG_DIR}/access.log combined
+
+</VirtualHost>
+
+<VirtualHost *:443>
+  ServerName @@SSO_HOSTNAME@@.@@ARVADOS_DOMAIN@@
+  ServerAdmin sysadmin@curoverse.com
+
+  RailsEnv production
+  RackBaseURI /
+  RailsAppSpawnerIdleTime 1200
+
+  # Enable streaming
+  PassengerBufferResponse off
+
+  # Index file and Document Root (where the public files are located)
+  DirectoryIndex index.html
+  DocumentRoot /usr/src/sso-provider/public
+
+  LogLevel warn
+  ErrorLog  ${APACHE_LOG_DIR}/ssl_error.log
+  CustomLog ${APACHE_LOG_DIR}/ssl_access.log combined
+
+  <Directory /usr/src/sso-provider/public>
+    Options Indexes FollowSymLinks MultiViews IncludesNoExec
+    AllowOverride None
+    Order allow,deny
+    allow from all
+  </Directory>
+
+  <IfModule mod_ssl.c>
+    SSLEngine on
+    # SSLCertificateChainFile /etc/ssl/certs/startcom.sub.class1.server.ca.pem
+    # SSLCACertificateFile    /etc/ssl/certs/startcom.ca.pem
+    # SSLCertificateFile      /etc/ssl/certs/qr1hi.arvadosapi.com.crt.pem
+    # SSLCertificateKeyFile   /etc/ssl/private/qr1hi.arvadosapi.com.key.pem
+    SSLCertificateFile    /etc/ssl/certs/ssl-cert-snakeoil.pem
+    SSLCertificateKeyFile /etc/ssl/private/ssl-cert-snakeoil.key
+    SetEnvIf User-Agent ".*MSIE.*" nokeepalive ssl-unclean-shutdown
+  </IfModule>
+
+</VirtualHost>
diff --git a/docker/sso/secret_token.rb.in b/docker/sso/secret_token.rb.in
new file mode 100644 (file)
index 0000000..bbe3f85
--- /dev/null
@@ -0,0 +1,7 @@
+# Be sure to restart your server when you modify this file.
+
+# Your secret key for verifying the integrity of signed cookies.
+# If you change this key, all old signed cookies will become invalid!
+# Make sure the secret is at least 30 characters and all random,
+# no regular words or you'll be exposed to dictionary attacks.
+CfiOauthProvider::Application.config.secret_token = '@@SSO_SECRET@@'
diff --git a/docker/sso/seeds.rb.in b/docker/sso/seeds.rb.in
new file mode 100644 (file)
index 0000000..b35b939
--- /dev/null
@@ -0,0 +1,9 @@
+
+Client.delete_all
+
+c = Client.new()
+c.name = "@@SSO_CLIENT_NAME@@"
+c.app_id = "@@SSO_CLIENT_APP_ID@@"
+c.app_secret = "@@SSO_CLIENT_SECRET@@"
+c.save!
+
diff --git a/docker/workbench/.gitignore b/docker/workbench/.gitignore
new file mode 100644 (file)
index 0000000..bf969c3
--- /dev/null
@@ -0,0 +1,3 @@
+apache2_vhost
+production.rb
+secret_token.rb
diff --git a/docker/workbench/Dockerfile b/docker/workbench/Dockerfile
new file mode 100644 (file)
index 0000000..94d9f87
--- /dev/null
@@ -0,0 +1,37 @@
+# Arvados Workbench Docker container.
+
+FROM arvados/passenger
+MAINTAINER Ward Vandewege <ward@curoverse.com>
+
+# We need graphviz for the provenance graphs
+RUN apt-get update -qq
+RUN apt-get install -qqy graphviz
+
+# Update Arvados source
+RUN /bin/mkdir -p /usr/src/arvados/apps
+ADD generated/workbench.tar.gz /usr/src/arvados/apps/
+ADD generated/workbench_rails_env /etc/
+RUN /bin/cp /usr/src/arvados/apps/workbench/config/environments/$(cat /etc/workbench_rails_env).rb.example /usr/src/arvados/apps/workbench/config/environments/$(cat /etc/workbench_rails_env).rb
+ADD generated/application.yml /usr/src/arvados/apps/workbench/config/application.yml
+
+RUN RAILS_ENV=$(cat /etc/workbench_rails_env) && \
+    /usr/local/rvm/bin/rvm-exec default bundle install --gemfile=/usr/src/arvados/apps/workbench/Gemfile && \
+    touch /usr/src/arvados/apps/workbench/log/$RAILS_ENV.log && \
+    chmod 666 /usr/src/arvados/apps/workbench/log/$RAILS_ENV.log && \
+    touch /usr/src/arvados/apps/workbench/db/$RAILS_ENV.sqlite3 && \
+    cd /usr/src/arvados/apps/workbench && \
+    /usr/local/rvm/bin/rvm-exec default bundle exec rake assets:precompile && \
+    chown -R www-data:www-data /usr/src/arvados/apps/workbench
+
+# Configure Apache
+ADD generated/apache2_vhost /etc/apache2/sites-available/workbench
+RUN \
+  a2dissite default && \
+  a2ensite workbench && \
+  a2enmod rewrite
+
+ADD apache2_foreground.sh /etc/apache2/foreground.sh
+
+# Start Apache
+CMD ["/etc/apache2/foreground.sh"]
+
diff --git a/docker/workbench/apache2_foreground.sh b/docker/workbench/apache2_foreground.sh
new file mode 100755 (executable)
index 0000000..5475ff0
--- /dev/null
@@ -0,0 +1,12 @@
+#! /bin/bash
+
+read pid cmd state ppid pgrp session tty_nr tpgid rest < /proc/self/stat
+trap "kill -TERM -$pgrp; exit" EXIT TERM KILL SIGKILL SIGTERM SIGQUIT
+
+# Override the default API server address if necessary.
+if [[ "$API_PORT_443_TCP_ADDR" != "" ]]; then
+  sed -i "s/localhost:9900/$API_PORT_443_TCP_ADDR/" /usr/src/arvados/apps/workbench/config/application.yml
+fi
+
+source /etc/apache2/envvars
+/usr/sbin/apache2 -D FOREGROUND
diff --git a/docker/workbench/apache2_vhost.in b/docker/workbench/apache2_vhost.in
new file mode 100644 (file)
index 0000000..05376ea
--- /dev/null
@@ -0,0 +1,26 @@
+<VirtualHost *:80>
+
+  ServerName workbench.@@API_HOSTNAME@@.@@ARVADOS_DOMAIN@@
+  ServerAdmin sysadmin@curoverse.com
+
+  RailsEnv @@WORKBENCH_RAILS_MODE@@
+  RackBaseURI /
+  RailsAppSpawnerIdleTime 1200
+
+  # Index file and Document Root (where the public files are located)
+  DirectoryIndex index.html
+  DocumentRoot /usr/src/arvados/apps/workbench/public
+
+  LogLevel warn
+  ErrorLog  ${APACHE_LOG_DIR}/error.log
+  CustomLog ${APACHE_LOG_DIR}/access.log combined
+
+  <Directory /usr/src/arvados/apps/workbench>
+    Options Indexes FollowSymLinks MultiViews IncludesNoExec
+    AllowOverride None
+    Order allow,deny
+    allow from all
+  </Directory>
+
+</VirtualHost>
+
diff --git a/docker/workbench/application.yml.in b/docker/workbench/application.yml.in
new file mode 100644 (file)
index 0000000..3140188
--- /dev/null
@@ -0,0 +1,27 @@
+# Copy this file to application.yml and edit to suit.
+#
+# Consult application.default.yml for the full list of configuration
+# settings.
+#
+# The order of precedence is:
+# 1. config/environments/{RAILS_ENV}.rb (deprecated)
+# 2. Section in application.yml corresponding to RAILS_ENV (e.g., development)
+# 3. Section in application.yml called "common"
+# 4. Section in application.default.yml corresponding to RAILS_ENV
+# 5. Section in application.default.yml called "common"
+
+common:
+  # At minimum, you need a nice long randomly generated secret_token here.
+  secret_token: @@WORKBENCH_SECRET@@
+
+  # You probably also want to point to your API server.
+  arvados_login_base: 'https://localhost:9900/login'
+  arvados_v1_base: 'https://localhost:9900/arvados/v1'
+  arvados_insecure_https: @@WORKBENCH_INSECURE_HTTPS@@
+
+  data_import_dir: @@WORKBENCH_DATA_IMPORT_DIR@@
+  data_export_dir: @@WORKBENCH_DATA_EXPORT_DIR@@
+
+  site_name: @@WORKBENCH_SITE_NAME@@
+  activation_contact_link: @@WORKBENCH_ACTIVATION_CONTACT_LINK@@
diff --git a/docker/workbench/production.rb.in b/docker/workbench/production.rb.in
new file mode 100644 (file)
index 0000000..bc3bd33
--- /dev/null
@@ -0,0 +1,91 @@
+ArvadosWorkbench::Application.configure do
+  # Settings specified here will take precedence over those in config/application.rb
+
+  # Code is not reloaded between requests
+  config.cache_classes = true
+
+  # Full error reports are disabled and caching is turned on
+  config.consider_all_requests_local       = false
+  config.action_controller.perform_caching = true
+
+  # Disable Rails's static asset server (Apache or nginx will already do this)
+  config.serve_static_assets = false
+
+  # Compress JavaScripts and CSS
+  config.assets.compress = true
+
+  # Don't fall back to the assets pipeline if a precompiled asset is missing
+  config.assets.compile = false
+
+  # Generate digests for assets URLs
+  config.assets.digest = true
+
+  # Defaults to nil and saved in the location specified by config.assets.prefix
+  # config.assets.manifest = YOUR_PATH
+
+  # Specifies the header that your server uses for sending files
+  # config.action_dispatch.x_sendfile_header = "X-Sendfile" # for apache
+  # config.action_dispatch.x_sendfile_header = 'X-Accel-Redirect' # for nginx
+
+  # Force all access to the app over SSL, use Strict-Transport-Security, and use secure cookies.
+  # config.force_ssl = true
+
+  # See everything in the log (default is :info)
+  # config.log_level = :debug
+
+  # Prepend all log lines with the following tags
+  # config.log_tags = [ :subdomain, :uuid ]
+
+  # Use a different logger for distributed setups
+  # config.logger = ActiveSupport::TaggedLogging.new(SyslogLogger.new)
+
+  # Use a different cache store in production
+  # config.cache_store = :mem_cache_store
+
+  # Enable serving of images, stylesheets, and JavaScripts from an asset server
+  # config.action_controller.asset_host = "http://assets.example.com"
+
+  # Precompile additional assets (application.js, application.css, and all non-JS/CSS are already added)
+  # config.assets.precompile += %w( search.js )
+
+  # Disable delivery errors; bad email addresses will be ignored
+  # config.action_mailer.raise_delivery_errors = false
+
+  # Enable threaded mode
+  # config.threadsafe!
+
+  # Enable locale fallbacks for I18n (makes lookups for any locale fall back to
+  # the I18n.default_locale when a translation cannot be found)
+  config.i18n.fallbacks = true
+
+  # Send deprecation notices to registered listeners
+  config.active_support.deprecation = :notify
+
+  # Log the query plan for queries taking more than this (works
+  # with SQLite, MySQL, and PostgreSQL)
+  # config.active_record.auto_explain_threshold_in_seconds = 0.5
+
+  # Log timing data for API transactions
+  config.profiling_enabled = false
+
+  config.arvados_login_base = 'https://' + ENV['API_PORT_443_TCP_ADDR'].to_s + '/login'
+  config.arvados_v1_base = 'https://' + ENV['API_PORT_443_TCP_ADDR'].to_s + '/arvados/v1'
+  config.arvados_insecure_https = @@WORKBENCH_INSECURE_HTTPS@@ # true = do not check server certificate
+
+  config.data_import_dir = '@@WORKBENCH_DATA_IMPORT_DIR@@'
+  config.data_export_dir = '@@WORKBENCH_DATA_EXPORT_DIR@@'
+
+  # Authentication stub: hard code pre-approved API tokens.
+  # config.accept_api_token = { rand(2**256).to_s(36) => true }
+  config.accept_api_token = {}
+
+  config.vcf_pipeline_uuid = '@@WORKBENCH_VCF_PIPELINE_UUID@@'
+
+  config.site_name = '@@WORKBENCH_SITE_NAME@@'
+  config.activation_contact_link = '@@WORKBENCH_ACTIVATION_CONTACT_LINK@@'
+  config.arvados_docsite = 'http://doc.arvados.org'
+
+  config.arvados_theme = 'default'
+
+  config.show_user_agreement_inline = false
+end
diff --git a/docker/workbench/secret_token.rb.in b/docker/workbench/secret_token.rb.in
new file mode 100644 (file)
index 0000000..91c1a5c
--- /dev/null
@@ -0,0 +1,7 @@
+# Be sure to restart your server when you modify this file.
+
+# Your secret key for verifying the integrity of signed cookies.
+# If you change this key, all old signed cookies will become invalid!
+# Make sure the secret is at least 30 characters and all random —
+# no regular words — or you'll be exposed to dictionary attacks.
+ArvadosWorkbench::Application.config.secret_token = '@@WORKBENCH_SECRET@@'
diff --git a/docker/workbench/workbench_rails_env.in b/docker/workbench/workbench_rails_env.in
new file mode 100644 (file)
index 0000000..f4f7638
--- /dev/null
@@ -0,0 +1 @@
+@@WORKBENCH_RAILS_MODE@@
\ No newline at end of file
diff --git a/sdk/cli/.gitignore b/sdk/cli/.gitignore
new file mode 100644 (file)
index 0000000..51463cf
--- /dev/null
@@ -0,0 +1,3 @@
+arvados-cli*gem
+tmp
+Gemfile.lock
diff --git a/sdk/cli/Gemfile b/sdk/cli/Gemfile
new file mode 100644 (file)
index 0000000..638a00c
--- /dev/null
@@ -0,0 +1,4 @@
+source 'https://rubygems.org'
+gemspec
+gem 'minitest', '>= 5.0.0'
+gem 'rake'
diff --git a/sdk/cli/Rakefile b/sdk/cli/Rakefile
new file mode 100644 (file)
index 0000000..cf4652f
--- /dev/null
@@ -0,0 +1,8 @@
+require 'rake/testtask'
+
+Rake::TestTask.new do |t|
+  t.libs << 'test'
+end
+
+desc 'Run tests'
+task default: :test
diff --git a/sdk/cli/arvados-cli.gemspec b/sdk/cli/arvados-cli.gemspec
new file mode 100644 (file)
index 0000000..5fcf546
--- /dev/null
@@ -0,0 +1,36 @@
+if not File.exists?('/usr/bin/git') then
+  STDERR.puts "\nGit binary not found, aborting. Please install git and run gem build from a checked out copy of the git repository.\n\n"
+  exit
+end
+
+git_timestamp, git_hash = `git log -n1 --first-parent --format=%ct:%H .`.chomp.split(":")
+git_timestamp = Time.at(git_timestamp.to_i).utc
+
+Gem::Specification.new do |s|
+  s.name        = 'arvados-cli'
+  s.version     = "0.1.#{git_timestamp.strftime('%Y%m%d%H%M%S')}"
+  s.date        = git_timestamp.strftime("%Y-%m-%d")
+  s.summary     = "Arvados CLI tools"
+  s.description = "Arvados command line tools, git commit #{git_hash}"
+  s.authors     = ["Arvados Authors"]
+  s.email       = 'gem-dev@curoverse.com'
+  #s.bindir      = '.'
+  s.licenses    = ['Apache License, Version 2.0']
+  s.files       = ["bin/arv","bin/arv-run-pipeline-instance","bin/arv-crunch-job","bin/arv-tag","bin/crunch-job"]
+  s.executables << "arv"
+  s.executables << "arv-run-pipeline-instance"
+  s.executables << "arv-crunch-job"
+  s.executables << "arv-tag"
+  s.required_ruby_version = '>= 2.1.0'
+  s.add_runtime_dependency 'arvados', '~> 0.1', '>= 0.1.0'
+  s.add_runtime_dependency 'google-api-client', '~> 0.6.3', '>= 0.6.3'
+  s.add_runtime_dependency 'activesupport', '~> 3.2', '>= 3.2.13'
+  s.add_runtime_dependency 'json', '~> 1.7', '>= 1.7.7'
+  s.add_runtime_dependency 'trollop', '~> 2.0'
+  s.add_runtime_dependency 'andand', '~> 1.3', '>= 1.3.3'
+  s.add_runtime_dependency 'oj', '~> 2.0', '>= 2.0.3'
+  s.add_runtime_dependency 'curb', '~> 0.8'
+  s.add_runtime_dependency('jwt', '>= 0.1.5', '< 1.0.0')
+  s.homepage    =
+    'https://arvados.org'
+end
diff --git a/sdk/cli/bin/arv b/sdk/cli/bin/arv
new file mode 100755 (executable)
index 0000000..a142dba
--- /dev/null
@@ -0,0 +1,683 @@
+#!/usr/bin/env ruby
+
+# Arvados cli client
+#
+# Ward Vandewege <ward@curoverse.com>
+
+require 'fileutils'
+
+if RUBY_VERSION < '1.9.3' then
+  abort <<-EOS
+#{$0.gsub(/^\.\//,'')} requires Ruby version 1.9.3 or higher.
+  EOS
+end
+
+begin
+  require 'curb'
+  require 'rubygems'
+  require 'google/api_client'
+  require 'json'
+  require 'pp'
+  require 'trollop'
+  require 'andand'
+  require 'oj'
+  require 'active_support/inflector'
+  require 'yaml'
+  require 'tempfile'
+  require 'net/http'
+rescue LoadError
+  abort <<-EOS
+
+Please install all required gems:
+
+  gem install activesupport andand curb google-api-client json oj trollop yaml
+
+  EOS
+end
+
+# Search for 'ENTRY POINT' to see where things get going
+
+ActiveSupport::Inflector.inflections do |inflect|
+  inflect.irregular 'specimen', 'specimens'
+  inflect.irregular 'human', 'humans'
+end
+
+module Kernel
+  def suppress_warnings
+    original_verbosity = $VERBOSE
+    $VERBOSE = nil
+    result = yield
+    $VERBOSE = original_verbosity
+    return result
+  end
+end
+
+class Google::APIClient
+ def discovery_document(api, version)
+   api = api.to_s
+   discovery_uri = self.discovery_uri(api, version)
+   discovery_uri_hash = Digest::MD5.hexdigest(discovery_uri)
+   return @discovery_documents[discovery_uri_hash] ||=
+     begin
+       # fetch new API discovery doc if stale
+       cached_doc = File.expand_path "~/.cache/arvados/discovery-#{discovery_uri_hash}.json" rescue nil
+
+       if cached_doc.nil? or not File.exist?(cached_doc) or (Time.now - File.mtime(cached_doc)) > 86400
+         response = self.execute!(:http_method => :get,
+                                  :uri => discovery_uri,
+                                  :authenticated => false)
+
+         begin
+           FileUtils.makedirs(File.dirname cached_doc)
+           File.open(cached_doc, 'w') do |f|
+             f.puts response.body
+           end
+         rescue
+           return JSON.load response.body
+         end
+       end
+
+       File.open(cached_doc) { |f| JSON.load f }
+     end
+ end
+end
+
+class ArvadosClient < Google::APIClient
+  def execute(*args)
+    if args.last.is_a? Hash
+      args.last[:headers] ||= {}
+      args.last[:headers]['Accept'] ||= 'application/json'
+    end
+    super(*args)
+  end
+end
+
+def init_config
+  # read authentication data from arvados configuration file if present
+  lineno = 0
+  config_file = File.expand_path('~/.config/arvados/settings.conf') rescue nil
+  if not config_file.nil? and File.exist? config_file then
+    File.open(config_file, 'r').each do |line|
+      lineno = lineno + 1
+      # skip comments
+      if line.match('^\s*#') then
+        next
+      end
+      var, val = line.chomp.split('=', 2)
+      # allow environment settings to override config files.
+      if var and val
+        ENV[var] ||= val
+      else
+        warn "#{config_file}: #{lineno}: could not parse `#{line}'"
+      end
+    end
+  end
+end
+
+
+subcommands = %w(copy create edit keep pipeline run tag ws)
+
+def check_subcommands client, arvados, subcommand, global_opts, remaining_opts
+  case subcommand
+  when 'create'
+    arv_create client, arvados, global_opts, remaining_opts
+  when 'edit'
+    arv_edit client, arvados, global_opts, remaining_opts
+  when 'copy', 'tag', 'ws', 'run'
+    exec `which arv-#{subcommand}`.strip, *remaining_opts
+  when 'keep'
+    @sub = remaining_opts.shift
+    if ['get', 'put', 'ls', 'normalize'].index @sub then
+      # Native Arvados
+      exec `which arv-#{@sub}`.strip, *remaining_opts
+    elsif ['less', 'check'].index @sub then
+      # wh* shims
+      exec `which wh#{@sub}`.strip, *remaining_opts
+    elsif @sub == 'docker'
+      exec `which arv-keepdocker`.strip, *remaining_opts
+    else
+      puts "Usage: arv keep [method] [--parameters]\n"
+      puts "Use 'arv keep [method] --help' to get more information about specific methods.\n\n"
+      puts "Available methods: ls, get, put, less, check, docker"
+    end
+    abort
+  when 'pipeline'
+    sub = remaining_opts.shift
+    if sub == 'run'
+      exec `which arv-run-pipeline-instance`.strip, *remaining_opts
+    else
+      puts "Usage: arv pipeline [method] [--parameters]\n"
+      puts "Use 'arv pipeline [method] --help' to get more information about specific methods.\n\n"
+      puts "Available methods: run"
+    end
+    abort
+  end
+end
+
+def command_exists?(command)
+  File.executable?(command) || ENV['PATH'].split(':').any? {|folder| File.executable?(File.join(folder, command))}
+end
+
+def run_editor path
+  pid = Process::fork
+  if pid.nil?
+    editor = nil
+    [ENV["VISUAL"], ENV["EDITOR"], "nano", "vi"].each do |e|
+      editor ||= e if e and command_exists? e
+    end
+    if editor.nil?
+      abort "Could not find any editor to use, please set $VISUAL or $EDITOR to your desired editor."
+    end
+    exec editor, path
+  else
+    Process.wait pid
+  end
+
+  if $?.exitstatus != 0
+    raise "Editor exited with status #{$?.exitstatus}"
+  end
+end
+
+def edit_and_commit_object initial_obj, tmp_stem, global_opts, &block
+
+  content = case global_opts[:format]
+            when 'json'
+              Oj.dump(initial_obj, :indent => 1)
+            when 'yaml'
+              initial_obj.to_yaml
+            else
+              abort "Unrecognized format #{global_opts[:format]}"
+            end
+
+  tmp_file = Tempfile.new([tmp_stem, ".#{global_opts[:format]}"])
+  tmp_file.write(content)
+  tmp_file.close
+
+  begin
+    error_text = ''
+    while true
+      begin
+        run_editor tmp_file.path
+
+        tmp_file.open
+        newcontent = tmp_file.read()
+        tmp_file.close
+
+        # Strip lines starting with '#'
+        newcontent = newcontent.lines.select {|l| !l.start_with? '#'}.join
+
+        # Load the new object
+        newobj = case global_opts[:format]
+                 when 'json'
+                   Oj.load(newcontent)
+                 when 'yaml'
+                   YAML.load(newcontent)
+                 end
+
+        yield newobj
+
+        break
+      rescue => e
+        can_retry = true
+        if e.is_a? Psych::SyntaxError
+          this_error = "YAML error parsing your input: #{e}"
+        elsif e.is_a? JSON::ParserError or e.is_a? Oj::ParseError
+          this_error = "JSON error parsing your input: #{e}"
+        elsif e.is_a? ArvadosAPIError
+          this_error = "API responded with error #{e}"
+        else
+          this_error = "#{e.class}: #{e}"
+          can_retry = false
+        end
+        puts this_error
+
+        tmp_file.open
+        newcontent = tmp_file.read()
+        tmp_file.close
+
+        if newcontent == error_text or not can_retry
+          FileUtils::cp tmp_file.path, tmp_file.path + ".saved"
+          puts "File is unchanged, edit aborted." if can_retry
+          abort "Saved contents to " + tmp_file.path + ".saved"
+        else
+          tmp_file.open
+          tmp_file.truncate 0
+          error_text = this_error.to_s.lines.map {|l| '# ' + l}.join + "\n"
+          error_text += "# Please fix the error and try again.\n"
+          error_text += newcontent.lines.select {|l| !l.start_with? '#'}.join
+          tmp_file.write error_text
+          tmp_file.close
+        end
+      end
+    end
+  ensure
+    tmp_file.close(true)
+  end
+
+  nil
+end
+
+class ArvadosAPIError < RuntimeError
+end
+
+def check_response result
+  begin
+    results = JSON.parse result.body
+  rescue JSON::ParserError, Oj::ParseError => e
+    raise "Failed to parse server response:\n" + e.to_s
+  end
+
+  if result.response.status != 200
+    raise ArvadosAPIError.new("#{result.response.status}: #{
+                              ((results['errors'] && results['errors'].join('\n')) ||
+                                Net::HTTPResponse::CODE_TO_OBJ[status.to_s].to_s.sub(/^Net::HTTP/, '').titleize)}")
+  end
+
+  results
+end
+
+def arv_edit client, arvados, global_opts, remaining_opts
+  uuid = remaining_opts.shift
+  if uuid.nil? or uuid == "-h" or uuid == "--help"
+    puts head_banner
+    puts "Usage: arv edit [uuid] [fields...]\n\n"
+    puts "Fetch the specified Arvados object, select the specified fields, \n"
+    puts "open an interactive text editor on a text representation (json or\n"
+    puts "yaml, use --format) and then update the object.  Will use 'nano'\n"
+    puts "by default, customize with the EDITOR or VISUAL environment variable.\n"
+    exit 255
+  end
+
+  if not $stdout.tty?
+    puts "Not connected to a TTY, cannot run interactive editor."
+    exit 1
+  end
+
+  # determine controller
+
+  m = /([a-z0-9]{5})-([a-z0-9]{5})-([a-z0-9]{15})/.match uuid
+  if !m
+    if /^[a-f0-9]{32}/.match uuid
+      abort "Arvados collections are not editable."
+    else
+      abort "'#{uuid}' does not appear to be an Arvados uuid"
+    end
+  end
+
+  rsc = nil
+  arvados.discovery_document["resources"].each do |k,v|
+    klass = k.singularize.camelize
+    dig = Digest::MD5.hexdigest(klass).to_i(16).to_s(36)[-5..-1]
+    if dig == m[2]
+      rsc = k
+    end
+  end
+
+  if rsc.nil?
+    abort "Could not determine resource type #{m[2]}"
+  end
+
+  begin
+    result = client.execute(:api_method => eval('arvados.' + rsc + '.get'),
+                            :parameters => {"uuid" => uuid},
+                            :authenticated => false,
+                            :headers => {
+                              authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+                            })
+    oldobj = check_response result
+  rescue => e
+    abort "Server error: #{e}"
+  end
+
+  if remaining_opts.length > 0
+    oldobj.select! { |k, v| remaining_opts.include? k }
+  end
+
+  edit_and_commit_object oldobj, uuid, global_opts do |newobj|
+    newobj.select! {|k| newobj[k] != oldobj[k]}
+    if !newobj.empty?
+      result = client.execute(:api_method => eval('arvados.' + rsc + '.update'),
+                     :parameters => {"uuid" => uuid},
+                     :body_object => { rsc.singularize => newobj },
+                     :authenticated => false,
+                     :headers => {
+                       authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+                     })
+      results = check_response result
+      puts "Updated object #{results['uuid']}"
+    else
+      puts "Object is unchanged, did not update."
+    end
+  end
+
+  exit 0
+end
+
+def arv_create client, arvados, global_opts, remaining_opts
+  types = resource_types(arvados.discovery_document)
+  create_opts = Trollop::options do
+    opt :project_uuid, "Project uuid in which to create the object", :type => :string
+    stop_on resource_types(arvados.discovery_document)
+  end
+
+  object_type = remaining_opts.shift
+  if object_type.nil?
+    abort "Missing resource type, must be one of #{types.join ', '}"
+  end
+
+  rsc = arvados.discovery_document["resources"].keys.select { |k| object_type == k.singularize }
+  if rsc.empty?
+    abort "Could not determine resource type #{object_type}"
+  end
+  rsc = rsc.first
+
+  discovered_params = arvados.discovery_document["resources"][rsc]["methods"]["create"]["parameters"]
+  method_opts = Trollop::options do
+    banner head_banner
+    banner "Usage: arv create [--project-uuid] #{object_type} [create parameters]"
+    banner ""
+    banner "This method supports the following parameters:"
+    banner ""
+    discovered_params.each do |k,v|
+      opts = Hash.new()
+      opts[:type] = v["type"].to_sym if v.include?("type")
+      if [:datetime, :text, :object, :array].index opts[:type]
+        opts[:type] = :string                       # else trollop bork
+      end
+      opts[:default] = v["default"] if v.include?("default")
+      opts[:default] = v["default"].to_i if opts[:type] == :integer
+      opts[:default] = to_boolean(v["default"]) if opts[:type] == :boolean
+      opts[:required] = true if v.include?("required") and v["required"]
+      description = ''
+      description = '  ' + v["description"] if v.include?("description")
+      opt k.to_sym, description, opts
+    end
+  end
+
+  initial_obj = {}
+  if create_opts[:project_uuid]
+    initial_obj["owner_uuid"] = create_opts[:project_uuid]
+  end
+
+  edit_and_commit_object initial_obj, "", global_opts do |newobj|
+    result = client.execute(:api_method => eval('arvados.' + rsc + '.create'),
+                   :parameters => method_opts,
+                   :body_object => {object_type => newobj},
+                   :authenticated => false,
+                   :headers => {
+                     authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+                   })
+    results = check_response result
+    puts "Created object #{results['uuid']}"
+  end
+
+  exit 0
+end
+
+def to_boolean(s)
+  !!(s =~ /^(true|t|yes|y|1)$/i)
+end
+
+def head_banner
+  "Arvados command line client\n"
+end
+
+def help_methods(discovery_document, resource, method=nil)
+  banner = head_banner
+  banner += "Usage: arv #{resource} [method] [--parameters]\n"
+  banner += "Use 'arv #{resource} [method] --help' to get more information about specific methods.\n\n"
+  banner += "The #{resource} resource supports the following methods:"
+  banner += "\n\n"
+  discovery_document["resources"][resource.pluralize]["methods"].
+    each do |k,v|
+    description = ''
+    if v.include? "description"
+      # add only the first line of the discovery doc description
+      description = '  ' + v["description"].split("\n").first.chomp
+    end
+    banner += "   #{sprintf("%20s",k)}#{description}\n"
+  end
+  banner += "\n"
+  STDERR.puts banner
+
+  if not method.nil? and method != '--help' and method != '-h' then
+    abort "Unknown method #{method.inspect} " +
+                  "for resource #{resource.inspect}"
+  end
+  exit 255
+end
+
+def help_resources(option_parser, discovery_document, resource)
+  option_parser.educate
+  exit 255
+end
+
+def resource_types discovery_document
+  resource_types = Array.new()
+  discovery_document["resources"].each do |k,v|
+    resource_types << k.singularize
+  end
+  resource_types
+end
+
+def parse_arguments(discovery_document, subcommands)
+  resources_and_subcommands = resource_types(discovery_document) + subcommands
+
+  option_parser = Trollop::Parser.new do
+    version __FILE__
+    banner head_banner
+    banner "Usage: arv [--flags] subcommand|resource [method] [--parameters]"
+    banner ""
+    banner "Available flags:"
+
+    opt :dry_run, "Don't actually do anything", :short => "-n"
+    opt :verbose, "Print some things on stderr"
+    opt :format,
+        "Set the output format. Must be one of json (default), yaml or uuid.",
+        :type => :string,
+        :default => 'json'
+    opt :short, "Return only UUIDs (equivalent to --format=uuid)"
+
+    banner ""
+    banner "Use 'arv subcommand|resource --help' to get more information about a particular command or resource."
+    banner ""
+    banner "Available subcommands: #{subcommands.join(', ')}"
+    banner ""
+
+    banner "Available resources: #{discovery_document['resources'].keys.map { |k| k.singularize }.join(', ')}"
+
+    banner ""
+    banner "Additional options:"
+
+    conflicts :short, :format
+    stop_on resources_and_subcommands
+  end
+
+  global_opts = Trollop::with_standard_exception_handling option_parser do
+    o = option_parser.parse ARGV
+  end
+
+  unless %w(json yaml uuid).include?(global_opts[:format])
+    $stderr.puts "#{$0}: --format must be one of json, yaml or uuid."
+    $stderr.puts "Use #{$0} --help for more information."
+    abort
+  end
+
+  if global_opts[:short]
+    global_opts[:format] = 'uuid'
+  end
+
+  resource = ARGV.shift
+
+  if not subcommands.include? resource
+    if not resources_and_subcommands.include?(resource)
+      puts "Resource or subcommand '#{resource}' is not recognized.\n\n" if !resource.nil?
+      help_resources(option_parser, discovery_document, resource)
+    end
+
+    method = ARGV.shift
+    if not (discovery_document["resources"][resource.pluralize]["methods"].
+            include?(method))
+      help_methods(discovery_document, resource, method)
+    end
+
+    discovered_params = discovery_document\
+    ["resources"][resource.pluralize]\
+    ["methods"][method]["parameters"]
+    method_opts = Trollop::options do
+      banner head_banner
+      banner "Usage: arv #{resource} #{method} [--parameters]"
+      banner ""
+      banner "This method supports the following parameters:"
+      banner ""
+      discovered_params.each do |k,v|
+        opts = Hash.new()
+        opts[:type] = v["type"].to_sym if v.include?("type")
+        if [:datetime, :text, :object, :array].index opts[:type]
+          opts[:type] = :string                       # else trollop bork
+        end
+        opts[:default] = v["default"] if v.include?("default")
+        opts[:default] = v["default"].to_i if opts[:type] == :integer
+        opts[:default] = to_boolean(v["default"]) if opts[:type] == :boolean
+        opts[:required] = true if v.include?("required") and v["required"]
+        description = ''
+        description = '  ' + v["description"] if v.include?("description")
+        opt k.to_sym, description, opts
+      end
+
+      body_object = discovery_document["resources"][resource.pluralize]["methods"][method]["request"]
+      if body_object and discovered_params[resource].nil?
+        is_required = true
+        if body_object["required"] == false
+          is_required = false
+        end
+        opt resource.to_sym, "#{resource} (request body)", {
+          required: is_required,
+          type: :string
+        }
+      end
+    end
+
+    discovered_params.each do |k,v|
+      k = k.to_sym
+      if ['object', 'array'].index(v["type"]) and method_opts.has_key? k
+        if method_opts[k].andand.match /^\//
+          method_opts[k] = File.open method_opts[k], 'rb' do |f| f.read end
+        end
+      end
+    end
+  end
+
+  return resource, method, method_opts, global_opts, ARGV
+end
+
+#
+# ENTRY POINT
+#
+
+init_config
+
+ENV['ARVADOS_API_VERSION'] ||= 'v1'
+
+if not ENV.include?('ARVADOS_API_HOST') or not ENV.include?('ARVADOS_API_TOKEN') then
+  abort <<-EOS
+ARVADOS_API_HOST and ARVADOS_API_TOKEN need to be defined as environment variables.
+  EOS
+end
+
+# do this if you're testing with a dev server and you don't care about SSL certificate checks:
+if ENV['ARVADOS_API_HOST_INSECURE']
+  suppress_warnings { OpenSSL::SSL::VERIFY_PEER = OpenSSL::SSL::VERIFY_NONE }
+end
+
+begin
+  client = ArvadosClient.new(:host => ENV['ARVADOS_API_HOST'], :application_name => 'arvados-cli', :application_version => '1.0')
+  arvados = client.discovered_api('arvados', ENV['ARVADOS_API_VERSION'])
+rescue Exception => e
+  puts "Failed to connect to Arvados API server: #{e}"
+  exit 1
+end
+
+# Parse arguments here
+resource_schema, method, method_opts, global_opts, remaining_opts = parse_arguments(arvados.discovery_document, subcommands)
+
+check_subcommands client, arvados, resource_schema, global_opts, remaining_opts
+
+controller = resource_schema.pluralize
+
+api_method = 'arvados.' + controller + '.' + method
+
+if global_opts[:dry_run]
+  if global_opts[:verbose]
+    $stderr.puts "#{api_method} #{method_opts.inspect}"
+  end
+  exit
+end
+
+request_parameters = {_profile:true}.merge(method_opts)
+resource_body = request_parameters.delete(resource_schema.to_sym)
+if resource_body
+  request_body = {
+    resource_schema => resource_body
+  }
+else
+  request_body = nil
+end
+
+case api_method
+when
+  'arvados.jobs.log_tail_follow'
+
+  # Special case for methods that respond with data streams rather
+  # than JSON (TODO: use the discovery document instead of a static
+  # list of methods)
+  uri_s = eval(api_method).generate_uri(request_parameters)
+  Curl::Easy.perform(uri_s) do |curl|
+    curl.headers['Accept'] = 'text/plain'
+    curl.headers['Authorization'] = "OAuth2 #{ENV['ARVADOS_API_TOKEN']}"
+    if ENV['ARVADOS_API_HOST_INSECURE']
+      curl.ssl_verify_peer = false
+      curl.ssl_verify_host = false
+    end
+    if global_opts[:verbose]
+      curl.on_header { |data| $stderr.write data }
+    end
+    curl.on_body { |data| $stdout.write data }
+  end
+  exit 0
+else
+  result = client.execute(:api_method => eval(api_method),
+                          :parameters => request_parameters,
+                          :body_object => request_body,
+                          :authenticated => false,
+                          :headers => {
+                            authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+                          })
+end
+
+begin
+  results = JSON.parse result.body
+rescue JSON::ParserError => e
+  abort "Failed to parse server response:\n" + e.to_s
+end
+
+if results["errors"] then
+  abort "Error: #{results["errors"][0]}"
+end
+
+case global_opts[:format]
+when 'json'
+  puts Oj.dump(results, :indent => 1)
+when 'yaml'
+  puts results.to_yaml
+else
+  if results["items"] and results["kind"].match /list$/i
+    results['items'].each do |i| puts i['uuid'] end
+  elsif results['uuid'].nil?
+    abort("Response did not include a uuid:\n" +
+          Oj.dump(results, :indent => 1) +
+          "\n")
+  else
+    puts results['uuid']
+  end
+end
diff --git a/sdk/cli/bin/arv-copy b/sdk/cli/bin/arv-copy
new file mode 120000 (symlink)
index 0000000..1ad64f4
--- /dev/null
@@ -0,0 +1 @@
+../../python/bin/arv-copy
\ No newline at end of file
diff --git a/sdk/cli/bin/arv-crunch-job b/sdk/cli/bin/arv-crunch-job
new file mode 100755 (executable)
index 0000000..8455d99
--- /dev/null
@@ -0,0 +1,2 @@
+#!/usr/bin/env ruby
+exec File.join(File.dirname(File.realpath(__FILE__)), 'crunch-job'), *ARGV
diff --git a/sdk/cli/bin/arv-get b/sdk/cli/bin/arv-get
new file mode 120000 (symlink)
index 0000000..bfd8274
--- /dev/null
@@ -0,0 +1 @@
+../../python/bin/arv-get
\ No newline at end of file
diff --git a/sdk/cli/bin/arv-keepdocker b/sdk/cli/bin/arv-keepdocker
new file mode 120000 (symlink)
index 0000000..f35d645
--- /dev/null
@@ -0,0 +1 @@
+../../python/bin/arv-keepdocker
\ No newline at end of file
diff --git a/sdk/cli/bin/arv-ls b/sdk/cli/bin/arv-ls
new file mode 120000 (symlink)
index 0000000..64613d9
--- /dev/null
@@ -0,0 +1 @@
+../../python/bin/arv-ls
\ No newline at end of file
diff --git a/sdk/cli/bin/arv-mount b/sdk/cli/bin/arv-mount
new file mode 120000 (symlink)
index 0000000..7ad787e
--- /dev/null
@@ -0,0 +1 @@
+../../../services/fuse/bin/arv-mount
\ No newline at end of file
diff --git a/sdk/cli/bin/arv-normalize b/sdk/cli/bin/arv-normalize
new file mode 120000 (symlink)
index 0000000..beee344
--- /dev/null
@@ -0,0 +1 @@
+../../python/bin/arv-normalize
\ No newline at end of file
diff --git a/sdk/cli/bin/arv-put b/sdk/cli/bin/arv-put
new file mode 120000 (symlink)
index 0000000..487caf4
--- /dev/null
@@ -0,0 +1 @@
+../../python/bin/arv-put
\ No newline at end of file
diff --git a/sdk/cli/bin/arv-run-pipeline-instance b/sdk/cli/bin/arv-run-pipeline-instance
new file mode 100755 (executable)
index 0000000..63313fc
--- /dev/null
@@ -0,0 +1,819 @@
+#!/usr/bin/env ruby
+
+# == Synopsis
+#
+#  arv-run-pipeline-instance --template pipeline-template-uuid [options] [--] [parameters]
+#  arv-run-pipeline-instance --instance pipeline-instance-uuid [options]
+#
+# Satisfy a pipeline template by finding or submitting a mapreduce job
+# for each pipeline component.
+#
+# == Options
+#
+# [--template uuid] Use the specified pipeline template.
+#
+# [--template path] Load the pipeline template from the specified
+#                   local file.
+#
+# [--instance uuid] Use the specified pipeline instance.
+#
+# [-n, --dry-run] Do not start any new jobs or wait for existing jobs
+#                 to finish. Just find out whether jobs are finished,
+#                 queued, or running for each component
+#
+# [--submit] Do not try to satisfy any components. Just
+#                          create an instance, print its UUID to
+#                          stdout, and exit.
+#
+# [--no-wait] Make only as much progress as possible without entering
+#             a sleep/poll loop.
+#
+# [--no-reuse] Do not reuse existing jobs to satisfy pipeline
+#              components. Submit a new job for every component.
+#
+# [--debug] Print extra debugging information on stderr.
+#
+# [--debug-level N] Increase amount of debugging information. Default
+#                   1, possible range 0..3.
+#
+# [--status-text path] Print plain text status report to a file or
+#                      fifo. Default: /dev/stdout
+#
+# [--status-json path] Print JSON status report to a file or
+#                      fifo. Default: /dev/null
+#
+# [--description] Description for the pipeline instance.
+#
+# == Parameters
+#
+# [param_name=param_value]
+#
+# [param_name param_value] Set (or override) the default value for
+#                          every parameter with the given name.
+#
+# [component_name::param_name=param_value]
+# [component_name::param_name param_value]
+# [--component_name::param_name=param_value]
+# [--component_name::param_name param_value] Set the value of a
+#                                            parameter for a single
+#                                            component.
+#
+class WhRunPipelineInstance
+end
+
+if RUBY_VERSION < '1.9.3' then
+  abort <<-EOS
+#{$0.gsub(/^\.\//,'')} requires Ruby version 1.9.3 or higher.
+  EOS
+end
+
+$arvados_api_version = ENV['ARVADOS_API_VERSION'] || 'v1'
+$arvados_api_host = ENV['ARVADOS_API_HOST'] or
+  abort "#{$0}: fatal: ARVADOS_API_HOST environment variable not set."
+$arvados_api_token = ENV['ARVADOS_API_TOKEN'] or
+  abort "#{$0}: fatal: ARVADOS_API_TOKEN environment variable not set."
+
+begin
+  require 'arvados'
+  require 'rubygems'
+  require 'json'
+  require 'pp'
+  require 'trollop'
+  require 'google/api_client'
+rescue LoadError => l
+  puts $:
+  abort <<-EOS
+#{$0}: fatal: #{l.message}
+Some runtime dependencies may be missing.
+Try: gem install arvados pp google-api-client json trollop
+  EOS
+end
+
+def debuglog(message, verbosity=1)
+  $stderr.puts "#{File.split($0).last} #{$$}: #{message}" if $debuglevel >= verbosity
+end
+
+module Kernel
+  def suppress_warnings
+    original_verbosity = $VERBOSE
+    $VERBOSE = nil
+    result = yield
+    $VERBOSE = original_verbosity
+    return result
+  end
+end
+
+if $arvados_api_host.match /local/
+  # You probably don't care about SSL certificate checks if you're
+  # testing with a dev server.
+  suppress_warnings { OpenSSL::SSL::VERIFY_PEER = OpenSSL::SSL::VERIFY_NONE }
+end
+
+
+# Parse command line options (the kind that control the behavior of
+# this program, that is, not the pipeline component parameters).
+
+p = Trollop::Parser.new do
+  version __FILE__
+  opt(:dry_run,
+      "Do not start any new jobs or wait for existing jobs to finish. Just find out whether jobs are finished, queued, or running for each component.",
+      :type => :boolean,
+      :short => :n)
+  opt(:status_text,
+      "Store plain text status in given file.",
+      :short => :none,
+      :type => :string,
+      :default => '/dev/stdout')
+  opt(:status_json,
+      "Store json-formatted pipeline in given file.",
+      :short => :none,
+      :type => :string,
+      :default => '/dev/null')
+  opt(:no_wait,
+      "Do not wait for jobs to finish. Just look up status, submit new jobs if needed, and exit.",
+      :short => :none,
+      :type => :boolean)
+  opt(:no_reuse,
+      "Do not reuse existing jobs to satisfy pipeline components. Submit a new job for every component.",
+      :short => :none,
+      :type => :boolean)
+  opt(:debug,
+      "Print extra debugging information on stderr.",
+      :type => :boolean)
+  opt(:debug_level,
+      "Set debug verbosity level.",
+      :short => :none,
+      :type => :integer)
+  opt(:template,
+      "UUID of pipeline template, or path to local pipeline template file.",
+      :short => :none,
+      :type => :string)
+  opt(:instance,
+      "UUID of pipeline instance.",
+      :short => :none,
+      :type => :string)
+  opt(:submit,
+      "Submit the pipeline instance to the server, and exit. Let the Crunch dispatch service satisfy the components by finding/running jobs.",
+      :short => :none,
+      :type => :boolean)
+  opt(:run_pipeline_here,
+      "Manage the pipeline instance in-process. Submit jobs to Crunch as needed. Do not exit until the pipeline finishes (or fails).",
+      :short => :none,
+      :type => :boolean)
+  opt(:run_jobs_here,
+      "Run jobs in the local terminal session instead of submitting them to Crunch. Implies --run-pipeline-here. Note: this results in a significantly different job execution environment, and some Crunch features are not supported. It can be necessary to modify a pipeline in order to make it run this way.",
+      :short => :none,
+      :type => :boolean)
+  opt(:run_here,
+      "Synonym for --run-jobs-here.",
+      :short => :none,
+      :type => :boolean)
+  opt(:description,
+      "Description for the pipeline instance.",
+      :short => :none,
+      :type => :string)
+  stop_on [:'--']
+end
+$options = Trollop::with_standard_exception_handling p do
+  p.parse ARGV
+end
+$debuglevel = $options[:debug_level] || ($options[:debug] && 1) || 0
+
+$options[:run_jobs_here] ||= $options[:run_here] # old flag name
+$options[:run_pipeline_here] ||= $options[:run_jobs_here] # B requires A
+
+if $options[:instance]
+  if $options[:template] or $options[:submit]
+    abort "#{$0}: syntax error: --instance cannot be combined with --template or --submit."
+  end
+elsif not $options[:template]
+  puts "error: you must supply a --template or --instance."
+  p.educate
+  abort
+end
+
+if $options[:run_pipeline_here] == $options[:submit]
+  abort "#{$0}: error: you must supply --run-pipeline-here, --run-jobs-here, or --submit."
+end
+
+# Suppress SSL certificate checks if ARVADOS_API_HOST_INSECURE
+
+module Kernel
+  def suppress_warnings
+    original_verbosity = $VERBOSE
+    $VERBOSE = nil
+    result = yield
+    $VERBOSE = original_verbosity
+    return result
+  end
+end
+
+if ENV['ARVADOS_API_HOST_INSECURE']
+  suppress_warnings { OpenSSL::SSL::VERIFY_PEER = OpenSSL::SSL::VERIFY_NONE }
+end
+
+# Set up the API client.
+
+$arv = Arvados.new api_version: 'v1'
+$client = $arv.client
+$arvados = $arv.arvados_api
+
+class PipelineInstance
+  def self.find(uuid)
+    result = $client.execute(:api_method => $arvados.pipeline_instances.get,
+                             :parameters => {
+                               :uuid => uuid
+                             },
+                             :authenticated => false,
+                             :headers => {
+                               authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+                             })
+    j = JSON.parse result.body, :symbolize_names => true
+    unless j.is_a? Hash and j[:uuid]
+      debuglog "Failed to get pipeline_instance: #{j[:errors] rescue nil}", 0
+      nil
+    else
+      debuglog "Retrieved pipeline_instance #{j[:uuid]}"
+      self.new(j)
+    end
+  end
+  def self.create(attributes)
+    result = $client.execute(:api_method => $arvados.pipeline_instances.create,
+                             :body_object => {
+                               :pipeline_instance => attributes
+                             },
+                             :authenticated => false,
+                             :headers => {
+                               authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+                             })
+    j = JSON.parse result.body, :symbolize_names => true
+    unless j.is_a? Hash and j[:uuid]
+      abort "\n#{Time.now} -- pipeline_template #{@template[:uuid]}\nFailed to create pipeline_instance: #{j[:errors] rescue nil} #{j.inspect}"
+    end
+    debuglog "Created pipeline instance: #{j[:uuid]}"
+    self.new(j)
+  end
+  def save
+    result = $client.execute(:api_method => $arvados.pipeline_instances.update,
+                             :parameters => {
+                               :uuid => @pi[:uuid]
+                             },
+                             :body_object => {
+                               :pipeline_instance => @attributes_to_update
+                             },
+                             :authenticated => false,
+                             :headers => {
+                               authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+                             })
+    j = JSON.parse result.body, :symbolize_names => true
+    unless j.is_a? Hash and j[:uuid]
+      debuglog "Failed to save pipeline_instance: #{j[:errors] rescue nil}", 0
+      nil
+    else
+      @attributes_to_update = {}
+      @pi = j
+    end
+  end
+  def []=(x,y)
+    @attributes_to_update[x] = y
+    @pi[x] = y
+  end
+  def [](x)
+    @pi[x]
+  end
+
+  def log_stderr(msg)
+    $arv.log.create log: {
+      event_type: 'stderr',
+      object_uuid: self[:uuid],
+      owner_uuid: self[:owner_uuid],
+      properties: {"text" => msg},
+    }
+  end
+
+  protected
+  def initialize(j)
+    @attributes_to_update = {}
+    @pi = j
+  end
+end
+
+class JobCache
+  def self.get(uuid)
+    @cache ||= {}
+    result = $client.execute(:api_method => $arvados.jobs.get,
+                             :parameters => {
+                               :uuid => uuid
+                             },
+                             :authenticated => false,
+                             :headers => {
+                               authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+                             })
+    @cache[uuid] = JSON.parse result.body, :symbolize_names => true
+  end
+  def self.where(conditions)
+    result = $client.execute(:api_method => $arvados.jobs.list,
+                             :parameters => {
+                               :limit => 10000,
+                               :where => conditions.to_json
+                             },
+                             :authenticated => false,
+                             :headers => {
+                               authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+                             })
+    list = JSON.parse result.body, :symbolize_names => true
+    if list and list[:items].is_a? Array
+      list[:items]
+    else
+      []
+    end
+  end
+  def self.create(pipeline, component, job, create_params)
+    @cache ||= {}
+
+    body = {job: no_nil_values(job)}.merge(no_nil_values(create_params))
+
+    result = $client.execute(:api_method => $arvados.jobs.create,
+                             :body_object => body,
+                             :authenticated => false,
+                             :headers => {
+                               authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+                             })
+    j = JSON.parse result.body, :symbolize_names => true
+    if j.is_a? Hash and j[:uuid]
+      @cache[j[:uuid]] = j
+    else
+      debuglog "create job: #{j[:errors] rescue nil} with attributes #{body}", 0
+
+      msg = ""
+      j[:errors].each do |err|
+        msg += "Error creating job for component #{component}: #{err}\n"
+      end
+      msg += "Job submission was: #{body.to_json}"
+
+      pipeline.log_stderr(msg)
+      nil
+    end
+  end
+
+  protected
+
+  def self.no_nil_values(hash)
+    hash.reject { |key, value| value.nil? }
+  end
+end
+
+class WhRunPipelineInstance
+  attr_reader :instance
+
+  def initialize(_options)
+    @options = _options
+  end
+
+  def fetch_template(template)
+    if template.match /[^-0-9a-z]/
+      # Doesn't look like a uuid -- use it as a filename.
+      @template = JSON.parse File.read(template), :symbolize_names => true
+    else
+      result = $client.execute(:api_method => $arvados.pipeline_templates.get,
+                               :parameters => {
+                                 :uuid => template
+                               },
+                               :authenticated => false,
+                               :headers => {
+                                 authorization: 'OAuth2 '+ENV['ARVADOS_API_TOKEN']
+                               })
+      @template = JSON.parse result.body, :symbolize_names => true
+      if !@template[:uuid]
+        abort "#{$0}: fatal: failed to retrieve pipeline template #{template} #{@template[:errors].inspect rescue nil}"
+      end
+    end
+    self
+  end
+
+  def fetch_instance(instance_uuid)
+    @instance = PipelineInstance.find(instance_uuid)
+    @template = @instance
+    self
+  end
+
+  def apply_parameters(params_args)
+    params_args.shift if params_args[0] == '--'
+    params = {}
+    while !params_args.empty?
+      if (re = params_args[0].match /^(--)?([^-].*?)=(.+)/)
+        params[re[2]] = re[3]
+        params_args.shift
+      elsif params_args.size > 1
+        param = params_args.shift.sub /^--/, ''
+        params[param] = params_args.shift
+      else
+        abort "\n#{Time.now} -- pipeline_template #{@template[:uuid]}\nSyntax error: I do not know what to do with arg \"#{params_args[0]}\""
+      end
+    end
+
+    if not @template[:components].is_a?(Hash)
+      abort "\n#{Time.now} -- pipeline_template #{@template[:uuid]}\nSyntax error: Template missing \"components\" hash"
+    end
+    @components = @template[:components].dup
+
+    bad_components = @components.each_pair.select do |cname, cspec|
+      not cspec.is_a?(Hash)
+    end
+    if bad_components.any?
+      abort "\n#{Time.now} -- pipeline_template #{@template[:uuid]}\nSyntax error: Components not specified with hashes: #{bad_components.map(&:first).join(', ')}"
+    end
+
+    bad_components = @components.each_pair.select do |cname, cspec|
+      not cspec[:script_parameters].is_a?(Hash)
+    end
+    if bad_components.any?
+      abort "\n#{Time.now} -- pipeline_template #{@template[:uuid]}\nSyntax error: Components missing \"script_parameters\" hashes: #{bad_components.map(&:first).join(', ')}"
+    end
+
+    errors = []
+    @components.each do |componentname, component|
+      component[:script_parameters].each do |parametername, parameter|
+        parameter = { :value => parameter } unless parameter.is_a? Hash
+        value =
+          (params["#{componentname}::#{parametername}"] ||
+           parameter[:value] ||
+           (parameter[:output_of].nil? &&
+            (params[parametername.to_s] ||
+             parameter[:default])) ||
+           nil)
+        if value.nil? and
+            ![false,'false',0,'0'].index parameter[:required]
+          if parameter[:output_of]
+            if not @components[parameter[:output_of].intern]
+              errors << [componentname, parametername, "output_of refers to nonexistent component '#{parameter[:output_of]}'"]
+            end
+            next
+          end
+          errors << [componentname, parametername, "required parameter is missing"]
+        end
+        debuglog "parameter #{componentname}::#{parametername} == #{value}"
+
+        component[:script_parameters][parametername] =
+          parameter.dup.merge(value: value)
+      end
+    end
+    if !errors.empty?
+      abort "\n#{Time.now} -- pipeline_template #{@template[:uuid]}\nErrors:\n#{errors.collect { |c,p,e| "#{c}::#{p} - #{e}\n" }.join ""}"
+    end
+    debuglog "options=" + @options.pretty_inspect
+    self
+  end
+
+  def setup_instance
+    if @instance
+      @instance[:properties][:run_options] ||= {}
+      if @options[:no_reuse]
+        # override properties of existing instance
+        @instance[:properties][:run_options][:enable_job_reuse] = false
+      else
+        # Default to "enable reuse" if not specified. (This code path
+        # can go away when old clients go away.)
+        if @instance[:properties][:run_options][:enable_job_reuse].nil?
+          @instance[:properties][:run_options][:enable_job_reuse] = true
+        end
+      end
+    else
+      description = $options[:description]
+      description = ("Created at #{Time.now.localtime}" + (@template[:name].andand.size.andand>0 ? " using the pipeline template *#{@template[:name]}*" : "")) if !description
+      @instance = PipelineInstance.
+        create(components: @components,
+               properties: {
+                 run_options: {
+                   enable_job_reuse: !@options[:no_reuse]
+                 }
+               },
+               pipeline_template_uuid: @template[:uuid],
+               description: description,
+               state: ($options[:submit] ? 'RunningOnServer' : 'RunningOnClient'))
+    end
+    self
+  end
+
+  def run
+    moretodo = true
+    interrupted = false
+
+    if @instance[:started_at].nil?
+      @instance[:started_at] = Time.now
+    end
+
+    job_creation_failed = 0
+    while moretodo
+      moretodo = false
+      @components.each do |cname, c|
+        job = nil
+        owner_uuid = @instance[:owner_uuid]
+        # Is the job satisfying this component already known to be
+        # finished? (Already meaning "before we query API server about
+        # the job's current state")
+        c_already_finished = (c[:job] &&
+                              c[:job][:uuid] &&
+                              ["Complete", "Failed", "Cancelled"].include?(c[:job][:state]))
+        if !c[:job] and
+            c[:script_parameters].select { |pname, p| p.is_a? Hash and p[:output_of]}.empty?
+          # No job yet associated with this component and is component inputs
+          # are fully specified (any output_of script_parameters are resolved
+          # to real value)
+          my_submit_id = "instance #{@instance[:uuid]} rand #{rand(2**64).to_s(36)}"
+          job = JobCache.create(@instance, cname, {
+            :script => c[:script],
+            :script_parameters => Hash[c[:script_parameters].map do |key, spec|
+                                         [key, spec[:value]]
+                                       end],
+            :script_version => c[:script_version],
+            :repository => c[:repository],
+            :nondeterministic => c[:nondeterministic],
+            :runtime_constraints => c[:runtime_constraints],
+            :owner_uuid => owner_uuid,
+            :is_locked_by_uuid => (@options[:run_jobs_here] ? owner_uuid : nil),
+            :submit_id => my_submit_id,
+            :state => (if @options[:run_jobs_here] then "Running" else "Queued" end)
+          }, {
+            # This is the right place to put these attributes when
+            # dealing with new API servers.
+            :minimum_script_version => c[:minimum_script_version],
+            :exclude_script_versions => c[:exclude_minimum_script_versions],
+            :find_or_create => (@instance[:properties][:run_options].andand[:enable_job_reuse] &&
+                                !c[:nondeterministic]),
+            :filters => c[:filters]
+          })
+          if job
+            debuglog "component #{cname} new job #{job[:uuid]}"
+            c[:job] = job
+            c[:run_in_process] = (@options[:run_jobs_here] and
+                                  job[:submit_id] == my_submit_id)
+          else
+            debuglog "component #{cname} new job failed", 0
+            job_creation_failed += 1
+          end
+        end
+
+        if c[:job] and c[:run_in_process] and not ["Complete", "Failed", "Cancelled"].include? c[:job][:state]
+          report_status
+          begin
+            require 'open3'
+            Open3.popen3("arv-crunch-job", "--force-unlock",
+                         "--job", c[:job][:uuid]) do |stdin, stdout, stderr, wait_thr|
+              debuglog "arv-crunch-job pid #{wait_thr.pid} started", 0
+              stdin.close
+              while true
+                rready, wready, = IO.select([stdout, stderr], [])
+                break if !rready[0]
+                begin
+                  buf = rready[0].read_nonblock(2**20)
+                rescue EOFError
+                  break
+                end
+                (rready[0] == stdout ? $stdout : $stderr).write(buf)
+              end
+              stdout.close
+              stderr.close
+              debuglog "arv-crunch-job pid #{wait_thr.pid} exit #{wait_thr.value.to_i}", 0
+            end
+            if not $arv.job.get(uuid: c[:job][:uuid])[:finished_at]
+              raise Exception.new("arv-crunch-job did not set finished_at.")
+            end
+          rescue Exception => e
+            debuglog "Interrupted (#{e}). Failing job.", 0
+            $arv.job.update(uuid: c[:job][:uuid],
+                            job: {
+                              state: "Failed"
+                            })
+          end
+        end
+
+        if c[:job] and c[:job][:uuid]
+          if ["Running", "Queued"].include?(c[:job][:state])
+            # Job is running (or may be soon) so update copy of job record
+            c[:job] = JobCache.get(c[:job][:uuid])
+          end
+
+          if c[:job][:state] == "Complete"
+            # Populate script_parameters of other components waiting for
+            # this job
+            @components.each do |c2name, c2|
+              c2[:script_parameters].each do |pname, p|
+                if p.is_a? Hash and p[:output_of] == cname.to_s
+                  debuglog "parameter #{c2name}::#{pname} == #{c[:job][:output]}"
+                  c2[:script_parameters][pname] = {value: c[:job][:output]}
+                  moretodo = true
+                end
+              end
+            end
+            unless c_already_finished
+              # This is my first time discovering that the job
+              # succeeded. (At the top of this loop, I was still
+              # waiting for it to finish.)
+
+              if @instance[:name].andand.length.andand > 0
+                pipeline_name = @instance[:name]
+              elsif @template.andand[:name].andand.length.andand > 0
+                pipeline_name = @template[:name]
+              else
+                pipeline_name = @instance[:uuid]
+              end
+              if c[:output_name] != false
+                # Create a collection located in the same project as the pipeline with the contents of the output.
+                portable_data_hash = c[:job][:output]
+                collections = $arv.collection.list(limit: 1,
+                                                   filters: [['portable_data_hash', '=', portable_data_hash]],
+                                                   select: ["portable_data_hash", "manifest_text"]
+                                                   )[:items]
+                if collections.any?
+                  name = c[:output_name] || "Output #{portable_data_hash[0..7]} of #{cname} of #{pipeline_name}"
+
+                  # check if there is a name collision.
+                  name_collisions = $arv.collection.list(filters: [["owner_uuid", "=", owner_uuid],
+                                                                   ["name", "=", name]])[:items]
+
+                  newcollection_actual = nil
+                  if name_collisions.any? and name_collisions.first[:portable_data_hash] == portable_data_hash
+                    # There is already a collection with the same name and the
+                    # same contents, so just point to that.
+                    newcollection_actual = name_collisions.first
+                  end
+
+                  if newcollection_actual.nil?
+                    # Did not find a collection with the same name (or the
+                    # collection has a different portable data hash) so create
+                    # a new collection with ensure_unique_name: true.
+                    newcollection = {
+                      owner_uuid: owner_uuid,
+                      name: name,
+                      portable_data_hash: collections.first[:portable_data_hash],
+                      manifest_text: collections.first[:manifest_text]
+                    }
+                    debuglog "Creating collection #{newcollection}", 0
+                    newcollection_actual = $arv.collection.create collection: newcollection, ensure_unique_name: true
+                  end
+
+                  c[:output_uuid] = newcollection_actual[:uuid]
+                else
+                  debuglog "Could not find a collection with portable data hash #{portable_data_hash}", 0
+                end
+              end
+            end
+          elsif ["Queued", "Running"].include? c[:job][:state]
+            # Job is running or queued to run, so indicate that pipeline
+            # should continue to run
+            moretodo = true
+          elsif c[:job][:state] == "Cancelled"
+            debuglog "component #{cname} job #{c[:job][:uuid]} cancelled."
+            moretodo = false
+          elsif c[:job][:state] == "Failed"
+            moretodo = false
+          end
+        end
+      end
+      @instance[:components] = @components
+      report_status
+
+      if @options[:no_wait]
+        moretodo = false
+      end
+
+      # If job creation fails, just give up on this pipeline instance.
+      if job_creation_failed > 0
+        moretodo = false
+      end
+
+      if moretodo
+        begin
+          sleep 10
+        rescue Interrupt
+          debuglog "interrupt", 0
+          interrupted = true
+          break
+        end
+      end
+    end
+
+    c_in_state = @components.values.group_by { |c|
+      c[:job] and c[:job][:state]
+    }
+    succeeded = c_in_state["Complete"].andand.count || 0
+    failed = (c_in_state["Failed"].andand.count || 0) + (c_in_state["Cancelled"].andand.count || 0)
+    ended = succeeded + failed
+
+    success = (succeeded == @components.length)
+
+    # A job create call failed. Just give up.
+    if job_creation_failed > 0
+      debuglog "job creation failed - giving up on this pipeline instance", 0
+      success = false
+      failed += 1
+    end
+
+    if interrupted
+     if success
+        @instance[:state] = 'Complete'
+     else
+        @instance[:state] = 'Paused'
+      end
+    else
+      if ended == @components.length or failed > 0
+        @instance[:state] = success ? 'Complete' : 'Failed'
+      end
+    end
+
+    if @instance[:finished_at].nil? and ['Complete', 'Failed'].include? @instance[:state]
+      @instance[:finished_at] = Time.now
+    end
+
+    debuglog "pipeline instance state is #{@instance[:state]}"
+
+    # set components_summary
+    components_summary = {"todo" => @components.length - ended, "done" => succeeded, "failed" => failed}
+    @instance[:components_summary] = components_summary
+
+    @instance.save
+  end
+
+  def cleanup
+    if @instance and @instance[:state] == 'RunningOnClient'
+      @instance[:state] = 'Paused'
+      @instance.save
+    end
+  end
+
+  def uuid
+    @instance[:uuid]
+  end
+
+  protected
+
+  def report_status
+    @instance.save
+
+    if @options[:status_json] != '/dev/null'
+      File.open(@options[:status_json], 'w') do |f|
+        f.puts @components.pretty_inspect
+      end
+    end
+
+    if @options[:status_text] != '/dev/null'
+      File.open(@options[:status_text], 'w') do |f|
+        f.puts ""
+        f.puts "#{Time.now} -- pipeline_instance #{@instance[:uuid]}"
+        namewidth = @components.collect { |cname, c| cname.size }.max
+        @components.each do |cname, c|
+          jstatus = if !c[:job]
+                      "-"
+                    else case c[:job][:state]
+                         when "Running"
+                           "#{c[:job][:tasks_summary].inspect}"
+                         when "Complete"
+                           c[:job][:output]
+                         when "Cancelled"
+                           "cancelled #{c[:job][:cancelled_at]}"
+                         when "Failed"
+                           "failed #{c[:job][:finished_at]}"
+                         when "Queued"
+                           "queued #{c[:job][:created_at]}"
+                         end
+                    end
+          f.puts "#{cname.to_s.ljust namewidth} #{c[:job] ? c[:job][:uuid] : '-'.ljust(27)} #{jstatus}"
+        end
+      end
+    end
+  end
+
+  def abort(msg)
+    if @instance
+      if ["New", "Ready", "RunningOnClient",
+          "RunningOnServer"].include?(@instance[:state])
+        @instance[:state] = "Failed"
+        @instance[:finished_at] = Time.now
+        @instance.save
+      end
+      @instance.log_stderr(msg)
+    end
+    Kernel::abort(msg)
+  end
+end
+
+runner = WhRunPipelineInstance.new($options)
+begin
+  if $options[:template]
+    runner.fetch_template($options[:template])
+  else
+    runner.fetch_instance($options[:instance])
+  end
+  runner.apply_parameters(p.leftovers)
+  runner.setup_instance
+  if $options[:submit]
+    runner.instance.save
+    puts runner.instance[:uuid]
+  else
+    runner.run
+  end
+rescue Exception => e
+  runner.cleanup
+  raise e
+end
diff --git a/sdk/cli/bin/arv-tag b/sdk/cli/bin/arv-tag
new file mode 100755 (executable)
index 0000000..e400dab
--- /dev/null
@@ -0,0 +1,235 @@
+#! /usr/bin/env ruby
+
+# arv tag usage:
+#   arv tag add tag1 [tag2 ...] --object obj_uuid1 [--object obj_uuid2 ...]
+#   arv tag remove tag1 [tag2 ...] --object obj_uuid1 [--object obj_uuid2 ...]
+#   arv tag remove tag1 [tag2 ...] --all
+
# Build the multi-line usage text shown by `usage` and as the Trollop
# banner.
def usage_string
  lines = ["\nUsage:",
           "arv tag add tag1 [tag2 ...] --object object_uuid1 [object_uuid2...]",
           "arv tag remove tag1 [tag2 ...] --object object_uuid1 [object_uuid2...]",
           "arv tag remove --all"]
  lines.join("\n") + "\n"
end
+
# Print the usage text to stderr and exit with a nonzero status.
def usage
  abort(usage_string)
end
+
# Execute one Arvados API call through the global $client and return the
# parsed JSON response as a Hash.  Aborts the program on a JSON parse
# failure or when the server reports errors.  Note: the caller's
# request_body hash is mutated to carry the API token.
def api_call(method, parameters:{}, request_body:{})
  request_body[:api_token] = ENV['ARVADOS_API_TOKEN']
  response = $client.execute(:api_method => method,
                             :parameters => parameters,
                             :body_object => request_body,
                             :authenticated => false)

  parsed =
    begin
      JSON.parse response.body
    rescue JSON::ParserError => e
      abort "Failed to parse server response:\n" + e.to_s
    end

  if parsed["errors"]
    abort "Error: #{parsed["errors"][0]}"
  end

  parsed
end
+
# Create one tag link named +tag+ pointing at +obj_uuid+ and return the
# API response.
def tag_add(tag, obj_uuid)
  link_body = {
    :link => {
      :name       => tag,
      :link_class => :tag,
      :head_uuid  => obj_uuid,
    }
  }
  api_call($arvados.links.create, request_body: link_body)
end
+
# Remove the tag named +tag+.  When +obj_uuids+ is given, only the tag
# links pointing at those objects are deleted; otherwise every tag link
# with this name is deleted.  Returns the list of API responses for the
# deletions (empty when nothing matched).
def tag_remove(tag, obj_uuids=nil)
  # If we got a list of objects to untag, look up the uuids for the
  # links that need to be deleted.
  link_uuids = []
  if obj_uuids
    obj_uuids.each do |uuid|
      link = api_call($arvados.links.list,
                      request_body: {
                        :where => {
                          :link_class => :tag,
                          :name => tag,
                          :head_uuid => uuid,
                        }
                      })
      if link['items_available'] > 0
        link_uuids.push link['items'][0]['uuid']
      end
    end
  else
    all_tag_links = api_call($arvados.links.list,
                             request_body: {
                               :where => {
                                 :link_class => :tag,
                                 :name => tag,
                               }
                             })
    link_uuids = all_tag_links['items'].map { |obj| obj['uuid'] }
  end

  results = []
  # BUG FIX: link_uuids is always an Array here, so the original
  # `if link_uuids` test was always true and the "no tags found to
  # remove" branch was unreachable.  Test for emptiness instead.
  if link_uuids.empty?
    $stderr.puts "no tags found to remove"
  else
    link_uuids.each do |uuid|
      results.push api_call($arvados.links.delete, parameters:{ :uuid => uuid })
    end
  end

  return results
end
+
# Refuse to run on Rubies too old for the syntax used below.
# NOTE(review): api_call uses keyword-argument defaults (parameters:{}),
# which is Ruby 2.0 syntax; on 1.9.3 this file fails to parse before
# this guard ever runs — confirm whether the minimum should be 2.0.
if RUBY_VERSION < '1.9.3' then
  abort <<-EOS
#{$0.gsub(/^\.\//,'')} requires Ruby version 1.9.3 or higher.
EOS
end

# Read Arvados connection settings from the environment.  Host and
# token are mandatory; API version defaults to 'v1'.  The low-precedence
# `or` means abort fires only when the variable is unset (nil).
$arvados_api_version = ENV['ARVADOS_API_VERSION'] || 'v1'
$arvados_api_host = ENV['ARVADOS_API_HOST'] or
  abort "#{$0}: fatal: ARVADOS_API_HOST environment variable not set."
$arvados_api_token = ENV['ARVADOS_API_TOKEN'] or
  abort "#{$0}: fatal: ARVADOS_API_TOKEN environment variable not set."
# Only the literal string 'yes' disables SSL verification.
$arvados_api_host_insecure = ENV['ARVADOS_API_HOST_INSECURE'] == 'yes'
+
# Load runtime dependencies, turning a missing gem into a friendly
# install hint instead of a bare LoadError backtrace.
begin
  require 'rubygems'
  require 'google/api_client'
  require 'json'
  require 'pp'
  require 'oj'
  require 'trollop'
rescue LoadError
  # BUG FIX: the hint previously omitted 'oj' (required above) and
  # listed 'pp', which is part of the standard library, not a gem.
  abort <<-EOS
#{$0}: fatal: some runtime dependencies are missing.
Try: gem install google-api-client json oj trollop
  EOS
end
+
# Write +message+ to stderr (prefixed with program name and pid) when
# the global debug level is at least +verbosity+.
# BUG FIX: $debuglevel is never assigned anywhere in this script, so the
# original `$debuglevel >= verbosity` raised NoMethodError on nil the
# first time debuglog was called.  Treat an unset level as 0.
def debuglog(message, verbosity=1)
  $stderr.puts "#{File.split($0).last} #{$$}: #{message}" if ($debuglevel || 0) >= verbosity
end
+
module Kernel
  # Run the given block with Ruby warnings ($VERBOSE) disabled and pass
  # the block's value through.
  # BUG FIX: the original did not restore $VERBOSE when the block
  # raised, leaving warnings disabled for the rest of the process;
  # begin/ensure restores it on every exit path.
  def suppress_warnings
    original_verbosity = $VERBOSE
    $VERBOSE = nil
    begin
      yield
    ensure
      $VERBOSE = original_verbosity
    end
  end
end
+
# Disable SSL certificate verification for dev/test servers: either the
# insecure env flag was set, or the API host name contains "local".
# This works by clobbering the VERIFY_PEER constant itself;
# suppress_warnings hides the "already initialized constant" warning
# that the redefinition triggers.
if $arvados_api_host_insecure or $arvados_api_host.match /local/
  # You probably don't care about SSL certificate checks if you're
  # testing with a dev server.
  suppress_warnings { OpenSSL::SSL::VERIFY_PEER = OpenSSL::SSL::VERIFY_NONE }
end
+
# Monkey-patch Google::APIClient so discovery documents are fetched
# without authentication (:authenticated => false) and memoized in the
# client's @discovery_documents cache, keyed by "api:version".
class Google::APIClient
  def discovery_document(api, version)
    api = api.to_s
    return @discovery_documents["#{api}:#{version}"] ||=
      begin
        response = self.execute!(
                                 :http_method => :get,
                                 :uri => self.discovery_uri(api, version),
                                 :authenticated => false
                                 )
        # Some client versions return an already-parsed body; only parse
        # when we actually got a String.
        response.body.class == String ? JSON.parse(response.body) : response.body
      end
  end
end
+
# Global (pre-subcommand) options.  stop_on makes Trollop stop parsing
# at the first 'add'/'remove' word, leaving the subcommand and its
# arguments in ARGV for the second-stage parser below.
global_opts = Trollop::options do
  banner usage_string
  banner ""
  opt :dry_run, "Don't actually do anything", :short => "-n"
  opt :verbose, "Print some things on stderr", :short => "-v"
  opt :uuid, "Return the UUIDs of the objects in the response, one per line (default)", :short => nil
  opt :json, "Return the entire response received from the API server, as a JSON object", :short => "-j"
  opt :human, "Return the response received from the API server, as a JSON object with whitespace added for human consumption", :short => "-h"
  opt :pretty, "Synonym of --human", :short => nil
  opt :yaml, "Return the response received from the API server, in YAML format", :short => "-y"
  stop_on ['add', 'remove']
end
+
# Subcommand-level options, parsed from whatever the global parser left
# in ARGV after stopping at 'add'/'remove'.
p = Trollop::Parser.new do
  opt(:all,
      "Remove this tag from all objects under your ownership. Only valid with `tag remove'.",
      :short => :none)
  opt(:object,
      "The UUID of an object to which this tag operation should be applied.",
      :type => :string,
      :multi => true,   # repeatable; values collected into an array
      :short => :o)
end

# Parse, letting Trollop do its standard error/exit handling.
$options = Trollop::with_standard_exception_handling p do
  p.parse ARGV
end

# --all is only meaningful for the 'remove' subcommand.
if $options[:all] and ARGV[0] != 'remove'
  usage
end
+
# Set up the API client.

# NOTE(review): $application_version is never assigned in this script,
# so .to_s yields "" here — confirm whether a version constant was
# meant to be loaded from elsewhere.
$client ||= Google::APIClient.
  new(:host => $arvados_api_host,
      :application_name => File.split($0).last,
      :application_version => $application_version.to_s)
$arvados = $client.discovered_api('arvados', $arvados_api_version)

results = []
cmd = ARGV.shift   # subcommand word: 'add' or 'remove'

# The remaining words are tag names; at least one is required.
# NOTE(review): this also rejects `arv tag remove --all` with no tag
# names, which contradicts usage_string's "arv tag remove --all" line —
# confirm which behavior is intended.
if ARGV.empty?
  usage
end
+
# Dispatch on the subcommand.  Every remaining ARGV word is a tag name;
# each tag is applied to (or removed from) the requested objects, and
# the per-call API responses accumulate in `results`.
case cmd
when 'add'
  ARGV.each do |tag|
    $options[:object].each { |obj| results.push(tag_add(tag, obj)) }
  end
when 'remove'
  ARGV.each do |tag|
    removed = if $options[:all]
                tag_remove(tag)
              else
                tag_remove(tag, $options[:object])
              end
    results.concat removed
  end
else
  usage
end
+
# Emit the accumulated API responses in the format selected by the
# global output options; the default is one UUID per line.
if global_opts[:human] || global_opts[:pretty]
  puts Oj.dump(results, :indent => 1)
elsif global_opts[:yaml]
  puts results.to_yaml
elsif global_opts[:json]
  puts Oj.dump(results)
else
  results.each do |response|
    if response['uuid'].nil?
      # A response without a uuid means something went wrong server-side.
      abort("Response did not include a uuid:\n" +
            Oj.dump(response, :indent => 1) +
            "\n")
    else
      puts response['uuid']
    end
  end
end
diff --git a/sdk/cli/bin/arv-ws b/sdk/cli/bin/arv-ws
new file mode 120000 (symlink)
index 0000000..622916b
--- /dev/null
@@ -0,0 +1 @@
+../../python/bin/arv-ws
\ No newline at end of file
diff --git a/sdk/cli/bin/crunch-job b/sdk/cli/bin/crunch-job
new file mode 100755 (executable)
index 0000000..3539a57
--- /dev/null
@@ -0,0 +1,1979 @@
+#!/usr/bin/perl
+# -*- mode: perl; perl-indent-level: 2; indent-tabs-mode: nil; -*-
+
+=head1 NAME
+
+crunch-job: Execute job steps, save snapshots as requested, collate output.
+
+=head1 SYNOPSIS
+
+Obtain job details from Arvados, run tasks on compute nodes (typically
+invoked by scheduler on controller):
+
+ crunch-job --job x-y-z --git-dir /path/to/repo/.git
+
+Obtain job details from command line, run tasks on local machine
+(typically invoked by application or developer on VM):
+
+ crunch-job --job '{"script_version":"/path/to/working/tree","script":"scriptname",...}'
+
+ crunch-job --job '{"repository":"https://github.com/curoverse/arvados.git","script_version":"master","script":"scriptname",...}'
+
+=head1 OPTIONS
+
+=over
+
+=item --force-unlock
+
+If the job is already locked, steal the lock and run it anyway.
+
+=item --git-dir
+
+Path to a .git directory (or a git URL) where the commit given in the
+job's C<script_version> attribute is to be found. If this is I<not>
+given, the job's C<repository> attribute will be used.
+
+=item --job-api-token
+
+Arvados API authorization token to use during the course of the job.
+
+=item --no-clear-tmp
+
+Do not clear per-job/task temporary directories during initial job
+setup. This can speed up development and debugging when running jobs
+locally.
+
+=item --job
+
+UUID of the job to run, or a JSON-encoded job resource without a
+UUID. If the latter is given, a new job object will be created.
+
+=back
+
+=head1 RUNNING JOBS LOCALLY
+
+crunch-job's log messages appear on stderr along with the job tasks'
+stderr streams. The log is saved in Keep at each checkpoint and when
+the job finishes.
+
+If the job succeeds, the job's output locator is printed on stdout.
+
+While the job is running, the following signals are accepted:
+
+=over
+
+=item control-C, SIGINT, SIGQUIT
+
+Save a checkpoint, terminate any job tasks that are running, and stop.
+
+=item SIGALRM
+
+Save a checkpoint and continue.
+
+=item SIGHUP
+
+Refresh node allocation (i.e., check whether any nodes have been added
+or unallocated) and attributes of the Job record that should affect
+behavior (e.g., cancel job if cancelled_at becomes non-nil).
+
+=back
+
+=cut
+
+
+use strict;
+use POSIX ':sys_wait_h';
+use POSIX qw(strftime);
+use Fcntl qw(F_GETFL F_SETFL O_NONBLOCK);
+use Arvados;
+use Cwd qw(realpath);
+use Data::Dumper;
+use Digest::MD5 qw(md5_hex);
+use Getopt::Long;
+use IPC::Open2;
+use IO::Select;
+use File::Temp;
+use Fcntl ':flock';
+use File::Path qw( make_path remove_tree );
+
+use constant EX_TEMPFAIL => 75;
+
+$ENV{"TMPDIR"} ||= "/tmp";
+unless (defined $ENV{"CRUNCH_TMP"}) {
+  $ENV{"CRUNCH_TMP"} = $ENV{"TMPDIR"} . "/crunch-job";
+  if ($ENV{"USER"} ne "crunch" && $< != 0) {
+    # use a tmp dir unique for my uid
+    $ENV{"CRUNCH_TMP"} .= "-$<";
+  }
+}
+
+# Create the tmp directory if it does not exist
+if ( ! -d $ENV{"CRUNCH_TMP"} ) {
+  make_path $ENV{"CRUNCH_TMP"} or die "Failed to create temporary working directory: " . $ENV{"CRUNCH_TMP"};
+}
+
+$ENV{"JOB_WORK"} = $ENV{"CRUNCH_TMP"} . "/work";
+$ENV{"CRUNCH_INSTALL"} = "$ENV{CRUNCH_TMP}/opt";
+$ENV{"CRUNCH_WORK"} = $ENV{"JOB_WORK"}; # deprecated
+mkdir ($ENV{"JOB_WORK"});
+
+my $force_unlock;
+my $git_dir;
+my $jobspec;
+my $job_api_token;
+my $no_clear_tmp;
+my $resume_stash;
+GetOptions('force-unlock' => \$force_unlock,
+           'git-dir=s' => \$git_dir,
+           'job=s' => \$jobspec,
+           'job-api-token=s' => \$job_api_token,
+           'no-clear-tmp' => \$no_clear_tmp,
+           'resume-stash=s' => \$resume_stash,
+    );
+
+if (defined $job_api_token) {
+  $ENV{ARVADOS_API_TOKEN} = $job_api_token;
+}
+
+my $have_slurm = exists $ENV{SLURM_JOBID} && exists $ENV{SLURM_NODELIST};
+my $local_job = 0;
+
+
+$SIG{'USR1'} = sub
+{
+  $main::ENV{CRUNCH_DEBUG} = 1;
+};
+$SIG{'USR2'} = sub
+{
+  $main::ENV{CRUNCH_DEBUG} = 0;
+};
+
+
+
+my $arv = Arvados->new('apiVersion' => 'v1');
+
+my $Job;
+my $job_id;
+my $dbh;
+my $sth;
+my @jobstep;
+
+my $User = api_call("users/current");
+
+if ($jobspec =~ /^[-a-z\d]+$/)
+{
+  # $jobspec is an Arvados UUID, not a JSON job specification
+  $Job = api_call("jobs/get", uuid => $jobspec);
+  if (!$force_unlock) {
+    # Claim this job, and make sure nobody else does
+    eval { api_call("jobs/lock", uuid => $Job->{uuid}); };
+    if ($@) {
+      Log(undef, "Error while locking job, exiting ".EX_TEMPFAIL);
+      exit EX_TEMPFAIL;
+    };
+  }
+}
+else
+{
+  $Job = JSON::decode_json($jobspec);
+
+  if (!$resume_stash)
+  {
+    map { croak ("No $_ specified") unless $Job->{$_} }
+    qw(script script_version script_parameters);
+  }
+
+  $Job->{'is_locked_by_uuid'} = $User->{'uuid'};
+  $Job->{'started_at'} = gmtime;
+  $Job->{'state'} = 'Running';
+
+  $Job = api_call("jobs/create", job => $Job);
+}
+$job_id = $Job->{'uuid'};
+
+my $keep_logfile = $job_id . '.log.txt';
+log_writer_start($keep_logfile);
+
+$Job->{'runtime_constraints'} ||= {};
+$Job->{'runtime_constraints'}->{'max_tasks_per_node'} ||= 0;
+my $max_ncpus = $Job->{'runtime_constraints'}->{'max_tasks_per_node'};
+
+my $gem_versions = `gem list --quiet arvados-cli 2>/dev/null`;
+if ($? == 0) {
+  $gem_versions =~ s/^arvados-cli \(/ with arvados-cli Gem version(s) /;
+  chomp($gem_versions);
+  chop($gem_versions);  # Closing parentheses
+} else {
+  $gem_versions = "";
+}
+Log(undef,
+    "running from " . ((-e $0) ? realpath($0) : "stdin") . $gem_versions);
+
+Log (undef, "check slurm allocation");
+my @slot;
+my @node;
+# Should use $ENV{SLURM_TASKS_PER_NODE} instead of sinfo? (eg. "4(x3),2,4(x2)")
+my @sinfo;
+if (!$have_slurm)
+{
+  my $localcpus = 0 + `grep -cw ^processor /proc/cpuinfo` || 1;
+  push @sinfo, "$localcpus localhost";
+}
+if (exists $ENV{SLURM_NODELIST})
+{
+  push @sinfo, `sinfo -h --format='%c %N' --nodes=\Q$ENV{SLURM_NODELIST}\E`;
+}
+foreach (@sinfo)
+{
+  my ($ncpus, $slurm_nodelist) = split;
+  $ncpus = $max_ncpus if $max_ncpus && $ncpus > $max_ncpus;
+
+  my @nodelist;
+  while ($slurm_nodelist =~ s/^([^\[,]+?(\[.*?\])?)(,|$)//)
+  {
+    my $nodelist = $1;
+    if ($nodelist =~ /\[((\d+)(-(\d+))?(,(\d+)(-(\d+))?)*)\]/)
+    {
+      my $ranges = $1;
+      foreach (split (",", $ranges))
+      {
+       my ($a, $b);
+       if (/(\d+)-(\d+)/)
+       {
+         $a = $1;
+         $b = $2;
+       }
+       else
+       {
+         $a = $_;
+         $b = $_;
+       }
+       push @nodelist, map {
+         my $n = $nodelist;
+         $n =~ s/\[[-,\d]+\]/$_/;
+         $n;
+       } ($a..$b);
+      }
+    }
+    else
+    {
+      push @nodelist, $nodelist;
+    }
+  }
+  foreach my $nodename (@nodelist)
+  {
+    Log (undef, "node $nodename - $ncpus slots");
+    my $node = { name => $nodename,
+                ncpus => $ncpus,
+                losing_streak => 0,
+                hold_until => 0 };
+    foreach my $cpu (1..$ncpus)
+    {
+      push @slot, { node => $node,
+                   cpu => $cpu };
+    }
+  }
+  push @node, @nodelist;
+}
+
+
+
+# Ensure that we get one jobstep running on each allocated node before
+# we start overloading nodes with concurrent steps
+
+@slot = sort { $a->{cpu} <=> $b->{cpu} } @slot;
+
+
+$Job->update_attributes(
+  'tasks_summary' => { 'failed' => 0,
+                       'todo' => 1,
+                       'running' => 0,
+                       'done' => 0 });
+
+Log (undef, "start");
+$SIG{'INT'} = sub { $main::please_freeze = 1; };
+$SIG{'QUIT'} = sub { $main::please_freeze = 1; };
+$SIG{'TERM'} = \&croak;
+$SIG{'TSTP'} = sub { $main::please_freeze = 1; };
+$SIG{'ALRM'} = sub { $main::please_info = 1; };
+$SIG{'CONT'} = sub { $main::please_continue = 1; };
+$SIG{'HUP'} = sub { $main::please_refresh = 1; };
+
+$main::please_freeze = 0;
+$main::please_info = 0;
+$main::please_continue = 0;
+$main::please_refresh = 0;
+my $jobsteps_must_output_keys = 0;     # becomes 1 when any task outputs a key
+
+grep { $ENV{$1} = $2 if /^(NOCACHE.*?)=(.*)/ } split ("\n", $$Job{knobs});
+$ENV{"CRUNCH_JOB_UUID"} = $job_id;
+$ENV{"JOB_UUID"} = $job_id;
+
+
+my @jobstep_todo = ();
+my @jobstep_done = ();
+my @jobstep_tomerge = ();
+my $jobstep_tomerge_level = 0;
+my $squeue_checked;
+my $squeue_kill_checked;
+my $latest_refresh = scalar time;
+
+
+
+if (defined $Job->{thawedfromkey})
+{
+  thaw ($Job->{thawedfromkey});
+}
+else
+{
+  my $first_task = api_call("job_tasks/create", job_task => {
+    'job_uuid' => $Job->{'uuid'},
+    'sequence' => 0,
+    'qsequence' => 0,
+    'parameters' => {},
+  });
+  push @jobstep, { 'level' => 0,
+                  'failures' => 0,
+                   'arvados_task' => $first_task,
+                };
+  push @jobstep_todo, 0;
+}
+
+
+if (!$have_slurm)
+{
+  must_lock_now("$ENV{CRUNCH_TMP}/.lock", "a job is already running here.");
+}
+
+my $build_script = handle_readall(\*DATA);
+my $nodelist = join(",", @node);
+my $git_tar_count = 0;
+
+if (!defined $no_clear_tmp) {
+  # Clean out crunch_tmp/work, crunch_tmp/opt, crunch_tmp/src*
+  Log (undef, "Clean work dirs");
+
+  my $cleanpid = fork();
+  if ($cleanpid == 0)
+  {
+    srun (["srun", "--nodelist=$nodelist", "-D", $ENV{'TMPDIR'}],
+          ['bash', '-c', 'if mount | grep -q $JOB_WORK/; then for i in $JOB_WORK/*keep $CRUNCH_TMP/task/*.keep; do /bin/fusermount -z -u $i; done; fi; sleep 1; rm -rf $JOB_WORK $CRUNCH_INSTALL $CRUNCH_TMP/task $CRUNCH_TMP/src*']);
+    exit (1);
+  }
+  while (1)
+  {
+    last if $cleanpid == waitpid (-1, WNOHANG);
+    freeze_if_want_freeze ($cleanpid);
+    select (undef, undef, undef, 0.1);
+  }
+  Log (undef, "Cleanup command exited ".exit_status_s($?));
+}
+
+# If this job requires a Docker image, install that.
+my $docker_bin = "/usr/bin/docker.io";
+my ($docker_locator, $docker_stream, $docker_hash);
+if ($docker_locator = $Job->{docker_image_locator}) {
+  ($docker_stream, $docker_hash) = find_docker_image($docker_locator);
+  if (!$docker_hash)
+  {
+    croak("No Docker image hash found from locator $docker_locator");
+  }
+  $docker_stream =~ s/^\.//;
+  my $docker_install_script = qq{
+if ! $docker_bin images -q --no-trunc | grep -qxF \Q$docker_hash\E; then
+    arv-get \Q$docker_locator$docker_stream/$docker_hash.tar\E | $docker_bin load
+fi
+};
+  my $docker_pid = fork();
+  if ($docker_pid == 0)
+  {
+    srun (["srun", "--nodelist=" . join(',', @node)],
+          ["/bin/sh", "-ec", $docker_install_script]);
+    exit ($?);
+  }
+  while (1)
+  {
+    last if $docker_pid == waitpid (-1, WNOHANG);
+    freeze_if_want_freeze ($docker_pid);
+    select (undef, undef, undef, 0.1);
+  }
+  if ($? != 0)
+  {
+    croak("Installing Docker image from $docker_locator exited "
+          .exit_status_s($?));
+  }
+
+  if ($Job->{arvados_sdk_version}) {
+    # The job also specifies an Arvados SDK version.  Add the SDKs to the
+    # tar file for the build script to install.
+    Log(undef, sprintf("Packing Arvados SDK version %s for installation",
+                       $Job->{arvados_sdk_version}));
+    add_git_archive("git", "--git-dir=$git_dir", "archive",
+                    "--prefix=.arvados.sdk/",
+                    $Job->{arvados_sdk_version}, "sdk");
+  }
+}
+
+if (!defined $git_dir && $Job->{'script_version'} =~ m{^/}) {
+  # If script_version looks like an absolute path, *and* the --git-dir
+  # argument was not given -- which implies we were not invoked by
+  # crunch-dispatch -- we will use the given path as a working
+  # directory instead of resolving script_version to a git commit (or
+  # doing anything else with git).
+  $ENV{"CRUNCH_SRC_COMMIT"} = $Job->{'script_version'};
+  $ENV{"CRUNCH_SRC"} = $Job->{'script_version'};
+}
+else {
+  # Resolve the given script_version to a git commit sha1. Also, if
+  # the repository is remote, clone it into our local filesystem: this
+  # ensures "git archive" will work, and is necessary to reliably
+  # resolve a symbolic script_version like "master^".
+  $ENV{"CRUNCH_SRC"} = "$ENV{CRUNCH_TMP}/src";
+
+  Log (undef, "Looking for version ".$Job->{script_version}." from repository ".$Job->{repository});
+
+  $ENV{"CRUNCH_SRC_COMMIT"} = $Job->{script_version};
+
+  # If we're running under crunch-dispatch, it will have already
+  # pulled the appropriate source tree into its own repository, and
+  # given us that repo's path as $git_dir.
+  #
+  # If we're running a "local" job, we might have to fetch content
+  # from a remote repository.
+  #
+  # (Currently crunch-dispatch gives a local path with --git-dir, but
+  # we might as well accept URLs there too in case it changes its
+  # mind.)
+  my $repo = $git_dir || $Job->{'repository'};
+
+  # Repository can be remote or local. If remote, we'll need to fetch it
+  # to a local dir before doing `git log` et al.
+  my $repo_location;
+
+  if ($repo =~ m{://|^[^/]*:}) {
+    # $repo is a git url we can clone, like git:// or https:// or
+    # file:/// or [user@]host:repo.git. Note "user/name@host:foo" is
+    # not recognized here because distinguishing that from a local
+    # path is too fragile. If you really need something strange here,
+    # use the ssh:// form.
+    $repo_location = 'remote';
+  } elsif ($repo =~ m{^\.*/}) {
+    # $repo is a local path to a git index. We'll also resolve ../foo
+    # to ../foo/.git if the latter is a directory. To help
+    # disambiguate local paths from named hosted repositories, this
+    # form must be given as ./ or ../ if it's a relative path.
+    if (-d "$repo/.git") {
+      $repo = "$repo/.git";
+    }
+    $repo_location = 'local';
+  } else {
+    # $repo is none of the above. It must be the name of a hosted
+    # repository.
+    my $arv_repo_list = api_call("repositories/list",
+                                 'filters' => [['name','=',$repo]]);
+    my @repos_found = @{$arv_repo_list->{'items'}};
+    my $n_found = $arv_repo_list->{'serverResponse'}->{'items_available'};
+    if ($n_found > 0) {
+      Log(undef, "Repository '$repo' -> "
+          . join(", ", map { $_->{'uuid'} } @repos_found));
+    }
+    if ($n_found != 1) {
+      croak("Error: Found $n_found repositories with name '$repo'.");
+    }
+    $repo = $repos_found[0]->{'fetch_url'};
+    $repo_location = 'remote';
+  }
+  Log(undef, "Using $repo_location repository '$repo'");
+  $ENV{"CRUNCH_SRC_URL"} = $repo;
+
+  # Resolve given script_version (we'll call that $treeish here) to a
+  # commit sha1 ($commit).
+  my $treeish = $Job->{'script_version'};
+  my $commit;
+  if ($repo_location eq 'remote') {
+    # We minimize excess object-fetching by re-using the same bare
+    # repository in CRUNCH_TMP/.git for multiple crunch-jobs -- we
+    # just keep adding remotes to it as needed.
+    my $local_repo = $ENV{'CRUNCH_TMP'}."/.git";
+    my $gitcmd = "git --git-dir=\Q$local_repo\E";
+
+    # Set up our local repo for caching remote objects, making
+    # archives, etc.
+    if (!-d $local_repo) {
+      make_path($local_repo) or croak("Error: could not create $local_repo");
+    }
+    # This works (exits 0 and doesn't delete fetched objects) even
+    # if $local_repo is already initialized:
+    `$gitcmd init --bare`;
+    if ($?) {
+      croak("Error: $gitcmd init --bare exited ".exit_status_s($?));
+    }
+
+    # If $treeish looks like a hash (or abbrev hash) we look it up in
+    # our local cache first, since that's cheaper. (We don't want to
+    # do that with tags/branches though -- those change over time, so
+    # they should always be resolved by the remote repo.)
+    if ($treeish =~ /^[0-9a-f]{7,40}$/s) {
+      # Hide stderr because it's normal for this to fail:
+      my $sha1 = `$gitcmd rev-list -n1 ''\Q$treeish\E 2>/dev/null`;
+      if ($? == 0 &&
+          # Careful not to resolve a branch named abcdeff to commit 1234567:
+          $sha1 =~ /^$treeish/ &&
+          $sha1 =~ /^([0-9a-f]{40})$/s) {
+        $commit = $1;
+        Log(undef, "Commit $commit already present in $local_repo");
+      }
+    }
+
+    if (!defined $commit) {
+      # If $treeish isn't just a hash or abbrev hash, or isn't here
+      # yet, we need to fetch the remote to resolve it correctly.
+
+      # First, remove all local heads. This prevents a name that does
+      # not exist on the remote from resolving to (or colliding with)
+      # a previously fetched branch or tag (possibly from a different
+      # remote).
+      remove_tree("$local_repo/refs/heads", {keep_root => 1});
+
+      Log(undef, "Fetching objects from $repo to $local_repo");
+      `$gitcmd fetch --no-progress --tags ''\Q$repo\E \Q+refs/heads/*:refs/heads/*\E`;
+      if ($?) {
+        croak("Error: `$gitcmd fetch` exited ".exit_status_s($?));
+      }
+    }
+
+    # Now that the data is all here, we will use our local repo for
+    # the rest of our git activities.
+    $repo = $local_repo;
+  }
+
+  my $gitcmd = "git --git-dir=\Q$repo\E";
+  my $sha1 = `$gitcmd rev-list -n1 ''\Q$treeish\E`;
+  unless ($? == 0 && $sha1 =~ /^([0-9a-f]{40})$/) {
+    croak("`$gitcmd rev-list` exited "
+          .exit_status_s($?)
+          .", '$treeish' not found. Giving up.");
+  }
+  $commit = $1;
+  Log(undef, "Version $treeish is commit $commit");
+
+  if ($commit ne $Job->{'script_version'}) {
+    # Record the real commit id in the database, frozentokey, logs,
+    # etc. -- instead of an abbreviation or a branch name which can
+    # become ambiguous or point to a different commit in the future.
+    if (!$Job->update_attributes('script_version' => $commit)) {
+      croak("Error: failed to update job's script_version attribute");
+    }
+  }
+
+  $ENV{"CRUNCH_SRC_COMMIT"} = $commit;
+  add_git_archive("$gitcmd archive ''\Q$commit\E");
+}
+
+my $git_archive = combined_git_archive();
+if (!defined $git_archive) {
+  Log(undef, "Skip install phase (no git archive)");
+  if ($have_slurm) {
+    Log(undef, "Warning: This probably means workers have no source tree!");
+  }
+}
+else {
+  Log(undef, "Run install script on all workers");
+
+  my @srunargs = ("srun",
+                  "--nodelist=$nodelist",
+                  "-D", $ENV{'TMPDIR'}, "--job-name=$job_id");
+  my @execargs = ("sh", "-c",
+                  "mkdir -p $ENV{CRUNCH_INSTALL} && cd $ENV{CRUNCH_TMP} && perl -");
+
+  my $installpid = fork();
+  if ($installpid == 0)
+  {
+    srun (\@srunargs, \@execargs, {}, $build_script . $git_archive);
+    exit (1);
+  }
+  while (1)
+  {
+    last if $installpid == waitpid (-1, WNOHANG);
+    freeze_if_want_freeze ($installpid);
+    select (undef, undef, undef, 0.1);
+  }
+  my $install_exited = $?;
+  Log (undef, "Install script exited ".exit_status_s($install_exited));
+  foreach my $tar_filename (map { tar_filename_n($_); } (1..$git_tar_count)) {
+    unlink($tar_filename);
+  }
+  exit (1) if $install_exited != 0;
+}
+
+foreach (qw (script script_version script_parameters runtime_constraints))
+{
+  Log (undef,
+       "$_ " .
+       (ref($Job->{$_}) ? JSON::encode_json($Job->{$_}) : $Job->{$_}));
+}
+foreach (split (/\n/, $Job->{knobs}))
+{
+  Log (undef, "knob " . $_);
+}
+
+
+
+$main::success = undef;
+
+
+
+ONELEVEL:
+
+my $thisround_succeeded = 0;
+my $thisround_failed = 0;
+my $thisround_failed_multiple = 0;
+
+@jobstep_todo = sort { $jobstep[$a]->{level} <=> $jobstep[$b]->{level}
+                      or $a <=> $b } @jobstep_todo;
+my $level = $jobstep[$jobstep_todo[0]]->{level};
+Log (undef, "start level $level");
+
+
+
+my %proc;
+my @freeslot = (0..$#slot);
+my @holdslot;
+my %reader;
+my $progress_is_dirty = 1;
+my $progress_stats_updated = 0;
+
+update_progress_stats();
+
+
+
+THISROUND:
+for (my $todo_ptr = 0; $todo_ptr <= $#jobstep_todo; $todo_ptr ++)
+{
+  my $id = $jobstep_todo[$todo_ptr];
+  my $Jobstep = $jobstep[$id];
+  if ($Jobstep->{level} != $level)
+  {
+    next;
+  }
+
+  pipe $reader{$id}, "writer" or croak ($!);
+  my $flags = fcntl ($reader{$id}, F_GETFL, 0) or croak ($!);
+  fcntl ($reader{$id}, F_SETFL, $flags | O_NONBLOCK) or croak ($!);
+
+  my $childslot = $freeslot[0];
+  my $childnode = $slot[$childslot]->{node};
+  my $childslotname = join (".",
+                           $slot[$childslot]->{node}->{name},
+                           $slot[$childslot]->{cpu});
+  my $childpid = fork();
+  if ($childpid == 0)
+  {
+    $SIG{'INT'} = 'DEFAULT';
+    $SIG{'QUIT'} = 'DEFAULT';
+    $SIG{'TERM'} = 'DEFAULT';
+
+    foreach (values (%reader))
+    {
+      close($_);
+    }
+    fcntl ("writer", F_SETFL, 0) or croak ($!); # no close-on-exec
+    open(STDOUT,">&writer");
+    open(STDERR,">&writer");
+
+    undef $dbh;
+    undef $sth;
+
+    delete $ENV{"GNUPGHOME"};
+    $ENV{"TASK_UUID"} = $Jobstep->{'arvados_task'}->{'uuid'};
+    $ENV{"TASK_QSEQUENCE"} = $id;
+    $ENV{"TASK_SEQUENCE"} = $level;
+    $ENV{"JOB_SCRIPT"} = $Job->{script};
+    while (my ($param, $value) = each %{$Job->{script_parameters}}) {
+      $param =~ tr/a-z/A-Z/;
+      $ENV{"JOB_PARAMETER_$param"} = $value;
+    }
+    $ENV{"TASK_SLOT_NODE"} = $slot[$childslot]->{node}->{name};
+    $ENV{"TASK_SLOT_NUMBER"} = $slot[$childslot]->{cpu};
+    $ENV{"TASK_WORK"} = $ENV{"CRUNCH_TMP"}."/task/$childslotname";
+    $ENV{"HOME"} = $ENV{"TASK_WORK"};
+    $ENV{"TASK_KEEPMOUNT"} = $ENV{"TASK_WORK"}.".keep";
+    $ENV{"TASK_TMPDIR"} = $ENV{"TASK_WORK"}; # deprecated
+    $ENV{"CRUNCH_NODE_SLOTS"} = $slot[$childslot]->{node}->{ncpus};
+    $ENV{"PATH"} = $ENV{"CRUNCH_INSTALL"} . "/bin:" . $ENV{"PATH"};
+
+    $ENV{"GZIP"} = "-n";
+
+    my @srunargs = (
+      "srun",
+      "--nodelist=".$childnode->{name},
+      qw(-n1 -c1 -N1 -D), $ENV{'TMPDIR'},
+      "--job-name=$job_id.$id.$$",
+       );
+    my $command =
+       "if [ -e $ENV{TASK_WORK} ]; then rm -rf $ENV{TASK_WORK}; fi; "
+        ."mkdir -p $ENV{CRUNCH_TMP} $ENV{JOB_WORK} $ENV{TASK_WORK} $ENV{TASK_KEEPMOUNT} "
+       ."&& cd $ENV{CRUNCH_TMP} ";
+    $command .= "&& exec arv-mount --by-id --allow-other $ENV{TASK_KEEPMOUNT} --exec ";
+    if ($docker_hash)
+    {
+      my $cidfile = "$ENV{CRUNCH_TMP}/$ENV{TASK_UUID}.cid";
+      $command .= "crunchstat -cgroup-root=/sys/fs/cgroup -cgroup-parent=docker -cgroup-cid=$cidfile -poll=10000 ";
+      $command .= "$docker_bin run --rm=true --attach=stdout --attach=stderr --attach=stdin -i --user=crunch --cidfile=$cidfile --sig-proxy ";
+
+      # Dynamically configure the container to use the host system as its
+      # DNS server.  Get the host's global addresses from the ip command,
+      # and turn them into docker --dns options using gawk.
+      $command .=
+          q{$(ip -o address show scope global |
+              gawk 'match($4, /^([0-9\.:]+)\//, x){print "--dns", x[1]}') };
+
+      # The source tree and $destdir directory (which we have
+      # installed on the worker host) are available in the container,
+      # under the same path.
+      $command .= "--volume=\Q$ENV{CRUNCH_SRC}:$ENV{CRUNCH_SRC}:ro\E ";
+      $command .= "--volume=\Q$ENV{CRUNCH_INSTALL}:$ENV{CRUNCH_INSTALL}:ro\E ";
+
+      # Currently, we make arv-mount's mount point appear at /keep
+      # inside the container (instead of using the same path as the
+      # host like we do with CRUNCH_SRC and CRUNCH_INSTALL). However,
+      # crunch scripts and utilities must not rely on this. They must
+      # use $TASK_KEEPMOUNT.
+      $command .= "--volume=\Q$ENV{TASK_KEEPMOUNT}:/keep:ro\E ";
+      $ENV{TASK_KEEPMOUNT} = "/keep";
+
+      # TASK_WORK is almost exactly like a docker data volume: it
+      # starts out empty, is writable, and persists until no
+      # containers use it any more. We don't use --volumes-from to
+      # share it with other containers: it is only accessible to this
+      # task, and it goes away when this task stops.
+      #
+      # However, a docker data volume is writable only by root unless
+      # the mount point already happens to exist in the container with
+      # different permissions. Therefore, we [1] assume /tmp already
+      # exists in the image and is writable by the crunch user; [2]
+      # avoid putting TASK_WORK inside CRUNCH_TMP (which won't be
+      # writable if they are created by docker while setting up the
+      # other --volumes); and [3] create $TASK_WORK inside the
+      # container using $build_script.
+      $command .= "--volume=/tmp ";
+      $ENV{"TASK_WORK"} = "/tmp/crunch-job-task-work/$childslotname";
+      $ENV{"HOME"} = $ENV{"TASK_WORK"};
+      $ENV{"TASK_TMPDIR"} = $ENV{"TASK_WORK"}; # deprecated
+
+      # TODO: Share a single JOB_WORK volume across all task
+      # containers on a given worker node, and delete it when the job
+      # ends (and, in case that doesn't work, when the next job
+      # starts).
+      #
+      # For now, use the same approach as TASK_WORK above.
+      $ENV{"JOB_WORK"} = "/tmp/crunch-job-work";
+
+      while (my ($env_key, $env_val) = each %ENV)
+      {
+        if ($env_key =~ /^(ARVADOS|CRUNCH|JOB|TASK)_/) {
+          $command .= "--env=\Q$env_key=$env_val\E ";
+        }
+      }
+      $command .= "--env=\QHOME=$ENV{HOME}\E ";
+      $command .= "\Q$docker_hash\E ";
+      $command .= "stdbuf --output=0 --error=0 ";
+      $command .= "perl - $ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};
+    } else {
+      # Non-docker run
+      $command .= "crunchstat -cgroup-root=/sys/fs/cgroup -poll=10000 ";
+      $command .= "stdbuf --output=0 --error=0 ";
+      $command .= "perl - $ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};
+    }
+
+    my @execargs = ('bash', '-c', $command);
+    srun (\@srunargs, \@execargs, undef, $build_script);
+    # exec() failed, we assume nothing happened.
+    die "srun() failed on build script\n";
+  }
+  close("writer");
+  if (!defined $childpid)
+  {
+    close $reader{$id};
+    delete $reader{$id};
+    next;
+  }
+  shift @freeslot;
+  $proc{$childpid} = { jobstep => $id,
+                      time => time,
+                      slot => $childslot,
+                      jobstepname => "$job_id.$id.$childpid",
+                    };
+  croak ("assert failed: \$slot[$childslot]->{'pid'} exists") if exists $slot[$childslot]->{pid};
+  $slot[$childslot]->{pid} = $childpid;
+
+  Log ($id, "job_task ".$Jobstep->{'arvados_task'}->{'uuid'});
+  Log ($id, "child $childpid started on $childslotname");
+  $Jobstep->{starttime} = time;
+  $Jobstep->{node} = $childnode->{name};
+  $Jobstep->{slotindex} = $childslot;
+  delete $Jobstep->{stderr};
+  delete $Jobstep->{finishtime};
+
+  $Jobstep->{'arvados_task'}->{started_at} = strftime "%Y-%m-%dT%H:%M:%SZ", gmtime($Jobstep->{starttime});
+  $Jobstep->{'arvados_task'}->save;
+
+  splice @jobstep_todo, $todo_ptr, 1;
+  --$todo_ptr;
+
+  $progress_is_dirty = 1;
+
+  while (!@freeslot
+        ||
+        (@slot > @freeslot && $todo_ptr+1 > $#jobstep_todo))
+  {
+    last THISROUND if $main::please_freeze;
+    if ($main::please_info)
+    {
+      $main::please_info = 0;
+      freeze();
+      create_output_collection();
+      save_meta(1);
+      update_progress_stats();
+    }
+    my $gotsome
+       = readfrompipes ()
+       + reapchildren ();
+    if (!$gotsome)
+    {
+      check_refresh_wanted();
+      check_squeue();
+      update_progress_stats();
+      select (undef, undef, undef, 0.1);
+    }
+    elsif (time - $progress_stats_updated >= 30)
+    {
+      update_progress_stats();
+    }
+    if (($thisround_failed_multiple >= 8 && $thisround_succeeded == 0) ||
+       ($thisround_failed_multiple >= 16 && $thisround_failed_multiple > $thisround_succeeded))
+    {
+      my $message = "Repeated failure rate too high ($thisround_failed_multiple/"
+         .($thisround_failed+$thisround_succeeded)
+         .") -- giving up on this round";
+      Log (undef, $message);
+      last THISROUND;
+    }
+
+    # move slots from freeslot to holdslot (or back to freeslot) if necessary
+    for (my $i=$#freeslot; $i>=0; $i--) {
+      if ($slot[$freeslot[$i]]->{node}->{hold_until} > scalar time) {
+       push @holdslot, (splice @freeslot, $i, 1);
+      }
+    }
+    for (my $i=$#holdslot; $i>=0; $i--) {
+      if ($slot[$holdslot[$i]]->{node}->{hold_until} <= scalar time) {
+       push @freeslot, (splice @holdslot, $i, 1);
+      }
+    }
+
+    # give up if no nodes are succeeding
+    if (!grep { $_->{node}->{losing_streak} == 0 &&
+                    $_->{node}->{hold_count} < 4 } @slot) {
+      my $message = "Every node has failed -- giving up on this round";
+      Log (undef, $message);
+      last THISROUND;
+    }
+  }
+}
+
+
+push @freeslot, splice @holdslot;
+map { $slot[$freeslot[$_]]->{node}->{losing_streak} = 0 } (0..$#freeslot);
+
+
+Log (undef, "wait for last ".(scalar keys %proc)." children to finish");
+while (%proc)
+{
+  if ($main::please_continue) {
+    $main::please_continue = 0;
+    goto THISROUND;
+  }
+  $main::please_info = 0, freeze(), create_output_collection(), save_meta(1) if $main::please_info;
+  readfrompipes ();
+  if (!reapchildren())
+  {
+    check_refresh_wanted();
+    check_squeue();
+    update_progress_stats();
+    select (undef, undef, undef, 0.1);
+    killem (keys %proc) if $main::please_freeze;
+  }
+}
+
+update_progress_stats();
+freeze_if_want_freeze();
+
+
+if (!defined $main::success)
+{
+  if (@jobstep_todo &&
+      $thisround_succeeded == 0 &&
+      ($thisround_failed == 0 || $thisround_failed > 4))
+  {
+    my $message = "stop because $thisround_failed tasks failed and none succeeded";
+    Log (undef, $message);
+    $main::success = 0;
+  }
+  if (!@jobstep_todo)
+  {
+    $main::success = 1;
+  }
+}
+
+goto ONELEVEL if !defined $main::success;
+
+
+release_allocation();
+freeze();
+my $collated_output = &create_output_collection();
+
+if (!$collated_output) {
+  Log (undef, "Failed to write output collection");
+}
+else {
+  Log(undef, "output hash " . $collated_output);
+  $Job->update_attributes('output' => $collated_output);
+}
+
+Log (undef, "finish");
+
+save_meta();
+
+my $final_state;
+if ($collated_output && $main::success) {
+  $final_state = 'Complete';
+} else {
+  $final_state = 'Failed';
+}
+$Job->update_attributes('state' => $final_state);
+
+exit (($final_state eq 'Complete') ? 0 : 1);
+
+
+
+sub update_progress_stats
+{
+  $progress_stats_updated = time;
+  return if !$progress_is_dirty;
+  my ($todo, $done, $running) = (scalar @jobstep_todo,
+                                scalar @jobstep_done,
+                                scalar @slot - scalar @freeslot - scalar @holdslot);
+  $Job->{'tasks_summary'} ||= {};
+  $Job->{'tasks_summary'}->{'todo'} = $todo;
+  $Job->{'tasks_summary'}->{'done'} = $done;
+  $Job->{'tasks_summary'}->{'running'} = $running;
+  $Job->update_attributes('tasks_summary' => $Job->{'tasks_summary'});
+  Log (undef, "status: $done done, $running running, $todo todo");
+  $progress_is_dirty = 0;
+}
+
+
+
+sub reapchildren
+{
+  my $pid = waitpid (-1, WNOHANG);
+  return 0 if $pid <= 0;
+
+  my $whatslot = ($slot[$proc{$pid}->{slot}]->{node}->{name}
+                 . "."
+                 . $slot[$proc{$pid}->{slot}]->{cpu});
+  my $jobstepid = $proc{$pid}->{jobstep};
+  my $elapsed = time - $proc{$pid}->{time};
+  my $Jobstep = $jobstep[$jobstepid];
+
+  my $childstatus = $?;
+  my $exitvalue = $childstatus >> 8;
+  my $exitinfo = "exit ".exit_status_s($childstatus);
+  $Jobstep->{'arvados_task'}->reload;
+  my $task_success = $Jobstep->{'arvados_task'}->{success};
+
+  Log ($jobstepid, "child $pid on $whatslot $exitinfo success=$task_success");
+
+  if (!defined $task_success) {
+    # task did not indicate one way or the other --> fail
+    $Jobstep->{'arvados_task'}->{success} = 0;
+    $Jobstep->{'arvados_task'}->save;
+    $task_success = 0;
+  }
+
+  if (!$task_success)
+  {
+    my $temporary_fail;
+    $temporary_fail ||= $Jobstep->{node_fail};
+    $temporary_fail ||= ($exitvalue == 111);
+
+    ++$thisround_failed;
+    ++$thisround_failed_multiple if $Jobstep->{'failures'} >= 1;
+
+    # Check for signs of a failed or misconfigured node
+    if (++$slot[$proc{$pid}->{slot}]->{node}->{losing_streak} >=
+       2+$slot[$proc{$pid}->{slot}]->{node}->{ncpus}) {
+      # Don't count this against jobstep failure thresholds if this
+      # node is already suspected faulty and srun exited quickly
+      if ($slot[$proc{$pid}->{slot}]->{node}->{hold_until} &&
+         $elapsed < 5) {
+       Log ($jobstepid, "blaming failure on suspect node " .
+             $slot[$proc{$pid}->{slot}]->{node}->{name});
+        $temporary_fail ||= 1;
+      }
+      ban_node_by_slot($proc{$pid}->{slot});
+    }
+
+    Log ($jobstepid, sprintf('failure (#%d, %s) after %d seconds',
+                             ++$Jobstep->{'failures'},
+                             $temporary_fail ? 'temporary ' : 'permanent',
+                             $elapsed));
+
+    if (!$temporary_fail || $Jobstep->{'failures'} >= 3) {
+      # Give up on this task, and the whole job
+      $main::success = 0;
+      $main::please_freeze = 1;
+    }
+    # Put this task back on the todo queue
+    push @jobstep_todo, $jobstepid;
+    $Job->{'tasks_summary'}->{'failed'}++;
+  }
+  else
+  {
+    ++$thisround_succeeded;
+    $slot[$proc{$pid}->{slot}]->{node}->{losing_streak} = 0;
+    $slot[$proc{$pid}->{slot}]->{node}->{hold_until} = 0;
+    push @jobstep_done, $jobstepid;
+    Log ($jobstepid, "success in $elapsed seconds");
+  }
+  $Jobstep->{exitcode} = $childstatus;
+  $Jobstep->{finishtime} = time;
+  $Jobstep->{'arvados_task'}->{finished_at} = strftime "%Y-%m-%dT%H:%M:%SZ", gmtime($Jobstep->{finishtime});
+  $Jobstep->{'arvados_task'}->save;
+  process_stderr ($jobstepid, $task_success);
+  Log ($jobstepid, "output " . $Jobstep->{'arvados_task'}->{output});
+
+  close $reader{$jobstepid};
+  delete $reader{$jobstepid};
+  delete $slot[$proc{$pid}->{slot}]->{pid};
+  push @freeslot, $proc{$pid}->{slot};
+  delete $proc{$pid};
+
+  if ($task_success) {
+    # Load new tasks
+    my $newtask_list = [];
+    my $newtask_results;
+    do {
+      $newtask_results = api_call(
+        "job_tasks/list",
+        'where' => {
+          'created_by_job_task_uuid' => $Jobstep->{'arvados_task'}->{uuid}
+        },
+        'order' => 'qsequence',
+        'offset' => scalar(@$newtask_list),
+      );
+      push(@$newtask_list, @{$newtask_results->{items}});
+    } while (@{$newtask_results->{items}});
+    foreach my $arvados_task (@$newtask_list) {
+      my $jobstep = {
+        'level' => $arvados_task->{'sequence'},
+        'failures' => 0,
+        'arvados_task' => $arvados_task
+      };
+      push @jobstep, $jobstep;
+      push @jobstep_todo, $#jobstep;
+    }
+  }
+
+  $progress_is_dirty = 1;
+  1;
+}
+
+sub check_refresh_wanted
+{
+  my @stat = stat $ENV{"CRUNCH_REFRESH_TRIGGER"};
+  if (@stat && $stat[9] > $latest_refresh) {
+    $latest_refresh = scalar time;
+    my $Job2 = api_call("jobs/get", uuid => $jobspec);
+    for my $attr ('cancelled_at',
+                  'cancelled_by_user_uuid',
+                  'cancelled_by_client_uuid',
+                  'state') {
+      $Job->{$attr} = $Job2->{$attr};
+    }
+    if ($Job->{'state'} ne "Running") {
+      if ($Job->{'state'} eq "Cancelled") {
+        Log (undef, "Job cancelled at " . $Job->{'cancelled_at'} . " by user " . $Job->{'cancelled_by_user_uuid'});
+      } else {
+        Log (undef, "Job state unexpectedly changed to " . $Job->{'state'});
+      }
+      $main::success = 0;
+      $main::please_freeze = 1;
+    }
+  }
+}
+
+sub check_squeue
+{
+  # return if the kill list was checked <4 seconds ago
+  if (defined $squeue_kill_checked && $squeue_kill_checked > time - 4)
+  {
+    return;
+  }
+  $squeue_kill_checked = time;
+
+  # use killem() on procs whose killtime is reached
+  for (keys %proc)
+  {
+    if (exists $proc{$_}->{killtime}
+       && $proc{$_}->{killtime} <= time)
+    {
+      killem ($_);
+    }
+  }
+
+  # return if the squeue was checked <60 seconds ago
+  if (defined $squeue_checked && $squeue_checked > time - 60)
+  {
+    return;
+  }
+  $squeue_checked = time;
+
+  if (!$have_slurm)
+  {
+    # here is an opportunity to check for mysterious problems with local procs
+    return;
+  }
+
+  # get a list of steps still running
+  my @squeue = `squeue -s -h -o '%i %j' && echo ok`;
+  chop @squeue;
+  if ($squeue[-1] ne "ok")
+  {
+    return;
+  }
+  pop @squeue;
+
+  # which of my jobsteps are running, according to squeue?
+  my %ok;
+  foreach (@squeue)
+  {
+    if (/^(\d+)\.(\d+) (\S+)/)
+    {
+      if ($1 eq $ENV{SLURM_JOBID})
+      {
+       $ok{$3} = 1;
+      }
+    }
+  }
+
+  # which of my active child procs (>60s old) were not mentioned by squeue?
+  foreach (keys %proc)
+  {
+    if ($proc{$_}->{time} < time - 60
+       && !exists $ok{$proc{$_}->{jobstepname}}
+       && !exists $proc{$_}->{killtime})
+    {
+      # kill this proc if it hasn't exited in 30 seconds
+      $proc{$_}->{killtime} = time + 30;
+    }
+  }
+}
+
+
+sub release_allocation
+{
+  if ($have_slurm)
+  {
+    Log (undef, "release job allocation");
+    system "scancel $ENV{SLURM_JOBID}";
+  }
+}
+
+
+sub readfrompipes
+{
+  my $gotsome = 0;
+  foreach my $job (keys %reader)
+  {
+    my $buf;
+    while (0 < sysread ($reader{$job}, $buf, 8192))
+    {
+      print STDERR $buf if $ENV{CRUNCH_DEBUG};
+      $jobstep[$job]->{stderr} .= $buf;
+      preprocess_stderr ($job);
+      if (length ($jobstep[$job]->{stderr}) > 16384)
+      {
+       substr ($jobstep[$job]->{stderr}, 0, 8192) = "";
+      }
+      $gotsome = 1;
+    }
+  }
+  return $gotsome;
+}
+
+
+sub preprocess_stderr
+{
+  my $job = shift;
+
+  while ($jobstep[$job]->{stderr} =~ /^(.*?)\n/) {
+    my $line = $1;
+    substr $jobstep[$job]->{stderr}, 0, 1+length($line), "";
+    Log ($job, "stderr $line");
+    if ($line =~ /srun: error: (SLURM job $ENV{SLURM_JOB_ID} has expired|Unable to confirm allocation for job $ENV{SLURM_JOB_ID})/) {
+      # whoa.
+      $main::please_freeze = 1;
+    }
+    elsif ($line =~ /srun: error: (Node failure on|Unable to create job step) /) {
+      $jobstep[$job]->{node_fail} = 1;
+      ban_node_by_slot($jobstep[$job]->{slotindex});
+    }
+  }
+}
+
+
+sub process_stderr
+{
+  my $job = shift;
+  my $task_success = shift;
+  preprocess_stderr ($job);
+
+  map {
+    Log ($job, "stderr $_");
+  } split ("\n", $jobstep[$job]->{stderr});
+}
+
+sub fetch_block
+{
+  my $hash = shift;
+  my ($keep, $child_out, $output_block);
+
+  my $cmd = "arv-get \Q$hash\E";
+  open($keep, '-|', $cmd) or die "fetch_block: $cmd: $!";
+  $output_block = '';
+  while (1) {
+    my $buf;
+    my $bytes = sysread($keep, $buf, 1024 * 1024);
+    if (!defined $bytes) {
+      die "reading from arv-get: $!";
+    } elsif ($bytes == 0) {
+      # sysread returns 0 at the end of the pipe.
+      last;
+    } else {
+      # some bytes were read into buf.
+      $output_block .= $buf;
+    }
+  }
+  close $keep;
+  return $output_block;
+}
+
+# create_output_collection generates a new collection containing the
+# output of each successfully completed task, and returns the
+# portable_data_hash for the new collection.
+#
+sub create_output_collection
+{
+  Log (undef, "collate");
+
+  my ($child_out, $child_in);
+  my $pid = open2($child_out, $child_in, 'python', '-c',
+                  'import arvados; ' .
+                  'import sys; ' .
+                  'print arvados.api()' .
+                  '.collections()' .
+                  '.create(body={"manifest_text":sys.stdin.read()})' .
+                  '.execute()["portable_data_hash"]'
+      );
+
+  for (@jobstep)
+  {
+    next if (!exists $_->{'arvados_task'}->{'output'} ||
+             !$_->{'arvados_task'}->{'success'});
+    my $output = $_->{'arvados_task'}->{output};
+    if ($output !~ /^[0-9a-f]{32}(\+\S+)*$/)
+    {
+      print $child_in $output;
+    }
+    elsif (defined (my $outblock = fetch_block ($output)))
+    {
+      print $child_in $outblock;
+    }
+    else
+    {
+      Log (undef, "XXX fetch_block($output) failed XXX");
+      $main::success = 0;
+    }
+  }
+  $child_in->close;
+
+  my $joboutput;
+  my $s = IO::Select->new($child_out);
+  if ($s->can_read(120)) {
+    sysread($child_out, $joboutput, 64 * 1024 * 1024);
+    chomp($joboutput);
+    # TODO: Ensure exit status == 0.
+  } else {
+    Log (undef, "timed out while creating output collection");
+  }
+  # TODO: kill $pid instead of waiting, now that we've decided to
+  # ignore further output.
+  waitpid($pid, 0);
+
+  return $joboutput;
+}
+
+
+sub killem
+{
+  foreach (@_)
+  {
+    my $sig = 2;               # SIGINT first
+    if (exists $proc{$_}->{"sent_$sig"} &&
+       time - $proc{$_}->{"sent_$sig"} > 4)
+    {
+      $sig = 15;               # SIGTERM if SIGINT doesn't work
+    }
+    if (exists $proc{$_}->{"sent_$sig"} &&
+       time - $proc{$_}->{"sent_$sig"} > 4)
+    {
+      $sig = 9;                        # SIGKILL if SIGTERM doesn't work
+    }
+    if (!exists $proc{$_}->{"sent_$sig"})
+    {
+      Log ($proc{$_}->{jobstep}, "sending 2x signal $sig to pid $_");
+      kill $sig, $_;
+      select (undef, undef, undef, 0.1);
+      if ($sig == 2)
+      {
+       kill $sig, $_;     # srun wants two SIGINT to really interrupt
+      }
+      $proc{$_}->{"sent_$sig"} = time;
+      $proc{$_}->{"killedafter"} = time - $proc{$_}->{"time"};
+    }
+  }
+}
+
+
+sub fhbits
+{
+  my($bits);
+  for (@_) {
+    vec($bits,fileno($_),1) = 1;
+  }
+  $bits;
+}
+
+
+# Send log output to Keep via arv-put.
+#
+# $log_pipe_in and $log_pipe_out are the input and output filehandles to the arv-put pipe.
+# $log_pipe_pid is the pid of the arv-put subprocess.
+#
+# The only functions that should access these variables directly are:
+#
+# log_writer_start($logfilename)
+#     Starts an arv-put pipe, reading data on stdin and writing it to
+#     a $logfilename file in an output collection.
+#
+# log_writer_send($txt)
+#     Writes $txt to the output log collection.
+#
+# log_writer_finish()
+#     Closes the arv-put pipe and returns the output that it produces.
+#
+# log_writer_is_active()
+#     Returns a true value if there is currently a live arv-put
+#     process, false otherwise.
+#
+my ($log_pipe_in, $log_pipe_out, $log_pipe_pid);
+
+sub log_writer_start($)
+{
+  my $logfilename = shift;
+  $log_pipe_pid = open2($log_pipe_out, $log_pipe_in,
+                        'arv-put', '--portable-data-hash',
+                        '--retries', '3',
+                        '--filename', $logfilename,
+                        '-');
+}
+
+sub log_writer_send($)
+{
+  my $txt = shift;
+  print $log_pipe_in $txt;
+}
+
+sub log_writer_finish()
+{
+  return unless $log_pipe_pid;
+
+  close($log_pipe_in);
+  my $arv_put_output;
+
+  my $s = IO::Select->new($log_pipe_out);
+  if ($s->can_read(120)) {
+    sysread($log_pipe_out, $arv_put_output, 1024);
+    chomp($arv_put_output);
+  } else {
+    Log (undef, "timed out reading from 'arv-put'");
+  }
+
+  waitpid($log_pipe_pid, 0);
+  $log_pipe_pid = $log_pipe_in = $log_pipe_out = undef;
+  if ($?) {
+    Log("log_writer_finish: arv-put exited ".exit_status_s($?))
+  }
+
+  return $arv_put_output;
+}
+
+sub log_writer_is_active() {
+  return $log_pipe_pid;
+}
+
+sub Log                                # ($jobstep_id, $logmessage)
+{
+  if ($_[1] =~ /\n/) {
+    for my $line (split (/\n/, $_[1])) {
+      Log ($_[0], $line);
+    }
+    return;
+  }
+  my $fh = select STDERR; $|=1; select $fh;
+  my $message = sprintf ("%s %d %s %s", $job_id, $$, @_);
+  $message =~ s{([^ -\176])}{"\\" . sprintf ("%03o", ord($1))}ge;
+  $message .= "\n";
+  my $datetime;
+  if (log_writer_is_active() || -t STDERR) {
+    my @gmtime = gmtime;
+    $datetime = sprintf ("%04d-%02d-%02d_%02d:%02d:%02d",
+                        $gmtime[5]+1900, $gmtime[4]+1, @gmtime[3,2,1,0]);
+  }
+  print STDERR ((-t STDERR) ? ($datetime." ".$message) : $message);
+
+  if (log_writer_is_active()) {
+    log_writer_send($datetime . " " . $message);
+  }
+}
+
+
+sub croak
+{
+  my ($package, $file, $line) = caller;
+  my $message = "@_ at $file line $line\n";
+  Log (undef, $message);
+  freeze() if @jobstep_todo;
+  create_output_collection() if @jobstep_todo;
+  cleanup();
+  save_meta();
+  die;
+}
+
+
+sub cleanup
+{
+  return unless $Job;
+  if ($Job->{'state'} eq 'Cancelled') {
+    $Job->update_attributes('finished_at' => scalar gmtime);
+  } else {
+    $Job->update_attributes('state' => 'Failed');
+  }
+}
+
+
+sub save_meta
+{
+  my $justcheckpoint = shift; # false if this will be the last meta saved
+  return if $justcheckpoint;  # checkpointing is not relevant post-Warehouse.pm
+  return unless log_writer_is_active();
+
+  my $loglocator = log_writer_finish();
+  Log (undef, "log manifest is $loglocator");
+  $Job->{'log'} = $loglocator;
+  $Job->update_attributes('log', $loglocator);
+}
+
+
+sub freeze_if_want_freeze
+{
+  if ($main::please_freeze)
+  {
+    release_allocation();
+    if (@_)
+    {
+      # kill some srun procs before freeze+stop
+      map { $proc{$_} = {} } @_;
+      while (%proc)
+      {
+       killem (keys %proc);
+       select (undef, undef, undef, 0.1);
+       my $died;
+       while (($died = waitpid (-1, WNOHANG)) > 0)
+       {
+         delete $proc{$died};
+       }
+      }
+    }
+    freeze();
+    create_output_collection();
+    cleanup();
+    save_meta();
+    exit 1;
+  }
+}
+
+
+sub freeze
+{
+  Log (undef, "Freeze not implemented");
+  return;
+}
+
+
+sub thaw
+{
+  croak ("Thaw not implemented");
+}
+
+
+sub freezequote
+{
+  my $s = shift;
+  $s =~ s/\\/\\\\/g;
+  $s =~ s/\n/\\n/g;
+  return $s;
+}
+
+
+sub freezeunquote
+{
+  my $s = shift;
+  $s =~ s{\\(.)}{$1 eq "n" ? "\n" : $1}ge;
+  return $s;
+}
+
+
+sub srun
+{
+  my $srunargs = shift;
+  my $execargs = shift;
+  my $opts = shift || {};
+  my $stdin = shift;
+  my $args = $have_slurm ? [@$srunargs, @$execargs] : $execargs;
+
+  $Data::Dumper::Terse = 1;
+  $Data::Dumper::Indent = 0;
+  my $show_cmd = Dumper($args);
+  $show_cmd =~ s/(TOKEN\\*=)[^\s\']+/${1}[...]/g;
+  $show_cmd =~ s/\n/ /g;
+  warn "starting: $show_cmd\n";
+
+  if (defined $stdin) {
+    my $child = open STDIN, "-|";
+    defined $child or die "no fork: $!";
+    if ($child == 0) {
+      print $stdin or die $!;
+      close STDOUT or die $!;
+      exit 0;
+    }
+  }
+
+  return system (@$args) if $opts->{fork};
+
+  exec @$args;
+  warn "ENV size is ".length(join(" ",%ENV));
+  die "exec failed: $!: @$args";
+}
+
+
+sub ban_node_by_slot {
+  # Don't start any new jobsteps on this node for 60 seconds
+  my $slotid = shift;
+  $slot[$slotid]->{node}->{hold_until} = 60 + scalar time;
+  $slot[$slotid]->{node}->{hold_count}++;
+  Log (undef, "backing off node " . $slot[$slotid]->{node}->{name} . " for 60 seconds");
+}
+
+sub must_lock_now
+{
+  my ($lockfile, $error_message) = @_;
+  open L, ">", $lockfile or croak("$lockfile: $!");
+  if (!flock L, LOCK_EX|LOCK_NB) {
+    croak("Can't lock $lockfile: $error_message\n");
+  }
+}
+
+sub find_docker_image {
+  # Given a Keep locator, check to see if it contains a Docker image.
+  # If so, return its stream name and Docker hash.
+  # If not, return undef for both values.
+  my $locator = shift;
+  my ($streamname, $filename);
+  my $image = api_call("collections/get", uuid => $locator);
+  if ($image) {
+    foreach my $line (split(/\n/, $image->{manifest_text})) {
+      my @tokens = split(/\s+/, $line);
+      next if (!@tokens);
+      $streamname = shift(@tokens);
+      foreach my $filedata (grep(/^\d+:\d+:/, @tokens)) {
+        if (defined($filename)) {
+          return (undef, undef);  # More than one file in the Collection.
+        } else {
+          $filename = (split(/:/, $filedata, 3))[2];
+        }
+      }
+    }
+  }
+  if (defined($filename) and ($filename =~ /^([0-9A-Fa-f]{64})\.tar$/)) {
+    return ($streamname, $1);
+  } else {
+    return (undef, undef);
+  }
+}
+
+sub retry_count {
+  # Calculate the number of times an operation should be retried,
+  # assuming exponential backoff, and that we're willing to retry as
+  # long as tasks have been running.  Enforce a minimum of 3 retries.
+  my ($starttime, $endtime, $timediff, $retries);
+  if (@jobstep) {
+    $starttime = $jobstep[0]->{starttime};
+    $endtime = $jobstep[-1]->{finishtime};
+  }
+  if (!defined($starttime)) {
+    $timediff = 0;
+  } elsif (!defined($endtime)) {
+    $timediff = time - $starttime;
+  } else {
+    $timediff = ($endtime - $starttime) - (time - $endtime);
+  }
+  if ($timediff > 0) {
+    $retries = int(log($timediff) / log(2));
+  } else {
+    $retries = 1;  # Use the minimum.
+  }
+  return ($retries > 3) ? $retries : 3;
+}
+
+sub retry_op {
+  # Pass in two function references.
+  # The first function (the operation) will be called with the remaining arguments.
+  # If it dies, retry it with exponential backoff until it succeeds,
+  # or until the current retry_count is exhausted.  After each failure
+  # that can be retried, the second function will be called with
+  # the current try count (0-based), next try time, and error message.
+  my $operation = shift;
+  my $retry_callback = shift;
+  my $retries = retry_count();
+  foreach my $try_count (0..$retries) {
+    my $next_try = time + (2 ** $try_count);
+    my $result = eval { $operation->(@_); };
+    if (!$@) {
+      return $result;
+    } elsif ($try_count < $retries) {
+      $retry_callback->($try_count, $next_try, $@);
+      my $sleep_time = $next_try - time;
+      sleep($sleep_time) if ($sleep_time > 0);
+    }
+  }
+  # Ensure the error message ends in a newline, so Perl doesn't add
+  # retry_op's line number to it.
+  chomp($@);
+  die($@ . "\n");
+}
+
+sub api_call {
+  # Pass in a /-separated API method name, and arguments for it.
+  # This function will call that method, retrying as needed until
+  # the current retry_count is exhausted, with a log on the first failure.
+  my $method_name = shift;
+  my $log_api_retry = sub {
+    my ($try_count, $next_try_at, $errmsg) = @_;
+    $errmsg =~ s/\s*\bat \Q$0\E line \d+\.?\s*//;
+    $errmsg =~ s/\s/ /g;
+    $errmsg =~ s/\s+$//;
+    my $retry_msg;
+    if ($next_try_at < time) {
+      $retry_msg = "Retrying.";
+    } else {
+      my $next_try_fmt = strftime "%Y-%m-%dT%H:%M:%SZ", gmtime($next_try_at);
+      $retry_msg = "Retrying at $next_try_fmt.";
+    }
+    Log(undef, "API method $method_name failed: $errmsg. $retry_msg");
+  };
+  my $method = $arv;
+  foreach my $key (split(/\//, $method_name)) {
+    $method = $method->{$key};
+  }
+  return retry_op(sub { $method->execute(@_); }, $log_api_retry, @_);
+}
+
+sub exit_status_s {
+  # Given a $?, return a human-readable exit code string like "0" or
+  # "1" or "0 with signal 1" or "1 with signal 11".
+  my $exitcode = shift;
+  my $s = $exitcode >> 8;
+  if ($exitcode & 0x7f) {
+    $s .= " with signal " . ($exitcode & 0x7f);
+  }
+  if ($exitcode & 0x80) {
+    $s .= " with core dump";
+  }
+  return $s;
+}
+
+sub handle_readall {
+  # Pass in a glob reference to a file handle.
+  # Read all its contents and return them as a string.
+  my $fh_glob_ref = shift;
+  local $/ = undef;
+  return <$fh_glob_ref>;
+}
+
+sub tar_filename_n {
+  my $n = shift;
+  return sprintf("%s/git.%s.%d.tar", $ENV{CRUNCH_TMP}, $job_id, $n);
+}
+
+sub add_git_archive {
+  # Pass in a git archive command as a string or list, a la system().
+  # This method will save its output to be included in the archive sent to the
+  # build script.
+  my $git_input;
+  $git_tar_count++;
+  if (!open(GIT_ARCHIVE, ">", tar_filename_n($git_tar_count))) {
+    croak("Failed to save git archive: $!");
+  }
+  my $git_pid = open2(">&GIT_ARCHIVE", $git_input, @_);
+  close($git_input);
+  waitpid($git_pid, 0);
+  close(GIT_ARCHIVE);
+  if ($?) {
+    croak("Failed to save git archive: git exited " . exit_status_s($?));
+  }
+}
+
+sub combined_git_archive {
+  # Combine all saved tar archives into a single archive, then return its
+  # contents in a string.  Return undef if no archives have been saved.
+  if ($git_tar_count < 1) {
+    return undef;
+  }
+  my $base_tar_name = tar_filename_n(1);
+  foreach my $tar_to_append (map { tar_filename_n($_); } (2..$git_tar_count)) {
+    my $tar_exit = system("tar", "-Af", $base_tar_name, $tar_to_append);
+    if ($tar_exit != 0) {
+      croak("Error preparing build archive: tar -A exited " .
+            exit_status_s($tar_exit));
+    }
+  }
+  if (!open(GIT_TAR, "<", $base_tar_name)) {
+    croak("Could not open build archive: $!");
+  }
+  my $tar_contents = handle_readall(\*GIT_TAR);
+  close(GIT_TAR);
+  return $tar_contents;
+}
+
+__DATA__
+#!/usr/bin/perl
+#
+# This is crunch-job's internal dispatch script.  crunch-job running on the API
+# server invokes this script on individual compute nodes, or localhost if we're
+# running a job locally.  It gets called in two modes:
+#
+# * No arguments: Installation mode.  Read a tar archive from the DATA
+#   file handle; it includes the Crunch script's source code, and
+#   maybe SDKs as well.  Those should be installed in the proper
+#   locations.  This runs outside of any Docker container, so don't try to
+#   introspect Crunch's runtime environment.
+#
+# * With arguments: Crunch script run mode.  This script should set up the
+#   environment, then run the command specified in the arguments.  This runs
+#   inside any Docker container.
+
+use Fcntl ':flock';
+use File::Path qw( make_path remove_tree );
+use POSIX qw(getcwd);
+
+# Map SDK subdirectories to the path environments they belong to.
+my %SDK_ENVVARS = ("perl/lib" => "PERLLIB", "ruby/lib" => "RUBYLIB");
+
+my $destdir = $ENV{"CRUNCH_SRC"};
+my $commit = $ENV{"CRUNCH_SRC_COMMIT"};
+my $repo = $ENV{"CRUNCH_SRC_URL"};
+my $install_dir = $ENV{"CRUNCH_INSTALL"} || (getcwd() . "/opt");
+my $job_work = $ENV{"JOB_WORK"};
+my $task_work = $ENV{"TASK_WORK"};
+
+for my $dir ($destdir, $job_work, $task_work) {
+  if ($dir) {
+    make_path $dir;
+    -e $dir or die "Failed to create temporary directory ($dir): $!";
+  }
+}
+
+if ($task_work) {
+  remove_tree($task_work, {keep_root => 1});
+}
+
+open(STDOUT_ORIG, ">&", STDOUT);
+open(STDERR_ORIG, ">&", STDERR);
+open(STDOUT, ">>", "$destdir.log");
+open(STDERR, ">&", STDOUT);
+
+### Crunch script run mode
+if (@ARGV) {
+  # We want to do routine logging during task 0 only.  This gives the user
+  # the information they need, but avoids repeating the information for every
+  # task.
+  my $Log;
+  if ($ENV{TASK_SEQUENCE} eq "0") {
+    $Log = sub {
+      my $msg = shift;
+      printf STDERR_ORIG "[Crunch] $msg\n", @_;
+    };
+  } else {
+    $Log = sub { };
+  }
+
+  my $python_src = "$install_dir/python";
+  my $venv_dir = "$job_work/.arvados.venv";
+  my $venv_built = -e "$venv_dir/bin/activate";
+  if ((!$venv_built) and (-d $python_src) and can_run("virtualenv")) {
+    shell_or_die("virtualenv", "--quiet", "--system-site-packages",
+                 "--python=python2.7", $venv_dir);
+    shell_or_die("$venv_dir/bin/pip", "--quiet", "install", $python_src);
+    $venv_built = 1;
+    $Log->("Built Python SDK virtualenv");
+  }
+
+  my $pkgs;
+  if ($venv_built) {
+    $Log->("Running in Python SDK virtualenv");
+    $pkgs = `(\Q$venv_dir/bin/pip\E freeze 2>/dev/null | grep arvados) || dpkg-query --show '*arvados*'`;
+    my $orig_argv = join(" ", map { quotemeta($_); } @ARGV);
+    @ARGV = ("/bin/sh", "-ec",
+             ". \Q$venv_dir/bin/activate\E; exec $orig_argv");
+  } elsif (-d $python_src) {
+    $Log->("Warning: virtualenv not found inside Docker container default " +
+           "\$PATH. Can't install Python SDK.");
+  } else {
+    $pkgs = `(pip freeze 2>/dev/null | grep arvados) || dpkg-query --show '*arvados*'`;
+  }
+
+  if ($pkgs) {
+    $Log->("Using Arvados SDK:");
+    foreach my $line (split /\n/, $pkgs) {
+      $Log->($line);
+    }
+  } else {
+    $Log->("Arvados SDK packages not found");
+  }
+
+  while (my ($sdk_dir, $sdk_envkey) = each(%SDK_ENVVARS)) {
+    my $sdk_path = "$install_dir/$sdk_dir";
+    if (-d $sdk_path) {
+      if ($ENV{$sdk_envkey}) {
+        $ENV{$sdk_envkey} = "$sdk_path:" . $ENV{$sdk_envkey};
+      } else {
+        $ENV{$sdk_envkey} = $sdk_path;
+      }
+      $Log->("Arvados SDK added to %s", $sdk_envkey);
+    }
+  }
+
+  close(STDOUT);
+  close(STDERR);
+  open(STDOUT, ">&", STDOUT_ORIG);
+  open(STDERR, ">&", STDERR_ORIG);
+  exec(@ARGV);
+  die "Cannot exec `@ARGV`: $!";
+}
+
+### Installation mode
+open L, ">", "$destdir.lock" or die "$destdir.lock: $!";
+flock L, LOCK_EX;
+if (readlink ("$destdir.commit") eq $commit && -d $destdir) {
+  # This version already installed -> nothing to do.
+  exit(0);
+}
+
+unlink "$destdir.commit";
+mkdir $destdir;
+open TARX, "|-", "tar", "-xC", $destdir;
+{
+  local $/ = undef;
+  print TARX <DATA>;
+}
+if(!close(TARX)) {
+  die "'tar -xC $destdir' exited $?: $!";
+}
+
+mkdir $install_dir;
+
+my $sdk_root = "$destdir/.arvados.sdk/sdk";
+if (-d $sdk_root) {
+  foreach my $sdk_lang (("python",
+                         map { (split /\//, $_, 2)[0]; } keys(%SDK_ENVVARS))) {
+    if (-d "$sdk_root/$sdk_lang") {
+      if (!rename("$sdk_root/$sdk_lang", "$install_dir/$sdk_lang")) {
+        die "Failed to install $sdk_lang SDK: $!";
+      }
+    }
+  }
+}
+
+my $python_dir = "$install_dir/python";
+if ((-d $python_dir) and can_run("python2.7") and
+    (system("python2.7", "$python_dir/setup.py", "--quiet", "egg_info") != 0)) {
+  # egg_info failed, probably when it asked git for a build tag.
+  # Specify no build tag.
+  open(my $pysdk_cfg, ">>", "$python_dir/setup.cfg");
+  print $pysdk_cfg "\n[egg_info]\ntag_build =\n";
+  close($pysdk_cfg);
+}
+
+if (-e "$destdir/crunch_scripts/install") {
+    shell_or_die ("$destdir/crunch_scripts/install", $install_dir);
+} elsif (!-e "./install.sh" && -e "./tests/autotests.sh") {
+    # Old version
+    shell_or_die ("./tests/autotests.sh", $install_dir);
+} elsif (-e "./install.sh") {
+    shell_or_die ("./install.sh", $install_dir);
+}
+
+if ($commit) {
+    unlink "$destdir.commit.new";
+    symlink ($commit, "$destdir.commit.new") or die "$destdir.commit.new: $!";
+    rename ("$destdir.commit.new", "$destdir.commit") or die "$destdir.commit: $!";
+}
+
+close L;
+
+sub can_run {
+  my $command_name = shift;
+  open(my $which, "-|", "which", $command_name);
+  while (<$which>) { }
+  close($which);
+  return ($? == 0);
+}
+
+sub shell_or_die
+{
+  if ($ENV{"DEBUG"}) {
+    print STDERR "@_\n";
+  }
+  if (system (@_) != 0) {
+    my $err = $!;
+    my $exitstatus = sprintf("exit %d signal %d", $? >> 8, $? & 0x7f);
+    open STDERR, ">&STDERR_ORIG";
+    system ("cat $destdir.log >&2");
+    die "@_ failed ($err): $exitstatus";
+  }
+}
+
+__DATA__
diff --git a/sdk/cli/test/test_arv-collection-create.rb b/sdk/cli/test/test_arv-collection-create.rb
new file mode 100644 (file)
index 0000000..18bef40
--- /dev/null
@@ -0,0 +1,41 @@
+require 'minitest/autorun'
+require 'digest/md5'
+require 'active_support/core_ext'
+
+# Functional test for `arv collection create`: builds a one-file ("foo")
+# manifest, creates a collection from it via the CLI, and checks that the
+# printed uuid looks like a collection uuid.
+class TestCollectionCreate < Minitest::Test
+  def setup
+  end
+
+  def test_small_collection
+    # NOTE(review): "unitl" is a typo for "until" in the skip message
+    # (runtime string — left as committed).
+    skip "Waiting unitl #4534 is implemented"
+
+    # Content-address locator: md5(manifest) + "+" + manifest byte length.
+    uuid = Digest::MD5.hexdigest(foo_manifest) + '+' + foo_manifest.size.to_s
+    out, err = capture_subprocess_io do
+      assert_arv('--format', 'uuid', 'collection', 'create', '--collection', {
+                   uuid: uuid,
+                   manifest_text: foo_manifest
+                 }.to_json)
+    end
+    # NOTE(review): the group is optional ("?"), so this regex also matches
+    # empty output — a weak assertion.
+    assert /^([0-9a-z]{5}-4zz18-[0-9a-z]{15})?$/.match(out)
+    assert_equal '', err
+    $stderr.puts err
+  end
+
+  protected
+  # Run ./bin/arv with args and assert its exit status.  An optional
+  # leading true/false arg sets the expected success/failure (default:
+  # expect success).
+  def assert_arv(*args)
+    expect = case args.first
+             when true, false
+               args.shift
+             else
+               true
+             end
+    assert_equal(expect,
+                 system(['./bin/arv', 'arv'], *args),
+                 "`arv #{args.join ' '}` " +
+                 "should exit #{if expect then 0 else 'non-zero' end}")
+  end
+
+  # Canonical single-stream manifest for a 3-byte file named "foo".
+  def foo_manifest
+    ". #{Digest::MD5.hexdigest('foo')}+3 0:3:foo\n"
+  end
+end
diff --git a/sdk/cli/test/test_arv-get.rb b/sdk/cli/test/test_arv-get.rb
new file mode 100644 (file)
index 0000000..67dd399
--- /dev/null
@@ -0,0 +1,285 @@
+require 'minitest/autorun'
+require 'digest/md5'
+
+# Functional tests for the arv-get CLI.  setup shells out to ./bin/arv-put
+# to create fixture collections, so these tests require a running API +
+# Keep server.  Most cases are skipped pending #4534 ("unitl" is a typo
+# for "until" in those runtime skip strings — left as committed).
+class TestArvGet < Minitest::Test
+  def setup
+    begin
+      Dir.mkdir './tmp'
+    rescue Errno::EEXIST
+    end
+    # Class variables memoize the uploaded fixture locators across test
+    # methods, so arv-put runs at most once per fixture per process.
+    @@foo_manifest_locator ||= `echo -n foo | ./bin/arv-put --filename foo --no-progress -`.strip
+    @@baz_locator ||= `echo -n baz | ./bin/arv-put --as-raw --no-progress -`.strip
+    @@multilevel_manifest_locator ||= `echo ./foo/bar #{@@baz_locator} 0:3:baz | ./bin/arv-put --as-raw --no-progress -`.strip
+  end
+
+  def test_no_args
+    out, err = capture_subprocess_io do
+      assert_arv_get false
+    end
+    assert_equal '', out
+    assert_match /^usage:/, err
+  end
+
+  def test_help
+    out, err = capture_subprocess_io do
+      assert_arv_get '-h'
+    end
+    $stderr.write err
+    assert_equal '', err
+    assert_match /^usage:/, out
+  end
+
+  def test_file_to_dev_stdout
+    skip "Waiting unitl #4534 is implemented"
+
+    test_file_to_stdout('/dev/stdout')
+  end
+
+  def test_file_to_stdout(specify_stdout_as='-')
+    skip "Waiting unitl #4534 is implemented"
+
+    out, err = capture_subprocess_io do
+      assert_arv_get @@foo_manifest_locator + '/foo', specify_stdout_as
+    end
+    assert_equal '', err
+    assert_equal 'foo', out
+  end
+
+  def test_file_to_file
+    skip "Waiting unitl #4534 is implemented"
+
+    remove_tmp_foo
+    out, err = capture_subprocess_io do
+      assert_arv_get @@foo_manifest_locator + '/foo', 'tmp/foo'
+    end
+    assert_equal '', err
+    assert_equal '', out
+    assert_equal 'foo', IO.read('tmp/foo')
+  end
+
+  # Without -f/--skip-existing, arv-get must refuse to clobber an
+  # existing destination file.
+  def test_file_to_file_no_overwrite_file
+    skip "Waiting unitl #4534 is implemented"
+    File.open './tmp/foo', 'wb' do |f|
+      f.write 'baz'
+    end
+    out, err = capture_subprocess_io do
+      assert_arv_get false, @@foo_manifest_locator + '/foo', 'tmp/foo'
+    end
+    assert_match /Error:/, err
+    assert_equal '', out
+    assert_equal 'baz', IO.read('tmp/foo')
+  end
+
+  def test_file_to_file_no_overwrite_file_in_dir
+    skip "Waiting unitl #4534 is implemented"
+    File.open './tmp/foo', 'wb' do |f|
+      f.write 'baz'
+    end
+    out, err = capture_subprocess_io do
+      assert_arv_get false, @@foo_manifest_locator + '/', 'tmp/'
+    end
+    assert_match /Error:/, err
+    assert_equal '', out
+    assert_equal 'baz', IO.read('tmp/foo')
+  end
+
+  def test_file_to_file_force_overwrite
+    skip "Waiting unitl #4534 is implemented"
+
+    File.open './tmp/foo', 'wb' do |f|
+      f.write 'baz'
+    end
+    assert_equal 'baz', IO.read('tmp/foo')
+    out, err = capture_subprocess_io do
+      assert_arv_get '-f', @@foo_manifest_locator + '/', 'tmp/'
+    end
+    # NOTE(review): assert_match '' matches any string — always passes.
+    assert_match '', err
+    assert_equal '', out
+    assert_equal 'foo', IO.read('tmp/foo')
+  end
+
+  def test_file_to_file_skip_existing
+    skip "Waiting unitl #4534 is implemented"
+
+    File.open './tmp/foo', 'wb' do |f|
+      f.write 'baz'
+    end
+    assert_equal 'baz', IO.read('tmp/foo')
+    out, err = capture_subprocess_io do
+      assert_arv_get '--skip-existing', @@foo_manifest_locator + '/', 'tmp/'
+    end
+    assert_match '', err
+    assert_equal '', out
+    assert_equal 'baz', IO.read('tmp/foo')
+  end
+
+  def test_file_to_dir
+    skip "Waiting unitl #4534 is implemented"
+
+    remove_tmp_foo
+    out, err = capture_subprocess_io do
+      assert_arv_get @@foo_manifest_locator + '/foo', 'tmp/'
+    end
+    assert_equal '', err
+    assert_equal '', out
+    assert_equal 'foo', IO.read('tmp/foo')
+  end
+
+  def test_dir_to_file
+    out, err = capture_subprocess_io do
+      assert_arv_get false, @@foo_manifest_locator + '/', 'tmp/foo'
+    end
+    assert_equal '', out
+    assert_match /^usage:/, err
+  end
+
+  def test_dir_to_empty_string
+    out, err = capture_subprocess_io do
+      assert_arv_get false, @@foo_manifest_locator + '/', ''
+    end
+    assert_equal '', out
+    assert_match /^usage:/, err
+  end
+
+  def test_nonexistent_block
+    skip "Waiting unitl #4534 is implemented"
+
+    out, err = capture_subprocess_io do
+      assert_arv_get false, 'f1554a91e925d6213ce7c3103c5110c6'
+    end
+    assert_equal '', out
+    assert_match /Error:/, err
+  end
+
+  def test_nonexistent_manifest
+    skip "Waiting unitl #4534 is implemented"
+
+    out, err = capture_subprocess_io do
+      assert_arv_get false, 'f1554a91e925d6213ce7c3103c5110c6/', 'tmp/'
+    end
+    assert_equal '', out
+    assert_match /Error:/, err
+  end
+
+  def test_manifest_root_to_dir
+    skip "Waiting unitl #4534 is implemented"
+
+    remove_tmp_foo
+    out, err = capture_subprocess_io do
+      assert_arv_get '-r', @@foo_manifest_locator + '/', 'tmp/'
+    end
+    assert_equal '', err
+    assert_equal '', out
+    assert_equal 'foo', IO.read('tmp/foo')
+  end
+
+  def test_manifest_root_to_dir_noslash
+    skip "Waiting unitl #4534 is implemented"
+
+    remove_tmp_foo
+    out, err = capture_subprocess_io do
+      assert_arv_get '-r', @@foo_manifest_locator + '/', 'tmp'
+    end
+    assert_equal '', err
+    assert_equal '', out
+    assert_equal 'foo', IO.read('tmp/foo')
+  end
+
+  def test_display_md5sum
+    skip "Waiting unitl #4534 is implemented"
+
+    remove_tmp_foo
+    out, err = capture_subprocess_io do
+      assert_arv_get '-r', '--md5sum', @@foo_manifest_locator + '/', 'tmp/'
+    end
+    assert_equal "#{Digest::MD5.hexdigest('foo')}  ./foo\n", err
+    assert_equal '', out
+    assert_equal 'foo', IO.read('tmp/foo')
+  end
+
+  def test_md5sum_nowrite
+    skip "Waiting unitl #4534 is implemented"
+
+    remove_tmp_foo
+    out, err = capture_subprocess_io do
+      assert_arv_get '-n', '--md5sum', @@foo_manifest_locator + '/', 'tmp/'
+    end
+    assert_equal "#{Digest::MD5.hexdigest('foo')}  ./foo\n", err
+    assert_equal '', out
+    assert_equal false, File.exists?('tmp/foo')
+  end
+
+  def test_sha1_nowrite
+    skip "Waiting unitl #4534 is implemented"
+
+    remove_tmp_foo
+    out, err = capture_subprocess_io do
+      assert_arv_get '-n', '-r', '--hash', 'sha1', @@foo_manifest_locator+'/', 'tmp/'
+    end
+    assert_equal "#{Digest::SHA1.hexdigest('foo')}  ./foo\n", err
+    assert_equal '', out
+    assert_equal false, File.exists?('tmp/foo')
+  end
+
+  def test_block_to_file
+    skip "Waiting unitl #4534 is implemented"
+
+    remove_tmp_foo
+    out, err = capture_subprocess_io do
+      assert_arv_get @@foo_manifest_locator, 'tmp/foo'
+    end
+    assert_equal '', err
+    assert_equal '', out
+
+    digest = Digest::MD5.hexdigest('foo')
+    # NOTE(review): the expression below discards its result — it asserts
+    # nothing.  It looks like it was meant to be wrapped in an assert.
+    !(IO.read('tmp/foo')).gsub!( /^(. #{digest}+3)(.*)( 0:3:foo)$/).nil?
+  end
+
+  def test_create_directory_tree
+    skip "Waiting unitl #4534 is implemented"
+
+    `rm -rf ./tmp/arv-get-test/`
+    Dir.mkdir './tmp/arv-get-test'
+    out, err = capture_subprocess_io do
+      assert_arv_get @@multilevel_manifest_locator + '/', 'tmp/arv-get-test/'
+    end
+    assert_equal '', err
+    assert_equal '', out
+    assert_equal 'baz', IO.read('tmp/arv-get-test/foo/bar/baz')
+  end
+
+  def test_create_partial_directory_tree
+    skip "Waiting unitl #4534 is implemented"
+
+    `rm -rf ./tmp/arv-get-test/`
+    Dir.mkdir './tmp/arv-get-test'
+    out, err = capture_subprocess_io do
+      assert_arv_get(@@multilevel_manifest_locator + '/foo/',
+                     'tmp/arv-get-test/')
+    end
+    assert_equal '', err
+    assert_equal '', out
+    assert_equal 'baz', IO.read('tmp/arv-get-test/bar/baz')
+  end
+
+  protected
+  # Run ./bin/arv-get with args and assert its exit status.  An optional
+  # leading true/false arg sets the expected success/failure (default:
+  # expect success).
+  def assert_arv_get(*args)
+    expect = case args.first
+             when true, false
+               args.shift
+             else
+               true
+             end
+    assert_equal(expect,
+                 system(['./bin/arv-get', 'arv-get'], *args),
+                 "`arv-get #{args.join ' '}` " +
+                 "should exit #{if expect then 0 else 'non-zero' end}")
+  end
+
+  # Delete tmp/foo if present, ignoring "already gone".
+  def remove_tmp_foo
+    begin
+      File.unlink('tmp/foo')
+    rescue Errno::ENOENT
+    end
+  end
+end
diff --git a/sdk/cli/test/test_arv-put.rb b/sdk/cli/test/test_arv-put.rb
new file mode 100644 (file)
index 0000000..73513db
--- /dev/null
@@ -0,0 +1,223 @@
+require 'minitest/autorun'
+require 'digest/md5'
+
+# Functional tests for the arv-put CLI.  setup creates small fixtures
+# under ./tmp; the argument-validation tests run for real, while the
+# upload tests are skipped pending #4534 ("unitl" is a typo for "until"
+# in those runtime skip strings — left as committed).
+class TestArvPut < Minitest::Test
+  def setup
+    begin Dir.mkdir './tmp' rescue Errno::EEXIST end
+    begin Dir.mkdir './tmp/empty_dir' rescue Errno::EEXIST end
+    File.open './tmp/empty_file', 'wb' do
+    end
+    File.open './tmp/foo', 'wb' do |f|
+      f.write 'foo'
+    end
+  end
+
+  def test_help
+    out, err = capture_subprocess_io do
+      assert arv_put('-h'), 'arv-put -h exits zero'
+    end
+    $stderr.write err
+    assert_empty err
+    assert_match /^usage:/, out
+  end
+
+  def test_raw_stdin
+    skip "Waiting unitl #4534 is implemented"
+
+    out, err = capture_subprocess_io do
+      # Feed "foo" to arv-put's stdin through a pipe from a forked child.
+      r,w = IO.pipe
+      wpid = fork do
+        r.close
+        w << 'foo'
+      end
+      w.close
+      assert arv_put('--raw', {in: r})
+      r.close
+      Process.waitpid wpid
+    end
+    $stderr.write err
+    # NOTE(review): assert_match '' matches any string — always passes.
+    assert_match '', err
+    assert_equal "acbd18db4cc2f85cedef654fccc4a4d8+3\n", out
+  end
+
+  def test_raw_file
+    skip "Waiting unitl #4534 is implemented"
+
+    out, err = capture_subprocess_io do
+      assert arv_put('--raw', './tmp/foo')
+    end
+    $stderr.write err
+    assert_match '', err
+    assert_equal "acbd18db4cc2f85cedef654fccc4a4d8+3\n", out
+  end
+
+  def test_raw_empty_file
+    skip "Waiting unitl #4534 is implemented"
+
+    out, err = capture_subprocess_io do
+      assert arv_put('--raw', './tmp/empty_file')
+    end
+    $stderr.write err
+    assert_match '', err
+    assert_equal "d41d8cd98f00b204e9800998ecf8427e+0\n", out
+  end
+
+  def test_filename_arg_with_directory
+    out, err = capture_subprocess_io do
+      assert_equal(false, arv_put('--filename', 'foo', './tmp/empty_dir/.'),
+                   'arv-put --filename refuses directory')
+    end
+    assert_match /^usage:.*error:/m, err
+    assert_empty out
+  end
+
+  def test_filename_arg_with_multiple_files
+    out, err = capture_subprocess_io do
+      assert_equal(false, arv_put('--filename', 'foo',
+                                  './tmp/empty_file',
+                                  './tmp/empty_file'),
+                   'arv-put --filename refuses directory')
+    end
+    assert_match /^usage:.*error:/m, err
+    assert_empty out
+  end
+
+  def test_filename_arg_with_empty_file
+    skip "Waiting unitl #4534 is implemented"
+
+    out, err = capture_subprocess_io do
+      assert arv_put('--filename', 'foo', './tmp/empty_file')
+    end
+    $stderr.write err
+    assert_match '', err
+    assert match_collection_uuid(out)
+  end
+
+  def test_as_stream
+    skip "Waiting unitl #4534 is implemented"
+
+    out, err = capture_subprocess_io do
+      assert arv_put('--as-stream', './tmp/foo')
+    end
+    $stderr.write err
+    assert_match '', err
+    assert_equal foo_manifest, out
+  end
+
+  def test_progress
+    skip "Waiting unitl #4534 is implemented"
+
+    out, err = capture_subprocess_io do
+      assert arv_put('--manifest', '--progress', './tmp/foo')
+    end
+    assert_match /%/, err
+    assert match_collection_uuid(out)
+  end
+
+  def test_batch_progress
+    skip "Waiting unitl #4534 is implemented"
+
+    out, err = capture_subprocess_io do
+      assert arv_put('--manifest', '--batch-progress', './tmp/foo')
+    end
+    assert_match /: 0 written 3 total/, err
+    assert_match /: 3 written 3 total/, err
+    assert match_collection_uuid(out)
+  end
+
+  def test_progress_and_batch_progress
+    out, err = capture_subprocess_io do
+      assert_equal(false,
+                   arv_put('--progress', '--batch-progress', './tmp/foo'),
+                   'arv-put --progress --batch-progress is contradictory')
+    end
+    assert_match /^usage:.*error:/m, err
+    assert_empty out
+  end
+
+  def test_read_from_implicit_stdin
+    skip "Waiting unitl #4534 is implemented"
+
+    test_read_from_stdin(specify_stdin_as='--manifest')
+  end
+
+  def test_read_from_dev_stdin
+    skip "Waiting unitl #4534 is implemented"
+
+    test_read_from_stdin(specify_stdin_as='/dev/stdin')
+  end
+
+  def test_read_from_stdin(specify_stdin_as='-')
+    skip "Waiting unitl #4534 is implemented"
+
+    out, err = capture_subprocess_io do
+      r,w = IO.pipe
+      wpid = fork do
+        r.close
+        w << 'foo'
+      end
+      w.close
+      assert arv_put('--filename', 'foo', specify_stdin_as,
+                                 { in: r })
+      r.close
+      Process.waitpid wpid
+    end
+    $stderr.write err
+    assert_match '', err
+    assert match_collection_uuid(out)
+  end
+
+  def test_read_from_implicit_stdin_implicit_manifest
+    skip "Waiting unitl #4534 is implemented"
+
+    test_read_from_stdin_implicit_manifest(specify_stdin_as=nil,
+                                           expect_filename='stdin')
+  end
+
+  def test_read_from_dev_stdin_implicit_manifest
+    skip "Waiting unitl #4534 is implemented"
+
+    test_read_from_stdin_implicit_manifest(specify_stdin_as='/dev/stdin')
+  end
+
+  def test_read_from_stdin_implicit_manifest(specify_stdin_as='-',
+                                             expect_filename=nil)
+    skip "Waiting unitl #4534 is implemented"
+
+    expect_filename = expect_filename || specify_stdin_as.split('/').last
+    out, err = capture_subprocess_io do
+      r,w = IO.pipe
+      wpid = fork do
+        r.close
+        w << 'foo'
+      end
+      w.close
+      args = []
+      args.push specify_stdin_as if specify_stdin_as
+      assert arv_put(*args, { in: r })
+      r.close
+      Process.waitpid wpid
+    end
+    $stderr.write err
+    assert_match '', err
+    assert match_collection_uuid(out)
+  end
+
+  protected
+  # Run ./bin/arv-put with args; returns true iff it exits zero.
+  def arv_put(*args)
+    system ['./bin/arv-put', 'arv-put'], *args
+  end
+
+  # Manifest for a 3-byte "foo" file.  NOTE(review): the literal
+  # "#(unknown)" below is not Ruby interpolation — it looks like a
+  # mangled "#{filename}"; verify against the upstream source.
+  def foo_manifest(filename='foo')
+    ". #{Digest::MD5.hexdigest('foo')}+3 0:3:#(unknown)\n"
+  end
+
+  # Locator for foo_manifest: md5(manifest) + "+" + manifest length.
+  def foo_manifest_locator(filename='foo')
+    Digest::MD5.hexdigest(foo_manifest(filename)) +
+      "+#{foo_manifest(filename).length}"
+  end
+
+  # MatchData if the string is empty or a collection uuid (xxxxx-4zz18-...).
+  def match_collection_uuid(uuid)
+    /^([0-9a-z]{5}-4zz18-[0-9a-z]{15})?$/.match(uuid)
+  end
+end
diff --git a/sdk/cli/test/test_arv-run-pipeline-instance.rb b/sdk/cli/test/test_arv-run-pipeline-instance.rb
new file mode 100644 (file)
index 0000000..8c8d1d8
--- /dev/null
@@ -0,0 +1,35 @@
+require 'minitest/autorun'
+
+# Smoke tests for the arv-run-pipeline-instance CLI.  These invoke the
+# command through the shell (string form of system), so the binary must
+# be on PATH.
+class TestRunPipelineInstance < Minitest::Test
+  def setup
+  end
+
+  def test_run_pipeline_instance_get_help
+    skip "Waiting unitl #4534 is implemented"
+
+    out, err = capture_subprocess_io do
+      system ('arv-run-pipeline-instance -h')
+    end
+    assert_equal '', err
+  end
+
+  def test_run_pipeline_instance_with_no_such_option
+    out, err = capture_subprocess_io do
+      system ('arv-run-pipeline-instance --junk')
+    end
+    refute_equal '', err
+  end
+
+  # Both real invocations are commented out (see inline notes); the body
+  # currently runs nothing and only asserts that nothing wrote to stderr.
+  def test_run_pipeline_instance_for_bogus_template_uuid
+    out, err = capture_subprocess_io do
+      # fails with error SSL_connect error because HOST_INSECURE is not being used
+         # system ('arv-run-pipeline-instance --template bogus-abcde-fghijklmnopqrs input=c1bad4b39ca5a924e481008009d94e32+210')
+
+      # fails with error: fatal: cannot load such file -- arvados
+         # system ('./bin/arv-run-pipeline-instance --template bogus-abcde-fghijklmnopqrs input=c1bad4b39ca5a924e481008009d94e32+210')
+    end
+    #refute_equal '', err
+    assert_equal '', err
+  end
+
+end
diff --git a/sdk/cli/test/test_arv-tag.rb b/sdk/cli/test/test_arv-tag.rb
new file mode 100644 (file)
index 0000000..a5a1c94
--- /dev/null
@@ -0,0 +1,112 @@
+require 'minitest/autorun'
+require 'digest/md5'
+require 'json'
+
+# Convenience alias: assert that the first argument is exactly false
+# (used to check that a CLI invocation failed).
+def assert_failure *args
+  assert_equal false, *args
+end
+
+# Functional tests for the arv-tag CLI: adding/removing tag links on
+# objects and verifying them through `arv link`.  The substantive cases
+# are skipped ("TBD" / pending #4534).
+class TestArvTag < Minitest::Test
+
+  def test_no_args
+    skip "Waiting unitl #4534 is implemented"
+
+    # arv-tag exits with failure if run with no args
+    out, err = capture_subprocess_io do
+      assert_equal false, arv_tag
+    end
+    assert_empty out
+    assert_match /^usage:/i, err
+  end
+
+  # Test adding and removing a single tag on a single object.
+  def test_single_tag_single_obj
+    skip "TBD"
+
+    # Add a single tag.
+    tag_uuid, err = capture_subprocess_io do
+      assert arv_tag '--short', 'add', 'test_tag1', '--object', 'uuid1'
+    end
+    assert_empty err
+
+    # Look the new link up by uuid and check its fields.
+    out, err = capture_subprocess_io do
+      assert arv 'link', 'show', '--uuid', tag_uuid.rstrip
+    end
+
+    assert_empty err
+    link = JSON.parse out
+    assert_tag link, 'test_tag1', 'uuid1'
+
+    # Remove the tag.
+    out, err = capture_subprocess_io do
+      assert arv_tag 'remove', 'test_tag1', '--object', 'uuid1'
+    end
+
+    assert_empty err
+    links = JSON.parse out
+    assert_equal 1, links.length
+    assert_tag links[0], 'test_tag1', 'uuid1'
+
+    # Verify that the link no longer exists.
+    out, err = capture_subprocess_io do
+      assert_equal false, arv('link', 'show', '--uuid', links[0]['uuid'])
+    end
+
+    assert_equal "Error: Path not found\n", err
+  end
+
+  # Test adding and removing a single tag with multiple objects.
+  def test_single_tag_multi_objects
+    skip "TBD"
+
+    out, err = capture_subprocess_io do
+      assert arv_tag('add', 'test_tag1',
+                     '--object', 'uuid1',
+                     '--object', 'uuid2',
+                     '--object', 'uuid3')
+    end
+    assert_empty err
+
+    out, err = capture_subprocess_io do
+      assert arv 'link', 'list', '--where', '{"link_class":"tag","name":"test_tag1"}'
+    end
+
+    assert_empty err
+    json_out = JSON.parse out
+    # Sort by head_uuid so the per-object assertions below are order-stable.
+    links = json_out['items'].sort { |a,b| a['head_uuid'] <=> b['head_uuid'] }
+    assert_equal 3, links.length
+    assert_tag links[0], 'test_tag1', 'uuid1'
+    assert_tag links[1], 'test_tag1', 'uuid2'
+    assert_tag links[2], 'test_tag1', 'uuid3'
+
+    out, err = capture_subprocess_io do
+      assert arv_tag('remove', 'test_tag1',
+                     '--object', 'uuid1',
+                     '--object', 'uuid2',
+                     '--object', 'uuid3')
+    end
+    assert_empty err
+
+    out, err = capture_subprocess_io do
+      assert arv 'link', 'list', '--where', '{"link_class":"tag","name":"test_tag1"}'
+    end
+
+    assert_empty err
+    assert_empty out
+  end
+
+  protected
+  # Run ./bin/arv-tag; returns true iff it exits zero.
+  def arv_tag(*args)
+    system ['./bin/arv-tag', 'arv-tag'], *args
+  end
+
+  # Run ./bin/arv; returns true iff it exits zero.
+  def arv(*args)
+    system ['./bin/arv', 'arv'], *args
+  end
+
+  # Assert that a parsed link hash is a tag with the given name and target.
+  def assert_tag(link, name, head_uuid)
+    assert_equal 'tag',     link['link_class']
+    assert_equal name,      link['name']
+    assert_equal head_uuid, link['head_uuid']
+  end
+end
diff --git a/sdk/cli/test/test_arv-ws.rb b/sdk/cli/test/test_arv-ws.rb
new file mode 100644 (file)
index 0000000..d972122
--- /dev/null
@@ -0,0 +1,21 @@
+require 'minitest/autorun'
+
+# Smoke tests for the arv-ws CLI: -h should print nothing to stderr, an
+# unknown option should.  Invoked through the shell, so arv-ws must be
+# on PATH.
+class TestArvWs < Minitest::Test
+  def setup
+  end
+
+  def test_arv_ws_get_help
+    out, err = capture_subprocess_io do
+      system ('arv-ws -h')
+    end
+    assert_equal '', err
+  end
+
+  def test_arv_ws_such_option
+    out, err = capture_subprocess_io do
+      system ('arv-ws --junk')
+    end
+    refute_equal '', err
+  end
+
+end
diff --git a/sdk/go/arvadosclient/arvadosclient.go b/sdk/go/arvadosclient/arvadosclient.go
new file mode 100644 (file)
index 0000000..5ea2524
--- /dev/null
@@ -0,0 +1,249 @@
+/* Simple Arvados Go SDK for communicating with API server. */
+
+package arvadosclient
+
+import (
+       "bytes"
+       "crypto/tls"
+       "encoding/json"
+       "errors"
+       "fmt"
+       "io"
+       "net/http"
+       "net/url"
+       "os"
+       "strings"
+)
+
+// Errors
+// Sentinel errors returned by MakeArvadosClient when the required
+// environment variables are unset.
+var MissingArvadosApiHost = errors.New("Missing required environment variable ARVADOS_API_HOST")
+var MissingArvadosApiToken = errors.New("Missing required environment variable ARVADOS_API_TOKEN")
+
+// ArvadosApiError wraps an error from a non-OK API response, carrying
+// the HTTP status code and status line (returned by CallRaw).
+type ArvadosApiError struct {
+       error
+       HttpStatusCode int
+       HttpStatus string
+}
+
+// Error delegates to the embedded error's message.
+func (e ArvadosApiError) Error() string { return e.error.Error() }
+
+// Helper type so we don't have to write out 'map[string]interface{}' every time.
+type Dict map[string]interface{}
+
+// Information about how to contact the Arvados server
+type ArvadosClient struct {
+       // Arvados API server, form "host:port"
+       ApiServer string
+
+       // Arvados API token for authentication
+       ApiToken string
+
+       // Whether to require a valid SSL certificate or not
+       ApiInsecure bool
+
+       // Client object shared by client requests.  Supports HTTP KeepAlive.
+       Client *http.Client
+
+       // If true, sets the X-External-Client header to indicate
+       // the client is outside the cluster.
+       External bool
+}
+
+// Create a new ArvadosClient, initialized with standard Arvados environment
+// variables ARVADOS_API_HOST, ARVADOS_API_TOKEN, and (optionally)
+// ARVADOS_API_HOST_INSECURE.  Note the client struct is returned (by
+// value) even when err is non-nil.
+func MakeArvadosClient() (kc ArvadosClient, err error) {
+       insecure := (os.Getenv("ARVADOS_API_HOST_INSECURE") == "true")
+       external := (os.Getenv("ARVADOS_EXTERNAL_CLIENT") == "true")
+
+       kc = ArvadosClient{
+               ApiServer:   os.Getenv("ARVADOS_API_HOST"),
+               ApiToken:    os.Getenv("ARVADOS_API_TOKEN"),
+               ApiInsecure: insecure,
+               Client: &http.Client{Transport: &http.Transport{
+                       TLSClientConfig: &tls.Config{InsecureSkipVerify: insecure}}},
+               External: external}
+
+       // Fail fast if the required environment is missing.
+       if os.Getenv("ARVADOS_API_HOST") == "" {
+               return kc, MissingArvadosApiHost
+       }
+       if os.Getenv("ARVADOS_API_TOKEN") == "" {
+               return kc, MissingArvadosApiToken
+       }
+
+       return kc, err
+}
+
+// Low-level access to a resource.
+//
+//   method - HTTP method, one of GET, HEAD, PUT, POST or DELETE
+//   resource - the arvados resource to act on
+//   uuid - the uuid of the specific item to access (may be empty)
+//   action - sub-action to take on the resource or uuid (may be empty)
+//   parameters - method parameters
+//
+// return
+//   reader - the body reader, or nil if there was an error
+//   err - error accessing the resource, or nil if no error
+//
+// A non-OK HTTP status is returned as an ArvadosApiError; on success the
+// caller is responsible for closing the returned reader.
+func (this ArvadosClient) CallRaw(method string, resource string, uuid string, action string, parameters Dict) (reader io.ReadCloser, err error) {
+       var req *http.Request
+
+       // Build /arvados/v1[/resource[/uuid[/action]]] on the API server.
+       u := url.URL{
+               Scheme: "https",
+               Host:   this.ApiServer}
+
+       u.Path = "/arvados/v1"
+
+       if resource != "" {
+               u.Path = u.Path + "/" + resource
+       }
+       if uuid != "" {
+               u.Path = u.Path + "/" + uuid
+       }
+       if action != "" {
+               u.Path = u.Path + "/" + action
+       }
+
+       if parameters == nil {
+               parameters = make(Dict)
+       }
+
+       parameters["format"] = "json"
+
+       // JSON-encode each parameter value.  NOTE(review): a marshal error
+       // silently drops that parameter from the request.
+       vals := make(url.Values)
+       for k, v := range parameters {
+               m, err := json.Marshal(v)
+               if err == nil {
+                       vals.Set(k, string(m))
+               }
+       }
+
+       // GET/HEAD pass parameters in the query string; other methods send
+       // them as a form-encoded body.
+       if method == "GET" || method == "HEAD" {
+               u.RawQuery = vals.Encode()
+               if req, err = http.NewRequest(method, u.String(), nil); err != nil {
+                       return nil, err
+               }
+       } else {
+               if req, err = http.NewRequest(method, u.String(), bytes.NewBufferString(vals.Encode())); err != nil {
+                       return nil, err
+               }
+               req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
+       }
+
+       // Add api token header
+       req.Header.Add("Authorization", fmt.Sprintf("OAuth2 %s", this.ApiToken))
+       if this.External {
+               req.Header.Add("X-External-Client", "1")
+       }
+
+       // Make the request
+       var resp *http.Response
+       if resp, err = this.Client.Do(req); err != nil {
+               return nil, err
+       }
+
+       if resp.StatusCode == http.StatusOK {
+               return resp.Body, nil
+       }
+
+       defer resp.Body.Close()
+       errorText := fmt.Sprintf("API response: %s", resp.Status)
+
+       // If the response body has {"errors":["reason1","reason2"]}
+       // then return those reasons.
+       var errInfo = Dict{}
+       if err := json.NewDecoder(resp.Body).Decode(&errInfo); err == nil {
+               if errorList, ok := errInfo["errors"]; ok {
+                       var errorStrings []string
+                       if errArray, ok := errorList.([]interface{}); ok {
+                               for _, errItem := range errArray {
+                                       // We expect an array of strings here.
+                                       // Non-strings will be passed along
+                                       // JSON-encoded.
+                                       if s, ok := errItem.(string); ok {
+                                               errorStrings = append(errorStrings, s)
+                                       } else if j, err := json.Marshal(errItem); err == nil {
+                                               errorStrings = append(errorStrings, string(j))
+                                       }
+                               }
+                               errorText = strings.Join(errorStrings, "; ")
+                       }
+               }
+       }
+       return nil, ArvadosApiError{errors.New(errorText), resp.StatusCode, resp.Status}
+}
+
+// Access to a resource.
+//
+//   method - HTTP method, one of GET, HEAD, PUT, POST or DELETE
+//   resource - the arvados resource to act on
+//   uuid - the uuid of the specific item to access (may be empty)
+//   action - sub-action to take on the resource or uuid (may be empty)
+//   parameters - method parameters
+//   output - a map or annotated struct which is a legal target for encoding/json/Decoder
+// return
+//   err - error accessing the resource, or nil if no error
+//
+// Wraps CallRaw, decoding the JSON response body into output (when
+// output is non-nil) and always closing the body.
+func (this ArvadosClient) Call(method string, resource string, uuid string, action string, parameters Dict, output interface{}) (err error) {
+       var reader io.ReadCloser
+       reader, err = this.CallRaw(method, resource, uuid, action, parameters)
+       // Close the body even if CallRaw also returned an error.
+       if reader != nil {
+               defer reader.Close()
+       }
+       if err != nil {
+               return err
+       }
+
+       if output != nil {
+               dec := json.NewDecoder(reader)
+               if err = dec.Decode(output); err != nil {
+                       return err
+               }
+       }
+       return nil
+}
+
+// Create a new instance of a resource.
+//
+//   resource - the arvados resource on which to create an item
+//   parameters - method parameters
+//   output - a map or annotated struct which is a legal target for encoding/json/Decoder
+// return
+//   err - error accessing the resource, or nil if no error
+//
+// Convenience wrapper: POST /arvados/v1/<resource>.
+func (this ArvadosClient) Create(resource string, parameters Dict, output interface{}) (err error) {
+       return this.Call("POST", resource, "", "", parameters, output)
+}
+
+// Delete an instance of a resource.
+//
+//   resource - the arvados resource on which to delete an item
+//   uuid - the item to delete
+//   parameters - method parameters
+//   output - a map or annotated struct which is a legal target for encoding/json/Decoder
+// return
+//   err - error accessing the resource, or nil if no error
+//
+// Convenience wrapper: DELETE /arvados/v1/<resource>/<uuid>.
+func (this ArvadosClient) Delete(resource string, uuid string, parameters Dict, output interface{}) (err error) {
+       return this.Call("DELETE", resource, uuid, "", parameters, output)
+}
+
+// Update fields of an instance of a resource.
+//
+//   resource - the arvados resource on which to update the item
+//   uuid - the item to update
+//   parameters - method parameters
+//   output - a map or annotated struct which is a legal target for encoding/json/Decoder
+// return
+//   err - error accessing the resource, or nil if no error
+//
+// Convenience wrapper: PUT /arvados/v1/<resource>/<uuid>.
+func (this ArvadosClient) Update(resource string, uuid string, parameters Dict, output interface{}) (err error) {
+       return this.Call("PUT", resource, uuid, "", parameters, output)
+}
+
+// List the instances of a resource
+//
+//   resource - the arvados resource on which to list
+//   parameters - method parameters
+//   output - a map or annotated struct which is a legal target for encoding/json/Decoder
+// return
+//   err - error accessing the resource, or nil if no error
+//
+// Convenience wrapper: GET /arvados/v1/<resource>.
+func (this ArvadosClient) List(resource string, parameters Dict, output interface{}) (err error) {
+       return this.Call("GET", resource, "", "", parameters, output)
+}
new file mode 100644 (file)
index 0000000..bf9b4e3
--- /dev/null
@@ -0,0 +1,119 @@
+package arvadosclient
+
+import (
+       "fmt"
+       . "gopkg.in/check.v1"
+       "net/http"
+       "os"
+       "os/exec"
+       "testing"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       TestingT(t)
+}
+
+var _ = Suite(&ServerRequiredSuite{})
+
+// Tests that require the Keep server running
+type ServerRequiredSuite struct{}
+
+func pythonDir() string {
+       cwd, _ := os.Getwd()
+       return fmt.Sprintf("%s/../../python/tests", cwd)
+}
+
+func (s *ServerRequiredSuite) SetUpSuite(c *C) {
+       os.Chdir(pythonDir())
+       if err := exec.Command("python", "run_test_server.py", "start").Run(); err != nil {
+               panic("'python run_test_server.py start' returned error")
+       }
+       if err := exec.Command("python", "run_test_server.py", "start_keep").Run(); err != nil {
+               panic("'python run_test_server.py start_keep' returned error")
+       }
+}
+
+func (s *ServerRequiredSuite) TestMakeArvadosClient(c *C) {
+       os.Setenv("ARVADOS_API_HOST", "localhost:3000")
+       os.Setenv("ARVADOS_API_TOKEN", "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
+       os.Setenv("ARVADOS_API_HOST_INSECURE", "")
+
+       kc, err := MakeArvadosClient()
+       c.Check(kc.ApiServer, Equals, "localhost:3000")
+       c.Check(kc.ApiToken, Equals, "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
+       c.Check(kc.ApiInsecure, Equals, false)
+
+       os.Setenv("ARVADOS_API_HOST_INSECURE", "true")
+
+       kc, err = MakeArvadosClient()
+       c.Check(kc.ApiServer, Equals, "localhost:3000")
+       c.Check(kc.ApiToken, Equals, "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
+       c.Check(kc.ApiInsecure, Equals, true)
+       c.Check(kc.Client.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify, Equals, true)
+
+       c.Assert(err, Equals, nil)
+}
+
+func (s *ServerRequiredSuite) TestCreatePipelineTemplate(c *C) {
+       os.Setenv("ARVADOS_API_HOST", "localhost:3000")
+       os.Setenv("ARVADOS_API_TOKEN", "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
+       os.Setenv("ARVADOS_API_HOST_INSECURE", "true")
+
+       arv, err := MakeArvadosClient()
+
+       getback := make(Dict)
+       err = arv.Create("pipeline_templates",
+               Dict{"pipeline_template": Dict{
+                       "name": "tmp",
+                       "components": Dict{
+                               "c1": map[string]string{"script": "script1"},
+                               "c2": map[string]string{"script": "script2"}}}},
+               &getback)
+       c.Assert(err, Equals, nil)
+       c.Assert(getback["name"], Equals, "tmp")
+       c.Assert(getback["components"].(map[string]interface{})["c2"].(map[string]interface{})["script"], Equals, "script2")
+
+       uuid := getback["uuid"].(string)
+       getback = make(Dict)
+       err = arv.Update("pipeline_templates", uuid,
+               Dict{
+                       "pipeline_template": Dict{"name": "tmp2"}},
+               &getback)
+       c.Assert(err, Equals, nil)
+       c.Assert(getback["name"], Equals, "tmp2")
+
+       c.Assert(getback["uuid"].(string), Equals, uuid)
+       getback = make(Dict)
+       err = arv.Delete("pipeline_templates", uuid, nil, &getback)
+       c.Assert(err, Equals, nil)
+       c.Assert(getback["name"], Equals, "tmp2")
+}
+
+func (s *ServerRequiredSuite) TestErrorResponse(c *C) {
+       os.Setenv("ARVADOS_API_HOST", "localhost:3000")
+       os.Setenv("ARVADOS_API_TOKEN", "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
+       os.Setenv("ARVADOS_API_HOST_INSECURE", "true")
+
+       arv, _ := MakeArvadosClient()
+
+       getback := make(Dict)
+
+       {
+               err := arv.Create("logs",
+                       Dict{"log": Dict{"bogus_attr": "foo"}},
+                       &getback)
+               c.Assert(err, ErrorMatches, ".*unknown attribute: bogus_attr.*")
+               c.Assert(err, FitsTypeOf, ArvadosApiError{})
+               c.Assert(err.(ArvadosApiError).HttpStatusCode, Equals, 422)
+       }
+
+       {
+               err := arv.Create("bogus",
+                       Dict{"bogus": Dict{}},
+                       &getback)
+               c.Assert(err, ErrorMatches, "Path not found")
+               c.Assert(err, FitsTypeOf, ArvadosApiError{})
+               c.Assert(err.(ArvadosApiError).HttpStatusCode, Equals, 404)
+       }
+}
diff --git a/sdk/go/keepclient/hashcheck.go b/sdk/go/keepclient/hashcheck.go
new file mode 100644 (file)
index 0000000..1f696d9
--- /dev/null
@@ -0,0 +1,78 @@
+// Lightweight implementation of io.ReadCloser that checks the contents read
+// from the underlying io.Reader against a checksum hash.  To avoid reading the
+// entire contents into a buffer up front, the hash is updated with each read,
+// and the actual checksum is not checked until the underlying reader returns
+// EOF.
+package keepclient
+
+import (
+       "errors"
+       "fmt"
+       "hash"
+       "io"
+)
+
+var BadChecksum = errors.New("Reader failed checksum")
+
+type HashCheckingReader struct {
+       // The underlying data source
+       io.Reader
+
+       // The hashing function to use
+       hash.Hash
+
+       // The hash value to check against.  Must be a hex-encoded lowercase string.
+       Check string
+}
+
+// Read from the underlying reader, update the hashing function, and pass the
+// results through.  Will return BadChecksum on the last read instead of EOF if
+// the checksum doesn't match.
+func (this HashCheckingReader) Read(p []byte) (n int, err error) {
+       n, err = this.Reader.Read(p)
+       if n > 0 {
+               this.Hash.Write(p[:n])
+       }
+       if err == io.EOF {
+               sum := this.Hash.Sum(make([]byte, 0, this.Hash.Size()))
+               if fmt.Sprintf("%x", sum) != this.Check {
+                       err = BadChecksum
+               }
+       }
+       return n, err
+}
+
+// Write entire contents of this.Reader to 'dest'.  Returns BadChecksum if the
+// data written to 'dest' doesn't match the hash code of this.Check.
+func (this HashCheckingReader) WriteTo(dest io.Writer) (written int64, err error) {
+       if writeto, ok := this.Reader.(io.WriterTo); ok {
+               written, err = writeto.WriteTo(io.MultiWriter(dest, this.Hash))
+       } else {
+               written, err = io.Copy(io.MultiWriter(dest, this.Hash), this.Reader)
+       }
+
+       sum := this.Hash.Sum(make([]byte, 0, this.Hash.Size()))
+
+       if fmt.Sprintf("%x", sum) != this.Check {
+               err = BadChecksum
+       }
+
+       return written, err
+}
+
+// Close() the underlying Reader if it is castable to io.ReadCloser.  This will
+// drain the underlying reader of any remaining data and check the checksum.
+func (this HashCheckingReader) Close() (err error) {
+       _, err = io.Copy(this.Hash, this.Reader)
+
+       if closer, ok := this.Reader.(io.ReadCloser); ok {
+               err = closer.Close()
+       }
+
+       sum := this.Hash.Sum(make([]byte, 0, this.Hash.Size()))
+       if fmt.Sprintf("%x", sum) != this.Check {
+               err = BadChecksum
+       }
+
+       return err
+}
diff --git a/sdk/go/keepclient/hashcheck_test.go b/sdk/go/keepclient/hashcheck_test.go
new file mode 100644 (file)
index 0000000..371a989
--- /dev/null
@@ -0,0 +1,85 @@
+package keepclient
+
+import (
+       "bytes"
+       "crypto/md5"
+       "fmt"
+       . "gopkg.in/check.v1"
+       "io"
+       "io/ioutil"
+)
+
+type HashcheckSuiteSuite struct{}
+
+// Gocheck boilerplate
+var _ = Suite(&HashcheckSuiteSuite{})
+
+func (h *HashcheckSuiteSuite) TestRead(c *C) {
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       {
+               r, w := io.Pipe()
+               hcr := HashCheckingReader{r, md5.New(), hash}
+               go func() {
+                       w.Write([]byte("foo"))
+                       w.Close()
+               }()
+               p, err := ioutil.ReadAll(hcr)
+               c.Check(len(p), Equals, 3)
+               c.Check(err, Equals, nil)
+       }
+
+       {
+               r, w := io.Pipe()
+               hcr := HashCheckingReader{r, md5.New(), hash}
+               go func() {
+                       w.Write([]byte("bar"))
+                       w.Close()
+               }()
+               p, err := ioutil.ReadAll(hcr)
+               c.Check(len(p), Equals, 3)
+               c.Check(err, Equals, BadChecksum)
+       }
+}
+
+func (h *HashcheckSuiteSuite) TestWriteTo(c *C) {
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       {
+               bb := bytes.NewBufferString("foo")
+               hcr := HashCheckingReader{bb, md5.New(), hash}
+               r, w := io.Pipe()
+               done := make(chan bool)
+               go func() {
+                       p, err := ioutil.ReadAll(r)
+                       c.Check(len(p), Equals, 3)
+                       c.Check(err, Equals, nil)
+                       done <- true
+               }()
+
+               n, err := hcr.WriteTo(w)
+               w.Close()
+               c.Check(n, Equals, int64(3))
+               c.Check(err, Equals, nil)
+               <-done
+       }
+
+       {
+               bb := bytes.NewBufferString("bar")
+               hcr := HashCheckingReader{bb, md5.New(), hash}
+               r, w := io.Pipe()
+               done := make(chan bool)
+               go func() {
+                       p, err := ioutil.ReadAll(r)
+                       c.Check(len(p), Equals, 3)
+                       c.Check(err, Equals, nil)
+                       done <- true
+               }()
+
+               n, err := hcr.WriteTo(w)
+               w.Close()
+               c.Check(n, Equals, int64(3))
+               c.Check(err, Equals, BadChecksum)
+               <-done
+       }
+}
diff --git a/sdk/go/keepclient/keepclient.go b/sdk/go/keepclient/keepclient.go
new file mode 100644 (file)
index 0000000..23af470
--- /dev/null
@@ -0,0 +1,283 @@
+/* Provides low-level Get/Put primitives for accessing Arvados Keep blocks. */
+package keepclient
+
+import (
+       "crypto/md5"
+       "errors"
+       "fmt"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/streamer"
+       "io"
+       "io/ioutil"
+       "log"
+       "net/http"
+       "regexp"
+       "strings"
+       "sync"
+       "sync/atomic"
+       "time"
+       "unsafe"
+)
+
+// A Keep "block" is 64MB.
+const BLOCKSIZE = 64 * 1024 * 1024
+
+var BlockNotFound = errors.New("Block not found")
+var InsufficientReplicasError = errors.New("Could not write sufficient replicas")
+var OversizeBlockError = errors.New("Block too big")
+var MissingArvadosApiHost = errors.New("Missing required environment variable ARVADOS_API_HOST")
+var MissingArvadosApiToken = errors.New("Missing required environment variable ARVADOS_API_TOKEN")
+
+const X_Keep_Desired_Replicas = "X-Keep-Desired-Replicas"
+const X_Keep_Replicas_Stored = "X-Keep-Replicas-Stored"
+
+// Information about Arvados and Keep servers.
+type KeepClient struct {
+       Arvados       *arvadosclient.ArvadosClient
+       Want_replicas int
+       Using_proxy   bool
+       service_roots *map[string]string
+       lock          sync.Mutex
+       Client        *http.Client
+}
+
+// Create a new KeepClient.  This will contact the API server to discover Keep
+// servers.
+func MakeKeepClient(arv *arvadosclient.ArvadosClient) (kc KeepClient, err error) {
+       kc = KeepClient{
+               Arvados:       arv,
+               Want_replicas: 2,
+               Using_proxy:   false,
+               Client:        &http.Client{},
+       }
+       err = (&kc).DiscoverKeepServers()
+
+       return kc, err
+}
+
+// Put a block given the block hash, a reader with the block data, and the
+// expected length of that data.  The desired number of replicas is given in
+// KeepClient.Want_replicas.  Returns the number of replicas that were written
+// and if there was an error.  Note this will return InsufficientReplicasError
+// whenever 0 <= replicas < this.Want_replicas.
+func (this KeepClient) PutHR(hash string, r io.Reader, expectedLength int64) (locator string, replicas int, err error) {
+
+       // Buffer for reads from 'r'
+       var bufsize int
+       if expectedLength > 0 {
+               if expectedLength > BLOCKSIZE {
+                       return "", 0, OversizeBlockError
+               }
+               bufsize = int(expectedLength)
+       } else {
+               bufsize = BLOCKSIZE
+       }
+
+       t := streamer.AsyncStreamFromReader(bufsize, HashCheckingReader{r, md5.New(), hash})
+       defer t.Close()
+
+       return this.putReplicas(hash, t, expectedLength)
+}
+
+// Put a block given the block hash and a byte buffer.  The desired number of
+// replicas is given in KeepClient.Want_replicas.  Returns the number of
+// replicas that were written and if there was an error.  Note this will return
+// InsufficientReplicasError whenever 0 <= replicas < this.Want_replicas.
+func (this KeepClient) PutHB(hash string, buf []byte) (locator string, replicas int, err error) {
+       t := streamer.AsyncStreamFromSlice(buf)
+       defer t.Close()
+
+       return this.putReplicas(hash, t, int64(len(buf)))
+}
+
+// Put a block given a buffer.  The hash will be computed.  The desired number
+// of replicas is given in KeepClient.Want_replicas.  Returns the number of
+// replicas that were written and if there was an error.  Note this will return
+// InsufficientReplicasError whenever 0 <= replicas < this.Want_replicas.
+func (this KeepClient) PutB(buffer []byte) (locator string, replicas int, err error) {
+       hash := fmt.Sprintf("%x", md5.Sum(buffer))
+       return this.PutHB(hash, buffer)
+}
+
+// Put a block, given a Reader.  This will read the entire reader into a buffer
+// to compute the hash.  The desired number of replicas is given in
+// KeepClient.Want_replicas.  Returns the number of replicas that were written
+// and if there was an error.  Note this will return InsufficientReplicasError
+// whenever 0 <= replicas < this.Want_replicas.  Also note that if the block
+// hash and data size are available, PutHR() is more efficient.
+func (this KeepClient) PutR(r io.Reader) (locator string, replicas int, err error) {
+       if buffer, err := ioutil.ReadAll(r); err != nil {
+               return "", 0, err
+       } else {
+               return this.PutB(buffer)
+       }
+}
+
+// Get a block given a hash.  Return a reader, the expected data length, the
+// URL the block was fetched from, and if there was an error.  If the block
+// checksum does not match, the final Read() on the reader returned by this
+// method will return a BadChecksum error instead of EOF.
+func (this KeepClient) Get(hash string) (reader io.ReadCloser,
+       contentLength int64, url string, err error) {
+       return this.AuthorizedGet(hash, "", "")
+}
+
+// Get a block given a hash, with additional authorization provided by
+// signature and timestamp.  Return a reader, the expected data length, the URL
+// the block was fetched from, and if there was an error.  If the block
+// checksum does not match, the final Read() on the reader returned by this
+// method will return a BadChecksum error instead of EOF.
+func (this KeepClient) AuthorizedGet(hash string,
+       signature string,
+       timestamp string) (reader io.ReadCloser,
+       contentLength int64, url string, err error) {
+
+       // Take the hash of locator and timestamp in order to identify this
+       // specific transaction in log statements.
+       requestId := fmt.Sprintf("%x", md5.Sum([]byte(hash+time.Now().String())))[0:8]
+
+       // Calculate the ordering for asking servers
+       sv := NewRootSorter(this.ServiceRoots(), hash).GetSortedRoots()
+
+       for _, host := range sv {
+               var req *http.Request
+               var err error
+               var url string
+               if signature != "" {
+                       url = fmt.Sprintf("%s/%s+A%s@%s", host, hash,
+                               signature, timestamp)
+               } else {
+                       url = fmt.Sprintf("%s/%s", host, hash)
+               }
+               if req, err = http.NewRequest("GET", url, nil); err != nil {
+                       continue
+               }
+
+               req.Header.Add("Authorization", fmt.Sprintf("OAuth2 %s", this.Arvados.ApiToken))
+
+               log.Printf("[%v] Begin download %s", requestId, url)
+
+               var resp *http.Response
+               if resp, err = this.Client.Do(req); err != nil || resp.StatusCode != http.StatusOK {
+                       respbody, _ := ioutil.ReadAll(&io.LimitedReader{resp.Body, 4096})
+                       response := strings.TrimSpace(string(respbody))
+                       log.Printf("[%v] Download %v status code: %v error: \"%v\" response: \"%v\"",
+                               requestId, url, resp.StatusCode, err, response)
+                       continue
+               }
+
+               if resp.StatusCode == http.StatusOK {
+                       log.Printf("[%v] Download %v status code: %v", requestId, url, resp.StatusCode)
+                       return HashCheckingReader{resp.Body, md5.New(), hash}, resp.ContentLength, url, nil
+               }
+       }
+
+       return nil, 0, "", BlockNotFound
+}
+
+// Determine if a block with the given hash is available and readable, but does
+// not return the block contents.
+func (this KeepClient) Ask(hash string) (contentLength int64, url string, err error) {
+       return this.AuthorizedAsk(hash, "", "")
+}
+
+// Determine if a block with the given hash is available and readable with the
+// given signature and timestamp, but does not return the block contents.
+func (this KeepClient) AuthorizedAsk(hash string, signature string,
+       timestamp string) (contentLength int64, url string, err error) {
+       // Calculate the ordering for asking servers
+       sv := NewRootSorter(this.ServiceRoots(), hash).GetSortedRoots()
+
+       for _, host := range sv {
+               var req *http.Request
+               var err error
+               if signature != "" {
+                       url = fmt.Sprintf("%s/%s+A%s@%s", host, hash,
+                               signature, timestamp)
+               } else {
+                       url = fmt.Sprintf("%s/%s", host, hash)
+               }
+
+               if req, err = http.NewRequest("HEAD", url, nil); err != nil {
+                       continue
+               }
+
+               req.Header.Add("Authorization", fmt.Sprintf("OAuth2 %s", this.Arvados.ApiToken))
+
+               var resp *http.Response
+               if resp, err = this.Client.Do(req); err != nil {
+                       continue
+               }
+
+               if resp.StatusCode == http.StatusOK {
+                       return resp.ContentLength, url, nil
+               }
+       }
+
+       return 0, "", BlockNotFound
+
+}
+
+// Atomically read the service_roots field.
+func (this *KeepClient) ServiceRoots() map[string]string {
+       r := (*map[string]string)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&this.service_roots))))
+       return *r
+}
+
+// Atomically update the service_roots field.  Enables you to update
+// service_roots without disrupting any GET or PUT operations that might
+// already be in progress.
+func (this *KeepClient) SetServiceRoots(new_roots map[string]string) {
+       roots := make(map[string]string)
+       for uuid, root := range new_roots {
+               roots[uuid] = root
+       }
+       atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&this.service_roots)),
+               unsafe.Pointer(&roots))
+}
+
+type Locator struct {
+       Hash      string
+       Size      int
+       Signature string
+       Timestamp string
+}
+
+func MakeLocator2(hash string, hints string) (locator Locator) {
+       locator.Hash = hash
+       if hints != "" {
+               signature_pat, _ := regexp.Compile("^A([[:xdigit:]]+)@([[:xdigit:]]{8})$")
+               for _, hint := range strings.Split(hints, "+") {
+                       if hint != "" {
+                               if match, _ := regexp.MatchString("^[[:digit:]]+$", hint); match {
+                                       fmt.Sscanf(hint, "%d", &locator.Size)
+                               } else if m := signature_pat.FindStringSubmatch(hint); m != nil {
+                                       locator.Signature = m[1]
+                                       locator.Timestamp = m[2]
+                               } else if match, _ := regexp.MatchString("^[:upper:]", hint); match {
+                                       // Any unknown hint that starts with an uppercase letter is
+                                       // presumed to be valid and ignored, to permit forward compatibility.
+                               } else {
+                                       // Unknown format; not a valid locator.
+                                       return Locator{"", 0, "", ""}
+                               }
+                       }
+               }
+       }
+       return locator
+}
+
+func MakeLocator(path string) Locator {
+       pathpattern, err := regexp.Compile("^([0-9a-f]{32})([+].*)?$")
+       if err != nil {
+               log.Print("Don't like regexp", err)
+       }
+
+       sm := pathpattern.FindStringSubmatch(path)
+       if sm == nil {
+               log.Print("Failed match ", path)
+               return Locator{"", 0, "", ""}
+       }
+
+       return MakeLocator2(sm[1], sm[2])
+}
diff --git a/sdk/go/keepclient/keepclient_test.go b/sdk/go/keepclient/keepclient_test.go
new file mode 100644 (file)
index 0000000..8487e00
--- /dev/null
@@ -0,0 +1,720 @@
+package keepclient
+
+import (
+       "crypto/md5"
+       "flag"
+       "fmt"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/streamer"
+       . "gopkg.in/check.v1"
+       "io"
+       "io/ioutil"
+       "log"
+       "net"
+       "net/http"
+       "os"
+       "os/exec"
+       "testing"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+       TestingT(t)
+}
+
+// Gocheck boilerplate
+var _ = Suite(&ServerRequiredSuite{})
+var _ = Suite(&StandaloneSuite{})
+
+var no_server = flag.Bool("no-server", false, "Skip 'ServerRequireSuite'")
+
+// Tests that require the Keep server running
+type ServerRequiredSuite struct{}
+
+// Standalone tests
+type StandaloneSuite struct{}
+
+func pythonDir() string {
+       cwd, _ := os.Getwd()
+       return fmt.Sprintf("%s/../../python/tests", cwd)
+}
+
+func (s *ServerRequiredSuite) SetUpSuite(c *C) {
+       if *no_server {
+               c.Skip("Skipping tests that require server")
+               return
+       }
+       os.Chdir(pythonDir())
+       {
+               cmd := exec.Command("python", "run_test_server.py", "start")
+               stderr, err := cmd.StderrPipe()
+               if err != nil {
+                       log.Fatalf("Setting up stderr pipe: %s", err)
+               }
+               go io.Copy(os.Stderr, stderr)
+               if err := cmd.Run(); err != nil {
+                       panic(fmt.Sprintf("'python run_test_server.py start' returned error %s", err))
+               }
+       }
+       {
+               cmd := exec.Command("python", "run_test_server.py", "start_keep")
+               stderr, err := cmd.StderrPipe()
+               if err != nil {
+                       log.Fatalf("Setting up stderr pipe: %s", err)
+               }
+               go io.Copy(os.Stderr, stderr)
+               if err := cmd.Run(); err != nil {
+                       panic(fmt.Sprintf("'python run_test_server.py start_keep' returned error %s", err))
+               }
+       }
+}
+
+func (s *ServerRequiredSuite) TearDownSuite(c *C) {
+       os.Chdir(pythonDir())
+       exec.Command("python", "run_test_server.py", "stop_keep").Run()
+       exec.Command("python", "run_test_server.py", "stop").Run()
+}
+
+func (s *ServerRequiredSuite) TestMakeKeepClient(c *C) {
+       os.Setenv("ARVADOS_API_HOST", "localhost:3000")
+       os.Setenv("ARVADOS_API_TOKEN", "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
+       os.Setenv("ARVADOS_API_HOST_INSECURE", "true")
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       c.Assert(err, Equals, nil)
+
+       kc, err := MakeKeepClient(&arv)
+
+       c.Assert(err, Equals, nil)
+       c.Check(len(kc.ServiceRoots()), Equals, 2)
+       for _, root := range kc.ServiceRoots() {
+               c.Check(root, Matches, "http://localhost:2510[\\d]")
+       }
+}
+
+type StubPutHandler struct {
+       c              *C
+       expectPath     string
+       expectApiToken string
+       expectBody     string
+       handled        chan string
+}
+
+func (this StubPutHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       this.c.Check(req.URL.Path, Equals, "/"+this.expectPath)
+       this.c.Check(req.Header.Get("Authorization"), Equals, fmt.Sprintf("OAuth2 %s", this.expectApiToken))
+       body, err := ioutil.ReadAll(req.Body)
+       this.c.Check(err, Equals, nil)
+       this.c.Check(body, DeepEquals, []byte(this.expectBody))
+       resp.WriteHeader(200)
+       this.handled <- fmt.Sprintf("http://%s", req.Host)
+}
+
+func RunFakeKeepServer(st http.Handler) (ks KeepServer) {
+       var err error
+       ks.listener, err = net.ListenTCP("tcp", &net.TCPAddr{Port: 0})
+       if err != nil {
+               panic(fmt.Sprintf("Could not listen on any port"))
+       }
+       ks.url = fmt.Sprintf("http://%s", ks.listener.Addr().String())
+       go http.Serve(ks.listener, st)
+       return
+}
+
+func UploadToStubHelper(c *C, st http.Handler, f func(KeepClient, string,
+       io.ReadCloser, io.WriteCloser, chan uploadStatus)) {
+
+       ks := RunFakeKeepServer(st)
+       defer ks.listener.Close()
+
+       arv, _ := arvadosclient.MakeArvadosClient()
+       arv.ApiToken = "abc123"
+
+       kc, _ := MakeKeepClient(&arv)
+
+       reader, writer := io.Pipe()
+       upload_status := make(chan uploadStatus)
+
+       f(kc, ks.url, reader, writer, upload_status)
+}
+
+func (s *StandaloneSuite) TestUploadToStubKeepServer(c *C) {
+       log.Printf("TestUploadToStubKeepServer")
+
+       st := StubPutHandler{
+               c,
+               "acbd18db4cc2f85cedef654fccc4a4d8",
+               "abc123",
+               "foo",
+               make(chan string)}
+
+       UploadToStubHelper(c, st,
+               func(kc KeepClient, url string, reader io.ReadCloser,
+                       writer io.WriteCloser, upload_status chan uploadStatus) {
+
+                       go kc.uploadToKeepServer(url, st.expectPath, reader, upload_status, int64(len("foo")), "TestUploadToStubKeepServer")
+
+                       writer.Write([]byte("foo"))
+                       writer.Close()
+
+                       <-st.handled
+                       status := <-upload_status
+                       c.Check(status, DeepEquals, uploadStatus{nil, fmt.Sprintf("%s/%s", url, st.expectPath), 200, 1, ""})
+               })
+
+       log.Printf("TestUploadToStubKeepServer done")
+}
+
+func (s *StandaloneSuite) TestUploadToStubKeepServerBufferReader(c *C) {
+       log.Printf("TestUploadToStubKeepServerBufferReader")
+
+       st := StubPutHandler{
+               c,
+               "acbd18db4cc2f85cedef654fccc4a4d8",
+               "abc123",
+               "foo",
+               make(chan string)}
+
+       UploadToStubHelper(c, st,
+               func(kc KeepClient, url string, reader io.ReadCloser,
+                       writer io.WriteCloser, upload_status chan uploadStatus) {
+
+                       tr := streamer.AsyncStreamFromReader(512, reader)
+                       defer tr.Close()
+
+                       br1 := tr.MakeStreamReader()
+
+                       go kc.uploadToKeepServer(url, st.expectPath, br1, upload_status, 3, "TestUploadToStubKeepServerBufferReader")
+
+                       writer.Write([]byte("foo"))
+                       writer.Close()
+
+                       <-st.handled
+
+                       status := <-upload_status
+                       c.Check(status, DeepEquals, uploadStatus{nil, fmt.Sprintf("%s/%s", url, st.expectPath), 200, 1, ""})
+               })
+
+       log.Printf("TestUploadToStubKeepServerBufferReader done")
+}
+
+type FailHandler struct {
+       handled chan string
+}
+
+func (this FailHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       resp.WriteHeader(500)
+       this.handled <- fmt.Sprintf("http://%s", req.Host)
+}
+
+func (s *StandaloneSuite) TestFailedUploadToStubKeepServer(c *C) {
+       log.Printf("TestFailedUploadToStubKeepServer")
+
+       st := FailHandler{
+               make(chan string)}
+
+       hash := "acbd18db4cc2f85cedef654fccc4a4d8"
+
+       UploadToStubHelper(c, st,
+               func(kc KeepClient, url string, reader io.ReadCloser,
+                       writer io.WriteCloser, upload_status chan uploadStatus) {
+
+                       go kc.uploadToKeepServer(url, hash, reader, upload_status, 3, "TestFailedUploadToStubKeepServer")
+
+                       writer.Write([]byte("foo"))
+                       writer.Close()
+
+                       <-st.handled
+
+                       status := <-upload_status
+                       c.Check(status.url, Equals, fmt.Sprintf("%s/%s", url, hash))
+                       c.Check(status.statusCode, Equals, 500)
+               })
+       log.Printf("TestFailedUploadToStubKeepServer done")
+}
+
+type KeepServer struct {
+       listener net.Listener
+       url      string
+}
+
+func RunSomeFakeKeepServers(st http.Handler, n int) (ks []KeepServer) {
+       ks = make([]KeepServer, n)
+
+       for i := 0; i < n; i += 1 {
+               ks[i] = RunFakeKeepServer(st)
+       }
+
+       return ks
+}
+
+func (s *StandaloneSuite) TestPutB(c *C) {
+       log.Printf("TestPutB")
+
+       hash := Md5String("foo")
+
+       st := StubPutHandler{
+               c,
+               hash,
+               "abc123",
+               "foo",
+               make(chan string, 5)}
+
+       arv, _ := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(&arv)
+
+       kc.Want_replicas = 2
+       arv.ApiToken = "abc123"
+       service_roots := make(map[string]string)
+
+       ks := RunSomeFakeKeepServers(st, 5)
+
+       for i, k := range ks {
+               service_roots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+               defer k.listener.Close()
+       }
+
+       kc.SetServiceRoots(service_roots)
+
+       kc.PutB([]byte("foo"))
+
+       shuff := NewRootSorter(
+               kc.ServiceRoots(), Md5String("foo")).GetSortedRoots()
+
+       s1 := <-st.handled
+       s2 := <-st.handled
+       c.Check((s1 == shuff[0] && s2 == shuff[1]) ||
+               (s1 == shuff[1] && s2 == shuff[0]),
+               Equals,
+               true)
+
+       log.Printf("TestPutB done")
+}
+
+// TestPutHR verifies PutHR (put with hash and io.Reader source): data
+// streamed through a pipe reaches the two highest-weighted servers.
+func (s *StandaloneSuite) TestPutHR(c *C) {
+       log.Printf("TestPutHR")
+
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       st := StubPutHandler{
+               c,
+               hash,
+               "abc123",
+               "foo",
+               make(chan string, 5)}
+
+       arv, _ := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(&arv)
+
+       kc.Want_replicas = 2
+       arv.ApiToken = "abc123"
+       service_roots := make(map[string]string)
+
+       ks := RunSomeFakeKeepServers(st, 5)
+
+       for i, k := range ks {
+               service_roots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+               defer k.listener.Close()
+       }
+
+       kc.SetServiceRoots(service_roots)
+
+       // Feed the block body through a pipe to exercise the
+       // streaming (unknown-source) upload path.
+       reader, writer := io.Pipe()
+
+       go func() {
+               writer.Write([]byte("foo"))
+               writer.Close()
+       }()
+
+       kc.PutHR(hash, reader, 3)
+
+       shuff := NewRootSorter(kc.ServiceRoots(), hash).GetSortedRoots()
+       log.Print(shuff)
+
+       s1 := <-st.handled
+       s2 := <-st.handled
+
+       // Both replicas must land on the first two sorted roots.
+       c.Check((s1 == shuff[0] && s2 == shuff[1]) ||
+               (s1 == shuff[1] && s2 == shuff[0]),
+               Equals,
+               true)
+
+       log.Printf("TestPutHR done")
+}
+
+// TestPutWithFail mixes four working servers with one failing server
+// and checks that PutB still reports success with 2 replicas, writing
+// to the next servers in probe order after the failure.
+func (s *StandaloneSuite) TestPutWithFail(c *C) {
+       log.Printf("TestPutWithFail")
+
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       st := StubPutHandler{
+               c,
+               hash,
+               "abc123",
+               "foo",
+               make(chan string, 4)}
+
+       // fh always responds with an error status.
+       fh := FailHandler{
+               make(chan string, 1)}
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(&arv)
+
+       kc.Want_replicas = 2
+       arv.ApiToken = "abc123"
+       service_roots := make(map[string]string)
+
+       ks1 := RunSomeFakeKeepServers(st, 4)
+       ks2 := RunSomeFakeKeepServers(fh, 1)
+
+       for i, k := range ks1 {
+               service_roots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+               defer k.listener.Close()
+       }
+       for i, k := range ks2 {
+               service_roots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i+len(ks1))] = k.url
+               defer k.listener.Close()
+       }
+
+       kc.SetServiceRoots(service_roots)
+
+       shuff := NewRootSorter(
+               kc.ServiceRoots(), Md5String("foo")).GetSortedRoots()
+
+       phash, replicas, err := kc.PutB([]byte("foo"))
+
+       // Make sure the failing server was actually contacted.
+       <-fh.handled
+
+       c.Check(err, Equals, nil)
+       c.Check(phash, Equals, "")
+       c.Check(replicas, Equals, 2)
+
+       s1 := <-st.handled
+       s2 := <-st.handled
+
+       // NOTE(review): this assertion assumes the failing server sorts
+       // first for this hash, so successes land on shuff[1]/shuff[2].
+       c.Check((s1 == shuff[1] && s2 == shuff[2]) ||
+               (s1 == shuff[2] && s2 == shuff[1]),
+               Equals,
+               true)
+}
+
+// TestPutWithTooManyFail runs one working server and four failing ones
+// with Want_replicas=2: PutB must return InsufficientReplicasError and
+// report the single replica that did succeed.
+func (s *StandaloneSuite) TestPutWithTooManyFail(c *C) {
+       log.Printf("TestPutWithTooManyFail")
+
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       st := StubPutHandler{
+               c,
+               hash,
+               "abc123",
+               "foo",
+               make(chan string, 1)}
+
+       fh := FailHandler{
+               make(chan string, 4)}
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(&arv)
+
+       kc.Want_replicas = 2
+       arv.ApiToken = "abc123"
+       service_roots := make(map[string]string)
+
+       ks1 := RunSomeFakeKeepServers(st, 1)
+       ks2 := RunSomeFakeKeepServers(fh, 4)
+
+       for i, k := range ks1 {
+               service_roots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+               defer k.listener.Close()
+       }
+       for i, k := range ks2 {
+               service_roots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i+len(ks1))] = k.url
+               defer k.listener.Close()
+       }
+
+       kc.SetServiceRoots(service_roots)
+
+       _, replicas, err := kc.PutB([]byte("foo"))
+
+       c.Check(err, Equals, InsufficientReplicasError)
+       c.Check(replicas, Equals, 1)
+       // The one success must have gone to the only working server.
+       c.Check(<-st.handled, Equals, ks1[0].url)
+
+       log.Printf("TestPutWithTooManyFail done")
+}
+
+// StubGetHandler is an http.Handler that asserts the request path and
+// OAuth2 Authorization header, then serves returnBody with an explicit
+// Content-Length.
+type StubGetHandler struct {
+       c              *C
+       expectPath     string
+       expectApiToken string
+       returnBody     []byte
+}
+
+func (this StubGetHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       this.c.Check(req.URL.Path, Equals, "/"+this.expectPath)
+       this.c.Check(req.Header.Get("Authorization"), Equals, fmt.Sprintf("OAuth2 %s", this.expectApiToken))
+       resp.Header().Set("Content-Length", fmt.Sprintf("%d", len(this.returnBody)))
+       resp.Write(this.returnBody)
+}
+
+func (s *StandaloneSuite) TestGet(c *C) {
+       log.Printf("TestGet")
+
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       st := StubGetHandler{
+               c,
+               hash,
+               "abc123",
+               []byte("foo")}
+
+       ks := RunFakeKeepServer(st)
+       defer ks.listener.Close()
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(&arv)
+       arv.ApiToken = "abc123"
+       kc.SetServiceRoots(map[string]string{"x": ks.url})
+
+       r, n, url2, err := kc.Get(hash)
+       defer r.Close()
+       c.Check(err, Equals, nil)
+       c.Check(n, Equals, int64(3))
+       c.Check(url2, Equals, fmt.Sprintf("%s/%s", ks.url, hash))
+
+       content, err2 := ioutil.ReadAll(r)
+       c.Check(err2, Equals, nil)
+       c.Check(content, DeepEquals, []byte("foo"))
+
+       log.Printf("TestGet done")
+}
+
+// TestGetFail checks that Get against a single always-failing server
+// returns BlockNotFound with zero length, empty URL, and a nil reader.
+func (s *StandaloneSuite) TestGetFail(c *C) {
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+       st := FailHandler{make(chan string, 1)}
+
+       ks := RunFakeKeepServer(st)
+       defer ks.listener.Close()
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(&arv)
+       arv.ApiToken = "abc123"
+       kc.SetServiceRoots(map[string]string{"x": ks.url})
+
+       r, n, url2, err := kc.Get(hash)
+       c.Check(err, Equals, BlockNotFound)
+       c.Check(n, Equals, int64(0))
+       c.Check(url2, Equals, "")
+       // r is expected to be nil on failure.
+       c.Check(r, Equals, nil)
+}
+
+// BarHandler serves the literal body "bar" for any request — used to
+// provoke a checksum mismatch when a different hash was requested —
+// and reports the contacted host on the channel.
+type BarHandler struct {
+       handled chan string
+}
+
+func (this BarHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       resp.Write([]byte("bar"))
+       this.handled <- fmt.Sprintf("http://%s", req.Host)
+}
+
+// TestChecksum verifies client-side content verification: reading a
+// block whose body matches the requested hash succeeds, while a body
+// that does not match yields BadChecksum.
+func (s *StandaloneSuite) TestChecksum(c *C) {
+       foohash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+       barhash := fmt.Sprintf("%x", md5.Sum([]byte("bar")))
+
+       // Server always returns "bar" regardless of the hash requested.
+       st := BarHandler{make(chan string, 1)}
+
+       ks := RunFakeKeepServer(st)
+       defer ks.listener.Close()
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(&arv)
+       arv.ApiToken = "abc123"
+       kc.SetServiceRoots(map[string]string{"x": ks.url})
+
+       // Matching hash: read completes without error.
+       r, n, _, err := kc.Get(barhash)
+       _, err = ioutil.ReadAll(r)
+       c.Check(n, Equals, int64(3))
+       c.Check(err, Equals, nil)
+
+       <-st.handled
+
+       // Mismatched hash: ReadAll must surface BadChecksum.
+       r, n, _, err = kc.Get(foohash)
+       _, err = ioutil.ReadAll(r)
+       c.Check(n, Equals, int64(3))
+       c.Check(err, Equals, BadChecksum)
+
+       <-st.handled
+}
+
+// TestGetWithFailures mixes one working server with four failing ones
+// and checks that Get retries past the failures and still returns the
+// correct content from the working server.
+func (s *StandaloneSuite) TestGetWithFailures(c *C) {
+       content := []byte("waz")
+       hash := fmt.Sprintf("%x", md5.Sum(content))
+
+       fh := FailHandler{
+               make(chan string, 4)}
+
+       st := StubGetHandler{
+               c,
+               hash,
+               "abc123",
+               content}
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(&arv)
+       arv.ApiToken = "abc123"
+       service_roots := make(map[string]string)
+
+       ks1 := RunSomeFakeKeepServers(st, 1)
+       ks2 := RunSomeFakeKeepServers(fh, 4)
+
+       for i, k := range ks1 {
+               service_roots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+               defer k.listener.Close()
+       }
+       for i, k := range ks2 {
+               service_roots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i+len(ks1))] = k.url
+               defer k.listener.Close()
+       }
+
+       kc.SetServiceRoots(service_roots)
+
+       // This test works only if one of the failing services is
+       // attempted before the succeeding service. Otherwise,
+       // <-fh.handled below will just hang! (Probe order depends on
+       // the choice of block content "waz" and the UUIDs of the fake
+       // servers, so we just tried different strings until we found
+       // an example that passes this Assert.)
+       c.Assert(NewRootSorter(service_roots, hash).GetSortedRoots()[0], Not(Equals), ks1[0].url)
+
+       r, n, url2, err := kc.Get(hash)
+
+       // At least one failing server must have been contacted first.
+       <-fh.handled
+       c.Check(err, Equals, nil)
+       c.Check(n, Equals, int64(3))
+       c.Check(url2, Equals, fmt.Sprintf("%s/%s", ks1[0].url, hash))
+
+       read_content, err2 := ioutil.ReadAll(r)
+       c.Check(err2, Equals, nil)
+       c.Check(read_content, DeepEquals, content)
+}
+
+// TestPutGetHead is an integration round-trip against a locally
+// running API server and Keep services (ports 3000/25108): Ask before
+// the block exists, PutB it, Get it back, then Ask again.
+func (s *ServerRequiredSuite) TestPutGetHead(c *C) {
+       os.Setenv("ARVADOS_API_HOST", "localhost:3000")
+       os.Setenv("ARVADOS_API_TOKEN", "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
+       os.Setenv("ARVADOS_API_HOST_INSECURE", "true")
+       content := []byte("TestPutGetHead")
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       kc, err := MakeKeepClient(&arv)
+       c.Assert(err, Equals, nil)
+
+       hash := fmt.Sprintf("%x", md5.Sum(content))
+
+       {
+               // HEAD before upload: block must not exist yet.
+               n, _, err := kc.Ask(hash)
+               c.Check(err, Equals, BlockNotFound)
+               c.Check(n, Equals, int64(0))
+       }
+       {
+               // PUT: locator should be hash+size with 2 replicas.
+               hash2, replicas, err := kc.PutB(content)
+               c.Check(hash2, Equals, fmt.Sprintf("%s+%d", hash, len(content)))
+               c.Check(replicas, Equals, 2)
+               c.Check(err, Equals, nil)
+       }
+       {
+               // GET: content round-trips intact.
+               r, n, url2, err := kc.Get(hash)
+               c.Check(err, Equals, nil)
+               c.Check(n, Equals, int64(len(content)))
+               c.Check(url2, Equals, fmt.Sprintf("http://localhost:25108/%s", hash))
+
+               read_content, err2 := ioutil.ReadAll(r)
+               c.Check(err2, Equals, nil)
+               c.Check(read_content, DeepEquals, content)
+       }
+       {
+               // HEAD after upload: block exists with the right size.
+               n, url2, err := kc.Ask(hash)
+               c.Check(err, Equals, nil)
+               c.Check(n, Equals, int64(len(content)))
+               c.Check(url2, Equals, fmt.Sprintf("http://localhost:25108/%s", hash))
+       }
+}
+
+// StubProxyHandler imitates a keepproxy: it claims 2 replicas stored
+// via the X-Keep-Replicas-Stored response header and reports the
+// contacted host on the channel.
+type StubProxyHandler struct {
+       handled chan string
+}
+
+func (this StubProxyHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       resp.Header().Set("X-Keep-Replicas-Stored", "2")
+       this.handled <- fmt.Sprintf("http://%s", req.Host)
+}
+
+// TestPutProxy checks that when writing through a proxy, a single
+// upload reporting 2 stored replicas satisfies Want_replicas = 2.
+func (s *StandaloneSuite) TestPutProxy(c *C) {
+       log.Printf("TestPutProxy")
+
+       st := StubProxyHandler{make(chan string, 1)}
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(&arv)
+
+       kc.Want_replicas = 2
+       kc.Using_proxy = true
+       arv.ApiToken = "abc123"
+       service_roots := make(map[string]string)
+
+       ks1 := RunSomeFakeKeepServers(st, 1)
+
+       for i, k := range ks1 {
+               service_roots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+               defer k.listener.Close()
+       }
+
+       kc.SetServiceRoots(service_roots)
+
+       _, replicas, err := kc.PutB([]byte("foo"))
+       <-st.handled
+
+       c.Check(err, Equals, nil)
+       c.Check(replicas, Equals, 2)
+
+       log.Printf("TestPutProxy done")
+}
+
+func (s *StandaloneSuite) TestPutProxyInsufficientReplicas(c *C) {
+       log.Printf("TestPutProxy")
+
+       st := StubProxyHandler{make(chan string, 1)}
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       kc, _ := MakeKeepClient(&arv)
+
+       kc.Want_replicas = 3
+       kc.Using_proxy = true
+       arv.ApiToken = "abc123"
+       service_roots := make(map[string]string)
+
+       ks1 := RunSomeFakeKeepServers(st, 1)
+
+       for i, k := range ks1 {
+               service_roots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+               defer k.listener.Close()
+       }
+       kc.SetServiceRoots(service_roots)
+
+       _, replicas, err := kc.PutB([]byte("foo"))
+       <-st.handled
+
+       c.Check(err, Equals, InsufficientReplicasError)
+       c.Check(replicas, Equals, 2)
+
+       log.Printf("TestPutProxy done")
+}
+
+// TestMakeLocator parses a signed locator string and checks that the
+// hash, size, signature, and timestamp components are extracted.
+func (s *StandaloneSuite) TestMakeLocator(c *C) {
+       l := MakeLocator("91f372a266fe2bf2823cb8ec7fda31ce+3+Aabcde@12345678")
+
+       c.Check(l.Hash, Equals, "91f372a266fe2bf2823cb8ec7fda31ce")
+       c.Check(l.Size, Equals, 3)
+       c.Check(l.Signature, Equals, "abcde")
+       c.Check(l.Timestamp, Equals, "12345678")
+}
diff --git a/sdk/go/keepclient/root_sorter.go b/sdk/go/keepclient/root_sorter.go
new file mode 100644 (file)
index 0000000..c2780bc
--- /dev/null
@@ -0,0 +1,57 @@
+package keepclient
+
+import (
+       "sort"
+)
+
+// RootSorter orders service roots by per-hash weight so every client
+// probes servers for a given block in the same rendezvous order.
+// root and weight are parallel slices; order holds the permutation of
+// indices that sorting rearranges (the slices themselves stay put).
+type RootSorter struct {
+       root         []string
+       weight       []string
+       order        []int
+}
+
+// NewRootSorter builds a RootSorter for the given uuid->root map and
+// block hash, computing each root's weight and sorting immediately.
+func NewRootSorter(serviceRoots map[string]string, hash string) (*RootSorter) {
+       rs := new(RootSorter)
+       rs.root = make([]string, len(serviceRoots))
+       rs.weight = make([]string, len(serviceRoots))
+       rs.order = make([]int, len(serviceRoots))
+       i := 0
+       for uuid, root := range serviceRoots {
+               rs.root[i] = root
+               rs.weight[i] = rs.getWeight(hash, uuid)
+               rs.order[i] = i
+               i++
+       }
+       sort.Sort(rs)
+       return rs
+}
+
+// getWeight returns the rendezvous weight of a service for this hash:
+// the MD5 of the hash concatenated with the unique tail of the UUID
+// (the part after the 12-character "zzzzz-bi6l4-" style prefix).
+func (rs RootSorter) getWeight(hash string, uuid string) (string) {
+       if len(uuid) == 27 {
+               return Md5String(hash + uuid[12:])
+       } else {
+               // Only useful for testing, a set of one service root, etc.
+               return Md5String(hash + uuid)
+       }
+}
+
+// GetSortedRoots returns the service roots in probe order (heaviest
+// weight first), applying the sorted index permutation.
+func (rs RootSorter) GetSortedRoots() ([]string) {
+       sorted := make([]string, len(rs.order))
+       for i := range rs.order {
+               sorted[i] = rs.root[rs.order[i]]
+       }
+       return sorted
+}
+
+// sort.Interface implementation over the order permutation: only the
+// index slice is permuted, not root/weight themselves.
+
+// Less is really More here: the heaviest root will be at the front of the list.
+func (rs RootSorter) Less(i, j int) bool {
+       return rs.weight[rs.order[j]] < rs.weight[rs.order[i]]
+}
+
+func (rs RootSorter) Len() int {
+       return len(rs.order)
+}
+
+func (rs RootSorter) Swap(i, j int) {
+       sort.IntSlice(rs.order).Swap(i, j)
+}
diff --git a/sdk/go/keepclient/root_sorter_test.go b/sdk/go/keepclient/root_sorter_test.go
new file mode 100644 (file)
index 0000000..455715d
--- /dev/null
@@ -0,0 +1,58 @@
+package keepclient
+
+import (
+       "fmt"
+       . "gopkg.in/check.v1"
+       "strconv"
+       "strings"
+)
+
+// RootSorterSuite holds the gocheck tests for RootSorter.
+type RootSorterSuite struct{}
+var _ = Suite(&RootSorterSuite{})
+
+// FakeSvcRoot builds a deterministic fake service URL from an index.
+func FakeSvcRoot(i uint64) (string) {
+       return fmt.Sprintf("https://%x.svc/", i)
+}
+
+// FakeSvcUuid builds a 27-character fake service UUID from an index.
+func FakeSvcUuid(i uint64) (string) {
+       return fmt.Sprintf("zzzzz-bi6l4-%015x", i)
+}
+
+// FakeServiceRoots returns n fake uuid->root pairs.
+func FakeServiceRoots(n uint64) (map[string]string) {
+       sr := map[string]string{}
+       for i := uint64(0); i < n; i ++ {
+               sr[FakeSvcUuid(i)] = FakeSvcRoot(i)
+       }
+       return sr
+}
+
+// EmptyRoots: sorting an empty root set yields an empty slice.
+// NOTE(review): gocheck only runs methods whose names start with
+// "Test", so as written this never executes — confirm and rename.
+// Also, Equals on two slices may not compare as intended; DeepEquals
+// is the usual checker for slices — verify.
+func (*RootSorterSuite) EmptyRoots(c *C) {
+       rs := NewRootSorter(map[string]string{}, Md5String("foo"))
+       c.Check(rs.GetSortedRoots(), Equals, []string{})
+}
+
+// JustOneRoot: a single root sorts to itself.
+// NOTE(review): method name lacks the "Test" prefix gocheck requires,
+// so this test never runs — confirm and rename.
+func (*RootSorterSuite) JustOneRoot(c *C) {
+       rs := NewRootSorter(FakeServiceRoots(1), Md5String("foo"))
+       c.Check(rs.GetSortedRoots(), Equals, []string{FakeSvcRoot(0)})
+}
+
+// ReferenceSet checks the sorter against known-good probe orders
+// shared with the Python SDK, over 16 fake services. Each hex digit
+// in an expected order is a service index.
+// NOTE(review): method name lacks the "Test" prefix gocheck requires,
+// so this test never runs — confirm and rename.
+func (*RootSorterSuite) ReferenceSet(c *C) {
+       fakeroots := FakeServiceRoots(16)
+       // These reference probe orders are explained further in
+       // ../../python/tests/test_keep_client.py:
+       expected_orders := []string{
+               "3eab2d5fc9681074",
+               "097dba52e648f1c3",
+               "c5b4e023f8a7d691",
+               "9d81c02e76a3bf54",
+       }
+       for h, expected_order := range expected_orders {
+               hash := Md5String(fmt.Sprintf("%064x", h))
+               roots := NewRootSorter(fakeroots, hash).GetSortedRoots()
+               for i, svc_id_s := range strings.Split(expected_order, "") {
+                       svc_id, err := strconv.ParseUint(svc_id_s, 16, 64)
+                       c.Assert(err, Equals, nil)
+                       c.Check(roots[i], Equals, FakeSvcRoot(svc_id))
+               }
+       }
+}
diff --git a/sdk/go/keepclient/support.go b/sdk/go/keepclient/support.go
new file mode 100644 (file)
index 0000000..c24849e
--- /dev/null
@@ -0,0 +1,262 @@
+/* Internal methods to support keepclient.go */
+package keepclient
+
+import (
+       "crypto/md5"
+       "errors"
+       "fmt"
+       "git.curoverse.com/arvados.git/sdk/go/streamer"
+       "io"
+       "io/ioutil"
+       "log"
+       "net"
+       "net/http"
+       "os"
+       "strings"
+       "time"
+)
+
+type keepDisk struct {
+       Uuid     string `json:"uuid"`
+       Hostname string `json:"service_host"`
+       Port     int    `json:"service_port"`
+       SSL      bool   `json:"service_ssl_flag"`
+       SvcType  string `json:"service_type"`
+}
+
+// Md5String returns the lowercase hex MD5 digest of s.
+func Md5String(s string) string {
+       return fmt.Sprintf("%x", md5.Sum([]byte(s)))
+}
+
+// Set timeouts apply when connecting to keepproxy services (assumed to be over
+// the Internet). Only applied if no timeout has been configured yet
+// (Client.Timeout == 0), so explicit caller settings are preserved.
+func (this *KeepClient) setClientSettingsProxy() {
+       if this.Client.Timeout == 0 {
+               // Maximum time to wait for a complete response
+               this.Client.Timeout = 300 * time.Second
+
+               // TCP and TLS connection settings
+               this.Client.Transport = &http.Transport{
+                       Dial: (&net.Dialer{
+                               // The maximum time to wait to set up
+                               // the initial TCP connection.
+                               Timeout: 30 * time.Second,
+
+                               // The TCP keep alive heartbeat
+                               // interval.
+                               KeepAlive: 120 * time.Second,
+                       }).Dial,
+
+                       TLSHandshakeTimeout: 10 * time.Second,
+               }
+       }
+
+}
+
+// Set timeouts apply when connecting to keepstore services directly (assumed
+// to be on the local network). Tighter limits than the proxy settings;
+// only applied if no timeout has been configured yet.
+func (this *KeepClient) setClientSettingsStore() {
+       if this.Client.Timeout == 0 {
+               // Maximum time to wait for a complete response
+               this.Client.Timeout = 20 * time.Second
+
+               // TCP and TLS connection timeouts
+               this.Client.Transport = &http.Transport{
+                       Dial: (&net.Dialer{
+                               // The maximum time to wait to set up
+                               // the initial TCP connection.
+                               Timeout: 2 * time.Second,
+
+                               // The TCP keep alive heartbeat
+                               // interval.
+                               KeepAlive: 180 * time.Second,
+                       }).Dial,
+
+                       TLSHandshakeTimeout: 4 * time.Second,
+               }
+       }
+}
+
+// DiscoverKeepServers populates the client's service roots. If
+// ARVADOS_KEEP_PROXY is set, that single proxy is used; otherwise the
+// accessible keep_services are fetched from the API server (falling
+// back to the older keep_disks endpoint). Client timeouts are chosen
+// for proxy or direct-store access accordingly.
+func (this *KeepClient) DiscoverKeepServers() error {
+       // Environment variable overrides discovery entirely.
+       if prx := os.Getenv("ARVADOS_KEEP_PROXY"); prx != "" {
+               sr := map[string]string{"proxy": prx}
+               this.SetServiceRoots(sr)
+               this.Using_proxy = true
+               this.setClientSettingsProxy()
+               return nil
+       }
+
+       type svcList struct {
+               Items []keepDisk `json:"items"`
+       }
+       var m svcList
+
+       err := this.Arvados.Call("GET", "keep_services", "", "accessible", nil, &m)
+
+       if err != nil {
+               // Older API servers: fall back to listing keep_disks.
+               if err := this.Arvados.List("keep_disks", nil, &m); err != nil {
+                       return err
+               }
+       }
+
+       listed := make(map[string]bool)
+       service_roots := make(map[string]string)
+
+       for _, element := range m.Items {
+               n := ""
+
+               if element.SSL {
+                       n = "s"
+               }
+
+               // Construct server URL
+               url := fmt.Sprintf("http%s://%s:%d", n, element.Hostname, element.Port)
+
+               // Skip duplicates
+               if !listed[url] {
+                       listed[url] = true
+                       service_roots[element.Uuid] = url
+               }
+               if element.SvcType == "proxy" {
+                       this.Using_proxy = true
+               }
+       }
+
+       if this.Using_proxy {
+               this.setClientSettingsProxy()
+       } else {
+               this.setClientSettingsStore()
+       }
+
+       this.SetServiceRoots(service_roots)
+
+       return nil
+}
+
+// uploadStatus is the result of one upload attempt, sent back from an
+// uploadToKeepServer goroutine to putReplicas.
+type uploadStatus struct {
+       err             error
+       url             string
+       statusCode      int
+       replicas_stored int
+       response        string
+}
+
+// uploadToKeepServer PUTs one block to a single Keep server and
+// reports the outcome on upload_status. It always consumes/closes the
+// body reader, and reads the replica count from the
+// X-Keep-Replicas-Stored response header (default 1).
+func (this KeepClient) uploadToKeepServer(host string, hash string, body io.ReadCloser,
+       upload_status chan<- uploadStatus, expectedLength int64, requestId string) {
+
+       var req *http.Request
+       var err error
+       var url = fmt.Sprintf("%s/%s", host, hash)
+       if req, err = http.NewRequest("PUT", url, nil); err != nil {
+               log.Printf("[%v] Error creating request PUT %v error: %v", requestId, url, err.Error())
+               upload_status <- uploadStatus{err, url, 0, 0, ""}
+               body.Close()
+               return
+       }
+
+       req.ContentLength = expectedLength
+       if expectedLength > 0 {
+               // http.Client.Do will close the body ReadCloser when it is
+               // done with it.
+               req.Body = body
+       } else {
+               // "For client requests, a value of 0 means unknown if Body is
+               // not nil."  In this case we do want the body to be empty, so
+               // don't set req.Body.  However, we still need to close the
+               // body ReadCloser.
+               body.Close()
+       }
+
+       req.Header.Add("Authorization", fmt.Sprintf("OAuth2 %s", this.Arvados.ApiToken))
+       req.Header.Add("Content-Type", "application/octet-stream")
+
+       if this.Using_proxy {
+               req.Header.Add(X_Keep_Desired_Replicas, fmt.Sprint(this.Want_replicas))
+       }
+
+       var resp *http.Response
+       if resp, err = this.Client.Do(req); err != nil {
+               log.Printf("[%v] Upload failed %v error: %v", requestId, url, err.Error())
+               upload_status <- uploadStatus{err, url, 0, 0, ""}
+               return
+       }
+
+       rep := 1
+       if xr := resp.Header.Get(X_Keep_Replicas_Stored); xr != "" {
+               fmt.Sscanf(xr, "%d", &rep)
+       }
+
+       // Drain then close the body (defers run LIFO) so the
+       // connection can be reused by the keep-alive pool.
+       defer resp.Body.Close()
+       defer io.Copy(ioutil.Discard, resp.Body)
+
+       // Read at most 4 KiB of the response body for the locator/error text.
+       respbody, err2 := ioutil.ReadAll(&io.LimitedReader{resp.Body, 4096})
+       response := strings.TrimSpace(string(respbody))
+       if err2 != nil && err2 != io.EOF {
+               log.Printf("[%v] Upload %v error: %v response: %v", requestId, url, err2.Error(), response)
+               upload_status <- uploadStatus{err2, url, resp.StatusCode, rep, response}
+       } else if resp.StatusCode == http.StatusOK {
+               log.Printf("[%v] Upload %v success", requestId, url)
+               upload_status <- uploadStatus{nil, url, resp.StatusCode, rep, response}
+       } else {
+               log.Printf("[%v] Upload %v error: %v response: %v", requestId, url, resp.StatusCode, response)
+               upload_status <- uploadStatus{errors.New(resp.Status), url, resp.StatusCode, rep, response}
+       }
+}
+
+// putReplicas uploads the block to servers in rendezvous order until
+// Want_replicas replicas are stored, keeping at most one in-flight
+// upload per still-needed replica. Returns the locator reported by
+// the last successful server, the replica count achieved, and
+// InsufficientReplicasError if the server list is exhausted first.
+func (this KeepClient) putReplicas(
+       hash string,
+       tr *streamer.AsyncStream,
+       expectedLength int64) (locator string, replicas int, err error) {
+
+       // Take the hash of locator and timestamp in order to identify this
+       // specific transaction in log statements.
+       // NOTE(review): locator (a named return) is still "" here, so
+       // the id is effectively a hash of the timestamp alone — confirm
+       // whether hash was intended instead.
+       requestId := fmt.Sprintf("%x", md5.Sum([]byte(locator+time.Now().String())))[0:8]
+
+       // Calculate the ordering for uploading to servers
+       sv := NewRootSorter(this.ServiceRoots(), hash).GetSortedRoots()
+
+       // The next server to try contacting
+       next_server := 0
+
+       // The number of active writers
+       active := 0
+
+       // Used to communicate status from the upload goroutines
+       upload_status := make(chan uploadStatus)
+       defer close(upload_status)
+
+       // Desired number of replicas
+       remaining_replicas := this.Want_replicas
+
+       for remaining_replicas > 0 {
+               for active < remaining_replicas {
+                       // Start some upload requests
+                       if next_server < len(sv) {
+                               log.Printf("[%v] Begin upload %s to %s", requestId, hash, sv[next_server])
+                               go this.uploadToKeepServer(sv[next_server], hash, tr.MakeStreamReader(), upload_status, expectedLength, requestId)
+                               next_server += 1
+                               active += 1
+                       } else {
+                               // No servers left to try: give up unless
+                               // uploads are still in flight.
+                               if active == 0 {
+                                       return locator, (this.Want_replicas - remaining_replicas), InsufficientReplicasError
+                               } else {
+                                       break
+                               }
+                       }
+               }
+               log.Printf("[%v] Replicas remaining to write: %v active uploads: %v",
+                       requestId, remaining_replicas, active)
+
+               // Now wait for something to happen.
+               status := <-upload_status
+               active -= 1
+
+               if status.statusCode == 200 {
+                       // good news!
+                       remaining_replicas -= status.replicas_stored
+                       locator = status.response
+               }
+       }
+
+       return locator, this.Want_replicas, nil
+}
diff --git a/sdk/go/streamer/streamer.go b/sdk/go/streamer/streamer.go
new file mode 100644 (file)
index 0000000..2217dd3
--- /dev/null
@@ -0,0 +1,130 @@
+/* AsyncStream pulls data in from a io.Reader source (such as a file or network
+socket) and fans out to any number of StreamReader sinks.
+
+Unlike io.TeeReader() or io.MultiWriter(), new StreamReaders can be created at
+any point in the lifetime of the AsyncStream, and each StreamReader will read
+the contents of the buffer up to the "frontier" of the buffer, at which point
+the StreamReader blocks until new data is read from the source.
+
+This is useful for minimizing readthrough latency as sinks can read and act on
+data from the source without waiting for the source to be completely buffered.
+It is also useful as a cache in situations where re-reading the original source
+potentially is costly, since the buffer retains a copy of the source data.
+
+Usage:
+
+Begin reading into a buffer with maximum size 'buffersize' from 'source':
+  stream := AsyncStreamFromReader(buffersize, source)
+
+To create a new reader (this can be called multiple times, each reader starts
+at the beginning of the buffer):
+  reader := tr.MakeStreamReader()
+
+Make sure to close the reader when you're done with it.
+  reader.Close()
+
+When you're done with the stream:
+  stream.Close()
+
+Alternately, if you already have a filled buffer and just want to read out from it:
+  stream := AsyncStreamFromSlice(buf)
+
+  r := tr.MakeStreamReader()
+
+*/
+
+package streamer
+
+import (
+       "io"
+)
+
+// AsyncStream owns the shared buffer and the channels through which
+// readers request slices and register/unregister themselves. All
+// coordination happens via the goroutines started by the constructors.
+type AsyncStream struct {
+       buffer            []byte
+       requests          chan sliceRequest
+       add_reader        chan bool
+       subtract_reader   chan bool
+       wait_zero_readers chan bool
+}
+
+// Reads from the buffer managed by the Transfer().
+// Each StreamReader tracks its own offset and has a private channel
+// on which slice responses arrive.
+type StreamReader struct {
+       offset    int
+       stream    *AsyncStream
+       responses chan sliceResult
+}
+
+// AsyncStreamFromReader starts an AsyncStream that fills a buffer of
+// buffersize bytes from source in the background; readers can start
+// consuming immediately.
+func AsyncStreamFromReader(buffersize int, source io.Reader) *AsyncStream {
+       t := &AsyncStream{make([]byte, buffersize), make(chan sliceRequest), make(chan bool), make(chan bool), make(chan bool)}
+
+       go t.transfer(source)
+       go t.readersMonitor()
+
+       return t
+}
+
+// AsyncStreamFromSlice starts an AsyncStream over an already-filled
+// buffer (nil source: nothing more to transfer in).
+func AsyncStreamFromSlice(buf []byte) *AsyncStream {
+       t := &AsyncStream{buf, make(chan sliceRequest), make(chan bool), make(chan bool), make(chan bool)}
+
+       go t.transfer(nil)
+       go t.readersMonitor()
+
+       return t
+}
+
+// MakeStreamReader registers a new reader with the stream and returns
+// it positioned at the start of the buffer.
+func (this *AsyncStream) MakeStreamReader() *StreamReader {
+       this.add_reader <- true
+       return &StreamReader{0, this, make(chan sliceResult)}
+}
+
+// Reads from the buffer managed by the Transfer().
+// Read requests up to len(p) bytes at the current offset, blocking at
+// the buffer frontier. A closed responses channel yields
+// io.ErrUnexpectedEOF.
+func (this *StreamReader) Read(p []byte) (n int, err error) {
+       this.stream.requests <- sliceRequest{this.offset, len(p), this.responses}
+       rr, valid := <-this.responses
+       if valid {
+               this.offset += len(rr.slice)
+               return copy(p, rr.slice), rr.err
+       } else {
+               return 0, io.ErrUnexpectedEOF
+       }
+}
+
+func (this *StreamReader) WriteTo(dest io.Writer) (written int64, err error) {
+       // Record starting offset in order to correctly report the number of bytes sent
+       starting_offset := this.offset
+       for {
+               this.stream.requests <- sliceRequest{this.offset, 32 * 1024, this.responses}
+               rr, valid := <-this.responses
+               if valid {
+                       this.offset += len(rr.slice)
+                       if rr.err != nil {
+                               if rr.err == io.EOF {
+                                       // EOF is not an error.
+                                       return int64(this.offset - starting_offset), nil
+                               } else {
+                                       return int64(this.offset - starting_offset), rr.err
+                               }
+                       } else {
+                               dest.Write(rr.slice)
+                       }
+               } else {
+                       return int64(this.offset), io.ErrUnexpectedEOF
+               }
+       }
+}
+
+// Close the responses channel
+// Close deregisters the reader from the stream, closes its responses
+// channel, and drops the stream reference. Do not use the reader
+// afterwards.
+func (this *StreamReader) Close() error {
+       this.stream.subtract_reader <- true
+       close(this.responses)
+       this.stream = nil
+       return nil
+}
+
+// Close waits until no readers remain, then shuts down all of the
+// stream's coordination channels. Call only after every StreamReader
+// has been closed.
+func (this *AsyncStream) Close() {
+       this.wait_zero_readers <- true
+       close(this.requests)
+       close(this.add_reader)
+       close(this.subtract_reader)
+       close(this.wait_zero_readers)
+}
diff --git a/sdk/go/streamer/streamer_test.go b/sdk/go/streamer/streamer_test.go
new file mode 100644 (file)
index 0000000..853d7d3
--- /dev/null
@@ -0,0 +1,366 @@
+package streamer
+
+import (
+       . "gopkg.in/check.v1"
+       "io"
+       "testing"
+       "time"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) { TestingT(t) }
+
+var _ = Suite(&StandaloneSuite{})
+
+// Standalone tests
+type StandaloneSuite struct{}
+
+func (s *StandaloneSuite) TestReadIntoBuffer(c *C) {
+       ReadIntoBufferHelper(c, 225)
+       ReadIntoBufferHelper(c, 224)
+}
+
+func HelperWrite128andCheck(c *C, buffer []byte, writer io.Writer, slices chan nextSlice) {
+       out := make([]byte, 128)
+       for i := 0; i < 128; i += 1 {
+               out[i] = byte(i)
+       }
+       writer.Write(out)
+       s1 := <-slices
+       c.Check(len(s1.slice), Equals, 128)
+       c.Check(s1.reader_error, Equals, nil)
+       for i := 0; i < 128; i += 1 {
+               c.Check(s1.slice[i], Equals, byte(i))
+       }
+       for i := 0; i < len(buffer); i += 1 {
+               if i < 128 {
+                       c.Check(buffer[i], Equals, byte(i))
+               } else {
+                       c.Check(buffer[i], Equals, byte(0))
+               }
+       }
+}
+
+func HelperWrite96andCheck(c *C, buffer []byte, writer io.Writer, slices chan nextSlice) {
+       out := make([]byte, 96)
+       for i := 0; i < 96; i += 1 {
+               out[i] = byte(i / 2)
+       }
+       writer.Write(out)
+       s1 := <-slices
+       c.Check(len(s1.slice), Equals, 96)
+       c.Check(s1.reader_error, Equals, nil)
+       for i := 0; i < 96; i += 1 {
+               c.Check(s1.slice[i], Equals, byte(i/2))
+       }
+       for i := 0; i < len(buffer); i += 1 {
+               if i < 128 {
+                       c.Check(buffer[i], Equals, byte(i))
+               } else if i < (128 + 96) {
+                       c.Check(buffer[i], Equals, byte((i-128)/2))
+               } else {
+                       c.Check(buffer[i], Equals, byte(0))
+               }
+       }
+}
+
+func ReadIntoBufferHelper(c *C, bufsize int) {
+       buffer := make([]byte, bufsize)
+
+       reader, writer := io.Pipe()
+       slices := make(chan nextSlice)
+
+       go readIntoBuffer(buffer, reader, slices)
+
+       HelperWrite128andCheck(c, buffer, writer, slices)
+       HelperWrite96andCheck(c, buffer, writer, slices)
+
+       writer.Close()
+       s1 := <-slices
+       c.Check(len(s1.slice), Equals, 0)
+       c.Check(s1.reader_error, Equals, io.EOF)
+}
+
+func (s *StandaloneSuite) TestReadIntoShortBuffer(c *C) {
+       buffer := make([]byte, 223)
+       reader, writer := io.Pipe()
+       slices := make(chan nextSlice)
+
+       go readIntoBuffer(buffer, reader, slices)
+
+       HelperWrite128andCheck(c, buffer, writer, slices)
+
+       out := make([]byte, 96)
+       for i := 0; i < 96; i += 1 {
+               out[i] = byte(i / 2)
+       }
+
+       // Write will deadlock because it can't write all the data, so
+       // spin it off to a goroutine
+       go writer.Write(out)
+       s1 := <-slices
+
+       c.Check(len(s1.slice), Equals, 95)
+       c.Check(s1.reader_error, Equals, nil)
+       for i := 0; i < 95; i += 1 {
+               c.Check(s1.slice[i], Equals, byte(i/2))
+       }
+       for i := 0; i < len(buffer); i += 1 {
+               if i < 128 {
+                       c.Check(buffer[i], Equals, byte(i))
+               } else if i < (128 + 95) {
+                       c.Check(buffer[i], Equals, byte((i-128)/2))
+               } else {
+                       c.Check(buffer[i], Equals, byte(0))
+               }
+       }
+
+       writer.Close()
+       s1 = <-slices
+       c.Check(len(s1.slice), Equals, 0)
+       c.Check(s1.reader_error, Equals, io.ErrShortBuffer)
+}
+
+func (s *StandaloneSuite) TestTransfer(c *C) {
+       reader, writer := io.Pipe()
+
+       tr := AsyncStreamFromReader(512, reader)
+
+       br1 := tr.MakeStreamReader()
+       out := make([]byte, 128)
+
+       {
+               // Write some data, and read into a buffer shorter than
+               // available data
+               for i := 0; i < 128; i += 1 {
+                       out[i] = byte(i)
+               }
+
+               writer.Write(out[:100])
+
+               in := make([]byte, 64)
+               n, err := br1.Read(in)
+
+               c.Check(n, Equals, 64)
+               c.Check(err, Equals, nil)
+
+               for i := 0; i < 64; i += 1 {
+                       c.Check(in[i], Equals, out[i])
+               }
+       }
+
+       {
+               // Write some more data, and read into buffer longer than
+               // available data
+               in := make([]byte, 64)
+               n, err := br1.Read(in)
+               c.Check(n, Equals, 36)
+               c.Check(err, Equals, nil)
+
+               for i := 0; i < 36; i += 1 {
+                       c.Check(in[i], Equals, out[64+i])
+               }
+
+       }
+
+       {
+               // Test read before write
+               type Rd struct {
+                       n   int
+                       err error
+               }
+               rd := make(chan Rd)
+               in := make([]byte, 64)
+
+               go func() {
+                       n, err := br1.Read(in)
+                       rd <- Rd{n, err}
+               }()
+
+               time.Sleep(100 * time.Millisecond)
+               writer.Write(out[100:])
+
+               got := <-rd
+
+               c.Check(got.n, Equals, 28)
+               c.Check(got.err, Equals, nil)
+
+               for i := 0; i < 28; i += 1 {
+                       c.Check(in[i], Equals, out[100+i])
+               }
+       }
+
+       br2 := tr.MakeStreamReader()
+       {
+               // Test 'catch up' reader
+               in := make([]byte, 256)
+               n, err := br2.Read(in)
+
+               c.Check(n, Equals, 128)
+               c.Check(err, Equals, nil)
+
+               for i := 0; i < 128; i += 1 {
+                       c.Check(in[i], Equals, out[i])
+               }
+       }
+
+       {
+               // Test closing the reader
+               writer.Close()
+
+               in := make([]byte, 256)
+               n1, err1 := br1.Read(in)
+               n2, err2 := br2.Read(in)
+               c.Check(n1, Equals, 0)
+               c.Check(err1, Equals, io.EOF)
+               c.Check(n2, Equals, 0)
+               c.Check(err2, Equals, io.EOF)
+       }
+
+       {
+               // Test 'catch up' reader after closing
+               br3 := tr.MakeStreamReader()
+               in := make([]byte, 256)
+               n, err := br3.Read(in)
+
+               c.Check(n, Equals, 128)
+               c.Check(err, Equals, nil)
+
+               for i := 0; i < 128; i += 1 {
+                       c.Check(in[i], Equals, out[i])
+               }
+
+               n, err = br3.Read(in)
+
+               c.Check(n, Equals, 0)
+               c.Check(err, Equals, io.EOF)
+       }
+}
+
+func (s *StandaloneSuite) TestTransferShortBuffer(c *C) {
+       reader, writer := io.Pipe()
+
+       tr := AsyncStreamFromReader(100, reader)
+       defer tr.Close()
+
+       sr := tr.MakeStreamReader()
+       defer sr.Close()
+
+       out := make([]byte, 101)
+       go writer.Write(out)
+
+       n, err := sr.Read(out)
+       c.Check(n, Equals, 100)
+
+       n, err = sr.Read(out)
+       c.Check(n, Equals, 0)
+       c.Check(err, Equals, io.ErrShortBuffer)
+}
+
+func (s *StandaloneSuite) TestTransferFromBuffer(c *C) {
+       // Buffer for reads from 'r'
+       buffer := make([]byte, 100)
+       for i := 0; i < 100; i += 1 {
+               buffer[i] = byte(i)
+       }
+
+       tr := AsyncStreamFromSlice(buffer)
+
+       br1 := tr.MakeStreamReader()
+
+       in := make([]byte, 64)
+       {
+               n, err := br1.Read(in)
+
+               c.Check(n, Equals, 64)
+               c.Check(err, Equals, nil)
+
+               for i := 0; i < 64; i += 1 {
+                       c.Check(in[i], Equals, buffer[i])
+               }
+       }
+       {
+               n, err := br1.Read(in)
+
+               c.Check(n, Equals, 36)
+               c.Check(err, Equals, nil)
+
+               for i := 0; i < 36; i += 1 {
+                       c.Check(in[i], Equals, buffer[64+i])
+               }
+       }
+       {
+               n, err := br1.Read(in)
+
+               c.Check(n, Equals, 0)
+               c.Check(err, Equals, io.EOF)
+       }
+}
+
+func (s *StandaloneSuite) TestTransferIoCopy(c *C) {
+       // Buffer for reads from 'r'
+       buffer := make([]byte, 100)
+       for i := 0; i < 100; i += 1 {
+               buffer[i] = byte(i)
+       }
+
+       tr := AsyncStreamFromSlice(buffer)
+       defer tr.Close()
+
+       br1 := tr.MakeStreamReader()
+       defer br1.Close()
+
+       reader, writer := io.Pipe()
+
+       go func() {
+               p := make([]byte, 100)
+               n, err := reader.Read(p)
+               c.Check(n, Equals, 100)
+               c.Check(err, Equals, nil)
+               c.Check(p, DeepEquals, buffer)
+       }()
+
+       io.Copy(writer, br1)
+}
+
+func (s *StandaloneSuite) TestManyReaders(c *C) {
+       reader, writer := io.Pipe()
+
+       tr := AsyncStreamFromReader(512, reader)
+       defer tr.Close()
+
+       sr := tr.MakeStreamReader()
+       go func() {
+               time.Sleep(100 * time.Millisecond)
+               sr.Close()
+       }()
+
+       for i := 0; i < 200; i += 1 {
+               go func() {
+                       br1 := tr.MakeStreamReader()
+                       defer br1.Close()
+
+                       p := make([]byte, 3)
+                       n, err := br1.Read(p)
+                       c.Check(n, Equals, 3)
+                       c.Check(p[0:3], DeepEquals, []byte("foo"))
+
+                       n, err = br1.Read(p)
+                       c.Check(n, Equals, 3)
+                       c.Check(p[0:3], DeepEquals, []byte("bar"))
+
+                       n, err = br1.Read(p)
+                       c.Check(n, Equals, 3)
+                       c.Check(p[0:3], DeepEquals, []byte("baz"))
+
+                       n, err = br1.Read(p)
+                       c.Check(n, Equals, 0)
+                       c.Check(err, Equals, io.EOF)
+               }()
+       }
+
+       writer.Write([]byte("foo"))
+       writer.Write([]byte("bar"))
+       writer.Write([]byte("baz"))
+       writer.Close()
+}
diff --git a/sdk/go/streamer/transfer.go b/sdk/go/streamer/transfer.go
new file mode 100644 (file)
index 0000000..a4a194f
--- /dev/null
@@ -0,0 +1,308 @@
+/* Internal implementation of AsyncStream.
+Outline of operation:
+
+The kernel is the transfer() goroutine.  It manages concurrent reads and
+appends to the "body" slice.  "body" is a slice of "source_buffer" that
+represents the segment of the buffer that is already filled in and available
+for reading.
+
+To fill in the buffer, transfer() starts the readIntoBuffer() goroutine to read
+from the io.Reader source directly into source_buffer.  Each read goes into a
+slice of buffer which spans the section immediately following the end of the
+current "body".  Each time a Read completes, a slice representing the the
+section just filled in (or any read errors/EOF) is sent over the "slices"
+channel back to the transfer() function.
+
+Meanwhile, the transfer() function selects() on two channels, the "requests"
+channel and the "slices" channel.
+
+When a message is recieved on the "slices" channel, this means the a new
+section of the buffer has data, or an error is signaled.  Since the data has
+been read directly into the source_buffer, it is able to simply increases the
+size of the body slice to encompass the newly filled in section.  Then any
+pending reads are serviced with handleReadRequest (described below).
+
+When a message is recieved on the "requests" channel, it means a StreamReader
+wants access to a slice of the buffer.  This is passed to handleReadRequest().
+
+The handleReadRequest() function takes a sliceRequest consisting of a buffer
+offset, maximum size, and channel to send the response.  If there was an error
+reported from the source reader, it is returned.  If the offset is less than
+the size of the body, the request can proceed, and it sends a body slice
+spanning the segment from offset to min(offset+maxsize, end of the body).  If
+source reader status is EOF (done filling the buffer) and the read request
+offset is beyond end of the body, it responds with EOF.  Otherwise, the read
+request is for a slice beyond the current size of "body" but we expect the body
+to expand as more data is added, so the request gets added to a wait list.
+
+The transfer() runs until the requests channel is closed by AsyncStream.Close()
+
+To track readers, streamer uses the readersMonitor() goroutine.  This goroutine
+chooses which channels to receive from based on the number of outstanding
+readers.  When a new reader is created, it sends a message on the add_reader
+channel.  If the number of readers is already at MAX_READERS, this blocks the
+sender until an existing reader is closed.  When a reader is closed, it sends a
+message on the subtract_reader channel.  Finally, when AsyncStream.Close() is
+called, it sends a message on the wait_zero_readers channel, which will block
+the sender unless there are zero readers and it is safe to shut down the
+AsyncStream.
+*/
+
+package streamer
+
+import (
+       "io"
+)
+
+const MAX_READERS = 100
+
+// A slice passed from readIntoBuffer() to transfer()
+type nextSlice struct {
+       slice        []byte
+       reader_error error
+}
+
+// A read request to the Transfer() function
+type sliceRequest struct {
+       offset  int
+       maxsize int
+       result  chan<- sliceResult
+}
+
+// A read result from the Transfer() function
+type sliceResult struct {
+       slice []byte
+       err   error
+}
+
+// Supports writing into a buffer
+type bufferWriter struct {
+       buf []byte
+       ptr int
+}
+
+// Copy p into this.buf, increment pointer and return number of bytes read.
+func (this *bufferWriter) Write(p []byte) (n int, err error) {
+       n = copy(this.buf[this.ptr:], p)
+       this.ptr += n
+       return n, nil
+}
+
+// Read repeatedly from the reader and write sequentially into the specified
+// buffer, and report each read to channel 'c'.  Completes when Reader 'r'
+// reports on the error channel and closes channel 'c'.
+func readIntoBuffer(buffer []byte, r io.Reader, slices chan<- nextSlice) {
+       defer close(slices)
+
+       if writeto, ok := r.(io.WriterTo); ok {
+               n, err := writeto.WriteTo(&bufferWriter{buffer, 0})
+               if err != nil {
+                       slices <- nextSlice{nil, err}
+               } else {
+                       slices <- nextSlice{buffer[:n], nil}
+                       slices <- nextSlice{nil, io.EOF}
+               }
+               return
+       } else {
+               // Initially entire buffer is available
+               ptr := buffer[:]
+               for {
+                       var n int
+                       var err error
+                       if len(ptr) > 0 {
+                               const readblock = 64 * 1024
+                               // Read 64KiB into the next part of the buffer
+                               if len(ptr) > readblock {
+                                       n, err = r.Read(ptr[:readblock])
+                               } else {
+                                       n, err = r.Read(ptr)
+                               }
+                       } else {
+                               // Ran out of buffer space, try reading one more byte
+                               var b [1]byte
+                               n, err = r.Read(b[:])
+
+                               if n > 0 {
+                                       // Reader has more data but we have nowhere to
+                                       // put it, so we're stuffed
+                                       slices <- nextSlice{nil, io.ErrShortBuffer}
+                               } else {
+                                       // Return some other error (hopefully EOF)
+                                       slices <- nextSlice{nil, err}
+                               }
+                               return
+                       }
+
+                       // End on error (includes EOF)
+                       if err != nil {
+                               slices <- nextSlice{nil, err}
+                               return
+                       }
+
+                       if n > 0 {
+                               // Make a slice with the contents of the read
+                               slices <- nextSlice{ptr[:n], nil}
+
+                               // Adjust the scratch space slice
+                               ptr = ptr[n:]
+                       }
+               }
+       }
+}
+
+// Handle a read request.  Returns true if a response was sent, and false if
+// the request should be queued.
+func handleReadRequest(req sliceRequest, body []byte, reader_status error) bool {
+       if (reader_status != nil) && (reader_status != io.EOF) {
+               req.result <- sliceResult{nil, reader_status}
+               return true
+       } else if req.offset < len(body) {
+               var end int
+               if req.offset+req.maxsize < len(body) {
+                       end = req.offset + req.maxsize
+               } else {
+                       end = len(body)
+               }
+               req.result <- sliceResult{body[req.offset:end], nil}
+               return true
+       } else if (reader_status == io.EOF) && (req.offset >= len(body)) {
+               req.result <- sliceResult{nil, io.EOF}
+               return true
+       } else {
+               return false
+       }
+}
+
+// Mediates between reads and appends.
+// If 'source_reader' is not nil, reads data from 'source_reader' and stores it
+// in the provided buffer.  Otherwise, use the contents of 'buffer' as is.
+// Accepts read requests on the buffer on the 'requests' channel.  Completes
+// when 'requests' channel is closed.
+func (this *AsyncStream) transfer(source_reader io.Reader) {
+       source_buffer := this.buffer
+       requests := this.requests
+
+       // currently buffered data
+       var body []byte
+
+       // for receiving slices from readIntoBuffer
+       var slices chan nextSlice = nil
+
+       // indicates the status of the underlying reader
+       var reader_status error = nil
+
+       if source_reader != nil {
+               // 'body' is the buffer slice representing the body content read so far
+               body = source_buffer[:0]
+
+               // used to communicate slices of the buffer as they are
+               // readIntoBuffer will close 'slices' when it is done with it
+               slices = make(chan nextSlice)
+
+               // Spin it off
+               go readIntoBuffer(source_buffer, source_reader, slices)
+       } else {
+               // use the whole buffer
+               body = source_buffer[:]
+
+               // buffer is complete
+               reader_status = io.EOF
+       }
+
+       pending_requests := make([]sliceRequest, 0)
+
+       for {
+               select {
+               case req, valid := <-requests:
+                       // Handle a buffer read request
+                       if valid {
+                               if !handleReadRequest(req, body, reader_status) {
+                                       pending_requests = append(pending_requests, req)
+                               }
+                       } else {
+                               // closed 'requests' channel indicates we're done
+                               return
+                       }
+
+               case bk, valid := <-slices:
+                       // Got a new slice from the reader
+                       if valid {
+                               reader_status = bk.reader_error
+
+                               if bk.slice != nil {
+                                       // adjust body bounds now that another slice has been read
+                                       body = source_buffer[0 : len(body)+len(bk.slice)]
+                               }
+
+                               // handle pending reads
+                               n := 0
+                               for n < len(pending_requests) {
+                                       if handleReadRequest(pending_requests[n], body, reader_status) {
+                                               // move the element from the back of the slice to
+                                               // position 'n', then shorten the slice by one element
+                                               pending_requests[n] = pending_requests[len(pending_requests)-1]
+                                               pending_requests = pending_requests[0 : len(pending_requests)-1]
+                                       } else {
+
+                                               // Request wasn't handled, so keep it in the request slice
+                                               n += 1
+                                       }
+                               }
+                       } else {
+                               if reader_status == io.EOF {
+                                       // no more reads expected, so this is ok
+                               } else {
+                                       // slices channel closed without signaling EOF
+                                       reader_status = io.ErrUnexpectedEOF
+                               }
+                               slices = nil
+                       }
+               }
+       }
+}
+
+func (this *AsyncStream) readersMonitor() {
+       var readers int = 0
+
+       for {
+               if readers == 0 {
+                       select {
+                       case _, ok := <-this.wait_zero_readers:
+                               if ok {
+                                       // nothing, just implicitly unblock the sender
+                               } else {
+                                       return
+                               }
+                       case _, ok := <-this.add_reader:
+                               if ok {
+                                       readers += 1
+                               } else {
+                                       return
+                               }
+                       }
+               } else if readers > 0 && readers < MAX_READERS {
+                       select {
+                       case _, ok := <-this.add_reader:
+                               if ok {
+                                       readers += 1
+                               } else {
+                                       return
+                               }
+
+                       case _, ok := <-this.subtract_reader:
+                               if ok {
+                                       readers -= 1
+                               } else {
+                                       return
+                               }
+                       }
+               } else if readers == MAX_READERS {
+                       _, ok := <-this.subtract_reader
+                       if ok {
+                               readers -= 1
+                       } else {
+                               return
+                       }
+               }
+       }
+}
diff --git a/sdk/java/.classpath b/sdk/java/.classpath
new file mode 100644 (file)
index 0000000..27d14a1
--- /dev/null
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<classpath>
+       <classpathentry including="**/*.java" kind="src" output="target/test-classes" path="src/test/java"/>
+       <classpathentry including="**/*.java" kind="src" path="src/main/java"/>
+       <classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
+       <classpathentry kind="var" path="M2_REPO/com/google/apis/google-api-services-discovery/v1-rev42-1.18.0-rc/google-api-services-discovery-v1-rev42-1.18.0-rc.jar"/>
+       <classpathentry kind="var" path="M2_REPO/com/google/api-client/google-api-client/1.18.0-rc/google-api-client-1.18.0-rc.jar"/>
+       <classpathentry kind="var" path="M2_REPO/com/google/http-client/google-http-client/1.18.0-rc/google-http-client-1.18.0-rc.jar"/>
+       <classpathentry kind="var" path="M2_REPO/com/google/code/findbugs/jsr305/1.3.9/jsr305-1.3.9.jar"/>
+       <classpathentry kind="var" path="M2_REPO/org/apache/httpcomponents/httpclient/4.0.1/httpclient-4.0.1.jar"/>
+       <classpathentry kind="var" path="M2_REPO/org/apache/httpcomponents/httpcore/4.0.1/httpcore-4.0.1.jar"/>
+       <classpathentry kind="var" path="M2_REPO/commons-logging/commons-logging/1.1.1/commons-logging-1.1.1.jar"/>
+       <classpathentry kind="var" path="M2_REPO/commons-codec/commons-codec/1.3/commons-codec-1.3.jar"/>
+       <classpathentry kind="var" path="M2_REPO/com/google/http-client/google-http-client-jackson2/1.18.0-rc/google-http-client-jackson2-1.18.0-rc.jar"/>
+       <classpathentry kind="var" path="M2_REPO/com/fasterxml/jackson/core/jackson-core/2.1.3/jackson-core-2.1.3.jar"/>
+       <classpathentry kind="var" path="M2_REPO/com/google/guava/guava/r05/guava-r05.jar"/>
+       <classpathentry kind="var" path="M2_REPO/log4j/log4j/1.2.16/log4j-1.2.16.jar"/>
+       <classpathentry kind="var" path="M2_REPO/com/googlecode/json-simple/json-simple/1.1.1/json-simple-1.1.1.jar"/>
+       <classpathentry kind="var" path="M2_REPO/junit/junit/4.8.1/junit-4.8.1.jar"/>
+       <classpathentry kind="output" path="target/classes"/>
+</classpath>
diff --git a/sdk/java/.project b/sdk/java/.project
new file mode 100644 (file)
index 0000000..40c2bdf
--- /dev/null
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+  <name>java</name>
+  <comment>NO_M2ECLIPSE_SUPPORT: Project files created with the maven-eclipse-plugin are not supported in M2Eclipse.</comment>
+  <projects/>
+  <buildSpec>
+    <buildCommand>
+      <name>org.eclipse.jdt.core.javabuilder</name>
+    </buildCommand>
+  </buildSpec>
+  <natures>
+    <nature>org.eclipse.jdt.core.javanature</nature>
+  </natures>
+</projectDescription>
\ No newline at end of file
diff --git a/sdk/java/.settings/org.eclipse.jdt.core.prefs b/sdk/java/.settings/org.eclipse.jdt.core.prefs
new file mode 100644 (file)
index 0000000..f4f19ea
--- /dev/null
@@ -0,0 +1,5 @@
+#Mon Apr 28 10:33:40 EDT 2014
+org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.6
+eclipse.preferences.version=1
+org.eclipse.jdt.core.compiler.source=1.6
+org.eclipse.jdt.core.compiler.compliance=1.6
diff --git a/sdk/java/ArvadosSDKJavaExample.java b/sdk/java/ArvadosSDKJavaExample.java
new file mode 100644 (file)
index 0000000..7c9c013
--- /dev/null
@@ -0,0 +1,80 @@
+/**
+ * This Sample test program is useful in getting started with working with Arvados Java SDK.
+ * @author radhika
+ *
+ */
+
+import org.arvados.sdk.java.Arvados;
+
+import java.io.File;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+public class ArvadosSDKJavaExample {
+  /** Make sure the following environment variables are set before using Arvados:
+   *      ARVADOS_API_TOKEN, ARVADOS_API_HOST and ARVADOS_API_HOST_INSECURE 
+   *      Set ARVADOS_API_HOST_INSECURE to true if you are using self-singed
+   *      certificates in development and want to bypass certificate validations.
+   *
+   *  If you are not using env variables, you can pass them to Arvados constructor.
+   *
+   *  Please refer to http://doc.arvados.org/api/index.html for a complete list
+   *      of the available API methods.
+   */
+  public static void main(String[] args) throws Exception {
+    String apiName = "arvados";
+    String apiVersion = "v1";
+
+    Arvados arv = new Arvados(apiName, apiVersion);
+
+    // Make a users list call. Here list on users is the method being invoked.
+    // Expect a Map containing the list of users as the response.
+    System.out.println("Making an arvados users.list api call");
+
+    Map<String, Object> params = new HashMap<String, Object>();
+
+    Map response = arv.call("users", "list", params);
+    System.out.println("Arvados users.list:\n");
+    printResponse(response);
+    
+    // get uuid of the first user from the response
+    List items = (List)response.get("items");
+
+    Map firstUser = (Map)items.get(0);
+    String userUuid = (String)firstUser.get("uuid");
+    
+    // Make a users get call on the uuid obtained above
+    System.out.println("\n\n\nMaking a users.get call for " + userUuid);
+    params = new HashMap<String, Object>();
+    params.put("uuid", userUuid);
+    response = arv.call("users", "get", params);
+    System.out.println("Arvados users.get:\n");
+    printResponse(response);
+
+    // Make a pipeline_templates list call
+    System.out.println("\n\n\nMaking a pipeline_templates.list call.");
+
+    params = new HashMap<String, Object>();
+    response = arv.call("pipeline_templates", "list", params);
+
+    System.out.println("Arvados pipelinetempates.list:\n");
+    printResponse(response);
+  }
+  
+  private static void printResponse(Map response){
+    Set<Entry<String,Object>> entrySet = (Set<Entry<String,Object>>)response.entrySet();
+    for (Map.Entry<String, Object> entry : entrySet) {
+      if ("items".equals(entry.getKey())) {
+        List items = (List)entry.getValue();
+        for (Object item : items) {
+          System.out.println("    " + item);
+        }            
+      } else {
+        System.out.println(entry.getKey() + " = " + entry.getValue());
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/sdk/java/ArvadosSDKJavaExampleWithPrompt.java b/sdk/java/ArvadosSDKJavaExampleWithPrompt.java
new file mode 100644 (file)
index 0000000..93ba3aa
--- /dev/null
@@ -0,0 +1,123 @@
+/**
+ * This Sample test program is useful in getting started with using Arvados Java SDK.
+ * This program creates an Arvados instance using the configured environment variables.
+ * It then provides a prompt to input method name and input parameters. 
+ * The program then invokes the API server to execute the specified method.  
+ * 
+ * @author radhika
+ */
+
+import org.arvados.sdk.java.Arvados;
+
+import java.io.File;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+
+public class ArvadosSDKJavaExampleWithPrompt {
+  /**
+   * Interactive REPL-style driver for the Arvados Java SDK.
+   *
+   * Make sure the following environment variables are set before using Arvados:
+   * ARVADOS_API_TOKEN, ARVADOS_API_HOST and ARVADOS_API_HOST_INSECURE. Set
+   * ARVADOS_API_HOST_INSECURE to true if you are using self-signed certificates
+   * in development and want to bypass certificate validations.
+   * 
+   * Please refer to http://doc.arvados.org/api/index.html for a complete list
+   * of the available API methods.
+   */
+  public static void main(String[] args) throws Exception {
+    String apiName = "arvados";
+    String apiVersion = "v1";
+
+    System.out.print("Welcome to Arvados Java SDK.");
+    System.out.println("\nYou can use this example to call API methods interactively.");
+    System.out.println("\nPlease refer to http://doc.arvados.org/api/index.html for api documentation");
+    System.out.println("\nTo make the calls, enter input data at the prompt.");
+    System.out.println("When entering parameters, you may enter a simple string or a well-formed json.");
+    System.out.println("For example to get a user you may enter:  user, zzzzz-12345-67890");
+    System.out.println("Or to filter links, you may enter:  filters, [[ \"name\", \"=\", \"can_manage\"]]");
+
+    System.out.println("\nEnter ^C when you want to quit");
+
+    // use configured env variables for API TOKEN, HOST and HOST_INSECURE
+    Arvados arv = new Arvados(apiName, apiVersion);
+
+    // Loop forever: each iteration sets up and executes one API call.
+    // The only exit is ^C, as advertised above.
+    while (true) {
+      try {
+        // prompt for resource
+        System.out.println("\n\nEnter Resource name (for example users)");
+        System.out.println("\nAvailable resources are: " + arv.getAvailableResourses());
+        System.out.print("\n>>> ");
+
+        // read resource name
+        // NOTE(review): a new reader is constructed on every iteration; it
+        // wraps System.in (never closed), so nothing leaks, but it could be
+        // hoisted above the loop.
+        BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
+        String resourceName = in.readLine().trim();
+        if ("".equals(resourceName)) {
+          throw (new Exception("No resource name entered"));
+        }
+        // read method name
+        System.out.println("\nEnter method name (for example get)");
+        System.out.println("\nAvailable methods are: " + arv.getAvailableMethodsForResourse(resourceName));
+        System.out.print("\n>>> ");
+        String methodName = in.readLine().trim();
+        if ("".equals(methodName)) {
+          throw (new Exception("No method name entered"));
+        }
+
+        // read method parameters, one "name, value" pair per line, until an
+        // empty line terminates the list
+        System.out.println("\nEnter parameter name, value (for example uuid, uuid-value)");
+        System.out.println("\nAvailable parameters are: " + 
+              arv.getAvailableParametersForMethod(resourceName, methodName));
+        
+        System.out.print("\n>>> ");
+        Map paramsMap = new HashMap();
+        String param = "";
+        try {
+          do {
+            param = in.readLine();
+            if (param.isEmpty())
+              break;
+            // NOTE(review): a line with no comma makes indexOf return -1 and
+            // substring throw StringIndexOutOfBoundsException; the catch
+            // below turns that into "Set up a new call".
+            int index = param.indexOf(","); // first comma
+            String paramName = param.substring(0, index);
+            String paramValue = param.substring(index+1);
+            paramsMap.put(paramName.trim(), paramValue.trim());
+
+            System.out.println("\nEnter parameter name, value (for example uuid, uuid-value)");
+            System.out.print("\n>>> ");
+          } while (!param.isEmpty());
+        } catch (Exception e) {
+          System.out.println (e.getMessage());
+          System.out.println ("\nSet up a new call");
+          continue;
+        }
+
+        // Make a "call" for the given resource name and method name
+        try {
+          System.out.println ("Making a call for " + resourceName + " " + methodName);
+          Map response = arv.call(resourceName, methodName, paramsMap);
+
+          // Print the response: "items" lists one entry per line, everything
+          // else as "key = value".
+          Set<Entry<String,Object>> entrySet = (Set<Entry<String,Object>>)response.entrySet();
+          for (Map.Entry<String, Object> entry : entrySet) {
+            if ("items".equals(entry.getKey())) {
+              List items = (List)entry.getValue();
+              for (Object item : items) {
+                System.out.println("    " + item);
+              }            
+            } else {
+              System.out.println(entry.getKey() + " = " + entry.getValue());
+            }
+          }
+        } catch (Exception e){
+          System.out.println (e.getMessage());
+          System.out.println ("\nSet up a new call");
+        }
+      } catch (Exception e) {
+        System.out.println (e.getMessage());
+        System.out.println ("\nSet up a new call");
+      }
+    }
+  }
+}
diff --git a/sdk/java/README b/sdk/java/README
new file mode 100644 (file)
index 0000000..0933b88
--- /dev/null
@@ -0,0 +1,4 @@
+Welcome to Arvados Java SDK.
+
+Please refer to http://doc.arvados.org/sdk/java/index.html to get started
+    with Arvados Java SDK.
diff --git a/sdk/java/pom.xml b/sdk/java/pom.xml
new file mode 100644 (file)
index 0000000..53e8f75
--- /dev/null
@@ -0,0 +1,106 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.arvados.sdk.java</groupId>
+  <artifactId>java</artifactId>
+  <packaging>jar</packaging>
+  <version>1.0-SNAPSHOT</version>
+  <name>java</name>
+  <url>http://maven.apache.org</url>
+
+  <dependencies>
+    <dependency>
+      <groupId>com.google.apis</groupId>
+      <artifactId>google-api-services-discovery</artifactId>
+      <version>v1-rev42-1.18.0-rc</version>
+    </dependency>
+    <dependency>
+      <groupId>com.google.api-client</groupId>
+      <artifactId>google-api-client</artifactId>
+      <version>1.18.0-rc</version>
+    </dependency>
+    <dependency>
+      <groupId>com.google.http-client</groupId>
+      <artifactId>google-http-client-jackson2</artifactId>
+      <version>1.18.0-rc</version>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+      <version>r05</version>
+    </dependency>
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+      <version>1.2.16</version>
+    </dependency>
+    <dependency>
+      <groupId>com.googlecode.json-simple</groupId>
+      <artifactId>json-simple</artifactId>
+      <version>1.1.1</version>
+    </dependency>
+
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <version>4.8.1</version>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <finalName>arvados-sdk-1.0</finalName>
+
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <version>3.1</version>
+        <configuration>
+          <source>1.6</source>
+          <target>1.6</target>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <executions>
+          <execution>
+            <goals>
+              <goal>attached</goal>
+            </goals>
+            <phase>package</phase>
+            <configuration>
+              <descriptorRefs>
+                <descriptorRef>jar-with-dependencies</descriptorRef>
+              </descriptorRefs>
+              <archive>
+                <manifest>
+                  <!-- Fully-qualified entry point: the Arvados class lives in
+                       package org.arvados.sdk.java (see Arvados.java); the
+                       previous value org.arvados.sdk.Arvados pointed at a
+                       nonexistent class, breaking "java -jar". -->
+                  <mainClass>org.arvados.sdk.java.Arvados</mainClass>
+                </manifest>
+                <manifestEntries>
+                  <!--<Premain-Class>Your.agent.class</Premain-Class> <Agent-Class>Your.agent.class</Agent-Class> -->
+                  <Can-Redefine-Classes>true</Can-Redefine-Classes>
+                  <Can-Retransform-Classes>true</Can-Retransform-Classes>
+                </manifestEntries>
+              </archive>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+    <resources>
+      <resource>
+        <directory>src/main/resources</directory>
+        <targetPath>${basedir}/target/classes</targetPath>
+        <includes>
+          <include>log4j.properties</include>
+        </includes>
+        <filtering>true</filtering>
+      </resource>
+      <resource>
+        <directory>src/test/resources</directory>
+        <filtering>true</filtering>
+      </resource>
+    </resources>
+  </build>
+</project>
diff --git a/sdk/java/src/main/java/org/arvados/sdk/java/Arvados.java b/sdk/java/src/main/java/org/arvados/sdk/java/Arvados.java
new file mode 100644 (file)
index 0000000..2c03639
--- /dev/null
@@ -0,0 +1,454 @@
+package org.arvados.sdk.java;
+
+import com.google.api.client.http.javanet.*;
+import com.google.api.client.http.ByteArrayContent;
+import com.google.api.client.http.GenericUrl;
+import com.google.api.client.http.HttpContent;
+import com.google.api.client.http.HttpRequest;
+import com.google.api.client.http.HttpRequestFactory;
+import com.google.api.client.http.HttpTransport;
+import com.google.api.client.http.UriTemplate;
+import com.google.api.client.json.JsonFactory;
+import com.google.api.client.json.jackson2.JacksonFactory;
+import com.google.api.client.util.Maps;
+import com.google.api.services.discovery.Discovery;
+import com.google.api.services.discovery.model.JsonSchema;
+import com.google.api.services.discovery.model.RestDescription;
+import com.google.api.services.discovery.model.RestMethod;
+import com.google.api.services.discovery.model.RestMethod.Request;
+import com.google.api.services.discovery.model.RestResource;
+
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.log4j.Logger;
+import org.json.simple.JSONArray;
+import org.json.simple.JSONObject;
+
+/**
+ * This class provides a java SDK interface to Arvados API server.
+ * 
+ * Please refer to http://doc.arvados.org/api/ to learn about the
+ *  various resources and methods exposed by the API server.
+ *  
+ * @author radhika
+ */
+public class Arvados {
+  // HttpTransport and JsonFactory are thread-safe. So, use global instances.
+  private HttpTransport httpTransport;
+  private final JsonFactory jsonFactory = JacksonFactory.getDefaultInstance();
+
+  // Endpoint and credentials, from constructor args or ARVADOS_* env vars.
+  private String arvadosApiToken;
+  private String arvadosApiHost;
+  private boolean arvadosApiHostInsecure;
+
+  private String arvadosRootUrl;
+
+  private static final Logger logger = Logger.getLogger(Arvados.class);
+
+  // Get it once and reuse on the call requests
+  RestDescription restDescription = null;
+  String apiName = null;
+  String apiVersion = null;
+
+  /**
+   * Create a client using the ARVADOS_API_TOKEN, ARVADOS_API_HOST and
+   * ARVADOS_API_HOST_INSECURE environment variables for configuration.
+   */
+  public Arvados (String apiName, String apiVersion) throws Exception {
+    this (apiName, apiVersion, null, null, null);
+  }
+
+  /**
+   * Create a client. Any of token / host / hostInsecure that are null are
+   * read from the corresponding ARVADOS_* environment variable instead.
+   * Fetches the API discovery document eagerly, so construction fails if
+   * the server is unreachable.
+   */
+  public Arvados (String apiName, String apiVersion, String token,
+      String host, String hostInsecure) throws Exception {
+    this.apiName = apiName;
+    this.apiVersion = apiVersion;
+
+    // Read needed environmental variables if they are not passed
+    if (token != null) {
+      arvadosApiToken = token;
+    } else {
+      arvadosApiToken = System.getenv().get("ARVADOS_API_TOKEN");
+      if (arvadosApiToken == null) {
+        throw new Exception("Missing environment variable: ARVADOS_API_TOKEN");
+      }
+    }
+
+    if (host != null) {
+      arvadosApiHost = host;
+    } else {
+      arvadosApiHost = System.getenv().get("ARVADOS_API_HOST");      
+      if (arvadosApiHost == null) {
+        throw new Exception("Missing environment variable: ARVADOS_API_HOST");
+      }
+    }
+    arvadosRootUrl = "https://" + arvadosApiHost;
+    arvadosRootUrl += (arvadosApiHost.endsWith("/")) ? "" : "/";
+
+    if (hostInsecure != null) {
+      arvadosApiHostInsecure = Boolean.valueOf(hostInsecure);
+    } else {
+      arvadosApiHostInsecure =
+          "true".equals(System.getenv().get("ARVADOS_API_HOST_INSECURE")) ? true : false;
+    }
+
+    // Create HTTP_TRANSPORT object; skip TLS validation only when the
+    // insecure flag was explicitly requested (dev / self-signed certs).
+    NetHttpTransport.Builder builder = new NetHttpTransport.Builder();
+    if (arvadosApiHostInsecure) {
+      builder.doNotValidateCertificate();
+    }
+    httpTransport = builder.build();
+
+    // initialize rest description
+    restDescription = loadArvadosApi();
+  }
+
+  /**
+   * Make a call to API server with the provide call information.
+   * @param resourceName
+   * @param methodName
+   * @param paramsMap
+   * @return Map
+   * @throws Exception
+   */
+  public Map call(String resourceName, String methodName,
+      Map<String, Object> paramsMap) throws Exception {
+    RestMethod method = getMatchingMethod(resourceName, methodName);
+
+    HashMap<String, Object> parameters = loadParameters(paramsMap, method);
+
+    GenericUrl url = new GenericUrl(UriTemplate.expand(
+        arvadosRootUrl + restDescription.getBasePath() + method.getPath(), 
+        parameters, true));
+
+    try {
+      // construct the request
+      HttpRequestFactory requestFactory;
+      requestFactory = httpTransport.createRequestFactory();
+
+      // possibly required content
+      HttpContent content = null;
+
+      if (!method.getHttpMethod().equals("GET") &&
+          !method.getHttpMethod().equals("DELETE")) {
+        // NOTE(review): naive singularization — drops only the final
+        // character ("users" -> "user"); irregular plurals would produce
+        // the wrong content-object key.
+        String objectName = resourceName.substring(0, resourceName.length()-1);
+        Object requestBody = paramsMap.get(objectName);
+        if (requestBody == null) {
+          error("POST method requires content object " + objectName);
+        }
+
+        // The request body is expected to already be a JSON string
+        // (e.g. params.put("user", "{}")).
+        content = new ByteArrayContent("application/json",((String)requestBody).getBytes());
+      }
+
+      HttpRequest request =
+          requestFactory.buildRequest(method.getHttpMethod(), url, content);
+
+      // make the request, authenticating with the OAuth2 bearer token
+      List<String> authHeader = new ArrayList<String>();
+      authHeader.add("OAuth2 " + arvadosApiToken);
+      request.getHeaders().put("Authorization", authHeader);
+      String response = request.execute().parseAsString();
+
+      Map responseMap = jsonFactory.createJsonParser(response).parse(HashMap.class);
+
+      logger.debug(responseMap);
+
+      return responseMap;
+    } catch (Exception e) {
+      e.printStackTrace();
+      throw e;
+    }
+  }
+
+  /**
+   * Get all supported resources by the API
+   * @return Set
+   */
+  public Set<String> getAvailableResourses() {
+    return (restDescription.getResources().keySet());
+  }
+
+  /**
+   * Get all supported method names for the given resource
+   * @param resourceName
+   * @return Set
+   * @throws Exception
+   */
+  public Set<String> getAvailableMethodsForResourse(String resourceName)
+      throws Exception {
+    Map<String, RestMethod> methodMap = getMatchingMethodMap (resourceName);
+    return (methodMap.keySet());
+  }
+
+  /**
+   * Get the parameters for the method in the resource sought.
+   * Returns a map with two keys, "required" and "optional", each mapping
+   * to a list of parameter names.
+   * @param resourceName
+   * @param methodName
+   * @return Set
+   * @throws Exception
+   */
+  public Map<String,List<String>> getAvailableParametersForMethod(String resourceName, String methodName)
+      throws Exception {
+    RestMethod method = getMatchingMethod(resourceName, methodName);
+    Map<String, List<String>> parameters = new HashMap<String, List<String>>();
+    List<String> requiredParameters = new ArrayList<String>();
+    List<String> optionalParameters = new ArrayList<String>();
+    parameters.put ("required", requiredParameters);
+    parameters.put("optional", optionalParameters);
+
+    try {
+      // get any request parameters
+      Request request = method.getRequest();
+      if (request != null) {
+        Object required = request.get("required");
+        Object requestProperties = request.get("properties");
+        if (requestProperties != null) {
+          if (requestProperties instanceof Map) {
+            Map properties = (Map)requestProperties;
+            Set<String> propertyKeys = properties.keySet();
+            for (String property : propertyKeys) {
+              // NOTE(review): "required" is a single flag on the request,
+              // so all body properties land in the same bucket.
+              if (Boolean.TRUE.equals(required)) {
+                requiredParameters.add(property);
+              } else {
+                optionalParameters.add(property);                
+              }
+            }
+          }
+        }
+      }
+
+      // get other listed parameters
+      Map<String,JsonSchema> methodParameters = method.getParameters();
+      for (Map.Entry<String, JsonSchema> entry : methodParameters.entrySet()) {
+        if (Boolean.TRUE.equals(entry.getValue().getRequired())) {
+          requiredParameters.add(entry.getKey());
+        } else {
+          optionalParameters.add(entry.getKey());
+        }
+      }
+    } catch (Exception e){
+      // best-effort: return whatever was gathered before the failure
+      logger.error(e);
+    }
+
+    return parameters;
+  }
+
+  // Validate the caller's params against the method schema and convert each
+  // value to the wire representation expected by the server.
+  private HashMap<String, Object> loadParameters(Map<String, Object> paramsMap,
+      RestMethod method) throws Exception {
+    HashMap<String, Object> parameters = Maps.newHashMap();
+
+    // required parameters
+    if (method.getParameterOrder() != null) {
+      for (String parameterName : method.getParameterOrder()) {
+        JsonSchema parameter = method.getParameters().get(parameterName);
+        if (Boolean.TRUE.equals(parameter.getRequired())) {
+          Object parameterValue = paramsMap.get(parameterName);
+          if (parameterValue == null) {
+            error("missing required parameter: " + parameter);
+          } else {
+            putParameter(null, parameters, parameterName, parameter, parameterValue);
+          }
+        }
+      }
+    }
+
+    for (Map.Entry<String, Object> entry : paramsMap.entrySet()) {
+      String parameterName = entry.getKey();
+      Object parameterValue = entry.getValue();
+
+      if (parameterName.equals("contentType")) {
+        if (method.getHttpMethod().equals("GET") || method.getHttpMethod().equals("DELETE")) {
+          error("HTTP content type cannot be specified for this method: " + parameterName);
+        }
+      } else {
+        // look up the schema: API-wide parameters first, then method-level
+        JsonSchema parameter = null;
+        if (restDescription.getParameters() != null) {
+          parameter = restDescription.getParameters().get(parameterName);
+        }
+        if (parameter == null && method.getParameters() != null) {
+          parameter = method.getParameters().get(parameterName);
+        }
+        putParameter(parameterName, parameters, parameterName, parameter, parameterValue);
+      }
+    }
+
+    return parameters;
+  }
+
+  // Resolve resourceName/methodName against the discovery document;
+  // throws (via error()) when either is missing or unknown.
+  private RestMethod getMatchingMethod(String resourceName, String methodName)
+      throws Exception {
+    Map<String, RestMethod> methodMap = getMatchingMethodMap(resourceName);
+
+    if (methodName == null) {
+      error("missing method name");      
+    }
+
+    RestMethod method =
+        methodMap == null ? null : methodMap.get(methodName);
+    if (method == null) {
+      error("method not found: ");
+    }
+
+    return method;
+  }
+
+  private Map<String, RestMethod> getMatchingMethodMap(String resourceName)
+      throws Exception {
+    if (resourceName == null) {
+      error("missing resource name");      
+    }
+
+    Map<String, RestMethod> methodMap = null;
+    Map<String, RestResource> resources = restDescription.getResources();
+    RestResource resource = resources.get(resourceName);
+    if (resource == null) {
+      error("resource not found");
+    }
+    methodMap = resource.getMethods();
+    return methodMap;
+  }
+
+  /**
+   * Fetch the API discovery (REST description) document from the server.
+   * Not thread-safe. So, create for each request.
+   * @param apiName
+   * @param apiVersion
+   * @return
+   * @throws Exception
+   */
+  private RestDescription loadArvadosApi()
+      throws Exception {
+    try {
+      Discovery discovery;
+
+      Discovery.Builder discoveryBuilder =
+          new Discovery.Builder(httpTransport, jsonFactory, null);
+
+      discoveryBuilder.setRootUrl(arvadosRootUrl);
+      discoveryBuilder.setApplicationName(apiName);
+
+      discovery = discoveryBuilder.build();
+
+      return discovery.apis().getRest(apiName, apiVersion).execute();
+    } catch (Exception e) {
+      e.printStackTrace();
+      throw e;
+    }
+  }
+
+  /**
+   * Convert the input parameter into its equivalent json string.
+   * Add this json string value to the parameters map to be sent to server.
+   * @param argName
+   * @param parameters
+   * @param parameterName
+   * @param parameter
+   * @param parameterValue
+   * @throws Exception
+   */
+  private void putParameter(String argName, Map<String, Object> parameters,
+      String parameterName, JsonSchema parameter, Object parameterValue)
+          throws Exception {
+    Object value = parameterValue;
+    if (parameter != null) {
+      if ("boolean".equals(parameter.getType())) {
+        value = Boolean.valueOf(parameterValue.toString());
+      } else if ("number".equals(parameter.getType())) {
+        value = new BigDecimal(parameterValue.toString());
+      } else if ("integer".equals(parameter.getType())) {
+        value = new BigInteger(parameterValue.toString());
+      } else if ("float".equals(parameter.getType())) {
+        value = new BigDecimal(parameterValue.toString());
+      } else if ("Java.util.Calendar".equals(parameter.getType())) {
+        // NOTE(review): literal type string with capital "J" — confirm this
+        // matches what the discovery document actually emits.
+        value = new BigDecimal(parameterValue.toString());
+      } else if (("array".equals(parameter.getType())) ||
+          ("Array".equals(parameter.getType()))) {
+        if (parameterValue.getClass().isArray()){
+          value = getJsonValueFromArrayType(parameterValue);
+        } else if (List.class.isAssignableFrom(parameterValue.getClass())) {
+          value = getJsonValueFromListType(parameterValue);
+        }
+      } else if (("Hash".equals(parameter.getType())) ||
+          ("hash".equals(parameter.getType()))) {
+        value = getJsonValueFromMapType(parameterValue);
+      } else {
+        // untyped schema: fall back on the runtime type of the value
+        if (parameterValue.getClass().isArray()){
+          value = getJsonValueFromArrayType(parameterValue);
+        } else if (List.class.isAssignableFrom(parameterValue.getClass())) {
+          value = getJsonValueFromListType(parameterValue);
+        } else if (Map.class.isAssignableFrom(parameterValue.getClass())) {
+          value = getJsonValueFromMapType(parameterValue);
+        }
+      }
+    }
+
+    parameters.put(parameterName, value);
+  }
+
+  /**
+   * Convert the given input array into json string before sending to server.
+   * @param parameterValue
+   * @return
+   */
+  private String getJsonValueFromArrayType (Object parameterValue) {
+    String arrayStr = Arrays.deepToString((Object[])parameterValue);
+
+    // we can expect either an array of array objects or an array of objects
+    if (arrayStr.startsWith("[[") && arrayStr.endsWith("]]")) {
+      // NOTE(review): this local is never used
+      Object[][] array = new Object[1][];
+      arrayStr = arrayStr.substring(2, arrayStr.length()-2);
+      String jsonStr = getJsonStringForArrayStr(arrayStr);
+      String value = "[" + jsonStr + "]";
+      return value;
+    } else {
+      arrayStr = arrayStr.substring(1, arrayStr.length()-1);
+      return (getJsonStringForArrayStr(arrayStr));
+    }
+  }
+
+  // NOTE(review): splitting on "," breaks any element whose string form
+  // itself contains a comma; adequate for simple scalar elements only.
+  private String getJsonStringForArrayStr(String arrayStr) {
+    Object[] array = arrayStr.split(",");
+    Object[] trimmedArray = new Object[array.length];
+    for (int i=0; i<array.length; i++){
+      trimmedArray[i] = array[i].toString().trim();
+    }
+    String value = JSONArray.toJSONString(Arrays.asList(trimmedArray));
+    return value;
+  }
+
+  /**
+   * Convert the given input List into json string before sending to server.
+   * @param parameterValue
+   * @return
+   */
+  private String getJsonValueFromListType (Object parameterValue) {
+    List paramList = (List)parameterValue;
+    Object[] array = new Object[paramList.size()];
+    // NOTE(review): deepToString's return value is discarded here; only the
+    // toArray(array) side effect matters.
+    Arrays.deepToString(paramList.toArray(array));
+    return (getJsonValueFromArrayType(array));
+  }
+
+  /**
+   * Convert the given input map into json string before sending to server.
+   * @param parameterValue
+   * @return
+   */
+  private String getJsonValueFromMapType (Object parameterValue) {
+    JSONObject json = new JSONObject((Map)parameterValue);
+    return json.toString();
+  }
+
+  // Log and raise a uniform "ERROR: ..." exception.
+  // NOTE(review): logged at debug level — consider logger.error.
+  private static void error(String detail) throws Exception {
+    String errorDetail = "ERROR: " + detail;
+
+    logger.debug(errorDetail);
+    throw new Exception(errorDetail);
+  }
+
+  public static void main(String[] args){
+    System.out.println("Welcome to Arvados Java SDK.");
+    System.out.println("Please refer to http://doc.arvados.org/sdk/java/index.html to get started with the the SDK.");
+  }
+
+}
diff --git a/sdk/java/src/main/java/org/arvados/sdk/java/MethodDetails.java b/sdk/java/src/main/java/org/arvados/sdk/java/MethodDetails.java
new file mode 100644 (file)
index 0000000..2479246
--- /dev/null
@@ -0,0 +1,22 @@
+package org.arvados.sdk.java;
+
+import com.google.api.client.util.Lists;
+import com.google.api.client.util.Sets;
+
+import java.util.ArrayList;
+import java.util.SortedSet;
+
+/**
+ * Value holder describing one API method: its name, required and optional
+ * parameter names, and whether it takes a request body.
+ * Ordered alphabetically by method name.
+ */
+public class MethodDetails implements Comparable<MethodDetails> {
+    String name;
+    ArrayList<String> requiredParameters = Lists.newArrayList();
+    SortedSet<String> optionalParameters = Sets.newTreeSet();
+    boolean hasContent;
+
+    // NOTE(review): compareTo is not consistent with equals/hashCode
+    // (neither is overridden); fine for sorting, avoid in sorted sets/maps.
+    @Override
+    public int compareTo(MethodDetails o) {
+      if (o == this) {
+        return 0;
+      }
+      return name.compareTo(o.name);
+    }
+}
\ No newline at end of file
diff --git a/sdk/java/src/main/resources/log4j.properties b/sdk/java/src/main/resources/log4j.properties
new file mode 100644 (file)
index 0000000..89a9b93
--- /dev/null
@@ -0,0 +1,11 @@
+# To change log location, change log4j.appender.fileAppender.File 
+
+log4j.rootLogger=DEBUG, fileAppender
+
+log4j.appender.fileAppender=org.apache.log4j.RollingFileAppender
+log4j.appender.fileAppender.File=${basedir}/log/arvados_sdk_java.log
+log4j.appender.fileAppender.Append=true
+# Rolling settings must use the appender's own name ("fileAppender");
+# the previous "log4j.appender.file.*" keys referenced a nonexistent
+# appender and were silently ignored, so the log never rolled.
+log4j.appender.fileAppender.MaxFileSize=10MB
+log4j.appender.fileAppender.MaxBackupIndex=10
+log4j.appender.fileAppender.layout=org.apache.log4j.PatternLayout
+log4j.appender.fileAppender.layout.ConversionPattern=[%d] %-5p %c %L %x - %m%n
diff --git a/sdk/java/src/test/java/org/arvados/sdk/java/ArvadosTest.java b/sdk/java/src/test/java/org/arvados/sdk/java/ArvadosTest.java
new file mode 100644 (file)
index 0000000..5176e8c
--- /dev/null
@@ -0,0 +1,464 @@
+package org.arvados.sdk.java;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.math.BigDecimal;
+import java.util.ArrayList;
+import java.util.Calendar;
+import java.util.Date;
+import java.util.GregorianCalendar;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+/**
+ * Unit test for Arvados.
+ */
+public class ArvadosTest {
+
+  /**
+   * Test users.list api.
+   * Requires a reachable API server configured via the ARVADOS_* env vars.
+   * @throws Exception
+   */
+  @Test
+  public void testCallUsersList() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+
+    Map<String, Object> params = new HashMap<String, Object>();
+
+    Map response = arv.call("users", "list", params);
+    assertEquals("Expected kind to be users.list", "arvados#userList", response.get("kind"));
+
+    List items = (List)response.get("items");
+    assertNotNull("expected users list items", items);
+    assertTrue("expected at least one item in users list", items.size()>0);
+
+    Map firstUser = (Map)items.get(0);
+    assertNotNull ("Expcted at least one user", firstUser);
+
+    assertEquals("Expected kind to be user", "arvados#user", firstUser.get("kind"));
+    assertNotNull("Expected uuid for first user", firstUser.get("uuid"));
+  }
+
+  /**
+   * Test users.get <uuid> api: list users, then fetch the first one by uuid
+   * and verify the same record comes back.
+   * @throws Exception
+   */
+  @Test
+  public void testCallUsersGet() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+
+    // call user.system and get uuid of this user
+    Map<String, Object> params = new HashMap<String, Object>();
+
+    Map response = arv.call("users", "list", params);
+
+    assertNotNull("expected users list", response);
+    List items = (List)response.get("items");
+    assertNotNull("expected users list items", items);
+
+    Map firstUser = (Map)items.get(0);
+    String userUuid = (String)firstUser.get("uuid");
+
+    // invoke users.get with the system user uuid
+    params = new HashMap<String, Object>();
+    params.put("uuid", userUuid);
+
+    response = arv.call("users", "get", params);
+
+    assertNotNull("Expected uuid for first user", response.get("uuid"));
+    assertEquals("Expected system user uuid", userUuid, response.get("uuid"));
+  }
+
+  /**
+   * Test users.create api: create an empty user, delete it, then verify a
+   * subsequent get fails with a not-found error.
+   * @throws Exception
+   */
+  @Test
+  public void testCreateUser() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+
+    Map<String, Object> params = new HashMap<String, Object>();
+    // request body is a JSON string keyed by the singular resource name
+    params.put("user", "{}");
+    Map response = arv.call("users", "create", params);
+
+    assertEquals("Expected kind to be user", "arvados#user", response.get("kind"));
+
+    Object uuid = response.get("uuid");
+    assertNotNull("Expected uuid for first user", uuid);
+
+    // delete the object
+    params = new HashMap<String, Object>();
+    params.put("uuid", uuid);
+    response = arv.call("users", "delete", params);
+
+    // invoke users.get with the system user uuid
+    params = new HashMap<String, Object>();
+    params.put("uuid", uuid);
+
+    Exception caught = null;
+    try {
+      arv.call("users", "get", params);
+    } catch (Exception e) {
+      caught = e;
+    }
+
+    assertNotNull ("expected exception", caught);
+    assertTrue ("Expected 404", caught.getMessage().contains("Path not found"));
+  }
+
+  // users.create without the "user" content object must fail client-side
+  // with the "POST method requires content object" error.
+  @Test
+  public void testCreateUserWithMissingRequiredParam() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+
+    Map<String, Object> params = new HashMap<String, Object>();
+
+    Exception caught = null;
+    try {
+      arv.call("users", "create", params);
+    } catch (Exception e) {
+      caught = e;
+    }
+
+    assertNotNull ("expected exception", caught);
+    assertTrue ("Expected POST method requires content object user", 
+        caught.getMessage().contains("ERROR: POST method requires content object user"));
+  }
+
+  /**
+   * Test users.create followed by users.update on the new record, then
+   * clean up with users.delete.
+   * @throws Exception
+   */
+  @Test
+  public void testCreateAndUpdateUser() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+
+    Map<String, Object> params = new HashMap<String, Object>();
+    params.put("user", "{}");
+    Map response = arv.call("users", "create", params);
+
+    assertEquals("Expected kind to be user", "arvados#user", response.get("kind"));
+
+    Object uuid = response.get("uuid");
+    assertNotNull("Expected uuid for first user", uuid);
+
+    // update this user
+    params = new HashMap<String, Object>();
+    params.put("user", "{}");
+    params.put("uuid", uuid);
+    response = arv.call("users", "update", params);
+
+    assertEquals("Expected kind to be user", "arvados#user", response.get("kind"));
+
+    uuid = response.get("uuid");
+    assertNotNull("Expected uuid for first user", uuid);
+
+    // delete the object
+    params = new HashMap<String, Object>();
+    params.put("uuid", uuid);
+    response = arv.call("users", "delete", params);
+  }
+
+  /**
+   * Test unsupported api name: constructing the client with an unknown API
+   * name must fail with a 404 from the discovery service.
+   * @throws Exception
+   */
+  @Test
+  public void testUnsupportedApiName() throws Exception {
+    Exception caught = null;
+    try {
+      Arvados arv = new Arvados("not_arvados", "v1");
+    } catch (Exception e) {
+      caught = e;
+    }
+
+    assertNotNull ("expected exception", caught);
+    assertTrue ("Expected 404 when unsupported api is used", caught.getMessage().contains("404 Not Found"));
+  }
+
+  /**
+   * Test unsupported api version api
+   * @throws Exception
+   */
+  @Test
+  public void testUnsupportedVersion() throws Exception {
+    Exception caught = null;
+    try {
+      Arvados arv = new Arvados("arvados", "v2");
+    } catch (Exception e) {
+      caught = e;
+    }
+
+    assertNotNull ("expected exception", caught);
+    assertTrue ("Expected 404 when unsupported version is used", caught.getMessage().contains("404 Not Found"));
+  }
+
+  /**
+   * Test api call on a nonexistent resource
+   * @throws Exception
+   */
+  @Test
+  public void testCallForNoSuchResrouce() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+
+    Exception caught = null;
+    try {
+      arv.call("abcd", "list", null);
+    } catch (Exception e) {
+      caught = e;
+    }
+
+    assertNotNull ("expected exception", caught);
+    assertTrue ("Expected ERROR: 404 not found", caught.getMessage().contains("ERROR: resource not found"));
+  }
+
+  /**
+   * Test api call on a nonexistent resource method
+   * @throws Exception
+   */
+  @Test
+  public void testCallForNoSuchResrouceMethod() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+
+    Exception caught = null;
+    try {
+      arv.call("users", "abcd", null);
+    } catch (Exception e) {
+      caught = e;
+    }
+
+    assertNotNull ("expected exception", caught);
+    assertTrue ("Expected ERROR: 404 not found", caught.getMessage().contains("ERROR: method not found"));
+  }
+
+  /**
+   * Test pipeline_templates.create api
+   * @throws Exception
+   */
+  @Test
+  public void testCreateAndGetPipelineTemplate() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+
+    File file = new File(getClass().getResource( "/first_pipeline.json" ).toURI());
+    byte[] data = new byte[(int)file.length()];
+    try {
+      FileInputStream is = new FileInputStream(file);
+      is.read(data);
+      is.close();
+    }catch(Exception e) {
+      e.printStackTrace();
+    }
+
+    Map<String, Object> params = new HashMap<String, Object>();
+    params.put("pipeline_template", new String(data));
+    Map response = arv.call("pipeline_templates", "create", params);
+
+    assertEquals("Expected kind to be user", "arvados#pipelineTemplate", response.get("kind"));
+    String uuid = (String)response.get("uuid");
+    assertNotNull("Expected uuid for pipeline template", uuid);
+
+    // get the pipeline
+    params = new HashMap<String, Object>();
+    params.put("uuid", uuid);
+    response = arv.call("pipeline_templates", "get", params);
+
+    assertEquals("Expected kind to be user", "arvados#pipelineTemplate", response.get("kind"));
+    assertEquals("Expected uuid for pipeline template", uuid, response.get("uuid"));
+
+    // delete the object
+    params = new HashMap<String, Object>();
+    params.put("uuid", uuid);
+    response = arv.call("pipeline_templates", "delete", params);
+  }
+
+  /**
+   * Test users.list api with an explicitly passed token and host
+   * @throws Exception
+   */
+  @Test
+  public void testArvadosWithTokenPassed() throws Exception {
+    String token = System.getenv().get("ARVADOS_API_TOKEN");
+    String host = System.getenv().get("ARVADOS_API_HOST");      
+    String hostInsecure = System.getenv().get("ARVADOS_API_HOST_INSECURE");
+
+    Arvados arv = new Arvados("arvados", "v1", token, host, hostInsecure);
+
+    Map<String, Object> params = new HashMap<String, Object>();
+
+    Map response = arv.call("users", "list", params);
+    assertEquals("Expected kind to be users.list", "arvados#userList", response.get("kind"));
+  }
+
+  /**
+   * Test users.list api
+   * @throws Exception
+   */
+  @Test
+  public void testCallUsersListWithLimit() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+
+    Map<String, Object> params = new HashMap<String, Object>();
+
+    Map response = arv.call("users", "list", params);
+    assertEquals("Expected users.list in response", "arvados#userList", response.get("kind"));
+
+    List items = (List)response.get("items");
+    assertNotNull("expected users list items", items);
+    assertTrue("expected at least one item in users list", items.size()>0);
+
+    int numUsersListItems = items.size();
+
+    // make the request again with limit
+    params = new HashMap<String, Object>();
+    params.put("limit", numUsersListItems-1);
+
+    response = arv.call("users", "list", params);
+
+    assertEquals("Expected kind to be users.list", "arvados#userList", response.get("kind"));
+
+    items = (List)response.get("items");
+    assertNotNull("expected users list items", items);
+    assertTrue("expected at least one item in users list", items.size()>0);
+
+    int numUsersListItems2 = items.size();
+    assertEquals ("Got more users than requested", numUsersListItems-1, numUsersListItems2);
+  }
+
+  @Test
+  public void testGetLinksWithFilters() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+
+    Map<String, Object> params = new HashMap<String, Object>();
+
+    Map response = arv.call("links", "list", params);
+    assertEquals("Expected links.list in response", "arvados#linkList", response.get("kind"));
+
+    String[][] filters = new String[1][];
+    String[] condition = new String[3];
+    condition[0] = "name";
+    condition[1] = "=";
+    condition[2] = "can_manage";
+    filters[0] = condition;
+    params.put("filters", filters);
+    
+    response = arv.call("links", "list", params);
+    
+    assertEquals("Expected links.list in response", "arvados#linkList", response.get("kind"));
+    assertFalse("Expected no can_manage in response", response.toString().contains("\"name\":\"can_manage\""));
+  }
+
+  @Test
+  public void testGetLinksWithFiltersAsList() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+
+    Map<String, Object> params = new HashMap<String, Object>();
+
+    Map response = arv.call("links", "list", params);
+    assertEquals("Expected links.list in response", "arvados#linkList", response.get("kind"));
+
+    List<List> filters = new ArrayList<List>();
+    List<String> condition = new ArrayList<String>();
+    condition.add("name");
+    condition.add("is_a");
+    condition.add("can_manage");
+    filters.add(condition);
+    params.put("filters", filters);
+    
+    response = arv.call("links", "list", params);
+    
+    assertEquals("Expected links.list in response", "arvados#linkList", response.get("kind"));
+    assertFalse("Expected no can_manage in response", response.toString().contains("\"name\":\"can_manage\""));
+  }
+
+  @Test
+  public void testGetLinksWithTimestampFilters() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+
+    Map<String, Object> params = new HashMap<String, Object>();
+
+    Map response = arv.call("links", "list", params);
+    assertEquals("Expected links.list in response", "arvados#linkList", response.get("kind"));
+
+    // get links created "tomorrow". Expect none in response
+    Calendar calendar = new GregorianCalendar();
+    calendar.setTime(new Date());
+    calendar.add(Calendar.DAY_OF_MONTH, 1);
+    
+    Object[][] filters = new Object[1][];
+    Object[] condition = new Object[3];
+    condition[0] = "created_at";
+    condition[1] = ">";
+    condition[2] = calendar.get(Calendar.YEAR) + "-" + (calendar.get(Calendar.MONTH)+1) + "-" + calendar.get(Calendar.DAY_OF_MONTH);
+    filters[0] = condition;
+    params.put("filters", filters);
+    
+    response = arv.call("links", "list", params);
+    
+    assertEquals("Expected links.list in response", "arvados#linkList", response.get("kind"));
+    int items_avail = ((BigDecimal)response.get("items_available")).intValue();
+    assertEquals("Expected zero links", items_avail, 0);
+  }
+
+  @Test
+  public void testGetLinksWithWhereClause() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+
+    Map<String, Object> params = new HashMap<String, Object>();
+
+    Map<String, String> where = new HashMap<String, String>();
+    where.put("where", "updated_at > '2014-05-01'");
+    
+    params.put("where", where);
+    
+    Map response = arv.call("links", "list", params);
+    
+    assertEquals("Expected links.list in response", "arvados#linkList", response.get("kind"));
+  }
+
+  @Test
+  public void testGetAvailableResources() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+    Set<String> resources = arv.getAvailableResourses();
+    assertNotNull("Expected resources", resources);
+    assertTrue("Excected users in resrouces", resources.contains("users"));
+  }
+
+  @Test
+  public void testGetAvailableMethodsResources() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+    Set<String> methods = arv.getAvailableMethodsForResourse("users");
+    assertNotNull("Expected resources", methods);
+    assertTrue("Excected create method for users", methods.contains("create"));
+  }
+
+  @Test
+  public void testGetAvailableParametersForUsersGetMethod() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+    Map<String,List<String>> parameters = arv.getAvailableParametersForMethod("users", "get");
+    assertNotNull("Expected parameters", parameters);
+    assertTrue("Excected uuid parameter for get method for users", parameters.get("required").contains("uuid"));
+  }
+
+  @Test
+  public void testGetAvailableParametersForUsersCreateMethod() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+    Map<String,List<String>> parameters = arv.getAvailableParametersForMethod("users", "create");
+    assertNotNull("Expected parameters", parameters);
+    assertTrue("Excected user parameter for get method for users", parameters.get("required").contains("user"));
+  }
+
+  @Test
+  public void testGetAvailableParametersForUsersListMethod() throws Exception {
+    Arvados arv = new Arvados("arvados", "v1");
+    Map<String,List<String>> parameters = arv.getAvailableParametersForMethod("users", "list");
+    assertNotNull("Expected parameters", parameters);
+    assertTrue("Excected no required parameter for list method for users", parameters.get("required").size() == 0);
+    assertTrue("Excected some optional parameters for list method for users", parameters.get("optional").contains("filters"));
+  }
+
+}
\ No newline at end of file
diff --git a/sdk/java/src/test/resources/first_pipeline.json b/sdk/java/src/test/resources/first_pipeline.json
new file mode 100644 (file)
index 0000000..3caa972
--- /dev/null
@@ -0,0 +1,16 @@
+{
+  "name":"first pipeline",
+  "components":{
+    "do_hash":{
+      "script":"hash.py",
+      "script_parameters":{
+        "input":{
+          "required": true,
+          "dataclass": "Collection"
+        }
+      },
+      "script_version":"master",
+      "output_is_persistent":true
+    }
+  }
+}
diff --git a/sdk/perl/Makefile.PL b/sdk/perl/Makefile.PL
new file mode 100644 (file)
index 0000000..21e31ad
--- /dev/null
@@ -0,0 +1,10 @@
+#! /usr/bin/perl
+
+use strict;
+
+use ExtUtils::MakeMaker;
+
+WriteMakefile(
+    NAME            => 'Arvados',
+    VERSION_FROM    => 'lib/Arvados.pm'
+);
diff --git a/sdk/perl/lib/Arvados.pm b/sdk/perl/lib/Arvados.pm
new file mode 100644 (file)
index 0000000..c47f1da
--- /dev/null
@@ -0,0 +1,161 @@
+=head1 NAME
+
+Arvados -- client library for Arvados services
+
+=head1 SYNOPSIS
+
+  use Arvados;
+  $arv = Arvados->new(apiHost => 'arvados.local');
+
+  my $instances = $arv->{'pipeline_instances'}->{'list'}->execute();
+  print "UUID is ", $instances->{'items'}->[0]->{'uuid'}, "\n";
+
+  $uuid = 'eiv0u-arx5y-2c5ovx43zw90gvh';
+  $instance = $arv->{'pipeline_instances'}->{'get'}->execute('uuid' => $uuid);
+  print "ETag is ", $instance->{'etag'}, "\n";
+
+  $instance->{'active'} = 1;
+  $instance->{'name'} = '';
+  $instance->save();
+  print "ETag is ", $instance->{'etag'}, "\n";
+
+=head1 METHODS
+
+=head2 new()
+
+ my $arv = Arvados->new( %OPTIONS );
+
+Set up a client and retrieve the schema from the server.
+
+=head3 Options
+
+=over
+
+=item apiHost
+
+Hostname of API discovery service. Default: C<ARVADOS_API_HOST>
+environment variable, or C<arvados>
+
+=item apiProtocolScheme
+
+Protocol scheme. Default: C<ARVADOS_API_PROTOCOL_SCHEME> environment
+variable, or C<https>
+
+=item authToken
+
+Authorization token. Default: C<ARVADOS_API_TOKEN> environment variable
+
+=item apiService
+
+Default C<arvados>
+
+=item apiVersion
+
+Default C<v1>
+
+=back
+
+=cut
+
+package Arvados;
+
+use Net::SSL (); # From Crypt-SSLeay
+BEGIN {
+  $Net::HTTPS::SSL_SOCKET_CLASS = "Net::SSL"; # Force use of Net::SSL
+}
+
+use JSON;
+use Carp;
+use Arvados::ResourceAccessor;
+use Arvados::ResourceMethod;
+use Arvados::ResourceProxy;
+use Arvados::ResourceProxyList;
+use Arvados::Request;
+use Data::Dumper;
+
+$Arvados::VERSION = 0.1;
+
+sub new
+{
+    my $class = shift;
+    my %self = @_;
+    my $self = \%self;
+    bless ($self, $class);
+    return $self->build(@_);
+}
+
+sub build
+{
+    my $self = shift;
+
+    $config = load_config_file("$ENV{HOME}/.config/arvados/settings.conf");
+
+    $self->{'authToken'} ||=
+       $ENV{ARVADOS_API_TOKEN} || $config->{ARVADOS_API_TOKEN};
+
+    $self->{'apiHost'} ||=
+       $ENV{ARVADOS_API_HOST} || $config->{ARVADOS_API_HOST};
+
+    $self->{'noVerifyHostname'} ||=
+       $ENV{ARVADOS_API_HOST_INSECURE};
+
+    $self->{'apiProtocolScheme'} ||=
+       $ENV{ARVADOS_API_PROTOCOL_SCHEME} ||
+       $config->{ARVADOS_API_PROTOCOL_SCHEME};
+
+    $self->{'ua'} = new Arvados::Request;
+
+    my $host = $self->{'apiHost'} || 'arvados';
+    my $service = $self->{'apiService'} || 'arvados';
+    my $version = $self->{'apiVersion'} || 'v1';
+    my $scheme = $self->{'apiProtocolScheme'} || 'https';
+    my $uri = "$scheme://$host/discovery/v1/apis/$service/$version/rest";
+    my $r = $self->new_request;
+    $r->set_uri($uri);
+    $r->set_method("GET");
+    $r->process_request();
+    my $data, $headers;
+    my ($status_number, $status_phrase) = $r->get_status();
+    $data = $r->get_body() if $status_number == 200;
+    $headers = $r->get_headers();
+    if ($data) {
+        my $doc = $self->{'discoveryDocument'} = JSON::decode_json($data);
+        print STDERR Dumper $doc if $ENV{'DEBUG_ARVADOS_API_DISCOVERY'};
+        my $k, $v;
+        while (($k, $v) = each %{$doc->{'resources'}}) {
+            $self->{$k} = Arvados::ResourceAccessor->new($self, $k);
+        }
+    } else {
+        croak "No discovery doc at $uri - $status_number $status_phrase";
+    }
+    $self;
+}
+
+sub new_request
+{
+    my $self = shift;
+    local $ENV{'PERL_LWP_SSL_VERIFY_HOSTNAME'};
+    if ($self->{'noVerifyHostname'} || ($host =~ /\.local$/)) {
+        $ENV{'PERL_LWP_SSL_VERIFY_HOSTNAME'} = 0;
+    }
+    Arvados::Request->new();
+}
+
+sub load_config_file ($)
+{
+    my $config_file = shift;
+    my %config;
+
+    if (open (CONF, $config_file)) {
+       while (<CONF>) {
+           next if /^\s*#/ || /^\s*$/;  # skip comments and blank lines
+           chomp;
+           my ($key, $val) = split /\s*=\s*/, $_, 2;
+           $config{$key} = $val;
+       }
+    }
+    close CONF;
+    return \%config;
+}
+
+1;
diff --git a/sdk/perl/lib/Arvados/Request.pm b/sdk/perl/lib/Arvados/Request.pm
new file mode 100644 (file)
index 0000000..07ca763
--- /dev/null
@@ -0,0 +1,97 @@
+package Arvados::Request;
+use Data::Dumper;
+use LWP::UserAgent;
+use URI::Escape;
+use Encode;
+use strict;
+@Arvados::HTTP::ISA = qw(LWP::UserAgent);
+
+sub new
+{
+    my $class = shift;
+    my $self = {};
+    bless ($self, $class);
+    return $self->_init(@_);
+}
+
+sub _init
+{
+    my $self = shift;
+    $self->{'ua'} = new LWP::UserAgent(@_);
+    $self->{'ua'}->agent ("libarvados-perl/".$Arvados::VERSION);
+    $self;
+}
+
+sub set_uri
+{
+    my $self = shift;
+    $self->{'uri'} = shift;
+}
+
+sub process_request
+{
+    my $self = shift;
+    my %req;
+    my %content;
+    my $method = $self->{'method'};
+    if ($method eq 'GET' || $method eq 'HEAD') {
+        $content{'_method'} = $method;
+        $method = 'POST';
+    }
+    $req{$method} = $self->{'uri'};
+    $self->{'req'} = new HTTP::Request (%req);
+    $self->{'req'}->header('Authorization' => ('OAuth2 ' . $self->{'authToken'})) if $self->{'authToken'};
+    $self->{'req'}->header('Accept' => 'application/json');
+    my ($p, $v);
+    while (($p, $v) = each %{$self->{'queryParams'}}) {
+        $content{$p} = (ref($v) eq "") ? $v : JSON::encode_json($v);
+    }
+    my $content;
+    while (($p, $v) = each %content) {
+        $content .= '&' unless $content eq '';
+        $content .= uri_escape($p);
+        $content .= '=';
+        $content .= uri_escape($v);
+    }
+    $self->{'req'}->content_type("application/x-www-form-urlencoded; charset='utf8'");
+    $self->{'req'}->content(Encode::encode('utf8', $content));
+    $self->{'res'} = $self->{'ua'}->request ($self->{'req'});
+}
+
+sub get_status
+{
+    my $self = shift;
+    return ($self->{'res'}->code(),
+           $self->{'res'}->message());
+}
+
+sub get_body
+{
+    my $self = shift;
+    return $self->{'res'}->content;
+}
+
+sub set_method
+{
+    my $self = shift;
+    $self->{'method'} = shift;
+}
+
+sub set_query_params
+{
+    my $self = shift;
+    $self->{'queryParams'} = shift;
+}
+
+sub set_auth_token
+{
+    my $self = shift;
+    $self->{'authToken'} = shift;
+}
+
+sub get_headers
+{
+    ""
+}
+
+1;
diff --git a/sdk/perl/lib/Arvados/ResourceAccessor.pm b/sdk/perl/lib/Arvados/ResourceAccessor.pm
new file mode 100644 (file)
index 0000000..73600ca
--- /dev/null
@@ -0,0 +1,21 @@
+package Arvados::ResourceAccessor;
+use Carp;
+use Data::Dumper;
+
+sub new
+{
+    my $class = shift;
+    my $self = {};
+    bless ($self, $class);
+
+    $self->{'api'} = shift;
+    $self->{'resourcesName'} = shift;
+    $self->{'methods'} = $self->{'api'}->{'discoveryDocument'}->{'resources'}->{$self->{'resourcesName'}}->{'methods'};
+    my $method_name, $method;
+    while (($method_name, $method) = each %{$self->{'methods'}}) {
+        $self->{$method_name} = Arvados::ResourceMethod->new($self, $method);
+    }
+    $self;
+}
+
+1;
diff --git a/sdk/perl/lib/Arvados/ResourceMethod.pm b/sdk/perl/lib/Arvados/ResourceMethod.pm
new file mode 100644 (file)
index 0000000..649d779
--- /dev/null
@@ -0,0 +1,120 @@
+package Arvados::ResourceMethod;
+use Carp;
+use Data::Dumper;
+
+sub new
+{
+    my $class = shift;
+    my $self = {};
+    bless ($self, $class);
+    return $self->_init(@_);
+}
+
+sub _init
+{
+    my $self = shift;
+    $self->{'resourceAccessor'} = shift;
+    $self->{'method'} = shift;
+    return $self;
+}
+
+sub execute
+{
+    my $self = shift;
+    my $method = $self->{'method'};
+
+    my $path = $method->{'path'};
+
+    my %body_params;
+    my %given_params = @_;
+    my %extra_params = %given_params;
+    my %method_params = %{$method->{'parameters'}};
+    if ($method->{'request'}->{'properties'}) {
+        while (my ($prop_name, $prop_value) =
+               each %{$method->{'request'}->{'properties'}}) {
+            if (ref($prop_value) eq 'HASH' && $prop_value->{'$ref'}) {
+                $method_params{$prop_name} = { 'type' => 'object' };
+            }
+        }
+    }
+    while (my ($param_name, $param) = each %method_params) {
+        delete $extra_params{$param_name};
+        if ($param->{'required'} && !exists $given_params{$param_name}) {
+            croak("Required parameter not supplied: $param_name");
+        }
+        elsif ($param->{'location'} eq 'path') {
+            $path =~ s/{\Q$param_name\E}/$given_params{$param_name}/eg;
+        }
+        elsif (!exists $given_params{$param_name}) {
+            ;
+        }
+        elsif ($param->{'type'} eq 'object') {
+            my %param_value;
+            my ($p, $v);
+            if (exists $param->{'properties'}) {
+                while (my ($property_name, $property) =
+                       each %{$param->{'properties'}}) {
+                    # if the discovery doc specifies object structure,
+                    # convert to true/false depending on supplied type
+                    if (!exists $given_params{$param_name}->{$property_name}) {
+                        ;
+                    }
+                    elsif (!defined $given_params{$param_name}->{$property_name}) {
+                        $param_value{$property_name} = JSON::null;
+                    }
+                    elsif ($property->{'type'} eq 'boolean') {
+                        $param_value{$property_name} = $given_params{$param_name}->{$property_name} ? JSON::true : JSON::false;
+                    }
+                    else {
+                        $param_value{$property_name} = $given_params{$param_name}->{$property_name};
+                    }
+                }
+            }
+            else {
+                while (my ($property_name, $property) =
+                       each %{$given_params{$param_name}}) {
+                    if (ref $property eq '' || $property eq undef) {
+                        $param_value{$property_name} = $property;
+                    }
+                    elsif (ref $property eq 'HASH') {
+                        $param_value{$property_name} = {};
+                        while (my ($k, $v) = each %$property) {
+                            $param_value{$property_name}->{$k} = $v;
+                        }
+                    }
+                }
+            }
+            $body_params{$param_name} = \%param_value;
+        } elsif ($param->{'type'} eq 'boolean') {
+            $body_params{$param_name} = $given_params{$param_name} ? JSON::true : JSON::false;
+        } else {
+            $body_params{$param_name} = $given_params{$param_name};
+        }
+    }
+    if (%extra_params) {
+        croak("Unsupported parameter(s) passed to API call /$path: \"" . join('", "', keys %extra_params) . '"');
+    }
+    my $r = $self->{'resourceAccessor'}->{'api'}->new_request;
+    my $base_uri = $self->{'resourceAccessor'}->{'api'}->{'discoveryDocument'}->{'baseUrl'};
+    $base_uri =~ s:/$::;
+    $r->set_uri($base_uri . "/" . $path);
+    $r->set_method($method->{'httpMethod'});
+    $r->set_auth_token($self->{'resourceAccessor'}->{'api'}->{'authToken'});
+    $r->set_query_params(\%body_params) if %body_params;
+    $r->process_request();
+    my $data, $headers;
+    my ($status_number, $status_phrase) = $r->get_status();
+    if ($status_number != 200) {
+        croak("API call /$path failed: $status_number $status_phrase\n". $r->get_body());
+    }
+    $data = $r->get_body();
+    $headers = $r->get_headers();
+    my $result = JSON::decode_json($data);
+    if ($method->{'response'}->{'$ref'} =~ /List$/) {
+        Arvados::ResourceProxyList->new($result, $self->{'resourceAccessor'});
+    } else {
+        Arvados::ResourceProxy->new($result, $self->{'resourceAccessor'});
+    }
+}
+
+1;
diff --git a/sdk/perl/lib/Arvados/ResourceProxy.pm b/sdk/perl/lib/Arvados/ResourceProxy.pm
new file mode 100644 (file)
index 0000000..5127d0c
--- /dev/null
@@ -0,0 +1,57 @@
+package Arvados::ResourceProxy;
+
+sub new
+{
+    my $class = shift;
+    my $self = shift;
+    $self->{'resourceAccessor'} = shift;
+    bless ($self, $class);
+    $self;
+}
+
+sub save
+{
+    my $self = shift;
+    $response = $self->{'resourceAccessor'}->{'update'}->execute('uuid' => $self->{'uuid'}, $self->resource_parameter_name() => $self);
+    foreach my $param (keys %$self) {
+        if (exists $response->{$param}) {
+            $self->{$param} = $response->{$param};
+        }
+    }
+    $self;
+}
+
+sub update_attributes
+{
+    my $self = shift;
+    my %updates = @_;
+    $response = $self->{'resourceAccessor'}->{'update'}->execute('uuid' => $self->{'uuid'}, $self->resource_parameter_name() => \%updates);
+    foreach my $param (keys %updates) {
+        if (exists $response->{$param}) {
+            $self->{$param} = $response->{$param};
+        }
+    }
+    $self;
+}
+
+sub reload
+{
+    my $self = shift;
+    $response = $self->{'resourceAccessor'}->{'get'}->execute('uuid' => $self->{'uuid'});
+    foreach my $param (keys %$self) {
+        if (exists $response->{$param}) {
+            $self->{$param} = $response->{$param};
+        }
+    }
+    $self;
+}
+
+sub resource_parameter_name
+{
+    my $self = shift;
+    my $pname = $self->{'resourceAccessor'}->{'resourcesName'};
+    $pname =~ s/s$//;           # XXX not a very good singularize()
+    $pname;
+}
+
+1;
diff --git a/sdk/perl/lib/Arvados/ResourceProxyList.pm b/sdk/perl/lib/Arvados/ResourceProxyList.pm
new file mode 100644 (file)
index 0000000..6bba208
--- /dev/null
@@ -0,0 +1,20 @@
+package Arvados::ResourceProxyList;
+
+sub new
+{
+    my $class = shift;
+    my $self = {};
+    bless ($self, $class);
+    $self->_init(@_);
+}
+
+sub _init
+{
+    my $self = shift;
+    $self->{'serverResponse'} = shift;
+    $self->{'resourceAccessor'} = shift;
+    $self->{'items'} = [ map { Arvados::ResourceProxy->new($_, $self->{'resourceAccessor'}) } @{$self->{'serverResponse'}->{'items'}} ];
+    $self;
+}
+
+1;
diff --git a/sdk/python/.gitignore b/sdk/python/.gitignore
new file mode 100644 (file)
index 0000000..ab21552
--- /dev/null
@@ -0,0 +1,7 @@
+*.pyc
+/build/
+/dist/
+*.egg
+*.egg-info
+/tests/tmp
+.eggs
diff --git a/sdk/python/MANIFEST.in b/sdk/python/MANIFEST.in
new file mode 100644 (file)
index 0000000..9561fb1
--- /dev/null
@@ -0,0 +1 @@
+include README.rst
diff --git a/sdk/python/README.rst b/sdk/python/README.rst
new file mode 100644 (file)
index 0000000..10e01a4
--- /dev/null
@@ -0,0 +1,63 @@
+=====================
+Arvados Python Client
+=====================
+
+Overview
+--------
+
+This package provides the ``arvados`` module, an API client for
+Arvados_.  It also includes higher-level functions to help you write
+Crunch scripts, and command-line tools to store and retrieve data in
+the Keep storage server.
+
+.. _Arvados: https://arvados.org/
+
+Installation
+------------
+
+Installing under your user account
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This method lets you install the package without root access.
+However, other users on the same system won't be able to use it.
+
+1. Run ``pip install --user arvados-python-client``.
+
+2. In your shell configuration, make sure you add ``$HOME/.local/bin``
+   to your PATH environment variable.  For example, you could add the
+   command ``PATH=$PATH:$HOME/.local/bin`` to your ``.bashrc`` file.
+
+3. Reload your shell configuration.  For example, bash users could run
+   ``source ~/.bashrc``.
+
+Installing on Debian systems
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+1. Add this Arvados repository to your sources list::
+
+     deb http://apt.arvados.org/ wheezy main
+
+2. Update your package list.
+
+3. Install the ``python-arvados-python-client`` package.
+
+Configuration
+-------------
+
+This client software needs two pieces of information to connect to
+Arvados: the DNS name of the API server, and an API authorization
+token.  You can set these in environment variables, or the file
+``$HOME/.config/arvados/settings.conf``.  `The Arvados user
+documentation
+<http://doc.arvados.org/user/reference/api-tokens.html>`_ describes
+how to find this information in the Arvados Workbench, and install it
+on your system.
+
+Testing and Development
+-----------------------
+
+This package is one part of the Arvados source package, and it has
+integration tests to check interoperability with other Arvados
+components.  Our `hacking guide
+<https://arvados.org/projects/arvados/wiki/Hacking_Python_SDK>`_
+describes how to set up a development environment and run tests.
diff --git a/sdk/python/arvados/__init__.py b/sdk/python/arvados/__init__.py
new file mode 100644 (file)
index 0000000..4cae20d
--- /dev/null
@@ -0,0 +1,135 @@
+import gflags
+import httplib
+import httplib2
+import logging
+import os
+import pprint
+import sys
+import types
+import subprocess
+import json
+import UserDict
+import re
+import hashlib
+import string
+import bz2
+import zlib
+import fcntl
+import time
+import threading
+
+from api import *
+from collection import *
+from keep import *
+from stream import *
+import errors
+import util
+
+# Set up Arvados logging based on the user's configuration.
+# All Arvados code should log under the arvados hierarchy.
+log_handler = logging.StreamHandler()
+log_handler.setFormatter(logging.Formatter(
+        '%(asctime)s %(name)s[%(process)d] %(levelname)s: %(message)s',
+        '%Y-%m-%d %H:%M:%S'))
+logger = logging.getLogger('arvados')
+logger.addHandler(log_handler)
+logger.setLevel(logging.DEBUG if config.get('ARVADOS_DEBUG')
+                else logging.WARNING)
+
+def task_set_output(self,s):
+    api('v1').job_tasks().update(uuid=self['uuid'],
+                                 body={
+            'output':s,
+            'success':True,
+            'progress':1.0
+            }).execute()
+
+_current_task = None
+def current_task():
+    global _current_task
+    if _current_task:
+        return _current_task
+    t = api('v1').job_tasks().get(uuid=os.environ['TASK_UUID']).execute()
+    t = UserDict.UserDict(t)
+    t.set_output = types.MethodType(task_set_output, t)
+    t.tmpdir = os.environ['TASK_WORK']
+    _current_task = t
+    return t
+
+_current_job = None
+def current_job():
+    global _current_job
+    if _current_job:
+        return _current_job
+    t = api('v1').jobs().get(uuid=os.environ['JOB_UUID']).execute()
+    t = UserDict.UserDict(t)
+    t.tmpdir = os.environ['JOB_WORK']
+    _current_job = t
+    return t
+
+def getjobparam(*args):
+    return current_job()['script_parameters'].get(*args)
+
+def get_job_param_mount(*args):
+    return os.path.join(os.environ['TASK_KEEPMOUNT'], current_job()['script_parameters'].get(*args))
+
+def get_task_param_mount(*args):
+    return os.path.join(os.environ['TASK_KEEPMOUNT'], current_task()['parameters'].get(*args))
+
+class JobTask(object):
+    def __init__(self, parameters=dict(), runtime_constraints=dict()):
+        print "init jobtask %s %s" % (parameters, runtime_constraints)
+
+class job_setup:
+    @staticmethod
+    def one_task_per_input_file(if_sequence=0, and_end_task=True, input_as_path=False):
+        if if_sequence != current_task()['sequence']:
+            return
+        job_input = current_job()['script_parameters']['input']
+        cr = CollectionReader(job_input)
+        cr.normalize()
+        for s in cr.all_streams():
+            for f in s.all_files():
+                if input_as_path:
+                    task_input = os.path.join(job_input, s.name(), f.name())
+                else:
+                    task_input = f.as_manifest()
+                new_task_attrs = {
+                    'job_uuid': current_job()['uuid'],
+                    'created_by_job_task_uuid': current_task()['uuid'],
+                    'sequence': if_sequence + 1,
+                    'parameters': {
+                        'input':task_input
+                        }
+                    }
+                api('v1').job_tasks().create(body=new_task_attrs).execute()
+        if and_end_task:
+            api('v1').job_tasks().update(uuid=current_task()['uuid'],
+                                       body={'success':True}
+                                       ).execute()
+            exit(0)
+
+    @staticmethod
+    def one_task_per_input_stream(if_sequence=0, and_end_task=True):
+        if if_sequence != current_task()['sequence']:
+            return
+        job_input = current_job()['script_parameters']['input']
+        cr = CollectionReader(job_input)
+        for s in cr.all_streams():
+            task_input = s.tokens()
+            new_task_attrs = {
+                'job_uuid': current_job()['uuid'],
+                'created_by_job_task_uuid': current_task()['uuid'],
+                'sequence': if_sequence + 1,
+                'parameters': {
+                    'input':task_input
+                    }
+                }
+            api('v1').job_tasks().create(body=new_task_attrs).execute()
+        if and_end_task:
+            api('v1').job_tasks().update(uuid=current_task()['uuid'],
+                                       body={'success':True}
+                                       ).execute()
+            exit(0)
+
+
diff --git a/sdk/python/arvados/api.py b/sdk/python/arvados/api.py
new file mode 100644 (file)
index 0000000..c618fc3
--- /dev/null
@@ -0,0 +1,161 @@
+import httplib2
+import json
+import logging
+import os
+import re
+import types
+
+import apiclient
+from apiclient import discovery as apiclient_discovery
+from apiclient import errors as apiclient_errors
+import config
+import errors
+import util
+
+_logger = logging.getLogger('arvados.api')
+conncache = {}
+
class CredentialsFromToken(object):
    """Minimal credentials object that authorizes an httplib2.Http
    instance to send a fixed Arvados API token with every request."""

    def __init__(self, api_token):
        self.api_token = api_token

    # Despite the `self` parameter this is a @staticmethod: authorize()
    # binds it to the *Http* object via types.MethodType, so inside this
    # function `self` is the httplib2.Http instance (which authorize()
    # has given arvados_api_token and orig_http_request attributes).
    @staticmethod
    def http_request(self, uri, **kwargs):
        from httplib import BadStatusLine
        if 'headers' not in kwargs:
            kwargs['headers'] = {}

        # Tell the API server this client is outside the cluster's
        # private network, when so configured.
        if config.get("ARVADOS_EXTERNAL_CLIENT", "") == "true":
            kwargs['headers']['X-External-Client'] = '1'

        kwargs['headers']['Authorization'] = 'OAuth2 %s' % self.arvados_api_token
        try:
            return self.orig_http_request(uri, **kwargs)
        except BadStatusLine:
            # This is how httplib tells us that it tried to reuse an
            # existing connection but it was already closed by the
            # server. In that case, yes, we would like to retry.
            # Unfortunately, we are not absolutely certain that the
            # previous call did not succeed, so this is slightly
            # risky.
            return self.orig_http_request(uri, **kwargs)

    def authorize(self, http):
        """Patch `http` in place so every request carries this token;
        returns the same http object."""
        http.arvados_api_token = self.api_token
        http.orig_http_request = http.request
        http.request = types.MethodType(self.http_request, http)
        return http
+
# Monkey patch discovery._cast() so objects and arrays get serialized
# with json.dumps() instead of str().
_cast_orig = apiclient_discovery._cast

def _cast_objects_too(value, schema_type):
    """Like discovery._cast(), but JSON-encode non-string values whose
    schema type is 'object' or 'array' instead of str()-ing them."""
    if schema_type in ('object', 'array') and type(value) is not str:
        return json.dumps(value)
    return _cast_orig(value, schema_type)

apiclient_discovery._cast = _cast_objects_too
+
# Convert apiclient's HttpErrors into our own API error subclass for better
# error reporting.
# Reassigning apiclient_errors.HttpError is not sufficient because most of the
# apiclient submodules import the class into their own namespace.
def _new_http_error(cls, *args, **kwargs):
    # Allocate an errors.ApiError instead of whatever HttpError subclass
    # was requested.  NOTE(review): assumes errors.ApiError subclasses
    # apiclient_errors.HttpError so existing except clauses still match
    # -- confirm in errors.py.
    return super(apiclient_errors.HttpError, cls).__new__(
        errors.ApiError, *args, **kwargs)
apiclient_errors.HttpError.__new__ = staticmethod(_new_http_error)
+
def http_cache(data_type):
    """Return a per-user cache directory for `data_type`, or None.

    Creates ~/.cache/arvados/<data_type> if needed.  Returns None when
    no usable cache location exists (HOME unset, or the directory
    cannot be created); callers should then simply run without a cache.
    """
    # Fix: the original indexed os.environ['HOME'] directly, raising an
    # uncaught KeyError when HOME is unset (e.g. daemon environments).
    home = os.environ.get('HOME')
    if home is None:
        return None
    path = os.path.join(home, '.cache', 'arvados', data_type)
    try:
        util.mkdir_dash_p(path)
    except OSError:
        path = None
    return path
+
def api(version=None, cache=True, host=None, token=None, insecure=False, **kwargs):
    """Return an apiclient Resources object for an Arvados instance.

    Arguments:
    * version: A string naming the version of the Arvados API to use (for
      example, 'v1').
    * cache: If True (default), return an existing Resources object if
      one already exists with the same endpoint and credentials. If
      False, create a new one, and do not keep it in the cache (i.e.,
      do not return it from subsequent api(cache=True) calls with
      matching endpoint and credentials).
    * host: The Arvados API server host (and optional :port) to connect to.
    * token: The authentication token to send with each API call.
    * insecure: If True, ignore SSL certificate validation errors.

    Additional keyword arguments will be passed directly to
    `apiclient_discovery.build` if a new Resource object is created.
    If the `discoveryServiceUrl` or `http` keyword arguments are
    missing, this function will set default values for them, based on
    the current Arvados configuration settings.

    """

    if not version:
        version = 'v1'
        _logger.info("Using default API version. " +
                     "Call arvados.api('%s') instead." %
                     version)
    # Work out where to connect and how to authenticate.  Exactly one
    # of the following cases applies:
    if 'discoveryServiceUrl' in kwargs:
        if host:
            raise ValueError("both discoveryServiceUrl and host provided")
        # Here we can't use a token from environment, config file,
        # etc. Those probably have nothing to do with the host
        # provided by the caller.
        if not token:
            raise ValueError("discoveryServiceUrl provided, but token missing")
    elif host and token:
        pass
    elif not host and not token:
        # Load from user configuration or environment
        for x in ['ARVADOS_API_HOST', 'ARVADOS_API_TOKEN']:
            if x not in config.settings():
                raise ValueError("%s is not set. Aborting." % x)
        host = config.get('ARVADOS_API_HOST')
        token = config.get('ARVADOS_API_TOKEN')
        insecure = config.flag_is_true('ARVADOS_API_HOST_INSECURE')
    else:
        # Caller provided one but not the other
        if not host:
            raise ValueError("token argument provided, but host missing.")
        else:
            raise ValueError("host argument provided, but token missing.")

    if host:
        # Caller wants us to build the discoveryServiceUrl
        kwargs['discoveryServiceUrl'] = (
            'https://%s/discovery/v1/apis/{api}/{apiVersion}/rest' % (host,))

    if cache:
        # Reuse an existing client built for the same endpoint and
        # credentials, if we have one.
        connprofile = (version, host, token, insecure)
        svc = conncache.get(connprofile)
        if svc:
            return svc

    if 'http' not in kwargs:
        http_kwargs = {}
        # Prefer system's CA certificates (if available) over httplib2's.
        certs_path = '/etc/ssl/certs/ca-certificates.crt'
        if os.path.exists(certs_path):
            http_kwargs['ca_certs'] = certs_path
        if cache:
            # Cache the discovery document on disk between runs.
            http_kwargs['cache'] = http_cache('discovery')
        if insecure:
            http_kwargs['disable_ssl_certificate_validation'] = True
        kwargs['http'] = httplib2.Http(**http_kwargs)

    credentials = CredentialsFromToken(api_token=token)
    kwargs['http'] = credentials.authorize(kwargs['http'])

    svc = apiclient_discovery.build('arvados', version, **kwargs)
    svc.api_token = token
    # The HTTP cache was only needed to fetch the discovery document;
    # disable it so ordinary API responses are not cached.
    kwargs['http'].cache = None
    if cache:
        conncache[connprofile] = svc
    return svc
diff --git a/sdk/python/arvados/arvfile.py b/sdk/python/arvados/arvfile.py
new file mode 100644 (file)
index 0000000..e8dac46
--- /dev/null
@@ -0,0 +1,29 @@
+import functools
+
class ArvadosFileBase(object):
    """Common plumbing for Arvados file-like objects.

    Tracks a name, a mode string, and an open/closed flag, and supports
    use as a context manager.  Subclasses guard I/O methods with the
    _before_close decorator.
    """

    def __init__(self, name, mode):
        self.name = name
        self.mode = mode
        self.closed = False

    @staticmethod
    def _before_close(orig_func):
        """Decorator: raise ValueError if the file has been closed."""
        @functools.wraps(orig_func)
        def wrapper(self, *args, **kwargs):
            if not self.closed:
                return orig_func(self, *args, **kwargs)
            raise ValueError("I/O operation on closed stream file")
        return wrapper

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Close on exit.  If close() itself fails while another
        # exception is already propagating, let the original exception
        # win; otherwise surface the close() failure.
        try:
            self.close()
        except Exception:
            if exc_type is None:
                raise

    def close(self):
        self.closed = True
diff --git a/sdk/python/arvados/collection.py b/sdk/python/arvados/collection.py
new file mode 100644 (file)
index 0000000..d530f58
--- /dev/null
@@ -0,0 +1,680 @@
+import functools
+import logging
+import os
+import re
+
+from collections import deque
+from stat import *
+
+from .arvfile import ArvadosFileBase
+from keep import *
+from .stream import StreamReader, split
+import config
+import errors
+import util
+
+_logger = logging.getLogger('arvados.collection')
+
def normalize_stream(s, stream):
    """Build a normalized manifest-stream token list.

    `s` is the stream name; `stream` maps each file name to its list of
    segments (tuples indexed by the arvados.LOCATOR / BLOCKSIZE /
    OFFSET / SEGMENTSIZE constants).  Returns the stream as manifest
    tokens: the stream name, the de-duplicated block locators, then one
    start:length:filename token per contiguous file extent.

    NOTE(review): relies on the `arvados` name being resolvable in this
    module (not among its visible imports) -- confirm.
    """
    stream_tokens = [s]
    sortedfiles = list(stream.keys())
    sortedfiles.sort()

    # First pass: emit each distinct block locator once, remembering the
    # offset at which it lands in the concatenated stream.
    blocks = {}
    streamoffset = 0L
    for f in sortedfiles:
        for b in stream[f]:
            if b[arvados.LOCATOR] not in blocks:
                stream_tokens.append(b[arvados.LOCATOR])
                blocks[b[arvados.LOCATOR]] = streamoffset
                streamoffset += b[arvados.BLOCKSIZE]

    # A stream must carry at least one locator; use the canonical
    # empty-block locator if there were no data blocks at all.
    if len(stream_tokens) == 1:
        stream_tokens.append(config.EMPTY_BLOCK_LOCATOR)

    # Second pass: describe each file as offset:length:name extents,
    # merging adjacent segments into a single span where possible.
    for f in sortedfiles:
        current_span = None
        fout = f.replace(' ', '\\040')  # manifest escape for spaces
        for segment in stream[f]:
            segmentoffset = blocks[segment[arvados.LOCATOR]] + segment[arvados.OFFSET]
            if current_span is None:
                current_span = [segmentoffset, segmentoffset + segment[arvados.SEGMENTSIZE]]
            else:
                if segmentoffset == current_span[1]:
                    # Contiguous with the previous segment: extend it.
                    current_span[1] += segment[arvados.SEGMENTSIZE]
                else:
                    stream_tokens.append("{0}:{1}:{2}".format(current_span[0], current_span[1] - current_span[0], fout))
                    current_span = [segmentoffset, segmentoffset + segment[arvados.SEGMENTSIZE]]

        if current_span is not None:
            stream_tokens.append("{0}:{1}:{2}".format(current_span[0], current_span[1] - current_span[0], fout))

        # Zero-length files still need a token so they appear at all.
        if not stream[f]:
            stream_tokens.append("0:0:{0}".format(fout))

    return stream_tokens
+
+
class CollectionBase(object):
    """Behavior shared by CollectionReader and CollectionWriter:
    context-manager support, lazy Keep client construction, and
    manifest hint stripping."""

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        pass

    def _my_keep(self):
        """Return this collection's KeepClient, building it on first use."""
        if self._keep_client is None:
            self._keep_client = KeepClient(api_client=self._api_client,
                                           num_retries=self.num_retries)
        return self._keep_client

    def stripped_manifest(self):
        """
        Return the manifest for the current collection with all
        non-portable hints (i.e., permission signatures and other
        hints other than size hints) removed from the locators.
        """
        stripped_lines = []
        for line in self.manifest_text().split("\n"):
            tokens = line.split()
            if not tokens:
                continue
            cleaned = tokens[:1]
            for tok in tokens[1:]:
                if re.match(util.keep_locator_pattern, tok):
                    # Drop every +hint except numeric (size) hints.
                    tok = re.sub(r'\+[^\d][^\+]*', '', tok)
                cleaned.append(tok)
            stripped_lines.append(' '.join(cleaned))
            stripped_lines.append("\n")
        return ''.join(stripped_lines)
+
+
class CollectionReader(CollectionBase):
    def __init__(self, manifest_locator_or_text, api_client=None,
                 keep_client=None, num_retries=0):
        """Instantiate a CollectionReader.

        This class parses Collection manifests to provide a simple interface
        to read its underlying files.

        Arguments:
        * manifest_locator_or_text: One of a Collection UUID, portable data
          hash, or full manifest text.
        * api_client: The API client to use to look up Collections.  If not
          provided, CollectionReader will build one from available Arvados
          configuration.
        * keep_client: The KeepClient to use to download Collection data.
          If not provided, CollectionReader will build one from available
          Arvados configuration.
        * num_retries: The default number of times to retry failed
          service requests.  Default 0.  You may change this value
          after instantiation, but note those changes may not
          propagate to related objects like the Keep client.
        """
        self._api_client = api_client
        self._keep_client = keep_client
        self.num_retries = num_retries
        # Classify the argument: a locator (Keep hash or collection
        # UUID) is resolved lazily by _populate(); raw manifest text is
        # stored and usable immediately.
        if re.match(util.keep_locator_pattern, manifest_locator_or_text):
            self._manifest_locator = manifest_locator_or_text
            self._manifest_text = None
        elif re.match(util.collection_uuid_pattern, manifest_locator_or_text):
            self._manifest_locator = manifest_locator_or_text
            self._manifest_text = None
        elif re.match(util.manifest_pattern, manifest_locator_or_text):
            self._manifest_text = manifest_locator_or_text
            self._manifest_locator = None
        else:
            raise errors.ArgumentError(
                "Argument to CollectionReader must be a manifest or a collection UUID")
        self._api_response = None
        self._streams = None

    def _populate_from_api_server(self):
        # As in KeepClient itself, we must wait until the last
        # possible moment to instantiate an API client, in order to
        # avoid tripping up clients that don't have access to an API
        # server.  If we do build one, make sure our Keep client uses
        # it.  If instantiation fails, we'll fall back to the except
        # clause, just like any other Collection lookup
        # failure. Return an exception, or None if successful.
        try:
            if self._api_client is None:
                self._api_client = arvados.api('v1')
                self._keep_client = None  # Make a new one with the new api.
            self._api_response = self._api_client.collections().get(
                uuid=self._manifest_locator).execute(
                num_retries=self.num_retries)
            self._manifest_text = self._api_response['manifest_text']
            return None
        except Exception as e:
            return e

    def _populate_from_keep(self):
        # Retrieve a manifest directly from Keep. This has a chance of
        # working if [a] the locator includes a permission signature
        # or [b] the Keep services are operating in world-readable
        # mode. Return an exception, or None if successful.
        try:
            self._manifest_text = self._my_keep().get(
                self._manifest_locator, num_retries=self.num_retries)
        except Exception as e:
            return e

    def _populate(self):
        # Fetch the manifest text via Keep and/or the API server,
        # depending on what kind of locator we hold, then split it into
        # per-stream token lists.
        error_via_api = None
        error_via_keep = None
        should_try_keep = ((self._manifest_text is None) and
                           util.keep_locator_pattern.match(
                self._manifest_locator))
        if ((self._manifest_text is None) and
            util.signed_locator_pattern.match(self._manifest_locator)):
            # A signed locator can usually be fetched from Keep directly.
            error_via_keep = self._populate_from_keep()
        if self._manifest_text is None:
            error_via_api = self._populate_from_api_server()
            if error_via_api is not None and not should_try_keep:
                raise error_via_api
        if ((self._manifest_text is None) and
            not error_via_keep and
            should_try_keep):
            # Looks like a keep locator, and we didn't already try keep above
            error_via_keep = self._populate_from_keep()
        if self._manifest_text is None:
            # Nothing worked!
            raise arvados.errors.NotFoundError(
                ("Failed to retrieve collection '{}' " +
                 "from either API server ({}) or Keep ({})."
                 ).format(
                    self._manifest_locator,
                    error_via_api,
                    error_via_keep))
        self._streams = [sline.split()
                         for sline in self._manifest_text.split("\n")
                         if sline]

    def _populate_first(orig_func):
        # Decorator for methods that read actual Collection data.
        # (Used as a plain function at class-definition time, so it is
        # deliberately not a @staticmethod.)
        @functools.wraps(orig_func)
        def wrapper(self, *args, **kwargs):
            if self._streams is None:
                self._populate()
            return orig_func(self, *args, **kwargs)
        return wrapper

    @_populate_first
    def api_response(self):
        """api_response() -> dict or None

        Returns information about this Collection fetched from the API server.
        If the Collection exists in Keep but not the API server, currently
        returns None.  Future versions may provide a synthetic response.
        """
        return self._api_response

    @_populate_first
    def normalize(self):
        """Rewrite self._streams and the manifest text in normalized form:
        one entry per stream, sorted, with merged file segments."""
        # Rearrange streams
        streams = {}
        for s in self.all_streams():
            for f in s.all_files():
                streamname, filename = split(s.name() + "/" + f.name())
                if streamname not in streams:
                    streams[streamname] = {}
                if filename not in streams[streamname]:
                    streams[streamname][filename] = []
                for r in f.segments:
                    streams[streamname][filename].extend(s.locators_and_ranges(r[0], r[1]))

        self._streams = [normalize_stream(s, streams[s])
                         for s in sorted(streams)]

        # Regenerate the manifest text based on the normalized streams
        self._manifest_text = ''.join(
            [StreamReader(stream, keep=self._my_keep()).manifest_text()
             for stream in self._streams])

    @_populate_first
    def open(self, streampath, filename=None):
        """open(streampath[, filename]) -> file-like object

        Pass in the path of a file to read from the Collection, either as a
        single string or as two separate stream name and file name arguments.
        This method returns a file-like object to read that file.
        """
        if filename is None:
            streampath, filename = split(streampath)
        keep_client = self._my_keep()
        for stream_s in self._streams:
            stream = StreamReader(stream_s, keep_client,
                                  num_retries=self.num_retries)
            if stream.name() == streampath:
                break
        else:
            # No stream matched (for-else: loop finished without break).
            raise ValueError("stream '{}' not found in Collection".
                             format(streampath))
        try:
            return stream.files()[filename]
        except KeyError:
            raise ValueError("file '{}' not found in Collection stream '{}'".
                             format(filename, streampath))

    @_populate_first
    def all_streams(self):
        """Return a StreamReader for each stream in the manifest."""
        return [StreamReader(s, self._my_keep(), num_retries=self.num_retries)
                for s in self._streams]

    def all_files(self):
        """Generate every file object across all streams."""
        for s in self.all_streams():
            for f in s.all_files():
                yield f

    @_populate_first
    def manifest_text(self, strip=False, normalize=False):
        """Return the manifest text, optionally normalized and/or with
        non-portable hints stripped from the locators."""
        if normalize:
            cr = CollectionReader(self.manifest_text())
            cr.normalize()
            return cr.manifest_text(strip=strip, normalize=False)
        elif strip:
            return self.stripped_manifest()
        else:
            return self._manifest_text
+
+
class _WriterFile(ArvadosFileBase):
    """Write-only file handle returned by CollectionWriter.open();
    forwards all data to the owning CollectionWriter."""

    def __init__(self, coll_writer, name):
        super(_WriterFile, self).__init__(name, 'wb')
        self.dest = coll_writer

    def close(self):
        super(_WriterFile, self).close()
        # Seal the file in the writer so a new one can be opened.
        self.dest.finish_current_file()

    @ArvadosFileBase._before_close
    def write(self, data):
        self.dest.write(data)

    @ArvadosFileBase._before_close
    def writelines(self, seq):
        for chunk in seq:
            self.write(chunk)

    @ArvadosFileBase._before_close
    def flush(self):
        self.dest.flush_data()
+
+
class CollectionWriter(CollectionBase):
    # Data is uploaded to Keep in blocks of at most this many bytes (64 MiB).
    KEEP_BLOCK_SIZE = 2**26

    def __init__(self, api_client=None, num_retries=0):
        """Instantiate a CollectionWriter.

        CollectionWriter lets you build a new Arvados Collection from scratch.
        Write files to it.  The CollectionWriter will upload data to Keep as
        appropriate, and provide you with the Collection manifest text when
        you're finished.

        Arguments:
        * api_client: The API client to use to look up Collections.  If not
          provided, CollectionReader will build one from available Arvados
          configuration.
        * num_retries: The default number of times to retry failed
          service requests.  Default 0.  You may change this value
          after instantiation, but note those changes may not
          propagate to related objects like the Keep client.
        """
        self._api_client = api_client
        self.num_retries = num_retries
        self._keep_client = None
        # Data buffered but not yet uploaded to Keep, and its length.
        self._data_buffer = []
        self._data_buffer_len = 0
        # State of the stream/file currently being written.
        self._current_stream_files = []
        self._current_stream_length = 0
        self._current_stream_locators = []
        self._current_stream_name = '.'
        self._current_file_name = None
        self._current_file_pos = 0
        self._finished_streams = []
        # Work queue state (see do_queued_work).
        self._close_file = None
        self._queued_file = None
        self._queued_dirents = deque()
        self._queued_trees = deque()
        self._last_open = None

    def __exit__(self, exc_type, exc_value, traceback):
        # Only finalize (upload the manifest) when the with-block
        # completed without an exception.
        if exc_type is None:
            self.finish()

    def do_queued_work(self):
        # The work queue consists of three pieces:
        # * _queued_file: The file object we're currently writing to the
        #   Collection.
        # * _queued_dirents: Entries under the current directory
        #   (_queued_trees[0]) that we want to write or recurse through.
        #   This may contain files from subdirectories if
        #   max_manifest_depth == 0 for this directory.
        # * _queued_trees: Directories that should be written as separate
        #   streams to the Collection.
        # This function handles the smallest piece of work currently queued
        # (current file, then current directory, then next directory) until
        # no work remains.  The _work_THING methods each do a unit of work on
        # THING.  _queue_THING methods add a THING to the work queue.
        while True:
            if self._queued_file:
                self._work_file()
            elif self._queued_dirents:
                self._work_dirents()
            elif self._queued_trees:
                self._work_trees()
            else:
                break

    def _work_file(self):
        # Copy the queued file into the collection one Keep block at a
        # time, then dequeue it (closing it if we opened it ourselves).
        while True:
            buf = self._queued_file.read(self.KEEP_BLOCK_SIZE)
            if not buf:
                break
            self.write(buf)
        self.finish_current_file()
        if self._close_file:
            self._queued_file.close()
        self._close_file = None
        self._queued_file = None

    def _work_dirents(self):
        # Process entries of the current tree: subdirectories become
        # new queued trees; queue at most one file then return, so
        # do_queued_work can service it before continuing.
        path, stream_name, max_manifest_depth = self._queued_trees[0]
        if stream_name != self.current_stream_name():
            self.start_new_stream(stream_name)
        while self._queued_dirents:
            dirent = self._queued_dirents.popleft()
            target = os.path.join(path, dirent)
            if os.path.isdir(target):
                self._queue_tree(target,
                                 os.path.join(stream_name, dirent),
                                 max_manifest_depth - 1)
            else:
                self._queue_file(target, dirent)
                break
        if not self._queued_dirents:
            self._queued_trees.popleft()

    def _work_trees(self):
        # List the next queued directory and queue its entries.
        path, stream_name, max_manifest_depth = self._queued_trees[0]
        d = util.listdir_recursive(
            path, max_depth = (None if max_manifest_depth == 0 else 0))
        if d:
            self._queue_dirents(stream_name, d)
        else:
            self._queued_trees.popleft()

    def _queue_file(self, source, filename=None):
        assert (self._queued_file is None), "tried to queue more than one file"
        if not hasattr(source, 'read'):
            # `source` is a path: open it ourselves, and remember to
            # close it when finished.
            source = open(source, 'rb')
            self._close_file = True
        else:
            self._close_file = False
        if filename is None:
            filename = os.path.basename(source.name)
        self.start_new_file(filename)
        self._queued_file = source

    def _queue_dirents(self, stream_name, dirents):
        assert (not self._queued_dirents), "tried to queue more than one tree"
        self._queued_dirents = deque(sorted(dirents))

    def _queue_tree(self, path, stream_name, max_manifest_depth):
        self._queued_trees.append((path, stream_name, max_manifest_depth))

    def write_file(self, source, filename=None):
        """Write one file (a path or a file-like object) to the collection."""
        self._queue_file(source, filename)
        self.do_queued_work()

    def write_directory_tree(self,
                             path, stream_name='.', max_manifest_depth=-1):
        """Recursively write a directory tree into the collection,
        one manifest stream per directory (subject to max_manifest_depth)."""
        self._queue_tree(path, stream_name, max_manifest_depth)
        self.do_queued_work()

    def write(self, newdata):
        """Append data to the current file, uploading full Keep blocks."""
        if hasattr(newdata, '__iter__'):
            # Any iterable of strings is written piecewise.  (Python 2
            # str has no __iter__, so plain strings take the buffer
            # path below rather than recursing.)
            for s in newdata:
                self.write(s)
            return
        self._data_buffer.append(newdata)
        self._data_buffer_len += len(newdata)
        self._current_stream_length += len(newdata)
        while self._data_buffer_len >= self.KEEP_BLOCK_SIZE:
            self.flush_data()

    def open(self, streampath, filename=None):
        """open(streampath[, filename]) -> file-like object

        Pass in the path of a file to write to the Collection, either as a
        single string or as two separate stream name and file name arguments.
        This method returns a file-like object you can write to add it to the
        Collection.

        You may only have one file object from the Collection open at a time,
        so be sure to close the object when you're done.  Using the object in
        a with statement makes that easy::

          with cwriter.open('./doc/page1.txt') as outfile:
              outfile.write(page1_data)
          with cwriter.open('./doc/page2.txt') as outfile:
              outfile.write(page2_data)
        """
        if filename is None:
            streampath, filename = split(streampath)
        if self._last_open and not self._last_open.closed:
            raise errors.AssertionError(
                "can't open '{}' when '{}' is still open".format(
                    filename, self._last_open.name))
        if streampath != self.current_stream_name():
            self.start_new_stream(streampath)
        self.set_current_file_name(filename)
        self._last_open = _WriterFile(self, filename)
        return self._last_open

    def flush_data(self):
        """Upload one Keep block's worth of buffered data (if any)."""
        data_buffer = ''.join(self._data_buffer)
        if data_buffer:
            self._current_stream_locators.append(
                self._my_keep().put(data_buffer[0:self.KEEP_BLOCK_SIZE]))
            # Keep any overflow beyond one block in the buffer.
            self._data_buffer = [data_buffer[self.KEEP_BLOCK_SIZE:]]
            self._data_buffer_len = len(self._data_buffer[0])

    def start_new_file(self, newfilename=None):
        """Finish the current file and begin a new (possibly unnamed) one."""
        self.finish_current_file()
        self.set_current_file_name(newfilename)

    def set_current_file_name(self, newfilename):
        # Manifest tokens are whitespace-delimited, and NULs would
        # corrupt the manifest, so reject both outright.
        if re.search(r'[\t\n]', newfilename):
            raise errors.AssertionError(
                "Manifest filenames cannot contain whitespace: %s" %
                newfilename)
        elif re.search(r'\x00', newfilename):
            raise errors.AssertionError(
                "Manifest filenames cannot contain NUL characters: %s" %
                newfilename)
        self._current_file_name = newfilename

    def current_file_name(self):
        """Return the name of the file being written, or None."""
        return self._current_file_name

    def finish_current_file(self):
        """Record the current file's extent in the current stream."""
        if self._current_file_name is None:
            if self._current_file_pos == self._current_stream_length:
                # No data written since the last file ended: nothing to do.
                return
            raise errors.AssertionError(
                "Cannot finish an unnamed file " +
                "(%d bytes at offset %d in '%s' stream)" %
                (self._current_stream_length - self._current_file_pos,
                 self._current_file_pos,
                 self._current_stream_name))
        # Files are recorded as [start offset, length, name] in the stream.
        self._current_stream_files.append([
                self._current_file_pos,
                self._current_stream_length - self._current_file_pos,
                self._current_file_name])
        self._current_file_pos = self._current_stream_length
        self._current_file_name = None

    def start_new_stream(self, newstreamname='.'):
        """Finish the current stream and begin a new one."""
        self.finish_current_stream()
        self.set_current_stream_name(newstreamname)

    def set_current_stream_name(self, newstreamname):
        if re.search(r'[\t\n]', newstreamname):
            raise errors.AssertionError(
                "Manifest stream names cannot contain whitespace")
        # '' means the top-level stream, spelled '.' in manifests.
        self._current_stream_name = '.' if newstreamname=='' else newstreamname

    def current_stream_name(self):
        """Return the name of the stream currently being written."""
        return self._current_stream_name

    def finish_current_stream(self):
        """Flush buffered data and record the current stream, then reset
        all per-stream state."""
        self.finish_current_file()
        self.flush_data()
        if not self._current_stream_files:
            # Empty stream: record nothing.
            pass
        elif self._current_stream_name is None:
            raise errors.AssertionError(
                "Cannot finish an unnamed stream (%d bytes in %d files)" %
                (self._current_stream_length, len(self._current_stream_files)))
        else:
            if not self._current_stream_locators:
                # A stream must reference at least one block.
                self._current_stream_locators.append(config.EMPTY_BLOCK_LOCATOR)
            self._finished_streams.append([self._current_stream_name,
                                           self._current_stream_locators,
                                           self._current_stream_files])
        self._current_stream_files = []
        self._current_stream_length = 0
        self._current_stream_locators = []
        self._current_stream_name = None
        self._current_file_pos = 0
        self._current_file_name = None

    def finish(self):
        # Store the manifest in Keep and return its locator.
        return self._my_keep().put(self.manifest_text())

    def portable_data_hash(self):
        """Return the portable data hash (md5 of the stripped manifest,
        plus its length) of the collection written so far."""
        # NOTE(review): `hashlib` is not imported by name in this
        # module; presumably it arrives via `from keep import *` --
        # confirm, or import it explicitly.
        stripped = self.stripped_manifest()
        return hashlib.md5(stripped).hexdigest() + '+' + str(len(stripped))

    def manifest_text(self):
        """Finish the current stream and render the full manifest text."""
        self.finish_current_stream()
        manifest = ''

        for stream in self._finished_streams:
            # Stream names must start with './' unless they are just '.'.
            if not re.search(r'^\.(/.*)?$', stream[0]):
                manifest += './'
            manifest += stream[0].replace(' ', '\\040')
            manifest += ' ' + ' '.join(stream[1])
            manifest += ' ' + ' '.join("%d:%d:%s" % (sfile[0], sfile[1], sfile[2].replace(' ', '\\040')) for sfile in stream[2])
            manifest += "\n"

        return manifest

    def data_locators(self):
        """Return the Keep locators of every finished stream's blocks."""
        ret = []
        for name, locators, files in self._finished_streams:
            ret += locators
        return ret
+
+
class ResumableCollectionWriter(CollectionWriter):
    # Writer attributes that dump_state()/from_state() serialize and
    # restore in order to resume an interrupted upload.
    STATE_PROPS = ['_current_stream_files', '_current_stream_length',
                   '_current_stream_locators', '_current_stream_name',
                   '_current_file_name', '_current_file_pos', '_close_file',
                   '_data_buffer', '_dependencies', '_finished_streams',
                   '_queued_dirents', '_queued_trees']

    def __init__(self, api_client=None, num_retries=0):
        # Maps source file paths to the stat() result recorded when the
        # file was queued, so resuming can detect changed files
        # (see check_dependencies).
        self._dependencies = {}
        super(ResumableCollectionWriter, self).__init__(
            api_client, num_retries=num_retries)

    @classmethod
    def from_state(cls, state, *init_args, **init_kwargs):
        # Try to build a new writer from scratch with the given state.
        # If the state is not suitable to resume (because files have changed,
        # been deleted, aren't predictable, etc.), raise a
        # StaleWriterStateError.  Otherwise, return the initialized writer.
        # The caller is responsible for calling writer.do_queued_work()
        # appropriately after it's returned.
        writer = cls(*init_args, **init_kwargs)
        for attr_name in cls.STATE_PROPS:
            attr_value = state[attr_name]
            attr_class = getattr(writer, attr_name).__class__
            # Coerce the value into the same type as the initial value, if
            # needed.
            if attr_class not in (type(None), attr_value.__class__):
                attr_value = attr_class(attr_value)
            setattr(writer, attr_name, attr_value)
        # Check dependencies before we try to resume anything.
        if any(KeepLocator(ls).permission_expired()
               for ls in writer._current_stream_locators):
            raise errors.StaleWriterStateError(
                "locators include expired permission hint")
        writer.check_dependencies()
        if state['_current_file'] is not None:
            # Reopen the file that was mid-write and seek back to where
            # the dumped state left off.
            path, pos = state['_current_file']
            try:
                writer._queued_file = open(path, 'rb')
                writer._queued_file.seek(pos)
            except IOError as error:
                raise errors.StaleWriterStateError(
                    "failed to reopen active file {}: {}".format(path, error))
        return writer

    def check_dependencies(self):
        """Raise StaleWriterStateError if any recorded source file has
        changed (type, mtime, or size) since it was queued."""
        for path, orig_stat in self._dependencies.items():
            if not S_ISREG(orig_stat[ST_MODE]):
                raise errors.StaleWriterStateError("{} not file".format(path))
            try:
                now_stat = tuple(os.stat(path))
            except OSError as error:
                raise errors.StaleWriterStateError(
                    "failed to stat {}: {}".format(path, error))
            if ((not S_ISREG(now_stat[ST_MODE])) or
                (orig_stat[ST_MTIME] != now_stat[ST_MTIME]) or
                (orig_stat[ST_SIZE] != now_stat[ST_SIZE])):
                raise errors.StaleWriterStateError("{} changed".format(path))

    def dump_state(self, copy_func=lambda x: x):
        """Return a serializable dict of writer state for from_state().

        copy_func is applied to each attribute value (e.g. pass
        copy.deepcopy to snapshot mutable state).
        """
        state = {attr: copy_func(getattr(self, attr))
                 for attr in self.STATE_PROPS}
        if self._queued_file is None:
            state['_current_file'] = None
        else:
            state['_current_file'] = (os.path.realpath(self._queued_file.name),
                                      self._queued_file.tell())
        return state

    def _queue_file(self, source, filename=None):
        try:
            src_path = os.path.realpath(source)
        except Exception:
            # realpath() failed, so `source` isn't a path; file objects
            # can't be re-opened on resume, so refuse them here.
            raise errors.AssertionError("{} not a file path".format(source))
        try:
            path_stat = os.stat(src_path)
        except OSError as stat_error:
            # NOTE: stat_error is referenced below; this relies on
            # Python 2 scoping, where the except target survives the
            # block (Python 3 deletes it).
            path_stat = None
        super(ResumableCollectionWriter, self)._queue_file(source, filename)
        fd_stat = os.fstat(self._queued_file.fileno())
        if not S_ISREG(fd_stat.st_mode):
            # We won't be able to resume from this cache anyway, so don't
            # worry about further checks.
            self._dependencies[source] = tuple(fd_stat)
        elif path_stat is None:
            raise errors.AssertionError(
                "could not stat {}: {}".format(source, stat_error))
        elif path_stat.st_ino != fd_stat.st_ino:
            raise errors.AssertionError(
                "{} changed between open and stat calls".format(source))
        else:
            self._dependencies[src_path] = tuple(fd_stat)

    def write(self, data):
        """Refuse writes not driven by a queued source file: unsourced
        data could not be reproduced when resuming."""
        if self._queued_file is None:
            raise errors.AssertionError(
                "resumable writer can't accept unsourced data")
        return super(ResumableCollectionWriter, self).write(data)
diff --git a/sdk/python/arvados/commands/__init__.py b/sdk/python/arvados/commands/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/python/arvados/commands/_util.py b/sdk/python/arvados/commands/_util.py
new file mode 100644 (file)
index 0000000..c42ee7a
--- /dev/null
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+
+import argparse
+import errno
+import os
+
+def _pos_int(s):
+    num = int(s)
+    if num < 0:
+        raise ValueError("can't accept negative value: %s" % (num,))
+    return num
+
# Parent parser fragment providing the shared --retries option; include
# it in a command's parser via argparse's parents= mechanism.
# add_help=False avoids clashing with the child parser's own -h/--help.
retry_opt = argparse.ArgumentParser(add_help=False)
retry_opt.add_argument('--retries', type=_pos_int, default=3, help="""
Maximum number of times to retry server requests that encounter temporary
failures (e.g., server down).  Default 3.""")
+
+def _ignore_error(error):
+    return None
+
+def _raise_error(error):
+    raise error
+
def make_home_conf_dir(path, mode=None, errors='ignore'):
    """Create ~/path (with any needed parents) and return its absolute path.

    If the directory is newly created and mode is given, chmod it to
    those permissions.  On failure, return None when errors is
    'ignore'; otherwise raise the underlying exception.
    """
    if errors == 'ignore':
        on_error = lambda err: None
    else:
        def on_error(err):
            raise err
    tilde_path = os.path.join('~', path)
    abs_path = os.path.expanduser(tilde_path)
    if abs_path == tilde_path:
        # expanduser left the path untouched: no home directory is known.
        return on_error(ValueError("no home directory available"))
    try:
        os.makedirs(abs_path)
    except OSError as error:
        # An already-existing directory is fine; anything else is an error.
        if error.errno != errno.EEXIST:
            return on_error(error)
    else:
        # Only a freshly created directory gets the requested mode.
        if mode is not None:
            os.chmod(abs_path, mode)
    return abs_path
diff --git a/sdk/python/arvados/commands/arv_copy.py b/sdk/python/arvados/commands/arv_copy.py
new file mode 100755 (executable)
index 0000000..7da23ac
--- /dev/null
@@ -0,0 +1,665 @@
+#! /usr/bin/env python
+
+# arv-copy [--recursive] [--no-recursive] object-uuid src dst
+#
+# Copies an object from Arvados instance src to instance dst.
+#
+# By default, arv-copy recursively copies any dependent objects
+# necessary to make the object functional in the new instance
+# (e.g. for a pipeline instance, arv-copy copies the pipeline
+# template, input collection, docker images, git repositories). If
+# --no-recursive is given, arv-copy copies only the single record
+# identified by object-uuid.
+#
+# The user must have files $HOME/.config/arvados/{src}.conf and
+# $HOME/.config/arvados/{dst}.conf with valid login credentials for
+# instances src and dst.  If either of these files is not found,
+# arv-copy will issue an error.
+
+import argparse
+import getpass
+import os
+import re
+import shutil
+import sys
+import logging
+import tempfile
+
+import arvados
+import arvados.config
+import arvados.keep
+import arvados.util
+import arvados.commands._util as arv_cmd
+import arvados.commands.keepdocker
+
# Module logger; main() sets its level from the --verbose flag.
logger = logging.getLogger('arvados.arv-copy')

# local_repo_dir records which git repositories from the Arvados source
# instance have been checked out locally during this run, and to which
# directories.
# e.g. if repository 'twp' from src_arv has been cloned into
# /tmp/gitfHkV9lu44A then local_repo_dir['twp'] = '/tmp/gitfHkV9lu44A'
#
local_repo_dir = {}

# Map of collections that have been copied in this session: source
# collection id (uuid or portable data hash) -> destination collection
# UUID (or the unchanged source id when the copy kept it).
collections_copied = {}
+
def main():
    """Command-line entry point.

    Parses arguments, creates API clients for the source and
    destination instances, dispatches on the type of the requested
    object, and exits 0 on success / 1 on an error result.
    """
    copy_opts = argparse.ArgumentParser(add_help=False)

    copy_opts.add_argument(
        '-v', '--verbose', dest='verbose', action='store_true',
        help='Verbose output.')
    copy_opts.add_argument(
        '--progress', dest='progress', action='store_true',
        help='Report progress on copying collections. (default)')
    copy_opts.add_argument(
        '--no-progress', dest='progress', action='store_false',
        help='Do not report progress on copying collections.')
    copy_opts.add_argument(
        '-f', '--force', dest='force', action='store_true',
        help='Perform copy even if the object appears to exist at the remote destination.')
    copy_opts.add_argument(
        '--src', dest='source_arvados', required=True,
        help='The name of the source Arvados instance (required). May be either a pathname to a config file, or the basename of a file in $HOME/.config/arvados/instance_name.conf.')
    copy_opts.add_argument(
        '--dst', dest='destination_arvados', required=True,
        help='The name of the destination Arvados instance (required). May be either a pathname to a config file, or the basename of a file in $HOME/.config/arvados/instance_name.conf.')
    copy_opts.add_argument(
        '--recursive', dest='recursive', action='store_true',
        help='Recursively copy any dependencies for this object. (default)')
    copy_opts.add_argument(
        '--no-recursive', dest='recursive', action='store_false',
        help='Do not copy any dependencies. NOTE: if this option is given, the copied object will need to be updated manually in order to be functional.')
    copy_opts.add_argument(
        '--dst-git-repo', dest='dst_git_repo',
        help='The name of the destination git repository. Required when copying a pipeline recursively.')
    copy_opts.add_argument(
        '--project-uuid', dest='project_uuid',
        help='The UUID of the project at the destination to which the pipeline should be copied.')
    copy_opts.add_argument(
        'object_uuid',
        help='The UUID of the object to be copied.')
    copy_opts.set_defaults(progress=True)
    copy_opts.set_defaults(recursive=True)

    # Combine with the shared --retries option from arvados.commands._util.
    parser = argparse.ArgumentParser(
        description='Copy a pipeline instance, template or collection from one Arvados instance to another.',
        parents=[copy_opts, arv_cmd.retry_opt])
    args = parser.parse_args()

    if args.verbose:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    # Create API clients for the source and destination instances
    src_arv = api_for_instance(args.source_arvados)
    dst_arv = api_for_instance(args.destination_arvados)

    # Identify the kind of object we have been given, and begin copying.
    t = uuid_type(src_arv, args.object_uuid)
    if t == 'Collection':
        result = copy_collection(args.object_uuid,
                                 src_arv, dst_arv,
                                 args)
    elif t == 'PipelineInstance':
        result = copy_pipeline_instance(args.object_uuid,
                                        src_arv, dst_arv,
                                        args)
    elif t == 'PipelineTemplate':
        result = copy_pipeline_template(args.object_uuid,
                                        src_arv, dst_arv, args)
    else:
        abort("cannot copy object {} of type {}".format(args.object_uuid, t))

    # Clean up any outstanding temp git repositories.
    for d in local_repo_dir.values():
        shutil.rmtree(d, ignore_errors=True)

    # If no exception was thrown and the response does not have an
    # error_token field, presume success
    if 'error_token' in result or 'uuid' not in result:
        logger.error("API server returned an error result: {}".format(result))
        exit(1)

    logger.info("")
    logger.info("Success: created copy with uuid {}".format(result['uuid']))
    exit(0)
+
+# api_for_instance(instance_name)
+#
+#     Creates an API client for the Arvados instance identified by
+#     instance_name.
+#
+#     If instance_name contains a slash, it is presumed to be a path
+#     (either local or absolute) to a file with Arvados configuration
+#     settings.
+#
+#     Otherwise, it is presumed to be the name of a file in
+#     $HOME/.config/arvados/instance_name.conf
+#
def api_for_instance(instance_name):
    """Return an Arvados API client for the instance instance_name.

    If instance_name contains a slash, it is treated as a path (local
    or absolute) to a config file; otherwise the config is read from
    ~/.config/arvados/<instance_name>.conf.  Aborts with a message if
    the file cannot be loaded or lacks the required credentials.
    """
    if '/' in instance_name:
        config_file = instance_name
    else:
        # os.path.expanduser keeps working when $HOME is unset (it can
        # fall back to the password database), unlike os.environ['HOME']
        # which would raise KeyError.
        config_file = os.path.join(
            os.path.expanduser('~'), '.config', 'arvados',
            "{}.conf".format(instance_name))

    try:
        cfg = arvados.config.load(config_file)
    except (IOError, OSError) as e:
        abort(("Could not open config file {}: {}\n" +
               "You must make sure that your configuration tokens\n" +
               "for Arvados instance {} are in {} and that this\n" +
               "file is readable.").format(
                   config_file, e, instance_name, config_file))

    if 'ARVADOS_API_HOST' in cfg and 'ARVADOS_API_TOKEN' in cfg:
        # Values like '1', 'true', 'yes' (any case) disable SSL
        # certificate verification.
        api_is_insecure = (
            cfg.get('ARVADOS_API_HOST_INSECURE', '').lower() in set(
                ['1', 't', 'true', 'y', 'yes']))
        client = arvados.api('v1',
                             host=cfg['ARVADOS_API_HOST'],
                             token=cfg['ARVADOS_API_TOKEN'],
                             insecure=api_is_insecure,
                             cache=False)
    else:
        abort('need ARVADOS_API_HOST and ARVADOS_API_TOKEN for {}'.format(instance_name))
    return client
+
+# copy_pipeline_instance(pi_uuid, src, dst, args)
+#
+#    Copies a pipeline instance identified by pi_uuid from src to dst.
+#
+#    If the args.recursive option is set:
+#      1. Copies all input collections
+#           * For each component in the pipeline, include all collections
+#             listed as job dependencies for that component)
+#      2. Copy docker images
+#      3. Copy git repositories
+#      4. Copy the pipeline template
+#
+#    The only changes made to the copied pipeline instance are:
+#      1. The original pipeline instance UUID is preserved in
+#         the 'properties' hash as 'copied_from_pipeline_instance_uuid'.
+#      2. The pipeline_template_uuid is changed to the new template uuid.
+#      3. The owner_uuid of the instance is changed to the user who
+#         copied it.
+#
def copy_pipeline_instance(pi_uuid, src, dst, args):
    """Copy the pipeline instance pi_uuid from src to dst; return the
    newly created instance record.

    With args.recursive set, also copies the pipeline template, input
    collections, docker images and git repositories the instance
    depends on (args.dst_git_repo is then required).
    """
    # Fetch the pipeline instance record.
    pi = src.pipeline_instances().get(uuid=pi_uuid).execute()

    if args.recursive:
        if not args.dst_git_repo:
            abort('--dst-git-repo is required when copying a pipeline recursively.')
        # Copy the pipeline template and save the copied template.
        if pi.get('pipeline_template_uuid', None):
            pt = copy_pipeline_template(pi['pipeline_template_uuid'],
                                        src, dst, args)

        # Copy input collections, docker images and git repos.
        pi = copy_collections(pi, src, dst, args)
        copy_git_repos(pi, src, dst, args.dst_git_repo)
        copy_docker_images(pi, src, dst, args)

        # Update the fields of the pipeline instance with the copied
        # pipeline template.
        if pi.get('pipeline_template_uuid', None):
            pi['pipeline_template_uuid'] = pt['uuid']

    else:
        # not recursive
        logger.info("Copying only pipeline instance %s.", pi_uuid)
        logger.info("You are responsible for making sure all pipeline dependencies have been updated.")

    # Update the pipeline instance properties, and create the new
    # instance at dst.
    pi['properties']['copied_from_pipeline_instance_uuid'] = pi_uuid
    pi['description'] = "Pipeline copied from {}\n\n{}".format(
        pi_uuid,
        pi['description'] if pi.get('description', None) else '')
    if args.project_uuid:
        pi['owner_uuid'] = args.project_uuid
    else:
        # Strip the owner so the destination assigns the copying user.
        del pi['owner_uuid']
    # Strip the uuid so the destination assigns a fresh one.
    del pi['uuid']

    new_pi = dst.pipeline_instances().create(body=pi, ensure_unique_name=True).execute()
    return new_pi
+
+# copy_pipeline_template(pt_uuid, src, dst, args)
+#
+#    Copies a pipeline template identified by pt_uuid from src to dst.
+#
+#    If args.recursive is True, also copy any collections, docker
+#    images and git repositories that this template references.
+#
+#    The owner_uuid of the new template is changed to that of the user
+#    who copied the template.
+#
+#    Returns the copied pipeline template object.
+#
def copy_pipeline_template(pt_uuid, src, dst, args):
    """Copy the pipeline template pt_uuid from src to dst; return the
    new template record.

    With args.recursive set, also copies referenced collections, docker
    images and git repositories (args.dst_git_repo is then required).
    """
    # fetch the pipeline template from the source instance
    pt = src.pipeline_templates().get(uuid=pt_uuid).execute()

    if args.recursive:
        if not args.dst_git_repo:
            abort('--dst-git-repo is required when copying a pipeline recursively.')
        # Copy input collections, docker images and git repos.
        pt = copy_collections(pt, src, dst, args)
        copy_git_repos(pt, src, dst, args.dst_git_repo)
        copy_docker_images(pt, src, dst, args)

    pt['description'] = "Pipeline template copied from {}\n\n{}".format(
        pt_uuid,
        pt['description'] if pt.get('description', None) else '')
    pt['name'] = "{} copied from {}".format(pt.get('name', ''), pt_uuid)
    # Strip uuid/owner so the destination assigns fresh values (the
    # owner becomes the user performing the copy).
    del pt['uuid']
    del pt['owner_uuid']

    return dst.pipeline_templates().create(body=pt, ensure_unique_name=True).execute()
+
+# copy_collections(obj, src, dst, args)
+#
+#    Recursively copies all collections referenced by 'obj' from src
+#    to dst.  obj may be a dict or a list, in which case we run
+#    copy_collections on every value it contains. If it is a string,
+#    search it for any substring that matches a collection hash or uuid
+#    (this will find hidden references to collections like
+#      "input0": "$(file 3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq)")
+#
+#    Returns a copy of obj with any old collection uuids replaced by
+#    the new ones.
+#
def copy_collections(obj, src, dst, args):
    """Recursively copy all collections referenced by obj from src to dst.

    obj may be a dict or list (recursed into), a string (scanned for
    collection portable data hashes or uuids, which are copied and
    replaced in place), or anything else (returned unchanged).  Returns
    a copy of obj with old collection ids replaced by the new ones.
    """

    def copy_collection_fn(collection_match):
        """Helper function for regex substitution: copies a single collection,
        identified by the collection_match MatchObject, to the
        destination.  Returns the destination collection uuid (or the
        portable data hash if that's what src_id is).

        """
        src_id = collection_match.group(0)
        if src_id not in collections_copied:
            dst_col = copy_collection(src_id, src, dst, args)
            if src_id in [dst_col['uuid'], dst_col['portable_data_hash']]:
                collections_copied[src_id] = src_id
            else:
                collections_copied[src_id] = dst_col['uuid']
        return collections_copied[src_id]

    # basestring only exists on Python 2; fall back to str on Python 3.
    try:
        string_types = basestring
    except NameError:
        string_types = str

    if isinstance(obj, string_types):
        # Copy any collections identified in this string to dst, replacing
        # them with the dst uuids as necessary.
        obj = arvados.util.portable_data_hash_pattern.sub(copy_collection_fn, obj)
        obj = arvados.util.collection_uuid_pattern.sub(copy_collection_fn, obj)
        return obj
    elif isinstance(obj, dict):
        # isinstance (rather than type() ==) also accepts dict subclasses.
        return {key: copy_collections(obj[key], src, dst, args) for key in obj}
    elif isinstance(obj, list):
        return [copy_collections(item, src, dst, args) for item in obj]
    return obj
+
+# copy_git_repos(p, src, dst, dst_repo)
+#
+#    Copies all git repositories referenced by pipeline instance or
+#    template 'p' from src to dst.
+#
+#    For each component c in the pipeline:
+#      * Copy git repositories named in c['repository'] and c['job']['repository'] if present
+#      * Rename script versions:
+#          * c['script_version']
+#          * c['job']['script_version']
+#          * c['job']['supplied_script_version']
+#        to the commit hashes they resolve to, since any symbolic
+#        names (tags, branches) are not preserved in the destination repo.
+#
+#    The pipeline object is updated in place with the new repository
+#    names.  The return value is undefined.
+#
def copy_git_repos(p, src, dst, dst_repo):
    """Copy every git repository referenced by pipeline p from src into
    dst_repo at dst, updating p in place.

    Repository names in each component (and its 'job', if present) are
    rewritten to dst_repo, and script versions are pinned to the commit
    hashes they resolve to, since symbolic names are not preserved in
    the destination repository.
    """
    # Track repositories already copied this call to avoid re-cloning.
    copied = set()
    for c in p['components']:
        component = p['components'][c]
        if 'repository' in component:
            repo = component['repository']
            script_version = component.get('script_version', None)
            if repo not in copied:
                copy_git_repo(repo, src, dst, dst_repo, script_version)
                copied.add(repo)
            component['repository'] = dst_repo
            if script_version:
                # copy_git_repo recorded the local clone's directory.
                repo_dir = local_repo_dir[repo]
                component['script_version'] = git_rev_parse(script_version, repo_dir)
        if 'job' in component:
            j = component['job']
            if 'repository' in j:
                repo = j['repository']
                script_version = j.get('script_version', None)
                if repo not in copied:
                    copy_git_repo(repo, src, dst, dst_repo, script_version)
                    copied.add(repo)
                j['repository'] = dst_repo
                repo_dir = local_repo_dir[repo]
                if script_version:
                    j['script_version'] = git_rev_parse(script_version, repo_dir)
                if 'supplied_script_version' in j:
                    j['supplied_script_version'] = git_rev_parse(j['supplied_script_version'], repo_dir)
+
def total_collection_size(manifest_text):
    """Return the total number of bytes in this collection (excluding
    duplicate blocks)."""

    total_bytes = 0
    # md5sums already counted -- a set, since only membership matters.
    locators_seen = set()
    for line in manifest_text.splitlines():
        words = line.split()
        # words[0] is the stream name; the remaining tokens are block
        # locators and file specifications.
        for word in words[1:]:
            try:
                loc = arvados.KeepLocator(word)
            except ValueError:
                continue  # this word isn't a locator, skip it
            if loc.md5sum not in locators_seen:
                locators_seen.add(loc.md5sum)
                total_bytes += loc.size

    return total_bytes
+
+# copy_collection(obj_uuid, src, dst, args)
+#
+#    Copies the collection identified by obj_uuid from src to dst.
+#    Returns the collection object created at dst.
+#
+#    If args.progress is True, produce a human-friendly progress
+#    report.
+#
+#    If a collection with the desired portable_data_hash already
+#    exists at dst, and args.force is False, copy_collection returns
+#    the existing collection without copying any blocks.  Otherwise
+#    (if no collection exists or if args.force is True)
+#    copy_collection copies all of the collection data blocks from src
+#    to dst.
+#
+#    For this application, it is critical to preserve the
+#    collection's manifest hash, which is not guaranteed with the
+#    arvados.CollectionReader and arvados.CollectionWriter classes.
+#    Copying each block in the collection manually, followed by
+#    the manifest block, ensures that the collection's manifest
+#    hash will not change.
+#
def copy_collection(obj_uuid, src, dst, args):
    """Copy the collection obj_uuid (including its data blocks) from
    src to dst; return the collection record created at dst.

    Blocks are copied one by one and the manifest rebuilt as it is
    scanned, which preserves the collection's manifest hash (see the
    comment block above for the full contract of args.force and
    args.progress).
    """
    c = src.collections().get(uuid=obj_uuid).execute()

    # If a collection with this hash already exists at the
    # destination, and 'force' is not true, just return that
    # collection.
    if not args.force:
        if 'portable_data_hash' in c:
            colhash = c['portable_data_hash']
        else:
            colhash = c['uuid']
        dstcol = dst.collections().list(
            filters=[['portable_data_hash', '=', colhash]]
        ).execute()
        if dstcol['items_available'] > 0:
            logger.debug("Skipping collection %s (already at dst)", obj_uuid)
            return dstcol['items'][0]

    # Fetch the collection's manifest.
    manifest = c['manifest_text']
    logger.debug("Copying collection %s with manifest: <%s>", obj_uuid, manifest)

    # Copy each block from src_keep to dst_keep.
    # Use the newly signed locators returned from dst_keep to build
    # a new manifest as we go.
    src_keep = arvados.keep.KeepClient(api_client=src, num_retries=args.retries)
    dst_keep = arvados.keep.KeepClient(api_client=dst, num_retries=args.retries)
    dst_manifest = ""
    dst_locators = {}
    bytes_written = 0
    bytes_expected = total_collection_size(manifest)
    if args.progress:
        progress_writer = ProgressWriter(human_progress)
    else:
        progress_writer = None

    # splitlines(True) keeps the trailing newlines so the rebuilt
    # manifest can reproduce them exactly.
    for line in manifest.splitlines(True):
        words = line.split()
        # words[0] is the stream name; keep it as the line prefix.
        dst_manifest_line = words[0]
        for word in words[1:]:
            try:
                loc = arvados.KeepLocator(word)
                blockhash = loc.md5sum
                # copy this block if we haven't seen it before
                # (otherwise, just reuse the existing dst_locator)
                if blockhash not in dst_locators:
                    logger.debug("Copying block %s (%s bytes)", blockhash, loc.size)
                    if progress_writer:
                        progress_writer.report(obj_uuid, bytes_written, bytes_expected)
                    data = src_keep.get(word)
                    dst_locator = dst_keep.put(data)
                    dst_locators[blockhash] = dst_locator
                    bytes_written += loc.size
                dst_manifest_line += ' ' + dst_locators[blockhash]
            except ValueError:
                # If 'word' can't be parsed as a locator,
                # presume it's a filename.
                dst_manifest_line += ' ' + word
        dst_manifest += dst_manifest_line
        if line.endswith("\n"):
            dst_manifest += "\n"

    if progress_writer:
        progress_writer.report(obj_uuid, bytes_written, bytes_expected)
        progress_writer.finish()

    # Copy the manifest and save the collection.
    logger.debug('saving %s with manifest: <%s>', obj_uuid, dst_manifest)
    dst_keep.put(dst_manifest)

    # Strip uuid/owner so dst assigns fresh values for the new record.
    if 'uuid' in c:
        del c['uuid']
    if 'owner_uuid' in c:
        del c['owner_uuid']
    c['manifest_text'] = dst_manifest
    return dst.collections().create(body=c, ensure_unique_name=True).execute()
+
+# copy_git_repo(src_git_repo, src, dst, dst_git_repo, script_version)
+#
+#    Copies commits from git repository 'src_git_repo' on Arvados
+#    instance 'src' to 'dst_git_repo' on 'dst'.  Both src_git_repo
+#    and dst_git_repo are repository names, not UUIDs (i.e. "arvados"
+#    or "jsmith")
+#
+#    All commits will be copied to a destination branch named for the
+#    source repository URL.
+#
+#    Because users cannot create their own repositories, the
+#    destination repository must already exist.
+#
+#    The user running this command must be authenticated
+#    to both repositories.
+#
def copy_git_repo(src_git_repo, src, dst, dst_git_repo, script_version):
    """Copy commits from repository src_git_repo at src into
    dst_git_repo at dst, pushing them to a branch named after the
    source URL and script_version.

    Both repository arguments are names, not UUIDs.  The destination
    repository must already exist, and the user must be authenticated
    to both.  Raises Exception when either repository cannot be
    uniquely identified.
    """
    # Identify the fetch and push URLs for the git repositories.
    r = src.repositories().list(
        filters=[['name', '=', src_git_repo]]).execute()
    if r['items_available'] != 1:
        raise Exception('cannot identify source repo {}; {} repos found'
                        .format(src_git_repo, r['items_available']))
    src_git_url = r['items'][0]['fetch_url']
    logger.debug('src_git_url: {}'.format(src_git_url))

    r = dst.repositories().list(
        filters=[['name', '=', dst_git_repo]]).execute()
    if r['items_available'] != 1:
        raise Exception('cannot identify destination repo {}; {} repos found'
                        .format(dst_git_repo, r['items_available']))
    dst_git_push_url  = r['items'][0]['push_url']
    logger.debug('dst_git_push_url: {}'.format(dst_git_push_url))

    # script_version is the "script_version" parameter from the source
    # component or job.  It is used here to tie the destination branch
    # to the commit that was used on the source.  If no script_version
    # was supplied in the component or job, it is a mistake in the pipeline,
    # but for the purposes of copying the repository, default to "master".
    #
    if not script_version:
        script_version = "master"

    # Branch name: the source URL plus version, with non-word
    # characters collapsed to underscores.
    dst_branch = re.sub(r'\W+', '_', "{}_{}".format(src_git_url, script_version))

    # Copy git commits from src repo to dst repo (but only if
    # we have not already copied this repo in this session).
    #
    if src_git_repo in local_repo_dir:
        logger.debug('already copied src repo %s, skipping', src_git_repo)
    else:
        tmprepo = tempfile.mkdtemp()
        # Record the clone location so copy_git_repos can rev-parse in it
        # (and main can clean it up afterwards).
        local_repo_dir[src_git_repo] = tmprepo
        arvados.util.run_command(
            ["git", "clone", "--bare", src_git_url, tmprepo],
            cwd=os.path.dirname(tmprepo))
        arvados.util.run_command(
            ["git", "branch", dst_branch, script_version],
            cwd=tmprepo)
        arvados.util.run_command(["git", "remote", "add", "dst", dst_git_push_url], cwd=tmprepo)
        arvados.util.run_command(["git", "push", "dst", dst_branch], cwd=tmprepo)
+
+
def copy_docker_images(pipeline, src, dst, args):
    """Copy any docker images named in the pipeline components'
    runtime_constraints field from src to dst."""

    logger.debug('copy_docker_images: {}'.format(pipeline['uuid']))
    # items() instead of iteritems(): identical behavior on Python 2
    # for this loop, and it also works on Python 3.
    for c_name, c_info in pipeline['components'].items():
        if ('runtime_constraints' in c_info and
            'docker_image' in c_info['runtime_constraints']):
            copy_docker_image(
                c_info['runtime_constraints']['docker_image'],
                c_info['runtime_constraints'].get('docker_image_tag', 'latest'),
                src, dst, args)
+
+
def copy_docker_image(docker_image, docker_image_tag, src, dst, args):
    """Copy the docker image identified by docker_image and
    docker_image_tag from src to dst. Create appropriate
    docker_image_repo+tag and docker_image_hash links at dst.

    """

    logger.debug('copying docker image {}:{}'.format(docker_image, docker_image_tag))

    # Find the link identifying this docker image.
    docker_image_list = arvados.commands.keepdocker.list_images_in_arv(
        src, args.retries, docker_image, docker_image_tag)
    # NOTE(review): assumes the image exists at src; an empty list here
    # raises IndexError -- confirm with list_images_in_arv's contract.
    image_uuid, image_info = docker_image_list[0]
    logger.debug('copying collection {} {}'.format(image_uuid, image_info))

    # Copy the collection it refers to.
    dst_image_col = copy_collection(image_uuid, src, dst, args)

    # Create docker_image_repo+tag and docker_image_hash links
    # at the destination.
    lk = dst.links().create(
        body={
            'head_uuid': dst_image_col['uuid'],
            'link_class': 'docker_image_repo+tag',
            'name': "{}:{}".format(docker_image, docker_image_tag),
        }
    ).execute(num_retries=args.retries)
    logger.debug('created dst link {}'.format(lk))

    lk = dst.links().create(
        body={
            'head_uuid': dst_image_col['uuid'],
            'link_class': 'docker_image_hash',
            'name': dst_image_col['portable_data_hash'],
        }
    ).execute(num_retries=args.retries)
    logger.debug('created dst link {}'.format(lk))
+
+
+# git_rev_parse(rev, repo)
+#
+#    Returns the 40-character commit hash corresponding to 'rev' in
+#    git repository 'repo' (which must be the path of a local git
+#    repository)
+#
def git_rev_parse(rev, repo):
    """Return the 40-character commit hash that rev names in the local
    git repository at path repo."""
    stdout, _stderr = arvados.util.run_command(
        ['git', 'rev-parse', rev], cwd=repo)
    return stdout.strip()
+
+# uuid_type(api, object_uuid)
+#
+#    Returns the name of the class that object_uuid belongs to, based on
+#    the second field of the uuid.  This function consults the api's
+#    schema to identify the object class.
+#
+#    It returns a string such as 'Collection', 'PipelineInstance', etc.
+#
+#    Special case: if handed a Keep locator hash, return 'Collection'.
+#
def uuid_type(api, object_uuid):
    """Return the schema class name for object_uuid ('Collection',
    'PipelineInstance', ...), or None if it cannot be determined.

    A bare Keep locator (md5 hash + size, with optional hints) is
    treated as a 'Collection'; otherwise the middle field of the uuid
    is matched against the uuidPrefix entries of the api schema.
    """
    if re.match(r'^[a-f0-9]{32}\+[0-9]+(\+[A-Za-z0-9+-]+)?$', object_uuid):
        return 'Collection'
    fields = object_uuid.split('-')
    if len(fields) != 3:
        return None
    type_prefix = fields[1]
    for class_name in api._schema.schemas:
        if api._schema.schemas[class_name].get('uuidPrefix', None) == type_prefix:
            return class_name
    return None
+
def abort(msg, code=1):
    """Log msg and terminate the process with exit status code."""
    # abort() is only reached on fatal conditions, so log at ERROR
    # severity rather than INFO.
    logger.error("arv-copy: %s", msg)
    exit(code)
+
+
+# Code for reporting on the progress of a collection upload.
+# Stolen from arvados.commands.put.ArvPutCollectionWriter
+# TODO(twp): figure out how to refactor into a shared library
+# (may involve refactoring some arvados.commands.arv_copy.copy_collection
+# code)
+
def machine_progress(obj_uuid, bytes_written, bytes_expected):
    """Format one machine-parseable progress line (ends with newline).

    An unknown total (bytes_expected is None) is reported as -1.
    """
    expected = -1 if (bytes_expected is None) else bytes_expected
    return "{} {}: {} {} written {} total\n".format(
        sys.argv[0], os.getpid(), obj_uuid, bytes_written, expected)
+
def human_progress(obj_uuid, bytes_written, bytes_expected):
    """Format a human-friendly progress line (leading \\r so successive
    reports overwrite each other on a terminal)."""
    if not bytes_expected:
        # Unknown total: just show the raw byte count.
        return "\r{}: {} ".format(obj_uuid, bytes_written)
    return "\r{}: {}M / {}M {:.1%} ".format(
        obj_uuid,
        bytes_written >> 20, bytes_expected >> 20,
        float(bytes_written) / bytes_expected)
+
class ProgressWriter(object):
    """Writes progress reports to stderr via a caller-supplied
    formatting function (e.g. human_progress or machine_progress)."""
    _progress_func = None
    outfile = sys.stderr

    def __init__(self, progress_func):
        self._progress_func = progress_func

    def report(self, obj_uuid, bytes_written, bytes_expected):
        """Format and emit one progress update; no-op if no formatter."""
        if self._progress_func is None:
            return
        self.outfile.write(
            self._progress_func(obj_uuid, bytes_written, bytes_expected))

    def finish(self):
        """Terminate the progress display with a newline."""
        self.outfile.write("\n")
+
+if __name__ == '__main__':
+    main()
diff --git a/sdk/python/arvados/commands/keepdocker.py b/sdk/python/arvados/commands/keepdocker.py
new file mode 100644 (file)
index 0000000..933fd77
--- /dev/null
@@ -0,0 +1,354 @@
+#!/usr/bin/env python
+
+import argparse
+import datetime
+import errno
+import json
+import os
+import subprocess
+import sys
+import tarfile
+import tempfile
+import _strptime
+
+from collections import namedtuple
+from stat import *
+
+import arvados
+import arvados.commands._util as arv_cmd
+import arvados.commands.put as arv_put
+
# Errors that can occur while reading or writing the image stat cache;
# all are treated as "no usable cache" rather than fatal.
STAT_CACHE_ERRORS = (IOError, OSError, ValueError)

# One row of `docker images --no-trunc` output: repository, tag, full
# image hash, creation-time string, and virtual-size string.
DockerImage = namedtuple('DockerImage',
                         ['repo', 'tag', 'hash', 'created', 'vsize'])

# Options specific to arv-keepdocker (add_help=False so this parser can
# be combined with arv-put's parsers below via parents=[]).
keepdocker_parser = argparse.ArgumentParser(add_help=False)
keepdocker_parser.add_argument(
    '-f', '--force', action='store_true', default=False,
    help="Re-upload the image even if it already exists on the server")

_group = keepdocker_parser.add_mutually_exclusive_group()
_group.add_argument(
    '--pull', action='store_true', default=False,
    help="Try to pull the latest image from Docker registry")
_group.add_argument(
    '--no-pull', action='store_false', dest='pull',
    help="Use locally installed image only, don't pull image from Docker registry (default)")

keepdocker_parser.add_argument(
    'image', nargs='?',
    help="Docker image to upload, as a repository name or hash")
keepdocker_parser.add_argument(
    'tag', nargs='?', default='latest',
    help="Tag of the Docker image to upload (default 'latest')")

# Combine keepdocker options listed above with run_opts options of arv-put.
# The options inherited from arv-put include --name, --project-uuid,
# --progress/--no-progress/--batch-progress and --resume/--no-resume.
arg_parser = argparse.ArgumentParser(
        description="Upload or list Docker images in Arvados",
        parents=[keepdocker_parser, arv_put.run_opts, arv_cmd.retry_opt])
+
class DockerError(Exception):
    """Raised when an invoked docker subcommand fails."""
+
+
def popen_docker(cmd, *args, **kwargs):
    """Launch `docker cmd...`, preferring the Debian-style docker.io binary.

    Unless the caller supplies its own stdin, the child's stdin pipe is
    opened and immediately closed so docker never blocks waiting for
    input.  Returns the subprocess.Popen object.
    """
    caller_owns_stdin = 'stdin' in kwargs
    kwargs.setdefault('stdin', subprocess.PIPE)
    kwargs.setdefault('stdout', sys.stderr)
    try:
        proc = subprocess.Popen(['docker.io'] + cmd, *args, **kwargs)
    except OSError:  # docker.io not on $PATH; fall back to plain `docker`.
        proc = subprocess.Popen(['docker'] + cmd, *args, **kwargs)
    if not caller_owns_stdin:
        proc.stdin.close()
    return proc
+
def check_docker(proc, description):
    """Wait for a docker subprocess; raise DockerError on nonzero exit."""
    proc.wait()
    if proc.returncode == 0:
        return
    raise DockerError("docker {} returned status code {}".
                      format(description, proc.returncode))
+
def docker_images():
    """Yield a DockerImage tuple for each locally installed image.

    Parses the tabular output of `docker images --no-trunc`.  The CREATED
    column may itself contain spaces, so the words between the third
    column and the final size column are re-joined.
    """
    proc = popen_docker(['images', '--no-trunc'], stdout=subprocess.PIPE)
    rows = iter(proc.stdout)
    next(rows)  # skip the header row
    for row in rows:
        words = row.split()
        size_start = len(words) - 2
        repo, tag, imageid = words[:3]
        created = ' '.join(words[3:size_start])
        size = ' '.join(words[size_start:])
        yield DockerImage(repo, tag, imageid, created, size)
    proc.stdout.close()
    check_docker(proc, "images")
+
def find_image_hashes(image_search, image_tag=None):
    """Find installed Docker images matching a search term.

    With image_tag given, an exact repo:tag match wins and is returned as
    a single-element set.  Otherwise image_search is treated as a hash
    prefix and every matching full hash is collected.  The result may be
    empty (no match) or contain several entries (ambiguous prefix).
    """
    prefix = image_search.lower()
    matches = set()
    for image in docker_images():
        if image.repo == image_search and image.tag == image_tag:
            return set([image.hash])
        if image.hash.startswith(prefix):
            matches.add(image.hash)
    return matches
+
def find_one_image_hash(image_search, image_tag=None):
    """Resolve a search term to exactly one image hash.

    Raises DockerError when nothing matches or the match is ambiguous.
    """
    hashes = find_image_hashes(image_search, image_tag)
    if len(hashes) == 1:
        return hashes.pop()
    if not hashes:
        raise DockerError("no matching image found")
    raise DockerError("{} images match {}".format(len(hashes), image_search))
+
def stat_cache_name(image_file):
    """Return the stat-cache path for a saved image (path string or file object)."""
    base = getattr(image_file, 'name', image_file)
    return base + '.stat'
+
def pull_image(image_name, image_tag):
    """Pull image_name:image_tag from the Docker registry; raise on failure."""
    proc = popen_docker(['pull', '{}:{}'.format(image_name, image_tag)])
    check_docker(proc, "pull")
+
def save_image(image_hash, image_file):
    """Write `docker save` output for image_hash into image_file.

    On success, the file's stat fields are also recorded next to it so a
    later run can detect an intact cached save and skip re-saving.
    """
    save_proc = popen_docker(['save', image_hash], stdout=image_file)
    check_docker(save_proc, "save")
    image_file.flush()
    try:
        with open(stat_cache_name(image_file), 'w') as statfile:
            json.dump(tuple(os.fstat(image_file.fileno())), statfile)
    except STAT_CACHE_ERRORS:
        pass  # Cache write failed; the next run simply cannot resume.
+
def prep_image_file(filename):
    """Open a file to hold the saved Docker image.

    Returns (file_object, need_save).  need_save is False only when a
    cached save exists whose recorded stats still match the file on
    disk.  Without a usable cache directory a NamedTemporaryFile is
    used and a fresh save is always needed.
    """
    cache_dir = arv_cmd.make_home_conf_dir(
        os.path.join('.cache', 'arvados', 'docker'), 0o700)
    if cache_dir is None:
        return tempfile.NamedTemporaryFile(suffix='.tar'), True
    file_path = os.path.join(cache_dir, filename)
    try:
        with open(stat_cache_name(file_path)) as statfile:
            prev_stat = json.load(statfile)
        now_stat = os.stat(file_path)
        # A changed mtime or size means the cached save is stale.
        need_save = any(prev_stat[field] != now_stat[field]
                        for field in [ST_MTIME, ST_SIZE])
    except STAT_CACHE_ERRORS + (AttributeError, IndexError):
        need_save = True  # No comparable stats; save from scratch.
    return open(file_path, 'w+b' if need_save else 'rb'), need_save
+
def make_link(api_client, num_retries, link_class, link_name, **link_attrs):
    """Create an Arvados link with the given class/name plus extra attributes."""
    body = dict(link_attrs)
    body['link_class'] = link_class
    body['name'] = link_name
    return api_client.links().create(body=body).execute(
        num_retries=num_retries)
+
def ptimestamp(t):
    """Parse an ISO-8601 UTC timestamp string, tolerating fractional seconds.

    A fractional part, if present, is discarded: "...:05.123456Z" parses
    the same as "...:05Z".
    """
    parts = t.split(".")
    if len(parts) == 2:
        # Drop the fraction but keep its trailing "Z" so the format matches.
        t = parts[0] + parts[1][-1:]
    return datetime.datetime.strptime(t, "%Y-%m-%dT%H:%M:%SZ")
+
def list_images_in_arv(api_client, num_retries, image_name=None, image_tag=None):
    """List all Docker images known to the api_client with image_name and
    image_tag.  If no image_name is given, defaults to listing all
    Docker images.

    Returns a list of tuples representing matching Docker images,
    sorted in preference order (i.e. the first collection in the list
    is the one that the API server would use). Each tuple is a
    (collection_uuid, collection_info) pair, where collection_info is
    a dict with fields "dockerhash", "repo", "tag", and "timestamp".

    """
    docker_image_filters = [['link_class', 'in', ['docker_image_hash', 'docker_image_repo+tag']]]
    if image_name:
        image_link_name = "{}:{}".format(image_name, image_tag or 'latest')
        docker_image_filters.append(['name', '=', image_link_name])

    existing_links = api_client.links().list(
        filters=docker_image_filters
        ).execute(num_retries=num_retries)['items']
    images = {}
    for link in existing_links:
        collection_uuid = link["head_uuid"]
        if collection_uuid not in images:
            # Placeholder entry until hash / repo+tag links are seen.
            images[collection_uuid] = {"dockerhash": "<none>",
                      "repo": "<none>",
                      "tag": "<none>",
                      "timestamp": ptimestamp("1970-01-01T00:00:01Z")}

        if link["link_class"] == "docker_image_hash":
            images[collection_uuid]["dockerhash"] = link["name"]

        if link["link_class"] == "docker_image_repo+tag":
            r = link["name"].split(":")
            images[collection_uuid]["repo"] = r[0]
            if len(r) > 1:
                images[collection_uuid]["tag"] = r[1]

        # Prefer the image's own creation time; fall back to link creation.
        if "image_timestamp" in link["properties"]:
            images[collection_uuid]["timestamp"] = ptimestamp(link["properties"]["image_timestamp"])
        else:
            images[collection_uuid]["timestamp"] = ptimestamp(link["created_at"])

    # Newest image first.  key/reverse is equivalent to the old cmp-based
    # sort but also works under Python 3, where sorted() has no cmp arg
    # (sorting is stable either way, so ties keep their original order).
    return sorted(images.items(),
                  key=lambda item: item[1]["timestamp"],
                  reverse=True)
+
+
def main(arguments=None):
    """arv-keepdocker entry point.

    With no image argument (or the literal "images"), lists the Docker
    images already registered in Arvados and exits.  Otherwise resolves
    the named image locally, uploads it via arv-put unless an up-to-date
    copy is already registered, and records docker_image_hash /
    docker_image_repo+tag links pointing at the collection.
    """
    args = arg_parser.parse_args(arguments)
    api = arvados.api('v1')

    if args.image is None or args.image == 'images':
        fmt = "{:30}  {:10}  {:12}  {:29}  {:20}"
        print(fmt.format("REPOSITORY", "TAG", "IMAGE ID", "COLLECTION", "CREATED"))
        for i, j in list_images_in_arv(api, args.retries):
            print(fmt.format(j["repo"], j["tag"], j["dockerhash"][0:12], i, j["timestamp"].strftime("%c")))
        sys.exit(0)

    # Pull the image if requested, unless the image is specified as a hash
    # that we already have.
    if args.pull and not find_image_hashes(args.image):
        pull_image(args.image, args.tag)

    try:
        image_hash = find_one_image_hash(args.image, args.tag)
    except DockerError as error:
        print >>sys.stderr, "arv-keepdocker:", error.message
        sys.exit(1)

    # Only record a repo:tag link when the user named a repository rather
    # than (a prefix of) the hash itself.
    image_repo_tag = '{}:{}'.format(args.image, args.tag) if not image_hash.startswith(args.image.lower()) else None

    if args.name is None:
        if image_repo_tag:
            collection_name = 'Docker image {} {}'.format(image_repo_tag, image_hash[0:12])
        else:
            collection_name = 'Docker image {}'.format(image_hash[0:12])
    else:
        collection_name = args.name

    if not args.force:
        # Check if this image is already in Arvados.

        # Project where everything should be owned
        if args.project_uuid:
            parent_project_uuid = args.project_uuid
        else:
            parent_project_uuid = api.users().current().execute(
                num_retries=args.retries)['uuid']

        # Find image hash tags
        existing_links = api.links().list(
            filters=[['link_class', '=', 'docker_image_hash'],
                     ['name', '=', image_hash]]
            ).execute(num_retries=args.retries)['items']
        if existing_links:
            # get readable collections
            collections = api.collections().list(
                filters=[['uuid', 'in', [link['head_uuid'] for link in existing_links]]],
                select=["uuid", "owner_uuid", "name", "manifest_text"]
                ).execute(num_retries=args.retries)['items']

            if collections:
                # check for repo+tag links on these collections
                # BUGFIX: the 'in' filter needs collection UUID strings; the
                # previous code passed the collection records themselves.
                existing_repo_tag = (api.links().list(
                    filters=[['link_class', '=', 'docker_image_repo+tag'],
                             ['name', '=', image_repo_tag],
                             ['head_uuid', 'in', [c['uuid'] for c in collections]]]
                    ).execute(num_retries=args.retries)['items']) if image_repo_tag else []

                # Filter on elements owned by the parent project
                owned_col = [c for c in collections if c['owner_uuid'] == parent_project_uuid]
                owned_img = [c for c in existing_links if c['owner_uuid'] == parent_project_uuid]
                owned_rep = [c for c in existing_repo_tag if c['owner_uuid'] == parent_project_uuid]

                if owned_col:
                    # already have a collection owned by this project
                    coll_uuid = owned_col[0]['uuid']
                else:
                    # create new collection owned by the project
                    coll_uuid = api.collections().create(
                        body={"manifest_text": collections[0]['manifest_text'],
                              "name": collection_name,
                              "owner_uuid": parent_project_uuid},
                        ensure_unique_name=True
                        ).execute(num_retries=args.retries)['uuid']

                link_base = {'owner_uuid': parent_project_uuid,
                             'head_uuid':  coll_uuid }

                if not owned_img:
                    # create image link owned by the project
                    make_link(api, args.retries,
                              'docker_image_hash', image_hash, **link_base)

                if not owned_rep and image_repo_tag:
                    # create repo+tag link owned by the project
                    make_link(api, args.retries, 'docker_image_repo+tag',
                              image_repo_tag, **link_base)

                print(coll_uuid)

                sys.exit(0)

    # Open a file for the saved image, and write it if needed.
    outfile_name = '{}.tar'.format(image_hash)
    image_file, need_save = prep_image_file(outfile_name)
    if need_save:
        save_image(image_hash, image_file)

    # Call arv-put with switches we inherited from it
    # (a.k.a., switches that aren't our own).
    put_args = keepdocker_parser.parse_known_args(arguments)[1]

    if args.name is None:
        put_args += ['--name', collection_name]

    coll_uuid = arv_put.main(
        put_args + ['--filename', outfile_name, image_file.name]).strip()

    # Read the image metadata and make Arvados links from it.
    image_file.seek(0)
    image_tar = tarfile.open(fileobj=image_file)
    json_file = image_tar.extractfile(image_tar.getmember(image_hash + '/json'))
    image_metadata = json.load(json_file)
    json_file.close()
    image_tar.close()
    link_base = {'head_uuid': coll_uuid, 'properties': {}}
    if 'created' in image_metadata:
        link_base['properties']['image_timestamp'] = image_metadata['created']
    if args.project_uuid is not None:
        link_base['owner_uuid'] = args.project_uuid

    make_link(api, args.retries, 'docker_image_hash', image_hash, **link_base)
    if image_repo_tag:
        make_link(api, args.retries,
                  'docker_image_repo+tag', image_repo_tag, **link_base)

    # Clean up the local image save and its stat cache.
    image_file.close()
    for filename in [stat_cache_name(image_file), image_file.name]:
        try:
            os.unlink(filename)
        except OSError as error:
            if error.errno != errno.ENOENT:
                raise
+
# Run as a command-line tool when executed directly.
if __name__ == '__main__':
    main()
diff --git a/sdk/python/arvados/commands/ls.py b/sdk/python/arvados/commands/ls.py
new file mode 100755 (executable)
index 0000000..e87244d
--- /dev/null
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+
+from __future__ import print_function
+
+import argparse
+
+import arvados
+import arvados.commands._util as arv_cmd
+
def parse_args(args):
    """Parse arv-ls command-line arguments.

    args: list of argument strings (e.g. sys.argv[1:]).
    Returns the argparse Namespace with .locator and .s, plus .retries
    contributed by the shared retry_opt parent parser.
    """
    parser = argparse.ArgumentParser(
        description='List contents of a manifest',
        parents=[arv_cmd.retry_opt])

    parser.add_argument('locator', type=str,
                        help="""Collection UUID or locator""")
    parser.add_argument('-s', action='store_true',
                        help="""List file sizes, in KiB.""")

    return parser.parse_args(args)
+
def size_formatter(coll_file):
    """Return coll_file's size in KiB (rounded up), right-aligned in 10 columns."""
    # Floor division keeps the result an integer under Python 3 as well;
    # the old "/ 1024" form would render a float there (e.g. "2.99902...").
    # Identical behavior under Python 2's integer division.
    return "{:>10}".format((coll_file.size() + 1023) // 1024)
+
def name_formatter(coll_file):
    """Return the file's path within the collection, as "<stream>/<name>"."""
    stream = coll_file.stream_name()
    return "{}/{}".format(stream, coll_file.name)
+
def main(args, stdout, stderr, api_client=None):
    """arv-ls entry point: print one line per file in the collection.

    Returns 0 on success, 1 when the collection cannot be fetched.
    """
    args = parse_args(args)

    if api_client is None:
        api_client = arvados.api('v1')

    try:
        cr = arvados.CollectionReader(args.locator, api_client=api_client,
                                      num_retries=args.retries)
        cr.normalize()
    except (arvados.errors.ArgumentError,
            arvados.errors.NotFoundError) as error:
        print("arv-ls: error fetching collection: {}".format(error),
              file=stderr)
        return 1

    # Optional size column first, then the file path.
    formatters = [size_formatter] if args.s else []
    formatters.append(name_formatter)

    for f in cr.all_files():
        print(*(fmt(f) for fmt in formatters), file=stdout)

    return 0
diff --git a/sdk/python/arvados/commands/put.py b/sdk/python/arvados/commands/put.py
new file mode 100644 (file)
index 0000000..d070a8b
--- /dev/null
@@ -0,0 +1,507 @@
+#!/usr/bin/env python
+
+# TODO:
+# --md5sum - display md5 of each file as read from disk
+
+import argparse
+import arvados
+import base64
+import datetime
+import errno
+import fcntl
+import hashlib
+import json
+import os
+import pwd
+import signal
+import socket
+import sys
+import tempfile
+from apiclient import errors as apiclient_errors
+
+import arvados.commands._util as arv_cmd
+
# Signals that abort the upload via exit_signal_handler.  The resume
# cache is not destroyed on that path, so a later run can pick up from
# the last checkpoint.
CAUGHT_SIGNALS = [signal.SIGINT, signal.SIGQUIT, signal.SIGTERM]
# Module-level API client; created on first use inside main().
api_client = None
+
# Options describing what to upload and what form the output takes
# (add_help=False so this parser can be composed via parents=[]).
upload_opts = argparse.ArgumentParser(add_help=False)

upload_opts.add_argument('paths', metavar='path', type=str, nargs='*',
                         help="""
Local file or directory. Default: read from standard input.
""")

_group = upload_opts.add_mutually_exclusive_group()

_group.add_argument('--max-manifest-depth', type=int, metavar='N',
                    default=-1, help="""
Maximum depth of directory tree to represent in the manifest
structure. A directory structure deeper than this will be represented
as a single stream in the manifest. If N=0, the manifest will contain
a single stream. Default: -1 (unlimited), i.e., exactly one manifest
stream per filesystem directory that contains files.
""")

_group.add_argument('--normalize', action='store_true',
                    help="""
Normalize the manifest by re-ordering files and streams after writing
data.
""")

# Output-mode options: exactly one of stream/manifest/raw (manifest is
# the default).
_group = upload_opts.add_mutually_exclusive_group()

_group.add_argument('--as-stream', action='store_true', dest='stream',
                    help="""
Synonym for --stream.
""")

_group.add_argument('--stream', action='store_true',
                    help="""
Store the file content and display the resulting manifest on
stdout. Do not write the manifest to Keep or save a Collection object
in Arvados.
""")

_group.add_argument('--as-manifest', action='store_true', dest='manifest',
                    help="""
Synonym for --manifest.
""")

_group.add_argument('--in-manifest', action='store_true', dest='manifest',
                    help="""
Synonym for --manifest.
""")

_group.add_argument('--manifest', action='store_true',
                    help="""
Store the file data and resulting manifest in Keep, save a Collection
object in Arvados, and display the manifest locator (Collection uuid)
on stdout. This is the default behavior.
""")

_group.add_argument('--as-raw', action='store_true', dest='raw',
                    help="""
Synonym for --raw.
""")

_group.add_argument('--raw', action='store_true',
                    help="""
Store the file content and display the data block locators on stdout,
separated by commas, with a trailing newline. Do not store a
manifest.
""")

upload_opts.add_argument('--use-filename', type=str, default=None,
                         dest='filename', help="""
Synonym for --filename.
""")

upload_opts.add_argument('--filename', type=str, default=None,
                         help="""
Use the given filename in the manifest, instead of the name of the
local file. This is useful when "-" or "/dev/stdin" is given as an
input file. It can be used only if there is exactly one path given and
it is not a directory. Implies --manifest.
""")

upload_opts.add_argument('--portable-data-hash', action='store_true',
                         help="""
Print the portable data hash instead of the Arvados UUID for the collection
created by the upload.
""")
+
# Runtime-behavior options, designed for reuse via parents=[] by other
# commands (arv-keepdocker composes these into its own parser).
run_opts = argparse.ArgumentParser(add_help=False)

run_opts.add_argument('--project-uuid', metavar='UUID', help="""
Store the collection in the specified project, instead of your Home
project.
""")

run_opts.add_argument('--name', help="""
Save the collection with the specified name.
""")

_group = run_opts.add_mutually_exclusive_group()
_group.add_argument('--progress', action='store_true',
                    help="""
Display human-readable progress on stderr (bytes and, if possible,
percentage of total data size). This is the default behavior when
stderr is a tty.
""")

_group.add_argument('--no-progress', action='store_true',
                    help="""
Do not display human-readable progress on stderr, even if stderr is a
tty.
""")

_group.add_argument('--batch-progress', action='store_true',
                    help="""
Display machine-readable progress on stderr (bytes and, if known,
total data size).
""")

_group = run_opts.add_mutually_exclusive_group()
_group.add_argument('--resume', action='store_true', default=True,
                    help="""
Continue interrupted uploads from cached state (default).
""")
_group.add_argument('--no-resume', action='store_false', dest='resume',
                    help="""
Do not continue interrupted uploads from cached state.
""")

# The full arv-put command line: upload options + run options + the
# shared --retries option.
arg_parser = argparse.ArgumentParser(
    description='Copy data from the local filesystem to Keep.',
    parents=[upload_opts, run_opts, arv_cmd.retry_opt])
+
def parse_arguments(arguments):
    """Parse arv-put command-line arguments and apply stdin defaults.

    With no paths, input is read from /dev/stdin.  "-" as the sole path
    is an alias for /dev/stdin and implies --filename '-' unless one was
    given.  --progress is switched on automatically when stderr is a tty.
    """
    args = arg_parser.parse_args(arguments)

    if not args.paths:
        args.paths += ['/dev/stdin']

    # --filename only makes sense for a single, non-directory input.
    if args.filename and (len(args.paths) != 1 or os.path.isdir(args.paths[0])):
        arg_parser.error("""
    --filename argument cannot be used when storing a directory or
    multiple files.
    """)

    # Turn on --progress by default if stderr is a tty.
    if (not (args.batch_progress or args.no_progress)
        and os.isatty(sys.stderr.fileno())):
        args.progress = True

    if args.paths == ['-']:
        args.paths = ['/dev/stdin']
        if not args.filename:
            args.filename = '-'

    return args
+
class ResumeCacheConflict(Exception):
    """Raised when the resume cache file is already locked by another process."""
+
+
class ResumeCache(object):
    """File-backed, flock-protected store for upload checkpoint state."""

    CACHE_DIR = '.cache/arvados/arv-put'

    def __init__(self, file_spec):
        # 'a+' creates the file if needed without truncating existing state.
        self.cache_file = open(file_spec, 'a+')
        self._lock_file(self.cache_file)
        self.filename = self.cache_file.name

    @classmethod
    def make_path(cls, args):
        """Derive a cache file path unique to this host + input combination."""
        md5 = hashlib.md5()
        md5.update(arvados.config.get('ARVADOS_API_HOST', '!nohost'))
        realpaths = sorted(os.path.realpath(path) for path in args.paths)
        md5.update('\0'.join(realpaths))
        if any(os.path.isdir(path) for path in realpaths):
            # Manifest depth changes the resulting manifest, so it is part
            # of the cache identity.
            md5.update(str(max(args.max_manifest_depth, -1)))
        elif args.filename:
            md5.update(args.filename)
        return os.path.join(
            arv_cmd.make_home_conf_dir(cls.CACHE_DIR, 0o700, 'raise'),
            md5.hexdigest())

    def _lock_file(self, fileobj):
        # Non-blocking exclusive lock; failure means another arv-put owns it.
        try:
            fcntl.flock(fileobj, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            raise ResumeCacheConflict("{} locked".format(fileobj.name))

    def load(self):
        """Return the deserialized checkpoint state."""
        self.cache_file.seek(0)
        return json.load(self.cache_file)

    def save(self, data):
        """Atomically replace the cache contents with `data`.

        Writes to a locked temp file in the same directory, then renames
        it over the old cache so readers never see a partial state.
        """
        try:
            new_cache_fd, new_cache_name = tempfile.mkstemp(
                dir=os.path.dirname(self.filename))
            self._lock_file(new_cache_fd)
            new_cache = os.fdopen(new_cache_fd, 'r+')
            json.dump(data, new_cache)
            os.rename(new_cache_name, self.filename)
        except (IOError, OSError, ResumeCacheConflict) as error:
            # NOTE(review): failures are swallowed -- a checkpoint that
            # cannot be written is treated as best-effort.
            try:
                os.unlink(new_cache_name)
            except NameError:  # mkstemp failed.
                pass
        else:
            self.cache_file.close()
            self.cache_file = new_cache

    def close(self):
        self.cache_file.close()

    def destroy(self):
        """Delete the cache file (ignoring its absence) and close it."""
        try:
            os.unlink(self.filename)
        except OSError as error:
            if error.errno != errno.ENOENT:  # That's what we wanted anyway.
                raise
        self.close()

    def restart(self):
        """Discard cached state and reopen a fresh cache at the same path."""
        self.destroy()
        self.__init__(self.filename)
+
+
class ArvPutCollectionWriter(arvados.ResumableCollectionWriter):
    """Resumable collection writer with progress reporting and checkpointing.

    Extends the base resumable writer with a running byte count, a
    pluggable progress reporter, and periodic persistence of its
    serialized state into a ResumeCache.
    """

    # Extra attributes that must be persisted to resume an upload.
    STATE_PROPS = (arvados.ResumableCollectionWriter.STATE_PROPS +
                   ['bytes_written', '_seen_inputs'])

    def __init__(self, cache=None, reporter=None, bytes_expected=None,
                 api_client=None, num_retries=0):
        # cache: ResumeCache instance, or None to disable checkpointing.
        # reporter: callable(bytes_written, bytes_expected), or None.
        self.bytes_written = 0
        self._seen_inputs = []
        self.cache = cache
        self.reporter = reporter
        self.bytes_expected = bytes_expected
        super(ArvPutCollectionWriter, self).__init__(
            api_client, num_retries=num_retries)

    @classmethod
    def from_cache(cls, cache, reporter=None, bytes_expected=None,
                   num_retries=0):
        """Rebuild a writer from cached state, or start fresh if it is unusable."""
        try:
            state = cache.load()
            state['_data_buffer'] = [base64.decodestring(state['_data_buffer'])]
            writer = cls.from_state(state, cache, reporter, bytes_expected,
                                    num_retries=num_retries)
        except (TypeError, ValueError,
                arvados.errors.StaleWriterStateError) as error:
            return cls(cache, reporter, bytes_expected, num_retries=num_retries)
        else:
            return writer

    def cache_state(self):
        """Serialize the current state into the cache (no-op without a cache)."""
        if self.cache is None:
            return
        state = self.dump_state()
        # Transform attributes for serialization.
        for attr, value in state.items():
            if attr == '_data_buffer':
                # Raw bytes are not JSON-safe; store base64 text.
                state[attr] = base64.encodestring(''.join(value))
            elif hasattr(value, 'popleft'):
                # deque-like objects are not JSON-serializable; store lists.
                state[attr] = list(value)
        self.cache.save(state)

    def report_progress(self):
        if self.reporter is not None:
            self.reporter(self.bytes_written, self.bytes_expected)

    def flush_data(self):
        """Flush buffered data, then update progress and maybe checkpoint.

        State is re-cached only when the flush crossed a Keep block
        boundary, bounding the checkpoint frequency.
        NOTE(review): "/" is integer division under Python 2; change to
        "//" if this file is ever ported to Python 3.
        """
        start_buffer_len = self._data_buffer_len
        start_block_count = self.bytes_written / self.KEEP_BLOCK_SIZE
        super(ArvPutCollectionWriter, self).flush_data()
        if self._data_buffer_len < start_buffer_len:  # We actually PUT data.
            self.bytes_written += (start_buffer_len - self._data_buffer_len)
            self.report_progress()
            if (self.bytes_written / self.KEEP_BLOCK_SIZE) > start_block_count:
                self.cache_state()

    def _record_new_input(self, input_type, source_name, dest_name):
        """Return True the first time an input is seen, False on repeats."""
        # The key needs to be a list because that's what we'll get back
        # from JSON deserialization.
        key = [input_type, source_name, dest_name]
        if key in self._seen_inputs:
            return False
        self._seen_inputs.append(key)
        return True

    def write_file(self, source, filename=None):
        # Only write inputs not already recorded in (resumed) state.
        if self._record_new_input('file', source, filename):
            super(ArvPutCollectionWriter, self).write_file(source, filename)

    def write_directory_tree(self,
                             path, stream_name='.', max_manifest_depth=-1):
        if self._record_new_input('directory', path, stream_name):
            super(ArvPutCollectionWriter, self).write_directory_tree(
                path, stream_name, max_manifest_depth)
+
+
def expected_bytes_for(pathlist):
    """Total the sizes of all files under the given paths, for progress display.

    Returns None when any path is neither a file nor a directory (e.g. a
    pipe or device), since the total is unknowable in that case.
    """
    total = 0
    for path in pathlist:
        if os.path.isdir(path):
            for filename in arvados.util.listdir_recursive(path):
                total += os.path.getsize(os.path.join(path, filename))
        elif os.path.isfile(path):
            total += os.path.getsize(path)
        else:
            return None
    return total
+
# Precomputed "<program> <pid>: " prefix with placeholders for the counts.
_machine_format = "{} {}: {{}} written {{}} total\n".format(sys.argv[0],
                                                            os.getpid())
def machine_progress(bytes_written, bytes_expected):
    """Machine-readable progress line; an unknown total is reported as -1."""
    expected = -1 if (bytes_expected is None) else bytes_expected
    return _machine_format.format(bytes_written, expected)
+
def human_progress(bytes_written, bytes_expected):
    """Human-readable progress: MiB counts plus a percentage when total known."""
    if not bytes_expected:
        return "\r{} ".format(bytes_written)
    return "\r{}M / {}M {:.1%} ".format(
        bytes_written >> 20, bytes_expected >> 20,
        float(bytes_written) / bytes_expected)
+
def progress_writer(progress_func, outfile=sys.stderr):
    """Return a callback that formats progress with progress_func and writes it."""
    def report(bytes_written, bytes_expected):
        outfile.write(progress_func(bytes_written, bytes_expected))
    return report
+
def exit_signal_handler(sigcode, frame):
    """Installed for CAUGHT_SIGNALS: abort with status -<signal number>."""
    # Equivalent to sys.exit(-sigcode).
    raise SystemExit(-sigcode)
+
def desired_project_uuid(api_client, project_uuid, num_retries):
    """Resolve the destination project for an upload.

    project_uuid may be empty/None (use the current user's home project),
    a user UUID, or a group UUID.  Returns the resolved owner UUID.
    Raises ValueError when the argument matches neither UUID pattern.
    """
    if not project_uuid:
        query = api_client.users().current()
    elif arvados.util.user_uuid_pattern.match(project_uuid):
        query = api_client.users().get(uuid=project_uuid)
    elif arvados.util.group_uuid_pattern.match(project_uuid):
        query = api_client.groups().get(uuid=project_uuid)
    else:
        raise ValueError("Not a valid project UUID: {}".format(project_uuid))
    return query.execute(num_retries=num_retries)['uuid']
+
def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
    """arv-put entry point: upload paths to Keep and print the result.

    Depending on --stream/--raw/--portable-data-hash, the printed (and
    returned) output is the manifest text, the block locators, the
    collection UUID, or the portable data hash.  Exits nonzero on error.
    """
    global api_client

    args = parse_arguments(arguments)
    status = 0
    if api_client is None:
        api_client = arvados.api('v1')

    # Determine the name to use
    if args.name:
        if args.stream or args.raw:
            print >>stderr, "Cannot use --name with --stream or --raw"
            sys.exit(1)
        collection_name = args.name
    else:
        collection_name = "Saved at {} by {}@{}".format(
            datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S UTC"),
            pwd.getpwuid(os.getuid()).pw_name,
            socket.gethostname())

    if args.project_uuid and (args.stream or args.raw):
        print >>stderr, "Cannot use --project-uuid with --stream or --raw"
        sys.exit(1)

    # Determine the parent project
    try:
        project_uuid = desired_project_uuid(api_client, args.project_uuid,
                                            args.retries)
    except (apiclient_errors.Error, ValueError) as error:
        print >>stderr, error
        sys.exit(1)

    if args.progress:
        reporter = progress_writer(human_progress)
    elif args.batch_progress:
        reporter = progress_writer(machine_progress)
    else:
        reporter = None
    bytes_expected = expected_bytes_for(args.paths)

    resume_cache = None
    if args.resume:
        try:
            resume_cache = ResumeCache(ResumeCache.make_path(args))
        except (IOError, OSError, ValueError):
            pass  # Couldn't open cache directory/file.  Continue without it.
        except ResumeCacheConflict:
            print >>stderr, "\n".join([
                "arv-put: Another process is already uploading this data.",
                "         Use --no-resume if this is really what you want."])
            sys.exit(1)

    if resume_cache is None:
        writer = ArvPutCollectionWriter(resume_cache, reporter, bytes_expected,
                                        num_retries=args.retries)
    else:
        writer = ArvPutCollectionWriter.from_cache(
            resume_cache, reporter, bytes_expected, num_retries=args.retries)

    # Install our signal handler for each code in CAUGHT_SIGNALS, and save
    # the originals.
    orig_signal_handlers = {sigcode: signal.signal(sigcode, exit_signal_handler)
                            for sigcode in CAUGHT_SIGNALS}

    if writer.bytes_written > 0:  # We're resuming a previous upload.
        print >>stderr, "\n".join([
                "arv-put: Resuming previous upload from last checkpoint.",
                "         Use the --no-resume option to start over."])

    writer.report_progress()
    writer.do_queued_work()  # Do work resumed from cache.
    for path in args.paths:  # Copy file data to Keep.
        if os.path.isdir(path):
            writer.write_directory_tree(
                path, max_manifest_depth=args.max_manifest_depth)
        else:
            writer.start_new_stream()
            writer.write_file(path, args.filename or os.path.basename(path))
    writer.finish_current_stream()

    if args.progress:  # Print newline to split stderr from stdout for humans.
        print >>stderr

    if args.stream:
        output = writer.manifest_text()
        if args.normalize:
            # BUGFIX: CollectionReader is not imported into this namespace;
            # it must be referenced through the arvados package (a bare
            # CollectionReader raised NameError).
            output = arvados.CollectionReader(output).manifest_text(normalize=True)
    elif args.raw:
        output = ','.join(writer.data_locators())
    else:
        try:
            manifest_text = writer.manifest_text()
            if args.normalize:
                # BUGFIX: same as above -- bare CollectionReader was a NameError.
                manifest_text = arvados.CollectionReader(manifest_text).manifest_text(normalize=True)
            # Register the resulting collection in Arvados.
            collection = api_client.collections().create(
                body={
                    'owner_uuid': project_uuid,
                    'name': collection_name,
                    'manifest_text': manifest_text
                    },
                ensure_unique_name=True
                ).execute(num_retries=args.retries)

            print >>stderr, "Collection saved as '%s'" % collection['name']

            if args.portable_data_hash and 'portable_data_hash' in collection and collection['portable_data_hash']:
                output = collection['portable_data_hash']
            else:
                output = collection['uuid']

        except apiclient_errors.Error as error:
            print >>stderr, (
                "arv-put: Error creating Collection on project: {}.".format(
                    error))
            status = 1

    # Print the locator (uuid) of the new collection.
    stdout.write(output)
    if not output.endswith('\n'):
        stdout.write('\n')

    # Restore the original signal handlers before returning.
    for sigcode, orig_handler in orig_signal_handlers.items():
        signal.signal(sigcode, orig_handler)

    if status != 0:
        sys.exit(status)

    # Success: the checkpoint is no longer needed.
    if resume_cache is not None:
        resume_cache.destroy()

    return output
+
# Run as a command-line tool when executed directly.
if __name__ == '__main__':
    main()
diff --git a/sdk/python/arvados/commands/run.py b/sdk/python/arvados/commands/run.py
new file mode 100644 (file)
index 0000000..f2bf0f3
--- /dev/null
@@ -0,0 +1,316 @@
+#!/usr/bin/env python
+
+import arvados
+import arvados.commands.ws as ws
+import argparse
+import json
+import re
+import os
+import stat
+import put
+import time
+import subprocess
+import logging
+import arvados.commands._util as arv_cmd
+
+logger = logging.getLogger('arvados.arv-run')
+
+arvrun_parser = argparse.ArgumentParser(parents=[arv_cmd.retry_opt])
+arvrun_parser.add_argument('--dry-run', action="store_true", help="Print out the pipeline that would be submitted and exit")
+arvrun_parser.add_argument('--local', action="store_true", help="Run locally using arv-run-pipeline-instance")
+arvrun_parser.add_argument('--docker-image', type=str, default="arvados/jobs", help="Docker image to use, default arvados/jobs")
+arvrun_parser.add_argument('--ignore-rcode', action="store_true", help="Commands that return non-zero return codes should not be considered failed.")
+arvrun_parser.add_argument('--no-reuse', action="store_true", help="Do not reuse past jobs.")
+arvrun_parser.add_argument('--no-wait', action="store_true", help="Do not wait and display logs after submitting command, just exit.")
+arvrun_parser.add_argument('--project-uuid', type=str, help="Parent project of the pipeline")
+arvrun_parser.add_argument('--git-dir', type=str, default="", help="Git repository passed to arv-crunch-job when using --local")
+arvrun_parser.add_argument('--repository', type=str, default="arvados", help="repository field of component, default 'arvados'")
+arvrun_parser.add_argument('--script-version', type=str, default="master", help="script_version field of component, default 'master'")
+arvrun_parser.add_argument('args', nargs=argparse.REMAINDER)
+
class ArvFile(object):
    """A command-line word that refers to a file available in Keep.

    `prefix` holds any option text that preceded the file name on the
    command line (e.g. "--input=") and `fn` holds the file reference.
    """
    def __init__(self, prefix, fn):
        self.prefix, self.fn = prefix, fn
+
# Marker subclass: behaves exactly like ArvFile, but its type records that
# the referenced file is local-only and still needs to be uploaded to a
# collection before the job can run.
class UploadFile(ArvFile):
    pass
+
def is_in_collection(root, branch):
    """Determine whether a directory lies inside an arv-mount collection.

    Walks upward from `root`, accumulating the relative path in `branch`,
    looking for the ".arvados#collection" pseudo-file that arv-mount
    exposes at a collection's top level.

    Returns a (portable_data_hash, path_within_collection) tuple, or
    (None, None) if the path isn't within an arv-mount collection or an
    error occurred.
    """
    try:
        if root == "/":
            # Reached the filesystem root without finding a collection.
            return (None, None)
        fn = os.path.join(root, ".arvados#collection")
        if os.path.exists(fn):
            with open(fn, 'r') as f:
                c = json.load(f)
            return (c["portable_data_hash"], branch)
        else:
            # Not at a collection root yet: move one directory up and
            # prepend that directory name to the branch path.
            sp = os.path.split(root)
            return is_in_collection(sp[0], os.path.join(sp[1], branch))
    except (IOError, OSError):
        # BUGFIX: the original `except IOError, OSError:` is the old
        # "except TYPE, NAME" form, which caught only IOError and bound it
        # to the name OSError; the tuple form catches both as intended.
        return (None, None)
+
def determine_project(root, current_user):
    """Choose the project that should own this command's output.

    Walks upward from `root` looking for the ".arvados#project"
    pseudo-file that arv-mount exposes at a project directory's top
    level.  Returns that project's uuid if the current user can write to
    it; otherwise (cwd not inside an arv-mount project, project not
    writable, or any error) returns `current_user`.
    """
    try:
        if root == "/":
            # Reached the filesystem root without finding a project.
            return current_user
        fn = os.path.join(root, ".arvados#project")
        if os.path.exists(fn):
            with open(fn, 'r') as f:
                c = json.load(f)
            if 'writable_by' in c and current_user in c['writable_by']:
                return c["uuid"]
            else:
                # Project exists but isn't writable by this user.
                return current_user
        else:
            sp = os.path.split(root)
            return determine_project(sp[0], current_user)
    except (IOError, OSError):
        # BUGFIX: the original `except IOError, OSError:` is the old
        # "except TYPE, NAME" form, which caught only IOError and bound it
        # to the name OSError; the tuple form catches both as intended.
        return current_user
+
def statfile(prefix, fn):
    """Classify a command-line word that may name a local file or directory.

    Returns one of:
      * ArvFile    -- `fn` is a regular file (or directory) already stored
                      in an arv-mounted collection; the reference is
                      rewritten to a $(file ...) / $(dir ...) run-command
                      substitution.
      * UploadFile -- `fn` is a regular file outside any collection and
                      must be uploaded to one first.
      * prefix+fn  -- anything else (nonexistent path, special file,
                      directory outside a collection): the original word.
    """
    absfn = os.path.abspath(fn)
    if not os.path.exists(absfn):
        return prefix+fn

    mode = os.stat(absfn).st_mode
    dirname, basename = os.path.split(absfn)
    if stat.S_ISREG(mode):
        pdh, branch = is_in_collection(dirname, basename)
        if pdh:
            return ArvFile(prefix, "$(file %s/%s)" % (pdh, branch))
        # Trim the leading '/' so the path-prefix logic in main() can
        # compare and shorten relative upload paths later.
        return UploadFile(prefix, absfn[1:])
    if stat.S_ISDIR(mode):
        pdh, branch = is_in_collection(dirname, basename)
        if pdh:
            return ArvFile(prefix, "$(dir %s/%s/)" % (pdh, branch))

    return prefix+fn
+
def main(arguments=None):
    """Entry point for arv-run.

    Parses a shell-like command line (with '|', '<' and '>' operators
    quoted so they survive the local shell), uploads any referenced local
    files to a new collection, and submits a one-component pipeline
    instance that replays the command with run-command on Arvados.  With
    --dry-run, prints the pipeline that would be submitted instead.
    """
    args = arvrun_parser.parse_args(arguments)

    if len(args.args) == 0:
        arvrun_parser.print_help()
        return

    # Keep the untouched words for the pipeline description.
    starting_args = args.args

    reading_into = 2

    # Parse the command arguments into 'slots'.
    # All words following '>' are output arguments and are collected into slots[0].
    # All words following '<' are input arguments and are collected into slots[1].
    # slots[2..] store the parameters of each command in the pipeline.
    #
    # e.g. arv-run foo arg1 arg2 '|' bar arg3 arg4 '<' input1 input2 input3 '>' output.txt
    # will be parsed into:
    #   [['output.txt'],
    #    ['input1', 'input2', 'input3'],
    #    ['foo', 'arg1', 'arg2'],
    #    ['bar', 'arg3', 'arg4']]
    slots = [[], [], []]
    for c in args.args:
        if c.startswith('>'):
            reading_into = 0
            if len(c) > 1:
                # '>file' form: the file name is attached to the operator.
                slots[reading_into].append(c[1:])
        elif c.startswith('<'):
            reading_into = 1
            if len(c) > 1:
                slots[reading_into].append(c[1:])
        elif c == '|':
            # Start collecting the next command in the pipeline.
            reading_into = len(slots)
            slots.append([])
        else:
            slots[reading_into].append(c)

    if slots[0] and len(slots[0]) > 1:
        logger.error("Can only specify a single stdout file (run-command substitutions are permitted)")
        return

    if not args.dry_run:
        api = arvados.api('v1')
        if args.project_uuid:
            project = args.project_uuid
        else:
            project = determine_project(os.getcwd(), api.users().current().execute()["uuid"])

    # Identify input files.  Look at each parameter and test to see if there is
    # a file by that name.  This uses 'patterns' to look for within
    # command line arguments, such as --foo=file.txt or -lfile.txt
    patterns = [re.compile("([^=]+=)(.*)"),
                re.compile("(-[A-Za-z])(.+)")]
    for j, command in enumerate(slots[1:]):
        for i, a in enumerate(command):
            if j > 0 and i == 0:
                # j == 0 is stdin, j > 0 is commands
                # always skip program executable (i == 0) in commands
                pass
            elif a.startswith('\\'):
                # if it starts with a \ then don't do any interpretation
                command[i] = a[1:]
            else:
                # See if it looks like a file
                command[i] = statfile('', a)

                # If a file named command[i] was found, it would now be an
                # ArvFile or UploadFile.  If command[i] is a basestring, that
                # means it doesn't correspond exactly to a file, so do some
                # pattern matching.
                if isinstance(command[i], basestring):
                    for p in patterns:
                        m = p.match(a)
                        if m:
                            command[i] = statfile(m.group(1), m.group(2))
                            break

    files = [c for command in slots[1:] for c in command if isinstance(c, UploadFile)]
    if len(files) > 0:
        # Find the smallest path prefix that includes all the files that need to be uploaded.
        # This starts at the root and iteratively removes common parent directory prefixes
        # until all file paths no longer have a common parent.
        pathprefix = "/"
        n = True
        while n:
            pathstep = None
            for c in files:
                if pathstep is None:
                    sp = c.fn.split('/')
                    if len(sp) < 2:
                        # no parent directories left
                        n = False
                        break
                    # path step takes next directory
                    pathstep = sp[0] + "/"
                else:
                    # check if pathstep is common prefix for all files
                    if not c.fn.startswith(pathstep):
                        n = False
                        break
            if n:
                # pathstep is common parent directory for all files, so remove the prefix
                # from each path
                pathprefix += pathstep
                for c in files:
                    c.fn = c.fn[len(pathstep):]

        orgdir = os.getcwd()
        os.chdir(pathprefix)

        print("Upload local files: \"%s\"" % '" "'.join([c.fn for c in files]))

        if args.dry_run:
            print("$(input) is %s" % pathprefix.rstrip('/'))
            pdh = "$(input)"
        else:
            files = sorted(files, key=lambda x: x.fn)
            collection = arvados.CollectionWriter(api, num_retries=args.retries)
            stream = None
            for f in files:
                sp = os.path.split(f.fn)
                if sp[0] != stream:
                    # Files are sorted, so all files sharing a directory are
                    # written into the same manifest stream.
                    stream = sp[0]
                    collection.start_new_stream(stream)
                collection.write_file(f.fn, sp[1])
            item = api.collections().create(body={"owner_uuid": project, "manifest_text": collection.manifest_text()}).execute()
            pdh = item["portable_data_hash"]
            print("Uploaded to %s" % item["uuid"])

        # Rewrite each uploaded file as a run-command $(file ...) reference
        # into the new collection.
        for c in files:
            c.fn = "$(file %s/%s)" % (pdh, c.fn)

        os.chdir(orgdir)

    # Flatten ArvFile/UploadFile objects back into plain strings.
    for i in xrange(1, len(slots)):
        slots[i] = [("%s%s" % (c.prefix, c.fn)) if isinstance(c, ArvFile) else c for c in slots[i]]

    component = {
        "script": "run-command",
        "script_version": args.script_version,
        "repository": args.repository,
        "script_parameters": {
        },
        "runtime_constraints": {
            "docker_image": args.docker_image
        }
    }

    # A '--' inside a command introduces a "foreach" group: the words after
    # it become a task-fanout script parameter ('\--' escapes a literal --).
    task_foreach = []
    group_parser = argparse.ArgumentParser()
    group_parser.add_argument('-b', '--batch-size', type=int)
    group_parser.add_argument('args', nargs=argparse.REMAINDER)

    for s in xrange(2, len(slots)):
        for i in xrange(0, len(slots[s])):
            if slots[s][i] == '--':
                inp = "input%i" % (s-2)
                # BUGFIX: parse the remainder of the *current* command
                # (slots[s]); the original read slots[2], so '--' groups in
                # any command after the first picked up the first command's
                # arguments instead of their own.
                groupargs = group_parser.parse_args(slots[s][i+1:])
                if groupargs.batch_size:
                    component["script_parameters"][inp] = {"value": {"batch":groupargs.args, "size":groupargs.batch_size}}
                    slots[s] = slots[s][0:i] + [{"foreach": inp, "command": "$(%s)" % inp}]
                else:
                    component["script_parameters"][inp] = groupargs.args
                    slots[s] = slots[s][0:i] + ["$(%s)" % inp]
                task_foreach.append(inp)
                break
            if slots[s][i] == '\--':
                slots[s][i] = '--'

    if slots[0]:
        component["script_parameters"]["task.stdout"] = slots[0][0]
    if slots[1]:
        task_foreach.append("stdin")
        component["script_parameters"]["stdin"] = slots[1]
        component["script_parameters"]["task.stdin"] = "$(stdin)"

    if task_foreach:
        component["script_parameters"]["task.foreach"] = task_foreach

    component["script_parameters"]["command"] = slots[2:]
    if args.ignore_rcode:
        component["script_parameters"]["task.ignore_rcode"] = args.ignore_rcode

    pipeline = {
        "name": "arv-run " + " | ".join([s[0] for s in slots[2:]]),
        "description": "@" + " ".join(starting_args) + "@",
        "components": {
            "command": component
        },
        "state": "RunningOnClient" if args.local else "RunningOnServer"
    }

    if args.dry_run:
        print(json.dumps(pipeline, indent=4))
    else:
        pipeline["owner_uuid"] = project
        pi = api.pipeline_instances().create(body=pipeline, ensure_unique_name=True).execute()
        print("Running pipeline %s" % pi["uuid"])

        if args.local:
            subprocess.call(["arv-run-pipeline-instance", "--instance", pi["uuid"], "--run-jobs-here"] + (["--no-reuse"] if args.no_reuse else []))
        elif not args.no_wait:
            # Stream the pipeline's logs until it reaches a terminal state.
            ws.main(["--pipeline", pi["uuid"]])

        pi = api.pipeline_instances().get(uuid=pi["uuid"]).execute()
        print("Pipeline is %s" % pi["state"])
        if "output_uuid" in pi["components"]["command"]:
            print("Output is %s" % pi["components"]["command"]["output_uuid"])
        else:
            print("No output")
+
# Allow running this module directly as the arv-run command line tool.
if __name__ == '__main__':
    main()
diff --git a/sdk/python/arvados/commands/ws.py b/sdk/python/arvados/commands/ws.py
new file mode 100644 (file)
index 0000000..04e3f64
--- /dev/null
@@ -0,0 +1,102 @@
+#!/usr/bin/env python
+
+import sys
+import logging
+import argparse
+import arvados
+import json
+from arvados.events import subscribe
+import signal
+
def main(arguments=None):
    """arv-ws entry point: stream Arvados log-table events to stdout.

    With --pipeline/--job, prints stderr/stdout log text from the given
    pipeline (and its component jobs) or job and exits when the watched
    object reaches a terminal state; otherwise dumps each matching event
    record as JSON.
    """
    logger = logging.getLogger('arvados.arv-ws')

    parser = argparse.ArgumentParser()
    parser.add_argument('-u', '--uuid', type=str, default="", help="Filter events on object_uuid")
    parser.add_argument('-f', '--filters', type=str, default="", help="Arvados query filter to apply to log events (JSON encoded)")

    group = parser.add_mutually_exclusive_group()
    group.add_argument('--poll-interval', default=15, type=int, help="If websockets is not available, specify the polling interval, default is every 15 seconds")
    group.add_argument('--no-poll', action='store_false', dest='poll_interval', help="Do not poll if websockets are not available, just fail")

    group = parser.add_mutually_exclusive_group()
    group.add_argument('-p', '--pipeline', type=str, default="", help="Supply pipeline uuid, print log output from pipeline and its jobs")
    group.add_argument('-j', '--job', type=str, default="", help="Supply job uuid, print log output from jobs")

    args = parser.parse_args(arguments)

    # Shared with the nested callbacks below, which rebind them (so they
    # are declared global rather than merely closed over).
    global filters
    global known_component_jobs
    global ws

    filters = []
    known_component_jobs = set()
    ws = None

    def update_subscribed_components(components):
        # Keep the event subscription in sync with the set of jobs
        # currently attached to the watched pipeline's components.
        global known_component_jobs
        global filters
        pipeline_jobs = set()
        for c in components:
            if "job" in components[c]:
                pipeline_jobs.add(components[c]["job"]["uuid"])
        if known_component_jobs != pipeline_jobs:
            # Replace the old subscription with one covering the pipeline
            # plus all of its current jobs.
            ws.unsubscribe(filters)
            filters = [['object_uuid', 'in', [args.pipeline] + list(pipeline_jobs)]]
            ws.subscribe([['object_uuid', 'in', [args.pipeline] + list(pipeline_jobs)]])
            known_component_jobs = pipeline_jobs

    api = arvados.api('v1', cache=False)

    if args.uuid:
        filters += [ ['object_uuid', '=', args.uuid] ]

    if args.filters:
        filters += json.loads(args.filters)

    if args.job:
        filters += [ ['object_uuid', '=', args.job] ]

    if args.pipeline:
        filters += [ ['object_uuid', '=', args.pipeline] ]

    def on_message(ev):
        # Invoked (possibly from a background thread) for every event
        # delivered by the websocket or polling client.
        global filters
        global ws

        logger.debug(ev)
        if 'event_type' in ev and (args.pipeline or args.job):
            if ev['event_type'] in ('stderr', 'stdout'):
                # Log text from a watched job: relay it verbatim.
                sys.stdout.write(ev["properties"]["text"])
            elif ev["event_type"] in ("create", "update"):
                if ev["object_kind"] == "arvados#pipelineInstance":
                    update_subscribed_components(ev["properties"]["new_attributes"]["components"])

                # Stop streaming once the watched object reaches a
                # terminal state.
                if ev["object_kind"] == "arvados#pipelineInstance" and args.pipeline:
                    if ev["properties"]["new_attributes"]["state"] in ("Complete", "Failed", "Paused"):
                        ws.close()

                if ev["object_kind"] == "arvados#job" and args.job:
                    if ev["properties"]["new_attributes"]["state"] in ("Complete", "Failed", "Cancelled"):
                        ws.close()
        elif 'status' in ev and ev['status'] == 200:
            # Subscription acknowledgement; nothing to print.
            pass
        else:
            print json.dumps(ev)

    try:
        ws = subscribe(arvados.api('v1', cache=False), filters, on_message, poll_fallback=args.poll_interval)
        if ws:
            if args.pipeline:
                # Seed the subscription with the pipeline's current jobs,
                # and bail out immediately if it has already finished.
                c = api.pipeline_instances().get(uuid=args.pipeline).execute()
                update_subscribed_components(c["components"])
                if c["state"] in ("Complete", "Failed", "Paused"):
                    ws.close()
            ws.run_forever()
    except KeyboardInterrupt:
        pass
    except Exception as e:
        logger.error(e)
    finally:
        if ws:
            ws.close()
diff --git a/sdk/python/arvados/config.py b/sdk/python/arvados/config.py
new file mode 100644 (file)
index 0000000..a0c3cc6
--- /dev/null
@@ -0,0 +1,53 @@
+# config.py - configuration settings and global variables for Arvados clients
+#
+# Arvados configuration settings are taken from $HOME/.config/arvados.
+# Environment variables override settings in the config file.
+
+import os
+import re
+
+_settings = None
+if os.environ.get('HOME') is not None:
+    default_config_file = os.environ['HOME'] + '/.config/arvados/settings.conf'
+else:
+    default_config_file = ''
+
+EMPTY_BLOCK_LOCATOR = 'd41d8cd98f00b204e9800998ecf8427e+0'
+
def initialize(config_file=default_config_file):
    """(Re)load client settings into the module-level cache.

    Reads `config_file` if it exists (silently skipped when unreadable),
    then lets any ARVADOS_* environment variables override file values.
    """
    global _settings
    try:
        _settings = load(config_file)
    except IOError:
        # No usable config file: fall back to environment variables only.
        _settings = {}

    for var, val in os.environ.items():
        if var.startswith('ARVADOS_'):
            _settings[var] = val
+
def load(config_file):
    """Parse a settings.conf-style file into a dict.

    Blank lines and lines whose first non-whitespace character is '#' are
    ignored; every other line must be KEY=VALUE.  Only the first '=' is
    treated as the separator, so values may themselves contain '='
    (e.g. base64-encoded tokens).

    Raises IOError if the file cannot be opened.
    """
    cfg = {}
    with open(config_file, "r") as f:
        for config_line in f:
            if re.match(r'^\s*$', config_line):
                continue
            if re.match(r'^\s*#', config_line):
                continue
            # BUGFIX: maxsplit must be 1, not 2 -- with 2, a value
            # containing '=' produced a three-element list and crashed the
            # two-name unpacking below.
            var, val = config_line.rstrip().split('=', 1)
            cfg[var] = val
    return cfg
+
def flag_is_true(key):
    """Return True when setting `key` holds an affirmative flag value."""
    return get(key, '').lower() in ('1', 't', 'true', 'y', 'yes')
+
def get(key, default_val=None):
    # Look up a single setting, returning default_val when it is unset.
    return settings().get(key, default_val)
+
def settings():
    # Return the settings dict, loading the configuration lazily on first
    # use so merely importing this module has no filesystem side effects.
    if _settings is None:
        initialize()
    return _settings
diff --git a/sdk/python/arvados/errors.py b/sdk/python/arvados/errors.py
new file mode 100644 (file)
index 0000000..4740a2d
--- /dev/null
@@ -0,0 +1,33 @@
+# errors.py - Arvados-specific exceptions.
+
+import json
+from apiclient import errors as apiclient_errors
+
class ApiError(apiclient_errors.HttpError):
    """HttpError specialized to surface the 'errors' list from an Arvados
    JSON error response body as the human-readable reason."""
    def _get_reason(self):
        try:
            error_list = json.loads(self.content)['errors']
            return '; '.join(error_list)
        except (KeyError, TypeError, ValueError):
            # Body wasn't the expected JSON shape; use the generic reason.
            return super(ApiError, self)._get_reason()
+
+
# Exception types raised by the Arvados client library.  Several of these
# deliberately reuse builtin exception names (SyntaxError, AssertionError,
# NotImplementedError); the shadowing is confined to this module's
# namespace and callers reference them as arvados.errors.<Name>.
class ArgumentError(Exception):
    pass
class SyntaxError(Exception):
    pass
class AssertionError(Exception):
    pass
class CommandFailedError(Exception):
    pass
class KeepReadError(Exception):
    pass
class KeepWriteError(Exception):
    pass
class NotFoundError(KeepReadError):
    # A missing block is a special case of a Keep read failure.
    pass
class NotImplementedError(Exception):
    pass
class NoKeepServersError(Exception):
    pass
class StaleWriterStateError(Exception):
    # NOTE(review): presumably raised when a resumed writer's cached state
    # no longer matches reality -- confirm against arv-put's resume logic.
    pass
diff --git a/sdk/python/arvados/events.py b/sdk/python/arvados/events.py
new file mode 100644 (file)
index 0000000..d1abc0f
--- /dev/null
@@ -0,0 +1,128 @@
+from ws4py.client.threadedclient import WebSocketClient
+import threading
+import json
+import os
+import time
+import ssl
+import re
+import config
+import logging
+import arvados
+
+_logger = logging.getLogger('arvados.events')
+
class EventClient(WebSocketClient):
    """Websocket client for the Arvados event bus.

    Connects to the API server's websocket endpoint, subscribes with the
    given log-table filters, and invokes `on_event` with each decoded
    JSON message.
    """
    def __init__(self, url, filters, on_event):
        # Honor ARVADOS_API_HOST_INSECURE the same way the API client
        # does: skip certificate verification only when explicitly enabled.
        if re.match(r'(?i)^(true|1|yes)$',
                    config.get('ARVADOS_API_HOST_INSECURE', 'no')):
            ssl_options = {'cert_reqs': ssl.CERT_NONE}
        else:
            ssl_options = {'cert_reqs': ssl.CERT_REQUIRED}
        super(EventClient, self).__init__(url, ssl_options=ssl_options)
        self.filters = filters
        self.on_event = on_event

    def opened(self):
        # Called by ws4py once the connection is established.
        self.subscribe(self.filters)

    def received_message(self, m):
        self.on_event(json.loads(str(m)))

    def close_connection(self):
        """Best-effort teardown of the underlying socket."""
        # BUGFIX: 'socket' was never imported at module level, so the
        # NameError raised below was silently swallowed by the bare except
        # and the socket was never actually shut down.
        import socket
        try:
            self.sock.shutdown(socket.SHUT_RDWR)
            self.sock.close()
        except Exception:
            # The socket may already be closed; ignore teardown errors
            # (but no longer swallow KeyboardInterrupt/SystemExit).
            pass

    def subscribe(self, filters, last_log_id=None):
        """Ask the server to stream events matching `filters`.

        If `last_log_id` is given, matching log events with ids greater
        than it are replayed before new events are streamed.
        """
        m = {"method": "subscribe", "filters": filters}
        if last_log_id is not None:
            m["last_log_id"] = last_log_id
        self.send(json.dumps(m))

    def unsubscribe(self, filters):
        """Cancel a previous subscription with the same filters."""
        self.send(json.dumps({"method": "unsubscribe", "filters": filters}))
+
class PollClient(threading.Thread):
    """Background thread that emulates the websocket event stream by
    periodically polling the logs API with the subscribed filters.

    Exposes the same subscribe/unsubscribe/run_forever/close surface as
    EventClient so callers can use either interchangeably.
    """
    def __init__(self, api, filters, on_event, poll_time):
        super(PollClient, self).__init__()
        self.api = api
        # self.filters is a list of filter lists, one per subscription.
        if filters:
            self.filters = [filters]
        else:
            self.filters = [[]]
        self.on_event = on_event
        self.poll_time = poll_time  # seconds between polling rounds
        self.stop = threading.Event()

    def run(self):
        # Find the newest existing log id so only events that arrive after
        # the subscription was established get reported.
        self.id = 0
        for f in self.filters:
            items = self.api.logs().list(limit=1, order="id desc", filters=f).execute()['items']
            if items:
                if items[0]['id'] > self.id:
                    self.id = items[0]['id']

        # Mimic the websocket protocol's subscription acknowledgement.
        self.on_event({'status': 200})

        while not self.stop.isSet():
            # Deliver, in id order, every log row newer than the last one
            # seen, across all active subscriptions.
            max_id = self.id
            for f in self.filters:
                items = self.api.logs().list(order="id asc", filters=f+[["id", ">", str(self.id)]]).execute()['items']
                for i in items:
                    if i['id'] > max_id:
                        max_id = i['id']
                    self.on_event(i)
            self.id = max_id
            self.stop.wait(self.poll_time)

    def run_forever(self):
        # Provided for interface parity with EventClient: block the caller
        # until close() sets the stop event.
        self.stop.wait()

    def close(self):
        # Signal the polling loop to exit, then wait for it.
        self.stop.set()
        try:
            self.join()
        except RuntimeError:
            # "join() raises a RuntimeError if an attempt is made to join the
            # current thread as that would cause a deadlock. It is also an
            # error to join() a thread before it has been started and attempts
            # to do so raises the same exception."
            pass

    def subscribe(self, filters):
        # Acknowledge like the websocket would, then include the new
        # filter set in subsequent polling rounds.
        self.on_event({'status': 200})
        self.filters.append(filters)

    def unsubscribe(self, filters):
        # Drop the first subscription whose filters match exactly.
        del self.filters[self.filters.index(filters)]
+
+
def subscribe(api, filters, on_event, poll_fallback=15):
    """Start following the Arvados log table.

    api: a freshly created API client (e.g. arvados.api(cache=False)) not
        shared with the caller, as a background thread may use it.
    filters: initial subscription filters.
    on_event: callback invoked with each received message.
    poll_fallback: seconds between polls when websockets are unavailable;
        pass False to return None instead of falling back to polling.

    Returns an EventClient, a PollClient, or None.
    """
    if 'websocketUrl' in api._rootDesc:
        ws = None
        try:
            url = "{}?api_token={}".format(api._rootDesc['websocketUrl'], api.api_token)
            ws = EventClient(url, filters, on_event)
            ws.connect()
            return ws
        except Exception as e:
            # Fall through to the polling path below.
            _logger.warn("Got exception %s trying to connect to websockets at %s" % (e, api._rootDesc['websocketUrl']))
            if ws:
                ws.close_connection()
    if not poll_fallback:
        _logger.error("Websockets not available")
        return None
    _logger.warn("Websockets not available, falling back to log table polling")
    poller = PollClient(api, filters, on_event, poll_fallback)
    poller.start()
    return poller
diff --git a/sdk/python/arvados/keep.py b/sdk/python/arvados/keep.py
new file mode 100644 (file)
index 0000000..f4c8596
--- /dev/null
@@ -0,0 +1,742 @@
+import gflags
+import logging
+import os
+import pprint
+import sys
+import types
+import subprocess
+import json
+import UserDict
+import re
+import hashlib
+import string
+import bz2
+import zlib
+import fcntl
+import time
+import threading
+import timer
+import datetime
+import ssl
+import socket
+import requests
+
+import arvados
+import arvados.config as config
+import arvados.errors
+import arvados.retry as retry
+import arvados.util
+
+_logger = logging.getLogger('arvados.keep')
+global_client_object = None
+
class KeepLocator(object):
    """Parsed form of a Keep block locator string.

    A locator looks like "<md5sum>[+<size>][+<hint>...]"; an "A..." hint
    carries the permission signature and its hex expiry timestamp.
    """
    EPOCH_DATETIME = datetime.datetime.utcfromtimestamp(0)
    HINT_RE = re.compile(r'^[A-Z][A-Za-z0-9@_-]+$')

    def __init__(self, locator_str):
        self.hints = []
        self._perm_sig = None
        self._perm_expiry = None
        pieces = iter(locator_str.split('+'))
        self.md5sum = next(pieces)
        try:
            self.size = int(next(pieces))
        except StopIteration:
            # Locator had no size part.
            self.size = None
        for hint in pieces:
            if self.HINT_RE.match(hint) is None:
                raise ValueError("unrecognized hint data {}".format(hint))
            elif hint.startswith('A'):
                self.parse_permission_hint(hint)
            else:
                self.hints.append(hint)

    def __str__(self):
        # Reassemble the canonical '+'-joined locator, omitting parts that
        # are unset.
        return '+'.join(
            str(s) for s in [self.md5sum, self.size,
                             self.permission_hint()] + self.hints
            if s is not None)

    def _make_hex_prop(name, length):
        # Build and return a new property with the given name that
        # must be a hex string of the given length.
        data_name = '_{}'.format(name)
        def getter(self):
            return getattr(self, data_name)
        def setter(self, hex_str):
            if not arvados.util.is_hex(hex_str, length):
                raise ValueError("{} must be a {}-digit hex string: {}".
                                 format(name, length, hex_str))
            setattr(self, data_name, hex_str)
        return property(getter, setter)

    md5sum = _make_hex_prop('md5sum', 32)
    perm_sig = _make_hex_prop('perm_sig', 40)

    @property
    def perm_expiry(self):
        return self._perm_expiry

    @perm_expiry.setter
    def perm_expiry(self, value):
        if not arvados.util.is_hex(value, 1, 8):
            raise ValueError(
                "permission timestamp must be a hex Unix timestamp: {}".
                format(value))
        self._perm_expiry = datetime.datetime.utcfromtimestamp(int(value, 16))

    def permission_hint(self):
        """Return the A<sig>@<expiry> hint string, or None when unsigned."""
        data = [self.perm_sig, self.perm_expiry]
        if None in data:
            return None
        data[1] = int((data[1] - self.EPOCH_DATETIME).total_seconds())
        return "A{}@{:08x}".format(*data)

    def parse_permission_hint(self, s):
        """Set perm_sig and perm_expiry from an A<sig>@<expiry> hint."""
        try:
            self.perm_sig, self.perm_expiry = s[1:].split('@', 1)
        except (IndexError, ValueError):
            # BUGFIX: a hint with no '@' makes split() return a single
            # element, so the unpacking raises ValueError (not IndexError);
            # the friendly message below was previously unreachable.
            raise ValueError("bad permission hint {}".format(s))

    def permission_expired(self, as_of_dt=None):
        """Return True if the permission hint expired before `as_of_dt`
        (default: now).  Unsigned locators never expire."""
        if self.perm_expiry is None:
            return False
        elif as_of_dt is None:
            as_of_dt = datetime.datetime.now()
        return self.perm_expiry <= as_of_dt
+
+
class Keep(object):
    """Simple interface to a global KeepClient object.

    THIS CLASS IS DEPRECATED.  Please instantiate your own KeepClient with your
    own API client.  The global KeepClient will build an API client from the
    current Arvados configuration, which may not match the one you built.
    """
    _last_key = None

    @classmethod
    def global_client_object(cls):
        global global_client_object
        # KeepClient used to pick up configuration changes at runtime.
        # Simulate that by rebuilding the cached client whenever any of the
        # relevant settings differs from those used to build it.
        key = (config.get('ARVADOS_API_HOST'),
               config.get('ARVADOS_API_TOKEN'),
               config.flag_is_true('ARVADOS_API_HOST_INSECURE'),
               config.get('ARVADOS_KEEP_PROXY'),
               config.get('ARVADOS_EXTERNAL_CLIENT') == 'true',
               os.environ.get('KEEP_LOCAL_STORE'))
        if global_client_object is None or key != cls._last_key:
            global_client_object = KeepClient()
            cls._last_key = key
        return global_client_object

    @staticmethod
    def get(locator, **kwargs):
        return Keep.global_client_object().get(locator, **kwargs)

    @staticmethod
    def put(data, **kwargs):
        return Keep.global_client_object().put(data, **kwargs)
+
class KeepBlockCache(object):
    """Thread-safe, size-capped, MRU-first in-memory cache of Keep blocks."""

    # Default RAM cache is 256MiB
    def __init__(self, cache_max=(256 * 1024 * 1024)):
        self.cache_max = cache_max
        self._cache = []  # CacheSlot list, most recently used first
        self._cache_lock = threading.Lock()

    class CacheSlot(object):
        """Holder for one block; readers wait while a writer fetches it."""
        def __init__(self, locator):
            self.locator = locator
            self.ready = threading.Event()
            self.content = None

        def get(self):
            # Block until set() has filled in the content.
            self.ready.wait()
            return self.content

        def set(self, value):
            self.content = value
            self.ready.set()

        def size(self):
            return 0 if self.content is None else len(self.content)

    def cap_cache(self):
        '''Cap the cache size to self.cache_max'''
        with self._cache_lock:
            # Drop slots where ready.is_set() but content is None (that
            # means there was an error reading the block).
            self._cache = [c for c in self._cache
                           if not (c.ready.is_set() and c.content is None)]
            total = sum(slot.size() for slot in self._cache)
            while self._cache and total > self.cache_max:
                # Evict the least recently used slot that is ready.
                for i in range(len(self._cache) - 1, -1, -1):
                    if self._cache[i].ready.is_set():
                        del self._cache[i]
                        break
                else:
                    # BUGFIX: no evictable (ready) slot exists; the original
                    # recomputed an unchanged total forever.  Stop instead.
                    break
                total = sum(slot.size() for slot in self._cache)

    def reserve_cache(self, locator):
        '''Reserve a cache slot for the specified locator,
        or return the existing slot.'''
        with self._cache_lock:
            for i, slot in enumerate(self._cache):
                if slot.locator == locator:
                    if i != 0:
                        # Cache hit: move the slot to the front (MRU).
                        del self._cache[i]
                        self._cache.insert(0, slot)
                    return slot, False

            # Not cached: insert a pending slot the caller must fill.
            slot = KeepBlockCache.CacheSlot(locator)
            self._cache.insert(0, slot)
            return slot, True
+
+class KeepClient(object):
+    """Client for storing data blocks in, and retrieving them from, Keep.
+
+    Talks either to a set of Keep services discovered via the API server,
+    to a single Keep proxy, or (for testing) to a local directory store.
+    """
+
+    # Default Keep server connection timeout:  2 seconds
+    # Default Keep server read timeout:      300 seconds
+    # Default Keep proxy connection timeout:  20 seconds
+    # Default Keep proxy read timeout:       300 seconds
+    # Each value is a (connection_timeout, read_timeout) tuple as accepted
+    # by the 'requests' library.
+    DEFAULT_TIMEOUT = (2, 300)
+    DEFAULT_PROXY_TIMEOUT = (20, 300)
+
+    class ThreadLimiter(object):
+        """
+        Limit the number of threads running at a given time to
+        {desired successes} minus {successes reported}. When successes
+        reported == desired, wake up the remaining threads and tell
+        them to quit.
+
+        Should be used in a "with" block.
+        """
+        def __init__(self, todo):
+            # todo: number of successful replica writes still wanted.
+            self._todo = todo
+            self._done = 0
+            self._response = None
+            # Semaphore admits at most `todo` writer threads concurrently.
+            self._todo_lock = threading.Semaphore(todo)
+            # Protects _done and _response.
+            self._done_lock = threading.Lock()
+
+        def __enter__(self):
+            self._todo_lock.acquire()
+            return self
+
+        def __exit__(self, type, value, traceback):
+            self._todo_lock.release()
+
+        def shall_i_proceed(self):
+            """
+            Return true if the current thread should do stuff. Return
+            false if the current thread should just stop.
+            """
+            with self._done_lock:
+                return (self._done < self._todo)
+
+        def save_response(self, response_body, replicas_stored):
+            """
+            Records a response body (a locator, possibly signed) returned by
+            the Keep server.  It is not necessary to save more than
+            one response, since we presume that any locator returned
+            in response to a successful request is valid.
+            """
+            with self._done_lock:
+                # replicas_stored may be >1 when a proxy stored multiple
+                # copies on our behalf (see KeepWriterThread).
+                self._done += replicas_stored
+                self._response = response_body
+
+        def response(self):
+            """
+            Returns the body from the response to a PUT request.
+            """
+            with self._done_lock:
+                return self._response
+
+        def done(self):
+            """
+            Return how many successes were reported.
+            """
+            with self._done_lock:
+                return self._done
+
+
+    class KeepService(object):
+        # Make requests to a single Keep service, and track results.
+        #
+        # success_flag is a tri-state: None = no definitive result yet,
+        # True = a request succeeded, False = permanent failure (see
+        # retry.check_http_response_success for the classification).
+        HTTP_ERRORS = (requests.exceptions.RequestException,
+                       socket.error, ssl.SSLError)
+
+        def __init__(self, root, **headers):
+            # root: base URL of the service; headers: sent on every request.
+            self.root = root
+            self.last_result = None
+            self.success_flag = None
+            self.get_headers = {'Accept': 'application/octet-stream'}
+            self.get_headers.update(headers)
+            self.put_headers = headers
+
+        def usable(self):
+            # Usable unless a permanent failure has been recorded.
+            return self.success_flag is not False
+
+        def finished(self):
+            # True once any request produced a definitive result.
+            return self.success_flag is not None
+
+        def last_status(self):
+            # HTTP status of the last response, or None if the last
+            # attempt raised an exception (last_result has no status_code).
+            try:
+                return self.last_result.status_code
+            except AttributeError:
+                return None
+
+        def get(self, locator, timeout=None):
+            # locator is a KeepLocator object.
+            # Returns the block content on success (verified against the
+            # locator's md5), or None on any failure.  Network-level errors
+            # leave success_flag as None, so the service stays usable for
+            # a later retry.
+            url = self.root + str(locator)
+            _logger.debug("Request: GET %s", url)
+            try:
+                with timer.Timer() as t:
+                    result = requests.get(url.encode('utf-8'),
+                                          headers=self.get_headers,
+                                          timeout=timeout)
+            except self.HTTP_ERRORS as e:
+                _logger.debug("Request fail: GET %s => %s: %s",
+                              url, type(e), str(e))
+                self.last_result = e
+            else:
+                self.last_result = result
+                self.success_flag = retry.check_http_response_success(result)
+                content = result.content
+                _logger.info("%s response: %s bytes in %s msec (%.3f MiB/sec)",
+                             self.last_status(), len(content), t.msecs,
+                             (len(content)/(1024.0*1024))/t.secs)
+                if self.success_flag:
+                    resp_md5 = hashlib.md5(content).hexdigest()
+                    if resp_md5 == locator.md5sum:
+                        return content
+                    # Checksum mismatch: log and fall through to return None
+                    # (success_flag stays True, so the HTTP layer succeeded).
+                    _logger.warning("Checksum fail: md5(%s) = %s",
+                                    url, resp_md5)
+            return None
+
+        def put(self, hash_s, body, timeout=None):
+            # Returns success_flag after the attempt: True on HTTP success,
+            # None on a retryable/exception failure, False on permanent
+            # failure.
+            url = self.root + hash_s
+            _logger.debug("Request: PUT %s", url)
+            try:
+                result = requests.put(url.encode('utf-8'),
+                                      data=body,
+                                      headers=self.put_headers,
+                                      timeout=timeout)
+            except self.HTTP_ERRORS as e:
+                _logger.debug("Request fail: PUT %s => %s: %s",
+                              url, type(e), str(e))
+                self.last_result = e
+            else:
+                self.last_result = result
+                self.success_flag = retry.check_http_response_success(result)
+            return self.success_flag
+
+
+    class KeepWriterThread(threading.Thread):
+        """
+        Write a blob of data to the given Keep server. On success, call
+        save_response() of the given ThreadLimiter to save the returned
+        locator.
+        """
+        def __init__(self, keep_service, **kwargs):
+            # Expected kwargs (see KeepClient.put): data, data_hash,
+            # service_root, thread_limiter, and optionally timeout.
+            super(KeepClient.KeepWriterThread, self).__init__()
+            self.service = keep_service
+            self.args = kwargs
+            self._success = False
+
+        def success(self):
+            return self._success
+
+        def run(self):
+            with self.args['thread_limiter'] as limiter:
+                if not limiter.shall_i_proceed():
+                    # My turn arrived, but the job has been done without
+                    # me.
+                    return
+                self.run_with_limiter(limiter)
+
+        def run_with_limiter(self, limiter):
+            # Skip services that already reported a definitive result in
+            # an earlier retry round.
+            if self.service.finished():
+                return
+            _logger.debug("KeepWriterThread %s proceeding %s %s",
+                          str(threading.current_thread()),
+                          self.args['data_hash'],
+                          self.args['service_root'])
+            self._success = bool(self.service.put(
+                self.args['data_hash'],
+                self.args['data'],
+                timeout=self.args.get('timeout', None)))
+            status = self.service.last_status()
+            if self._success:
+                result = self.service.last_result
+                _logger.debug("KeepWriterThread %s succeeded %s %s",
+                              str(threading.current_thread()),
+                              self.args['data_hash'],
+                              self.args['service_root'])
+                # Tick the 'done' counter for the number of replica
+                # reported stored by the server, for the case that
+                # we're talking to a proxy or other backend that
+                # stores to multiple copies for us.
+                try:
+                    replicas_stored = int(result.headers['x-keep-replicas-stored'])
+                except (KeyError, ValueError):
+                    replicas_stored = 1
+                limiter.save_response(result.text.strip(), replicas_stored)
+            elif status is not None:
+                _logger.debug("Request fail: PUT %s => %s %s",
+                              self.args['data_hash'], status,
+                              self.service.last_result.text)
+
+
+    def __init__(self, api_client=None, proxy=None,
+                 timeout=DEFAULT_TIMEOUT, proxy_timeout=DEFAULT_PROXY_TIMEOUT,
+                 api_token=None, local_store=None, block_cache=None,
+                 num_retries=0):
+        """Initialize a new KeepClient.
+
+        Arguments:
+        * api_client: The API client to use to find Keep services.  If not
+          provided, KeepClient will build one from available Arvados
+          configuration.
+        * proxy: If specified, this KeepClient will send requests to this
+          Keep proxy.  Otherwise, KeepClient will fall back to the setting
+          of the ARVADOS_KEEP_PROXY configuration setting.  If you want to
+          ensure KeepClient does not use a proxy, pass in an empty string.
+        * timeout: The timeout (in seconds) for HTTP requests to Keep
+          non-proxy servers.  A tuple of two floats is interpreted as
+          (connection_timeout, read_timeout): see
+          http://docs.python-requests.org/en/latest/user/advanced/#timeouts.
+          Default: (2, 300).
+        * proxy_timeout: The timeout (in seconds) for HTTP requests to
+          Keep proxies. A tuple of two floats is interpreted as
+          (connection_timeout, read_timeout). Default: (20, 300).
+        * api_token: If you're not using an API client, but only talking
+          directly to a Keep proxy, this parameter specifies an API token
+          to authenticate Keep requests.  It is an error to specify both
+          api_client and api_token.  If you specify neither, KeepClient
+          will use one available from the Arvados configuration.
+        * local_store: If specified, this KeepClient will bypass Keep
+          services, and save data to the named directory.  If unspecified,
+          KeepClient will fall back to the setting of the $KEEP_LOCAL_STORE
+          environment variable.  If you want to ensure KeepClient does not
+          use local storage, pass in an empty string.  This is primarily
+          intended to mock a server for testing.
+        * num_retries: The default number of times to retry failed requests.
+          This will be used as the default num_retries value when get() and
+          put() are called.  Default 0.
+        """
+        # Guards (re)builds of the Keep services list; see
+        # build_services_list().
+        self.lock = threading.Lock()
+        if proxy is None:
+            proxy = config.get('ARVADOS_KEEP_PROXY')
+        if api_token is None:
+            if api_client is None:
+                api_token = config.get('ARVADOS_API_TOKEN')
+            else:
+                api_token = api_client.api_token
+        elif api_client is not None:
+            raise ValueError(
+                "can't build KeepClient with both API client and token")
+        if local_store is None:
+            local_store = os.environ.get('KEEP_LOCAL_STORE')
+
+        self.block_cache = block_cache if block_cache else KeepBlockCache()
+        self.timeout = timeout
+        self.proxy_timeout = proxy_timeout
+
+        if local_store:
+            # Bypass Keep entirely: get/put become local-file operations.
+            self.local_store = local_store
+            self.get = self.local_store_get
+            self.put = self.local_store_put
+        else:
+            self.num_retries = num_retries
+            if proxy:
+                if not proxy.endswith('/'):
+                    proxy += '/'
+                self.api_token = api_token
+                # The proxy stands in for the whole service list, and the
+                # list is static: build_services_list() never rebuilds it.
+                self._keep_services = [{
+                    'uuid': 'proxy',
+                    '_service_root': proxy,
+                    }]
+                self.using_proxy = True
+                self._static_services_list = True
+            else:
+                # It's important to avoid instantiating an API client
+                # unless we actually need one, for testing's sake.
+                if api_client is None:
+                    api_client = arvados.api('v1')
+                self.api_client = api_client
+                self.api_token = api_client.api_token
+                # Services are discovered lazily by build_services_list().
+                self._keep_services = None
+                self.using_proxy = None
+                self._static_services_list = False
+
+    def current_timeout(self):
+        """Return the appropriate timeout to use for this client: the proxy
+        timeout setting if the backend service is currently a proxy,
+        the regular timeout setting otherwise.
+
+        Both settings are (connection_timeout, read_timeout) tuples as
+        passed to __init__.
+        """
+        # TODO(twp): the timeout should be a property of a
+        # KeepService, not a KeepClient. See #4488.
+        return self.proxy_timeout if self.using_proxy else self.timeout
+
+    def build_services_list(self, force_rebuild=False):
+        """Populate self._keep_services from the API server.
+
+        No-op when a static (proxy) services list is in use, or when the
+        list is already built and force_rebuild is false.  Raises
+        NoKeepServersError if the API server reports no services.
+        """
+        if (self._static_services_list or
+              (self._keep_services and not force_rebuild)):
+            return
+        with self.lock:
+            try:
+                keep_services = self.api_client.keep_services().accessible()
+            except Exception:  # API server predates Keep services.
+                keep_services = self.api_client.keep_disks().list()
+
+            self._keep_services = keep_services.execute().get('items')
+            if not self._keep_services:
+                raise arvados.errors.NoKeepServersError()
+
+            self.using_proxy = any(ks.get('service_type') == 'proxy'
+                                   for ks in self._keep_services)
+
+            # Precompute the base URI for each service.
+            # The host is wrapped in [] (IPv6-literal URL form).
+            for r in self._keep_services:
+                r['_service_root'] = "{}://[{}]:{:d}/".format(
+                    'https' if r['service_ssl_flag'] else 'http',
+                    r['service_host'],
+                    r['service_port'])
+            _logger.debug(str(self._keep_services))
+
+    def _service_weight(self, data_hash, service_uuid):
+        """Compute the weight of a Keep service endpoint for a data
+        block with a known hash.
+
+        The weight is md5(h + u) where u is the last 15 characters of
+        the service endpoint's UUID.  Returned as a hex digest string
+        (weights are compared lexicographically).
+        """
+        return hashlib.md5(data_hash + service_uuid[-15:]).hexdigest()
+
+    def weighted_service_roots(self, data_hash, force_rebuild=False):
+        """Return an array of Keep service endpoints, in the order in
+        which they should be probed when reading or writing data with
+        the given hash.
+        """
+        self.build_services_list(force_rebuild)
+
+        # Sort the available services by weight (heaviest first) for
+        # this data_hash, and return their service_roots (base URIs)
+        # in that order.  This gives every client the same probe order
+        # for a given block.
+        sorted_roots = [
+            svc['_service_root'] for svc in sorted(
+                self._keep_services,
+                reverse=True,
+                key=lambda svc: self._service_weight(data_hash, svc['uuid']))]
+        _logger.debug(data_hash + ': ' + str(sorted_roots))
+        return sorted_roots
+
+    def map_new_services(self, roots_map, md5_s, force_rebuild, **headers):
+        # roots_map is a dictionary, mapping Keep service root strings
+        # to KeepService objects.  Poll for Keep services, and add any
+        # new ones to roots_map.  Return the current list of local
+        # root strings.
+        # An Authorization header is added unless the caller supplied one.
+        headers.setdefault('Authorization', "OAuth2 %s" % (self.api_token,))
+        local_roots = self.weighted_service_roots(md5_s, force_rebuild)
+        for root in local_roots:
+            if root not in roots_map:
+                roots_map[root] = self.KeepService(root, **headers)
+        return local_roots
+
+    @staticmethod
+    def _check_loop_result(result):
+        # KeepClient RetryLoops should save results as a 2-tuple: the
+        # actual result of the request, and the number of servers available
+        # to receive the request this round.
+        # This method returns True if there's a real result, False if
+        # there are no more servers available, otherwise None.
+        # An Exception result (e.g. service discovery failed) counts as
+        # "try again" (None).
+        if isinstance(result, Exception):
+            return None
+        result, tried_server_count = result
+        if (result is not None) and (result is not False):
+            return True
+        elif tried_server_count < 1:
+            _logger.info("No more Keep services to try; giving up")
+            return False
+        else:
+            return None
+
+    @retry.retry_method
+    def get(self, loc_s, num_retries=None):
+        """Get data from Keep.
+
+        This method fetches one or more blocks of data from Keep.  It
+        sends a request each Keep service registered with the API
+        server (or the proxy provided when this client was
+        instantiated), then each service named in location hints, in
+        sequence.  As soon as one service provides the data, it's
+        returned.
+
+        Arguments:
+        * loc_s: A string of one or more comma-separated locators to fetch.
+          This method returns the concatenation of these blocks.
+        * num_retries: The number of times to retry GET requests to
+          *each* Keep server if it returns temporary failures, with
+          exponential backoff.  Note that, in each loop, the method may try
+          to fetch data from every available Keep service, along with any
+          that are named in location hints in the locator.  The default value
+          is set when the KeepClient is initialized.
+        """
+        if ',' in loc_s:
+            # NOTE(review): num_retries is not forwarded to the recursive
+            # calls, so each sub-get falls back to self.num_retries --
+            # confirm this is intended.
+            return ''.join(self.get(x) for x in loc_s.split(','))
+        locator = KeepLocator(loc_s)
+        expect_hash = locator.md5sum
+
+        slot, first = self.block_cache.reserve_cache(expect_hash)
+        if not first:
+            # Another caller already reserved this block; slot.get()
+            # presumably waits for that fetch to finish -- see CacheSlot.
+            v = slot.get()
+            return v
+
+        # See #3147 for a discussion of the loop implementation.  Highlights:
+        # * Refresh the list of Keep services after each failure, in case
+        #   it's being updated.
+        # * Retry until we succeed, we're out of retries, or every available
+        #   service has returned permanent failure.
+        hint_roots = ['http://keep.{}.arvadosapi.com/'.format(hint[2:])
+                      for hint in locator.hints if hint.startswith('K@')]
+        # Map root URLs to their KeepService objects.
+        roots_map = {root: self.KeepService(root) for root in hint_roots}
+        blob = None
+        loop = retry.RetryLoop(num_retries, self._check_loop_result,
+                               backoff_start=2)
+        for tries_left in loop:
+            try:
+                local_roots = self.map_new_services(
+                    roots_map, expect_hash,
+                    force_rebuild=(tries_left < num_retries))
+            except Exception as error:
+                loop.save_result(error)
+                continue
+
+            # Query KeepService objects that haven't returned
+            # permanent failure, in our specified shuffle order.
+            services_to_try = [roots_map[root]
+                               for root in (local_roots + hint_roots)
+                               if roots_map[root].usable()]
+            for keep_service in services_to_try:
+                blob = keep_service.get(locator, timeout=self.current_timeout())
+                if blob is not None:
+                    break
+            loop.save_result((blob, len(services_to_try)))
+
+        # Always cache the result, then return it if we succeeded.
+        # (On failure blob is None: that marks the slot as errored, and
+        # cap_cache() drops such slots.)
+        slot.set(blob)
+        self.block_cache.cap_cache()
+        if loop.success():
+            return blob
+
+        # No servers fulfilled the request.  Count how many responded
+        # "not found;" if the ratio is high enough (currently 75%), report
+        # Not Found; otherwise a generic error.
+        # Q: Including 403 is necessary for the Keep tests to continue
+        # passing, but maybe they should expect KeepReadError instead?
+        not_founds = sum(1 for ks in roots_map.values()
+                         if ks.last_status() in set([403, 404, 410]))
+        if roots_map and ((float(not_founds) / len(roots_map)) >= .75):
+            raise arvados.errors.NotFoundError(loc_s)
+        else:
+            raise arvados.errors.KeepReadError(loc_s)
+
+    @retry.retry_method
+    def put(self, data, copies=2, num_retries=None):
+        """Save data in Keep.
+
+        This method will get a list of Keep services from the API server, and
+        send the data to each one simultaneously in a new thread.  Once the
+        uploads are finished, if enough copies are saved, this method returns
+        the most recent HTTP response body.  If requests fail to upload
+        enough copies, this method raises KeepWriteError.
+
+        Arguments:
+        * data: The string of data to upload.
+        * copies: The number of copies that the user requires be saved.
+          Default 2.
+        * num_retries: The number of times to retry PUT requests to
+          *each* Keep server if it returns temporary failures, with
+          exponential backoff.  The default value is set when the
+          KeepClient is initialized.
+        """
+        data_hash = hashlib.md5(data).hexdigest()
+        if copies < 1:
+            # Nothing to store; the locator alone satisfies the request.
+            return data_hash
+
+        headers = {}
+        if self.using_proxy:
+            # Tell the proxy how many copies we want it to store
+            headers['X-Keep-Desired-Replication'] = str(copies)
+        roots_map = {}
+        # The limiter counts successful replicas across all writer threads
+        # and stops admitting new ones once `copies` is reached.
+        thread_limiter = KeepClient.ThreadLimiter(copies)
+        loop = retry.RetryLoop(num_retries, self._check_loop_result,
+                               backoff_start=2)
+        for tries_left in loop:
+            try:
+                local_roots = self.map_new_services(
+                    roots_map, data_hash,
+                    force_rebuild=(tries_left < num_retries), **headers)
+            except Exception as error:
+                loop.save_result(error)
+                continue
+
+            # Spawn one writer thread per service that has not yet
+            # reported a definitive result.
+            threads = []
+            for service_root, ks in roots_map.iteritems():
+                if ks.finished():
+                    continue
+                t = KeepClient.KeepWriterThread(
+                    ks,
+                    data=data,
+                    data_hash=data_hash,
+                    service_root=service_root,
+                    thread_limiter=thread_limiter,
+                    timeout=self.current_timeout())
+                t.start()
+                threads.append(t)
+            for t in threads:
+                t.join()
+            loop.save_result((thread_limiter.done() >= copies, len(threads)))
+
+        if loop.success():
+            return thread_limiter.response()
+        raise arvados.errors.KeepWriteError(
+            "Write fail for %s: wanted %d but wrote %d" %
+            (data_hash, copies, thread_limiter.done()))
+
+    # Local storage methods need no-op num_retries arguments to keep
+    # integration tests happy.  With better isolation they could
+    # probably be removed again.
+    def local_store_put(self, data, num_retries=0):
+        """Save `data` as a file named by its md5 in self.local_store.
+
+        Returns a locator string '<md5>+<size>'.  The write goes to a
+        .tmp file first and is renamed into place.
+        """
+        md5 = hashlib.md5(data).hexdigest()
+        locator = '%s+%d' % (md5, len(data))
+        with open(os.path.join(self.local_store, md5 + '.tmp'), 'w') as f:
+            f.write(data)
+        os.rename(os.path.join(self.local_store, md5 + '.tmp'),
+                  os.path.join(self.local_store, md5))
+        return locator
+
+    def local_store_get(self, loc_s, num_retries=0):
+        """Read the block named by locator `loc_s` from self.local_store.
+
+        Raises NotFoundError for a malformed locator.  The well-known
+        empty-block locator is answered without touching the filesystem.
+        """
+        try:
+            locator = KeepLocator(loc_s)
+        except ValueError:
+            raise arvados.errors.NotFoundError(
+                "Invalid data locator: '%s'" % loc_s)
+        if locator.md5sum == config.EMPTY_BLOCK_LOCATOR.split('+')[0]:
+            return ''
+        with open(os.path.join(self.local_store, locator.md5sum), 'r') as f:
+            return f.read()
diff --git a/sdk/python/arvados/retry.py b/sdk/python/arvados/retry.py
new file mode 100644 (file)
index 0000000..52a68fa
--- /dev/null
@@ -0,0 +1,158 @@
+#!/usr/bin/env python
+
+import functools
+import inspect
+import time
+
+from collections import deque
+
+import arvados.errors
+
+_HTTP_SUCCESSES = set(xrange(200, 300))
+_HTTP_CAN_RETRY = set([408, 409, 422, 423, 500, 502, 503, 504])
+
+class RetryLoop(object):
+    """Coordinate limited retries of code.
+
+    RetryLoop coordinates a loop that runs until it records a
+    successful result or tries too many times, whichever comes first.
+    Typical use looks like:
+
+        loop = RetryLoop(num_retries=2)
+        for tries_left in loop:
+            try:
+                result = do_something()
+            except TemporaryError as error:
+                log("error: {} ({} tries left)".format(error, tries_left))
+            else:
+                loop.save_result(result)
+        if loop.success():
+            return loop.last_result()
+    """
+    def __init__(self, num_retries, success_check=lambda r: True,
+                 backoff_start=0, backoff_growth=2, save_results=1):
+        """Construct a new RetryLoop.
+
+        Arguments:
+        * num_retries: The maximum number of times to retry the loop if it
+          doesn't succeed.  This means the loop could run at most 1+N times.
+        * success_check: This is a function that will be called each
+          time the loop saves a result.  The function should return
+          True if the result indicates loop success, False if it
+          represents a permanent failure state, and None if the loop
+          should continue.  If no function is provided, the loop will
+          end as soon as it records any result.
+        * backoff_start: The number of seconds that must pass before the
+          loop's second iteration.  Default 0, which disables all waiting.
+        * backoff_growth: The wait time multiplier after each iteration.
+          Default 2 (i.e., double the wait time each time).
+        * save_results: Specify a number to save the last N results
+          that the loop recorded.  These records are available through
+          the results attribute, oldest first.  Default 1.
+        """
+        self.tries_left = num_retries + 1
+        self.check_result = success_check
+        self.backoff_wait = backoff_start
+        self.backoff_growth = backoff_growth
+        # 0 means the first iteration starts immediately (no sleep).
+        self.next_start_time = 0
+        self.results = deque(maxlen=save_results)
+        # _running: None = not started, True = iterating, False = stopped.
+        self._running = None
+        # _success mirrors the last success_check verdict (tri-state).
+        self._success = None
+
+    def __iter__(self):
+        return self
+
+    def running(self):
+        return self._running and (self._success is None)
+
+    def next(self):
+        if self._running is None:
+            self._running = True
+        if (self.tries_left < 1) or not self.running():
+            self._running = False
+            raise StopIteration
+        else:
+            # Sleep out the remainder of the backoff window, then grow
+            # the wait for the following iteration.
+            wait_time = max(0, self.next_start_time - time.time())
+            time.sleep(wait_time)
+            self.backoff_wait *= self.backoff_growth
+        self.next_start_time = time.time() + self.backoff_wait
+        self.tries_left -= 1
+        return self.tries_left
+
+    def save_result(self, result):
+        """Record a loop result.
+
+        Save the given result, and end the loop if it indicates
+        success or permanent failure.  See __init__'s documentation
+        about success_check to learn how to make that indication.
+        """
+        if not self.running():
+            raise arvados.errors.AssertionError(
+                "recorded a loop result after the loop finished")
+        self.results.append(result)
+        self._success = self.check_result(result)
+
+    def success(self):
+        """Return the loop's end state.
+
+        Returns True if the loop obtained a successful result, False if it
+        encountered permanent failure, or else None.
+        """
+        return self._success
+
+    def last_result(self):
+        """Return the most recent result the loop recorded."""
+        try:
+            return self.results[-1]
+        except IndexError:
+            raise arvados.errors.AssertionError(
+                "queried loop results before any were recorded")
+
+
+def check_http_response_success(result):
+    """Convert a 'requests' response to a loop control flag.
+
+    Pass this method a requests.Response object.  It returns True if
+    the response indicates success, None if it indicates temporary
+    failure, and False otherwise.  You can use this as the
+    success_check for a RetryLoop.
+
+    Implementation details:
+    * Any 2xx result returns True.
+    * A select few status codes, or any malformed responses, return None.
+      422 Unprocessable Entity is in this category.  This may not meet the
+      letter of the HTTP specification, but the Arvados API server will
+      use it for various server-side problems like database connection
+      errors.
+    * Everything else returns False.  Note that this includes 1xx and
+      3xx status codes.  They don't indicate success, and you can't
+      retry those requests verbatim.
+    """
+    try:
+        # result may be any object (e.g. an exception saved as a result);
+        # anything without a usable status_code counts as "retry".
+        status = result.status_code
+    except Exception:
+        return None
+    if status in _HTTP_SUCCESSES:
+        return True
+    elif status in _HTTP_CAN_RETRY:
+        return None
+    elif 100 <= status < 600:
+        return False
+    else:
+        return None  # Get well soon, server.
+
+def retry_method(orig_func):
+    """Provide a default value for a method's num_retries argument.
+
+    This is a decorator for instance and class methods that accept a
+    num_retries argument, with a None default.  When the method is called
+    without a value for num_retries, it will be set from the underlying
+    instance or class' num_retries attribute.
+    """
+    @functools.wraps(orig_func)
+    def num_retries_setter(self, *args, **kwargs):
+        # getcallargs resolves num_retries whether the caller passed it
+        # positionally, by keyword, or not at all (default None).
+        arg_vals = inspect.getcallargs(orig_func, self, *args, **kwargs)
+        if arg_vals['num_retries'] is None:
+            kwargs['num_retries'] = self.num_retries
+        return orig_func(self, *args, **kwargs)
+    return num_retries_setter
diff --git a/sdk/python/arvados/stream.py b/sdk/python/arvados/stream.py
new file mode 100644 (file)
index 0000000..c263dd8
--- /dev/null
@@ -0,0 +1,333 @@
+import bz2
+import collections
+import hashlib
+import os
+import re
+import zlib
+
+from .arvfile import ArvadosFileBase
+from arvados.retry import retry_method
+from keep import *
+import config
+import errors
+
# Indexes into the per-block / per-segment lists used throughout this
# module: [locator, size, offset], plus a fourth element (segment size)
# in locators_and_ranges() results.
LOCATOR = 0
BLOCKSIZE = 1
OFFSET = 2
SEGMENTSIZE = 3
+
def locators_and_ranges(data_locators, range_start, range_size, debug=False):
    '''
    Get blocks that are covered by the range.
    data_locators: list of [locator, block_size, block_start], assumes that blocks are in order and contiguous
    range_start: start of range
    range_size: size of range
    returns list of [block locator, blocksize, segment offset, segment size] that satisfies the range
    '''
    if range_size == 0:
        return []
    resp = []
    range_start = long(range_start)
    range_size = long(range_size)
    range_end = range_start + range_size
    block_start = 0L

    # range_start/block_start is the inclusive lower bound
    # range_end/block_end is the exclusive upper bound

    # NOTE(review): raises IndexError when data_locators is empty and
    # range_size > 0 -- confirm callers never pass an empty list.
    hi = len(data_locators)
    lo = 0
    i = int((hi + lo) / 2)
    block_size = data_locators[i][BLOCKSIZE]
    block_start = data_locators[i][OFFSET]
    block_end = block_start + block_size
    if debug: print '---'

    # perform a binary search for the first block
    # assumes that all of the blocks are contiguous, so range_start is guaranteed
    # to either fall into the range of a block or be outside the block range entirely
    while not (range_start >= block_start and range_start < block_end):
        if lo == i:
            # must be out of range, fail
            return []
        if range_start > block_start:
            lo = i
        else:
            hi = i
        i = int((hi + lo) / 2)
        if debug: print lo, i, hi
        block_size = data_locators[i][BLOCKSIZE]
        block_start = data_locators[i][OFFSET]
        block_end = block_start + block_size

    # walk forward from the first covering block, clipping the range
    # against each block until the range is exhausted
    while i < len(data_locators):
        locator, block_size, block_start = data_locators[i]
        block_end = block_start + block_size
        if debug:
            print locator, "range_start", range_start, "block_start", block_start, "range_end", range_end, "block_end", block_end
        if range_end <= block_start:
            # range ends before this block starts, so don't look at any more locators
            break

        #if range_start >= block_end:
            # range starts after this block ends, so go to next block
            # we should always start at the first block due to the binary search above, so this test is redundant
            #next

        if range_start >= block_start and range_end <= block_end:
            # range starts and ends in this block
            resp.append([locator, block_size, range_start - block_start, range_size])
        elif range_start >= block_start and range_end > block_end:
            # range starts in this block
            resp.append([locator, block_size, range_start - block_start, block_end - range_start])
        elif range_start < block_start and range_end > block_end:
            # range starts in a previous block and extends to further blocks
            resp.append([locator, block_size, 0L, block_size])
        elif range_start < block_start and range_end <= block_end:
            # range starts in a previous block and ends in this block
            resp.append([locator, block_size, 0L, range_end - block_start])
        block_start = block_end
        i += 1
    return resp
+
def split(path):
    """split(path) -> streamname, filename

    Split a /-separated stream path into its stream name and file name.
    When the path contains no '/', the stream name defaults to '.'.
    """
    if '/' in path:
        stream_name, file_name = path.rsplit('/', 1)
    else:
        stream_name, file_name = '.', path
    return stream_name, file_name
+
class StreamFileReader(ArvadosFileBase):
    """Read-only, file-like view of a single file within a stream.

    Keeps its own file position and maps read requests onto the parent
    stream through the (position, size, offset) segments parsed from
    the manifest.
    """

    class _NameAttribute(str):
        # The Python file API provides a plain .name attribute.
        # Older SDK provided a name() method.
        # This class provides both, for maximum compatibility.
        def __call__(self):
            return self


    def __init__(self, stream, segments, name):
        # stream -- parent StreamReader; segments -- list of
        # [position in stream, size, offset within this file] triples.
        super(StreamFileReader, self).__init__(self._NameAttribute(name), 'rb')
        self._stream = stream
        self.segments = segments
        self._filepos = 0L
        self.num_retries = stream.num_retries
        # (file position, leftover data) used by readline() so the tail
        # of the last chunk isn't re-fetched on the next call.
        self._readline_cache = (None, None)

    def __iter__(self):
        # Line iteration, like a regular Python file object.
        while True:
            data = self.readline()
            if not data:
                break
            yield data

    def decompressed_name(self):
        # File name with any .bz2/.gz extension removed.
        return re.sub('\.(bz2|gz)$', '', self.name)

    def stream_name(self):
        return self._stream.name()

    @ArvadosFileBase._before_close
    def seek(self, pos, whence=os.SEEK_CUR):
        # NOTE: the default whence is SEEK_CUR (relative), unlike
        # standard file.seek() which defaults to SEEK_SET.
        if whence == os.SEEK_CUR:
            pos += self._filepos
        elif whence == os.SEEK_END:
            pos += self.size()
        # Clamp the result into [0, size()].
        self._filepos = min(max(pos, 0L), self.size())

    def tell(self):
        return self._filepos

    def size(self):
        # File length: offset of the final segment plus its size.
        n = self.segments[-1]
        return n[OFFSET] + n[BLOCKSIZE]

    @ArvadosFileBase._before_close
    @retry_method
    def read(self, size, num_retries=None):
        """Read up to 'size' bytes from the stream, starting at the current file position"""
        if size == 0:
            return ''

        data = ''
        # Only the first covering chunk is fetched; a short read is
        # normal file semantics, so callers loop if they need more.
        available_chunks = locators_and_ranges(self.segments, self._filepos, size)
        if available_chunks:
            locator, blocksize, segmentoffset, segmentsize = available_chunks[0]
            data = self._stream.readfrom(locator+segmentoffset, segmentsize,
                                         num_retries=num_retries)

        self._filepos += len(data)
        return data

    @ArvadosFileBase._before_close
    @retry_method
    def readfrom(self, start, size, num_retries=None):
        """Read up to 'size' bytes from the stream, starting at 'start'"""
        if size == 0:
            return ''

        data = []
        for locator, blocksize, segmentoffset, segmentsize in locators_and_ranges(self.segments, start, size):
            data.append(self._stream.readfrom(locator+segmentoffset, segmentsize,
                                              num_retries=num_retries))
        return ''.join(data)

    @ArvadosFileBase._before_close
    @retry_method
    def readall(self, size=2**20, num_retries=None):
        # Generator yielding the whole file in chunks of up to 'size' bytes.
        while True:
            data = self.read(size, num_retries=num_retries)
            if data == '':
                break
            yield data

    @ArvadosFileBase._before_close
    @retry_method
    def readline(self, size=float('inf'), num_retries=None):
        # Serve from the cache when the position matches where the
        # previous readline() stopped.
        cache_pos, cache_data = self._readline_cache
        if self.tell() == cache_pos:
            data = [cache_data]
        else:
            data = ['']
        data_size = len(data[-1])
        # Accumulate 1 MiB chunks until a newline appears or 'size' is met.
        while (data_size < size) and ('\n' not in data[-1]):
            next_read = self.read(2 ** 20, num_retries=num_retries)
            if not next_read:
                break
            data.append(next_read)
            data_size += len(next_read)
        data = ''.join(data)
        try:
            nextline_index = data.index('\n') + 1
        except ValueError:
            nextline_index = len(data)
        nextline_index = min(nextline_index, size)
        # Remember the unconsumed tail for the next call.
        self._readline_cache = (self.tell(), data[nextline_index:])
        return data[:nextline_index]

    @ArvadosFileBase._before_close
    @retry_method
    def decompress(self, decompress, size, num_retries=None):
        # Feed raw chunks through the supplied decompressor, yielding
        # only non-empty output.
        for segment in self.readall(size, num_retries):
            data = decompress(segment)
            if data:
                yield data

    @ArvadosFileBase._before_close
    @retry_method
    def readall_decompressed(self, size=2**20, num_retries=None):
        # Pick a decompressor from the file extension; rewind first.
        self.seek(0)
        if self.name.endswith('.bz2'):
            dc = bz2.BZ2Decompressor()
            return self.decompress(dc.decompress, size,
                                   num_retries=num_retries)
        elif self.name.endswith('.gz'):
            # 16+MAX_WBITS tells zlib to expect a gzip header.
            dc = zlib.decompressobj(16+zlib.MAX_WBITS)
            return self.decompress(lambda segment: dc.decompress(dc.unconsumed_tail + segment),
                                   size, num_retries=num_retries)
        else:
            return self.readall(size, num_retries=num_retries)

    @ArvadosFileBase._before_close
    @retry_method
    def readlines(self, sizehint=float('inf'), num_retries=None):
        # Read at least 'sizehint' bytes, then split into lines,
        # keeping line endings (like file.readlines()).
        data = []
        data_size = 0
        for s in self.readall(num_retries=num_retries):
            data.append(s)
            data_size += len(s)
            if data_size >= sizehint:
                break
        return ''.join(data).splitlines(True)

    def as_manifest(self):
        # Build a one-stream manifest text containing just this file.
        # NOTE(review): relies on 'arvados' being resolvable here, which
        # this module does not import directly -- presumably it arrives
        # via 'from keep import *'; verify.
        manifest_text = ['.']
        manifest_text.extend([d[LOCATOR] for d in self._stream._data_locators])
        manifest_text.extend(["{}:{}:{}".format(seg[LOCATOR], seg[BLOCKSIZE], self.name().replace(' ', '\\040')) for seg in self.segments])
        return arvados.CollectionReader(' '.join(manifest_text) + '\n').manifest_text(normalize=True)
+
+
class StreamReader(object):
    """One stream of a Keep manifest: a stream name, an ordered list of
    data blocks, and the files laid out across those blocks.

    The constructor parses a single tokenized manifest stream line.
    """
    def __init__(self, tokens, keep=None, debug=False, _empty=False,
                 num_retries=0):
        # NOTE(review): the _empty parameter is accepted but never used
        # here -- presumably kept for call-site compatibility; verify.
        self._stream_name = None
        self._data_locators = []  # [locator, block size, offset in stream]
        self._files = collections.OrderedDict()  # file name -> StreamFileReader
        self._keep = keep
        self.num_retries = num_retries

        streamoffset = 0L

        # parse stream
        for tok in tokens:
            if debug: print 'tok', tok
            # The first token is the stream name ('\040' escapes spaces).
            if self._stream_name is None:
                self._stream_name = tok.replace('\\040', ' ')
                continue

            # Data block locator: md5 hash '+' size, plus optional hints.
            s = re.match(r'^[0-9a-f]{32}\+(\d+)(\+\S+)*$', tok)
            if s:
                blocksize = long(s.group(1))
                self._data_locators.append([tok, blocksize, streamoffset])
                streamoffset += blocksize
                continue

            # File segment token: position:size:name.
            s = re.search(r'^(\d+):(\d+):(\S+)', tok)
            if s:
                pos = long(s.group(1))
                size = long(s.group(2))
                name = s.group(3).replace('\\040', ' ')
                if name not in self._files:
                    self._files[name] = StreamFileReader(self, [[pos, size, 0]], name)
                else:
                    # Additional segment of an existing file; its offset
                    # within the file is the file's current size.
                    n = self._files[name]
                    n.segments.append([pos, size, n.size()])
                continue

            raise errors.SyntaxError("Invalid manifest format")

    def name(self):
        return self._stream_name

    def files(self):
        # Mapping of file name -> StreamFileReader, in manifest order.
        return self._files

    def all_files(self):
        return self._files.values()

    def size(self):
        # Stream length: offset of the last data block plus its size.
        n = self._data_locators[-1]
        return n[OFFSET] + n[BLOCKSIZE]

    def locators_and_ranges(self, range_start, range_size):
        return locators_and_ranges(self._data_locators, range_start, range_size)

    @retry_method
    def readfrom(self, start, size, num_retries=None):
        """Read up to 'size' bytes from the stream, starting at 'start'"""
        if size == 0:
            return ''
        if self._keep is None:
            # Lazily create a KeepClient on first read.
            self._keep = KeepClient(num_retries=self.num_retries)
        data = []
        for locator, blocksize, segmentoffset, segmentsize in locators_and_ranges(self._data_locators, start, size):
            data.append(self._keep.get(locator, num_retries=num_retries)[segmentoffset:segmentoffset+segmentsize])
        return ''.join(data)

    def manifest_text(self, strip=False):
        # Reassemble this stream's manifest line.  With strip=True, the
        # locator hints/signatures are dropped, keeping only hash+size.
        manifest_text = [self.name().replace(' ', '\\040')]
        if strip:
            for d in self._data_locators:
                m = re.match(r'^[0-9a-f]{32}\+\d+', d[LOCATOR])
                manifest_text.append(m.group(0))
        else:
            manifest_text.extend([d[LOCATOR] for d in self._data_locators])
        manifest_text.extend([' '.join(["{}:{}:{}".format(seg[LOCATOR], seg[BLOCKSIZE], f.name().replace(' ', '\\040'))
                                        for seg in f.segments])
                              for f in self._files.values()])
        return ' '.join(manifest_text) + '\n'
diff --git a/sdk/python/arvados/timer.py b/sdk/python/arvados/timer.py
new file mode 100644 (file)
index 0000000..739d0d5
--- /dev/null
@@ -0,0 +1,16 @@
+import time
+
class Timer(object):
    """Context manager that measures the wall-clock time of its block.

    After the 'with' block exits, .secs and .msecs hold the elapsed
    time; with verbose=True the elapsed time is also printed.
    """
    def __init__(self, verbose=False):
        self.verbose = verbose

    def __enter__(self):
        # time.time() is wall-clock; Python 2 has no time.monotonic(),
        # so system clock adjustments can skew the measurement.
        self.start = time.time()
        return self

    def __exit__(self, *args):
        self.end = time.time()
        self.secs = self.end - self.start
        self.msecs = self.secs * 1000  # millisecs
        if self.verbose:
            print 'elapsed time: %f ms' % self.msecs
diff --git a/sdk/python/arvados/util.py b/sdk/python/arvados/util.py
new file mode 100644 (file)
index 0000000..79692ae
--- /dev/null
@@ -0,0 +1,371 @@
+import fcntl
+import hashlib
+import os
+import re
+import subprocess
+import errno
+import sys
+from arvados.collection import *
+
# Matches a string consisting entirely of hexadecimal digits.
HEX_RE = re.compile(r'^[0-9a-fA-F]+$')

# Keep block locator: md5 hash, '+', size, then optional '+hint' fields.
keep_locator_pattern = re.compile(r'[0-9a-f]{32}\+\d+(\+\S+)*')
# Like keep_locator_pattern, but requiring a '+A...' permission signature.
signed_locator_pattern = re.compile(r'[0-9a-f]{32}\+\d+(\+\S+)*\+A\S+(\+\S+)*')
# Collection portable data hash: md5 hash plus manifest size.
portable_data_hash_pattern = re.compile(r'[0-9a-f]{32}\+\d+')
# Arvados UUID: 5-char cluster id, 5-char type code, 15-char object id.
uuid_pattern = re.compile(r'[a-z0-9]{5}-[a-z0-9]{5}-[a-z0-9]{15}')
# Type-specific UUID patterns ('4zz18' collection, 'j7d0g' group,
# 'tpzed' user, 'o0j2j' link).
collection_uuid_pattern = re.compile(r'[a-z0-9]{5}-4zz18-[a-z0-9]{15}')
group_uuid_pattern = re.compile(r'[a-z0-9]{5}-j7d0g-[a-z0-9]{15}')
user_uuid_pattern = re.compile(r'[a-z0-9]{5}-tpzed-[a-z0-9]{15}')
link_uuid_pattern = re.compile(r'[a-z0-9]{5}-o0j2j-[a-z0-9]{15}')
# One manifest stream line: stream name, one or more block locators,
# one or more position:size:name file segment tokens.
manifest_pattern = re.compile(r'((\S+)( +[a-f0-9]{32}(\+\d+)(\+\S+)*)+( +\d+:\d+:\S+)+$)+', flags=re.MULTILINE)
+
def clear_tmpdir(path=None):
    """
    Ensure the given directory (or TASK_TMPDIR if none given)
    exists and is empty.

    path -- directory to reset; defaults to the current task's tmpdir.
    """
    # Local import keeps the module's top-level import block unchanged.
    import shutil
    if path is None:
        path = arvados.current_task().tmpdir
    if os.path.exists(path):
        # The previous implementation shelled out to 'rm -rf' but never
        # captured stderr, so its failure message always reported None.
        # shutil.rmtree is portable and raises a descriptive OSError.
        if os.path.isdir(path):
            shutil.rmtree(path)
        else:
            os.unlink(path)
    os.mkdir(path)
+
def run_command(execargs, **kwargs):
    """Run an external command; return its (stdout, stderr) data.

    By default stdin/stdout are pipes and stderr is inherited.  Raises
    errors.CommandFailedError when the command exits non-zero.
    """
    for option, default in [('stdin', subprocess.PIPE),
                            ('stdout', subprocess.PIPE),
                            ('stderr', sys.stderr),
                            ('close_fds', True),
                            ('shell', False)]:
        kwargs.setdefault(option, default)
    proc = subprocess.Popen(execargs, **kwargs)
    stdoutdata, stderrdata = proc.communicate(None)
    if proc.returncode != 0:
        raise errors.CommandFailedError(
            "run_command %s exit %d:\n%s" %
            (execargs, proc.returncode, stderrdata))
    return stdoutdata, stderrdata
+
def git_checkout(url, version, path):
    """Clone a git repository (unless already present) and check out version.

    Relative paths are placed under the current job's tmpdir.  Returns
    the checkout path.
    """
    if not path.startswith('/'):
        path = os.path.join(arvados.current_job().tmpdir, path)
    if not os.path.exists(path):
        run_command(["git", "clone", url, path],
                    cwd=os.path.dirname(path))
    run_command(["git", "checkout", version],
                cwd=path)
    return path
+
def tar_extractor(path, decompress_flag):
    """Start a tar subprocess that extracts its stdin into path.

    decompress_flag -- '' for plain tar, 'j' for bzip2, 'z' for gzip.
    Returns the Popen object; write archive bytes to its stdin.
    """
    tar_command = ["tar",
                   "-C", path,
                   ("-x%sf" % decompress_flag),
                   "-"]
    return subprocess.Popen(tar_command,
                            stdout=None,
                            stdin=subprocess.PIPE,
                            stderr=sys.stderr,
                            shell=False,
                            close_fds=True)
+
def tarball_extract(tarball, path):
    """Retrieve a tarball from Keep and extract it to a local
    directory.  Return the absolute path where the tarball was
    extracted. If the top level of the tarball contained just one
    file or directory, return the absolute path of that single
    item.

    tarball -- collection locator
    path -- where to extract the tarball: absolute, or relative to job tmp
    """
    if not re.search('^/', path):
        path = os.path.join(arvados.current_job().tmpdir, path)
    # Serialize concurrent extraction attempts with an advisory lock
    # on a sibling '.lock' file.
    # NOTE(review): lockfile is not closed if an exception escapes
    # before the close() calls below -- confirm that's acceptable.
    lockfile = open(path + '.lock', 'w')
    fcntl.flock(lockfile, fcntl.LOCK_EX)
    try:
        os.stat(path)
    except OSError:
        os.mkdir(path)
    # A '.locator' symlink inside path records which tarball was
    # extracted there last; skip the work if it matches.
    already_have_it = False
    try:
        if os.readlink(os.path.join(path, '.locator')) == tarball:
            already_have_it = True
    except OSError:
        pass
    if not already_have_it:

        # emulate "rm -f" (i.e., if the file does not exist, we win)
        try:
            os.unlink(os.path.join(path, '.locator'))
        except OSError:
            if os.path.exists(os.path.join(path, '.locator')):
                os.unlink(os.path.join(path, '.locator'))

        for f in CollectionReader(tarball).all_files():
            # Choose the tar decompression flag from the file extension.
            if re.search('\.(tbz|tar.bz2)$', f.name()):
                p = tar_extractor(path, 'j')
            elif re.search('\.(tgz|tar.gz)$', f.name()):
                p = tar_extractor(path, 'z')
            elif re.search('\.tar$', f.name()):
                p = tar_extractor(path, '')
            else:
                raise errors.AssertionError(
                    "tarball_extract cannot handle filename %s" % f.name())
            # Stream the archive from Keep into tar's stdin, 1 MiB at a time.
            while True:
                buf = f.read(2**20)
                if len(buf) == 0:
                    break
                p.stdin.write(buf)
            p.stdin.close()
            p.wait()
            if p.returncode != 0:
                lockfile.close()
                raise errors.CommandFailedError(
                    "tar exited %d" % p.returncode)
        os.symlink(tarball, os.path.join(path, '.locator'))
    tld_extracts = filter(lambda f: f != '.locator', os.listdir(path))
    lockfile.close()
    # If exactly one top-level item was extracted, return it directly.
    if len(tld_extracts) == 1:
        return os.path.join(path, tld_extracts[0])
    return path
+
def zipball_extract(zipball, path):
    """Retrieve a zip archive from Keep and extract it to a local
    directory.  Return the absolute path where the archive was
    extracted. If the top level of the archive contained just one
    file or directory, return the absolute path of that single
    item.

    zipball -- collection locator
    path -- where to extract the archive: absolute, or relative to job tmp
    """
    if not re.search('^/', path):
        path = os.path.join(arvados.current_job().tmpdir, path)
    # Advisory lock on a sibling '.lock' file serializes extractions.
    lockfile = open(path + '.lock', 'w')
    fcntl.flock(lockfile, fcntl.LOCK_EX)
    try:
        os.stat(path)
    except OSError:
        os.mkdir(path)
    # A '.locator' symlink inside path records the last-extracted archive.
    already_have_it = False
    try:
        if os.readlink(os.path.join(path, '.locator')) == zipball:
            already_have_it = True
    except OSError:
        pass
    if not already_have_it:

        # emulate "rm -f" (i.e., if the file does not exist, we win)
        try:
            os.unlink(os.path.join(path, '.locator'))
        except OSError:
            if os.path.exists(os.path.join(path, '.locator')):
                os.unlink(os.path.join(path, '.locator'))

        for f in CollectionReader(zipball).all_files():
            if not re.search('\.zip$', f.name()):
                raise errors.NotImplementedError(
                    "zipball_extract cannot handle filename %s" % f.name())
            # Spool the archive to a local file first (unlike tar,
            # unzip cannot read the archive from a pipe).
            zip_filename = os.path.join(path, os.path.basename(f.name()))
            zip_file = open(zip_filename, 'wb')
            while True:
                buf = f.read(2**20)
                if len(buf) == 0:
                    break
                zip_file.write(buf)
            zip_file.close()

            p = subprocess.Popen(["unzip",
                                  "-q", "-o",
                                  "-d", path,
                                  zip_filename],
                                 stdout=None,
                                 stdin=None, stderr=sys.stderr,
                                 shell=False, close_fds=True)
            p.wait()
            if p.returncode != 0:
                # NOTE(review): the spooled zip and the open lockfile's
                # '.lock' file are left behind on this path -- verify.
                lockfile.close()
                raise errors.CommandFailedError(
                    "unzip exited %d" % p.returncode)
            os.unlink(zip_filename)
        os.symlink(zipball, os.path.join(path, '.locator'))
    tld_extracts = filter(lambda f: f != '.locator', os.listdir(path))
    lockfile.close()
    # If exactly one top-level item was extracted, return it directly.
    if len(tld_extracts) == 1:
        return os.path.join(path, tld_extracts[0])
    return path
+
def collection_extract(collection, path, files=[], decompress=True):
    """Retrieve a collection from Keep and extract it to a local
    directory.  Return the absolute path where the collection was
    extracted.

    collection -- collection locator
    path -- where to extract: absolute, or relative to job tmp
    files -- optional list of file names to extract; default is all
    decompress -- decompress .bz2/.gz files while extracting
    """
    # Use the bare hash of the locator (or an md5 of arbitrary
    # manifest text) as the '.locator' bookkeeping key.
    matches = re.search(r'^([0-9a-f]+)(\+[\w@]+)*$', collection)
    if matches:
        collection_hash = matches.group(1)
    else:
        collection_hash = hashlib.md5(collection).hexdigest()
    if not re.search('^/', path):
        path = os.path.join(arvados.current_job().tmpdir, path)
    # Advisory lock serializes concurrent extractions of the same path.
    lockfile = open(path + '.lock', 'w')
    fcntl.flock(lockfile, fcntl.LOCK_EX)
    try:
        os.stat(path)
    except OSError:
        os.mkdir(path)
    already_have_it = False
    try:
        if os.readlink(os.path.join(path, '.locator')) == collection_hash:
            already_have_it = True
    except OSError:
        pass
    # NOTE(review): unlike tarball_extract/zipball_extract,
    # already_have_it is never used below -- existing files are
    # re-checked individually instead.  Confirm this is intentional.

    # emulate "rm -f" (i.e., if the file does not exist, we win)
    try:
        os.unlink(os.path.join(path, '.locator'))
    except OSError:
        if os.path.exists(os.path.join(path, '.locator')):
            os.unlink(os.path.join(path, '.locator'))

    files_got = []
    for s in CollectionReader(collection).all_streams():
        stream_name = s.name()
        for f in s.all_files():
            # Extract f when no specific files were requested, or when
            # it (or its decompressed name) was requested and not yet
            # written.
            if (files == [] or
                ((f.name() not in files_got) and
                 (f.name() in files or
                  (decompress and f.decompressed_name() in files)))):
                outname = f.decompressed_name() if decompress else f.name()
                files_got += [outname]
                if os.path.exists(os.path.join(path, stream_name, outname)):
                    continue
                mkdir_dash_p(os.path.dirname(os.path.join(path, stream_name, outname)))
                outfile = open(os.path.join(path, stream_name, outname), 'wb')
                for buf in (f.readall_decompressed() if decompress
                            else f.readall()):
                    outfile.write(buf)
                outfile.close()
    if len(files_got) < len(files):
        raise errors.AssertionError(
            "Wanted files %s but only got %s from %s" %
            (files, files_got,
             [z.name() for z in CollectionReader(collection).all_files()]))
    os.symlink(collection_hash, os.path.join(path, '.locator'))

    lockfile.close()
    return path
+
def mkdir_dash_p(path):
    """Create path and any missing parents, like 'mkdir -p'.

    An already-existing directory is not an error.
    """
    if os.path.isdir(path):
        return
    try:
        os.makedirs(path)
    except OSError as error:
        # It is not an error if another process created the directory
        # between our isdir() check and the makedirs() call.
        if not (error.errno == errno.EEXIST and os.path.isdir(path)):
            raise
+
def stream_extract(stream, path, files=[], decompress=True):
    """Retrieve a stream from Keep and extract it to a local
    directory.  Return the absolute path where the stream was
    extracted.

    stream -- StreamReader object
    path -- where to extract: absolute, or relative to job tmp
    files -- optional list of file names to extract; default is all
    decompress -- decompress .bz2/.gz files while extracting
    """
    if not re.search('^/', path):
        path = os.path.join(arvados.current_job().tmpdir, path)
    # Advisory lock serializes concurrent extractions of the same path.
    lockfile = open(path + '.lock', 'w')
    fcntl.flock(lockfile, fcntl.LOCK_EX)
    try:
        os.stat(path)
    except OSError:
        os.mkdir(path)

    files_got = []
    for f in stream.all_files():
        # Extract f when no specific files were requested, or when it
        # (or its decompressed name) was requested and not yet written.
        if (files == [] or
            ((f.name() not in files_got) and
             (f.name() in files or
              (decompress and f.decompressed_name() in files)))):
            outname = f.decompressed_name() if decompress else f.name()
            files_got += [outname]
            # Unlike collection_extract, existing output files are
            # replaced rather than skipped.
            if os.path.exists(os.path.join(path, outname)):
                os.unlink(os.path.join(path, outname))
            mkdir_dash_p(os.path.dirname(os.path.join(path, outname)))
            outfile = open(os.path.join(path, outname), 'wb')
            for buf in (f.readall_decompressed() if decompress
                        else f.readall()):
                outfile.write(buf)
            outfile.close()
    if len(files_got) < len(files):
        raise errors.AssertionError(
            "Wanted files %s but only got %s from %s" %
            (files, files_got, [z.name() for z in stream.all_files()]))
    lockfile.close()
    return path
+
def listdir_recursive(dirname, base=None, max_depth=None):
    """listdir_recursive(dirname, base, max_depth)

    Return a list of file and directory names found under dirname.

    If base is not None, prepend "{base}/" to each returned name.

    If max_depth is None, descend into directories and return only the
    names of files found in the directory tree.

    If max_depth is a non-negative integer, stop descending into
    directories at the given depth, and at that point return directory
    names instead.

    If max_depth==0 (and base is None) this is equivalent to
    sorted(os.listdir(dirname)).
    """
    found = []
    for name in sorted(os.listdir(dirname)):
        full_path = os.path.join(dirname, name)
        rel_name = os.path.join(base, name) if base else name
        if max_depth != 0 and os.path.isdir(full_path):
            # Recurse, decrementing the remaining depth (None stays None).
            next_depth = max_depth - 1 if max_depth else None
            found.extend(listdir_recursive(full_path, base=rel_name,
                                           max_depth=next_depth))
        else:
            found.append(rel_name)
    return found
+
def is_hex(s, *length_args):
    """is_hex(s[, length[, max_length]]) -> boolean

    Return True if s is a string of hexadecimal digits.
    If one length argument is given, the string must contain exactly
    that number of digits.
    If two length arguments are given, the string must contain a number of
    digits between those two lengths, inclusive.
    Return False otherwise.
    """
    count = len(length_args)
    if count > 2:
        raise errors.ArgumentError("is_hex accepts up to 3 arguments ({} given)"
                                   .format(1 + count))
    if count == 2:
        good_len = length_args[0] <= len(s) <= length_args[1]
    elif count == 1:
        good_len = len(s) == length_args[0]
    else:
        good_len = True
    return bool(good_len and re.match(r'^[0-9a-fA-F]+$', s))
+
def list_all(fn, num_retries=0, **kwargs):
    """Return all items from a paginated Arvados list API call.

    fn -- a list API method (e.g. api.collections().list); it is called
    repeatedly with an increasing 'offset' until every item the server
    reports as available has been fetched.
    num_retries -- passed through to each request's execute().

    Avoids seeding the loop bound with sys.maxint, which does not exist
    on Python 3; the request pattern is unchanged.
    """
    items = []
    offset = 0
    while True:
        c = fn(offset=offset, **kwargs).execute(num_retries=num_retries)
        items += c['items']
        offset = c['offset'] + len(c['items'])
        # Stop once we've accumulated everything the server reports.
        if len(items) >= c['items_available']:
            break
    return items
diff --git a/sdk/python/bin/arv-copy b/sdk/python/bin/arv-copy
new file mode 100755 (executable)
index 0000000..4ee08de
--- /dev/null
@@ -0,0 +1,4 @@
#!/usr/bin/env python

# Thin executable wrapper: the actual arv-copy implementation lives in
# the arvados.commands.arv_copy module; this script just invokes it.
from arvados.commands.arv_copy import main
main()
diff --git a/sdk/python/bin/arv-get b/sdk/python/bin/arv-get
new file mode 100755 (executable)
index 0000000..2451416
--- /dev/null
@@ -0,0 +1,224 @@
+#!/usr/bin/env python
+
+import argparse
+import hashlib
+import os
+import re
+import string
+import sys
+import logging
+
+import arvados
+import arvados.commands._util as arv_cmd
+
+logger = logging.getLogger('arvados.arv-get')
+
+def abort(msg, code=1):
+    print >>sys.stderr, "arv-get:", msg
+    exit(code)
+
+parser = argparse.ArgumentParser(
+    description='Copy data from Keep to a local file or pipe.',
+    parents=[arv_cmd.retry_opt])
+parser.add_argument('locator', type=str,
+                    help="""
+Collection locator, optionally with a file path or prefix.
+""")
+parser.add_argument('destination', type=str, nargs='?', default='/dev/stdout',
+                    help="""
+Local file or directory where the data is to be written. Default:
+/dev/stdout.
+""")
+group = parser.add_mutually_exclusive_group()
+group.add_argument('--progress', action='store_true',
+                   help="""
+Display human-readable progress on stderr (bytes and, if possible,
+percentage of total data size). This is the default behavior when it
+is not expected to interfere with the output: specifically, stderr is
+a tty _and_ either stdout is not a tty, or output is being written to
+named files rather than stdout.
+""")
+group.add_argument('--no-progress', action='store_true',
+                   help="""
+Do not display human-readable progress on stderr.
+""")
+group.add_argument('--batch-progress', action='store_true',
+                   help="""
+Display machine-readable progress on stderr (bytes and, if known,
+total data size).
+""")
+group = parser.add_mutually_exclusive_group()
+group.add_argument('--hash',
+                    help="""
+Display the hash of each file as it is read from Keep, using the given
+hash algorithm. Supported algorithms include md5, sha1, sha224,
+sha256, sha384, and sha512.
+""")
+group.add_argument('--md5sum', action='store_const',
+                    dest='hash', const='md5',
+                    help="""
+Display the MD5 hash of each file as it is read from Keep.
+""")
+parser.add_argument('-n', action='store_true',
+                    help="""
+Do not write any data -- just read from Keep, and report md5sums if
+requested.
+""")
+parser.add_argument('-r', action='store_true',
+                    help="""
+Retrieve all files in the specified collection/prefix. This is the
+default behavior if the "locator" argument ends with a forward slash.
+""")
+group = parser.add_mutually_exclusive_group()
+group.add_argument('-f', action='store_true',
+                   help="""
+Overwrite existing files while writing. The default behavior is to
+refuse to write *anything* if any of the output files already
+exist. As a special case, -f is not needed to write to /dev/stdout.
+""")
+group.add_argument('--skip-existing', action='store_true',
+                   help="""
+Skip files that already exist. The default behavior is to refuse to
+write *anything* if any files exist that would have to be
+overwritten. This option causes even devices, sockets, and fifos to be
+skipped.
+""")
+
+args = parser.parse_args()
+
+if args.locator[-1] == os.sep:
+    args.r = True
+if (args.r and
+    not args.n and
+    not (args.destination and
+         os.path.isdir(args.destination))):
+    parser.error('Destination is not a directory.')
+if not args.r and (os.path.isdir(args.destination) or
+                   args.destination[-1] == os.path.sep):
+    args.destination = os.path.join(args.destination,
+                                    os.path.basename(args.locator))
+    logger.debug("Appended source file name to destination directory: %s",
+                 args.destination)
+
+if args.destination == '-':
+    args.destination = '/dev/stdout'
+if args.destination == '/dev/stdout':
+    # Normally you have to use -f to write to a file (or device) that
+    # already exists, but "-" and "/dev/stdout" are common enough to
+    # merit a special exception.
+    args.f = True
+else:
+    args.destination = args.destination.rstrip(os.sep)
+
+# Turn on --progress by default if stderr is a tty and output is
+# either going to a named file, or going (via stdout) to something
+# that isn't a tty.
+if (not (args.batch_progress or args.no_progress)
+    and sys.stderr.isatty()
+    and (args.destination != '/dev/stdout'
+         or not sys.stdout.isatty())):
+    args.progress = True
+
+
+r = re.search(r'^(.*?)(/.*)?$', args.locator)
+collection = r.group(1)
+get_prefix = r.group(2)
+if args.r and not get_prefix:
+    get_prefix = os.sep
+api_client = arvados.api('v1')
+reader = arvados.CollectionReader(collection, num_retries=args.retries)
+
+if not get_prefix:
+    if not args.n:
+        open_flags = os.O_CREAT | os.O_WRONLY
+        if not args.f:
+            open_flags |= os.O_EXCL
+        try:
+            out_fd = os.open(args.destination, open_flags)
+            with os.fdopen(out_fd, 'wb') as out_file:
+                out_file.write(reader.manifest_text())
+        except (IOError, OSError) as error:
+            abort("can't write to '{}': {}".format(args.destination, error))
+        except (arvados.errors.ApiError, arvados.errors.KeepReadError) as error:
+            abort("failed to download '{}': {}".format(collection, error))
+    sys.exit(0)
+
+reader.normalize()
+
+# Scan the collection. Make an array of (stream, file, local
+# destination filename) tuples, and add up total size to extract.
+todo = []
+todo_bytes = 0
+try:
+    for s in reader.all_streams():
+        for f in s.all_files():
+            if get_prefix and get_prefix[-1] == os.sep:
+                if 0 != string.find(os.path.join(s.name(), f.name()),
+                                    '.' + get_prefix):
+                    continue
+                dest_path = os.path.join(
+                    args.destination,
+                    os.path.join(s.name(), f.name())[len(get_prefix)+1:])
+                if (not (args.n or args.f or args.skip_existing) and
+                    os.path.exists(dest_path)):
+                    abort('Local file %s already exists.' % (dest_path,))
+            else:
+                if os.path.join(s.name(), f.name()) != '.' + get_prefix:
+                    continue
+                dest_path = args.destination
+            todo += [(s, f, dest_path)]
+            todo_bytes += f.size()
+except arvados.errors.NotFoundError as e:
+    abort(e)
+
+# Read data, and (if not -n) write to local file(s) or pipe.
+
+out_bytes = 0
+for s,f,outfilename in todo:
+    outfile = None
+    digestor = None
+    if not args.n:
+        if args.skip_existing and os.path.exists(outfilename):
+            logger.debug('Local file %s exists. Skipping.', outfilename)
+            continue
+        elif not args.f and (os.path.isfile(outfilename) or
+                           os.path.isdir(outfilename)):
+            # Good thing we looked again: apparently this file wasn't
+            # here yet when we checked earlier.
+            abort('Local file %s already exists.' % (outfilename,))
+        if args.r:
+            arvados.util.mkdir_dash_p(os.path.dirname(outfilename))
+        try:
+            outfile = open(outfilename, 'wb')
+        except Exception as error:
+            abort('Open(%s) failed: %s' % (outfilename, error))
+    if args.hash:
+        digestor = hashlib.new(args.hash)
+    try:
+        for data in f.readall():
+            if outfile:
+                outfile.write(data)
+            if digestor:
+                digestor.update(data)
+            out_bytes += len(data)
+            if args.progress:
+                sys.stderr.write('\r%d MiB / %d MiB %.1f%%' %
+                                 (out_bytes >> 20,
+                                  todo_bytes >> 20,
+                                  (100
+                                   if todo_bytes==0
+                                   else 100.0*out_bytes/todo_bytes)))
+            elif args.batch_progress:
+                sys.stderr.write('%s %d read %d total\n' %
+                                 (sys.argv[0], os.getpid(),
+                                  out_bytes, todo_bytes))
+        if digestor:
+            sys.stderr.write("%s  %s/%s\n"
+                             % (digestor.hexdigest(), s.name(), f.name()))
+    except KeyboardInterrupt:
+        if outfile and outfilename != '/dev/stdout':
+            os.unlink(outfilename)
+        break
+
+if args.progress:
+    sys.stderr.write('\n')
diff --git a/sdk/python/bin/arv-keepdocker b/sdk/python/bin/arv-keepdocker
new file mode 100755 (executable)
index 0000000..20d9d62
--- /dev/null
@@ -0,0 +1,4 @@
+#!/usr/bin/env python
+
+from arvados.commands.keepdocker import main
+main()
diff --git a/sdk/python/bin/arv-ls b/sdk/python/bin/arv-ls
new file mode 100755 (executable)
index 0000000..23b99f2
--- /dev/null
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+
+import sys
+
+from arvados.commands.ls import main
+
+sys.exit(main(sys.argv[1:], sys.stdout, sys.stderr))
diff --git a/sdk/python/bin/arv-normalize b/sdk/python/bin/arv-normalize
new file mode 100755 (executable)
index 0000000..b84910e
--- /dev/null
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+
+import argparse
+import hashlib
+import os
+import re
+import string
+import sys
+
+parser = argparse.ArgumentParser(
+    description='Read manifest on standard input and put normalized manifest on standard output.')
+
+parser.add_argument('--extract', type=str, help="The file to extract from the input manifest")
+parser.add_argument('--strip', action='store_true', help="Strip authorization tokens")
+
+args = parser.parse_args()
+
+import arvados
+
+r = sys.stdin.read()
+
+cr = arvados.CollectionReader(r)
+cr.normalize()
+
+if args.extract:
+    i = args.extract.rfind('/')
+    if i == -1:
+        stream = '.'
+        fn = args.extract
+    else:
+        stream = args.extract[:i]
+        fn = args.extract[(i+1):]
+    for s in cr.all_streams():
+        if s.name() == stream:
+            if fn in s.files():
+                sys.stdout.write(s.files()[fn].as_manifest())
+else:
+    sys.stdout.write(cr.manifest_text(args.strip))
diff --git a/sdk/python/bin/arv-put b/sdk/python/bin/arv-put
new file mode 100755 (executable)
index 0000000..cdb831b
--- /dev/null
@@ -0,0 +1,4 @@
+#!/usr/bin/env python
+
+from arvados.commands.put import main
+main()
diff --git a/sdk/python/bin/arv-run b/sdk/python/bin/arv-run
new file mode 100755 (executable)
index 0000000..41f5fd3
--- /dev/null
@@ -0,0 +1,4 @@
+#!/usr/bin/env python
+
+from arvados.commands.run import main
+main()
diff --git a/sdk/python/bin/arv-ws b/sdk/python/bin/arv-ws
new file mode 100755 (executable)
index 0000000..4e663ce
--- /dev/null
@@ -0,0 +1,4 @@
+#!/usr/bin/env python
+
+from arvados.commands.ws import main
+main()
diff --git a/sdk/python/setup.py b/sdk/python/setup.py
new file mode 100644 (file)
index 0000000..754d89b
--- /dev/null
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+
+import os
+import subprocess
+import time
+
+from setuptools import setup, find_packages
+from setuptools.command.egg_info import egg_info
+
+SETUP_DIR = os.path.dirname(__file__)
+README = os.path.join(SETUP_DIR, 'README.rst')
+
+class TagBuildWithCommit(egg_info):
+    """Tag the build with the sha1 and date of the last git commit.
+
+    If a build tag has already been set (e.g., "egg_info -b", building
+    from source package), leave it alone.
+    """
+    def tags(self):
+        if self.tag_build is None:
+            git_tags = subprocess.check_output(
+                ['git', 'log', '--first-parent', '--max-count=1',
+                 '--format=format:%ct %h', SETUP_DIR]).split()
+            assert len(git_tags) == 2
+            git_tags[0] = time.strftime(
+                '%Y%m%d%H%M%S', time.gmtime(int(git_tags[0])))
+            self.tag_build = '.{}+{}'.format(*git_tags)
+        return egg_info.tags(self)
+
+
+setup(name='arvados-python-client',
+      version='0.1',
+      description='Arvados client library',
+      long_description=open(README).read(),
+      author='Arvados',
+      author_email='info@arvados.org',
+      url="https://arvados.org",
+      download_url="https://github.com/curoverse/arvados.git",
+      license='Apache 2.0',
+      packages=find_packages(),
+      scripts=[
+        'bin/arv-copy',
+        'bin/arv-get',
+        'bin/arv-keepdocker',
+        'bin/arv-ls',
+        'bin/arv-normalize',
+        'bin/arv-put',
+        'bin/arv-run',
+        'bin/arv-ws'
+        ],
+      install_requires=[
+        'python-gflags',
+        'google-api-python-client',
+        'httplib2',
+        'requests>=2.4',
+        'urllib3',
+        'ws4py'
+        ],
+      test_suite='tests',
+      tests_require=['mock>=1.0', 'PyYAML'],
+      zip_safe=False,
+      cmdclass={'egg_info': TagBuildWithCommit},
+      )
diff --git a/sdk/python/tests/__init__.py b/sdk/python/tests/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sdk/python/tests/arvados_testutil.py b/sdk/python/tests/arvados_testutil.py
new file mode 100644 (file)
index 0000000..04ca6b5
--- /dev/null
@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+
+import errno
+import hashlib
+import httplib
+import httplib2
+import io
+import mock
+import os
+import requests
+import shutil
+import tempfile
+import unittest
+
+# Use this hostname when you want to make sure the traffic will be
+# instantly refused.  100::/64 is a dedicated black hole.
+TEST_HOST = '100::'
+
+skip_sleep = mock.patch('time.sleep', lambda n: None)  # clown'll eat me
+
+# fake_httplib2_response and mock_responses
+# mock calls to httplib2.Http.request()
+def fake_httplib2_response(code, **headers):
+    headers.update(status=str(code),
+                   reason=httplib.responses.get(code, "Unknown Response"))
+    return httplib2.Response(headers)
+
+def mock_responses(body, *codes, **headers):
+    return mock.patch('httplib2.Http.request', side_effect=(
+            (fake_httplib2_response(code, **headers), body) for code in codes))
+
+# fake_requests_response, mock_get_responses and mock_put_responses
+# mock calls to requests.get() and requests.put()
+def fake_requests_response(code, body, **headers):
+    r = requests.Response()
+    r.status_code = code
+    r.reason = httplib.responses.get(code, "Unknown Response")
+    r.headers = headers
+    r.raw = io.BytesIO(body)
+    return r
+
+def mock_get_responses(body, *codes, **headers):
+    return mock.patch('requests.get', side_effect=(
+        fake_requests_response(code, body, **headers) for code in codes))
+
+def mock_put_responses(body, *codes, **headers):
+    return mock.patch('requests.put', side_effect=(
+        fake_requests_response(code, body, **headers) for code in codes))
+
+def mock_requestslib_responses(method, body, *codes, **headers):
+    return mock.patch(method, side_effect=(
+        fake_requests_response(code, body, **headers) for code in codes))
+
+class MockStreamReader(object):
+    def __init__(self, name='.', *data):
+        self._name = name
+        self._data = ''.join(data)
+        self._data_locators = ['{}+{}'.format(hashlib.md5(d).hexdigest(),
+                                              len(d)) for d in data]
+        self.num_retries = 0
+
+    def name(self):
+        return self._name
+
+    def readfrom(self, start, size, num_retries=None):
+        return self._data[start:start + size]
+
+
+class ArvadosBaseTestCase(unittest.TestCase):
+    # This class provides common utility functions for our tests.
+
+    def setUp(self):
+        self._tempdirs = []
+
+    def tearDown(self):
+        for workdir in self._tempdirs:
+            shutil.rmtree(workdir, ignore_errors=True)
+
+    def make_tmpdir(self):
+        self._tempdirs.append(tempfile.mkdtemp())
+        return self._tempdirs[-1]
+
+    def data_file(self, filename):
+        try:
+            basedir = os.path.dirname(__file__)
+        except NameError:
+            basedir = '.'
+        return open(os.path.join(basedir, 'data', filename))
+
+    def build_directory_tree(self, tree):
+        tree_root = self.make_tmpdir()
+        for leaf in tree:
+            path = os.path.join(tree_root, leaf)
+            try:
+                os.makedirs(os.path.dirname(path))
+            except OSError as error:
+                if error.errno != errno.EEXIST:
+                    raise
+            with open(path, 'w') as tmpfile:
+                tmpfile.write(leaf)
+        return tree_root
+
+    def make_test_file(self, text="test"):
+        testfile = tempfile.NamedTemporaryFile()
+        testfile.write(text)
+        testfile.flush()
+        return testfile
diff --git a/sdk/python/tests/data/1000G_ref_manifest b/sdk/python/tests/data/1000G_ref_manifest
new file mode 100644 (file)
index 0000000..d0fe113
--- /dev/null
@@ -0,0 +1 @@
+. 231e69ef8840dcdb883b934a008f0eeb+67108864+K@qr1hi e14a59b578206d2d32dd858715645e0b+67108864+K@qr1hi 27a4b87e4cf1f85dc3fd917d2c388641+67108864+K@qr1hi 06b3ff80cf45bda52aca0711059a0bd6+67108864+K@qr1hi b036f1120ca429d0a148a5e8312663d9+67108864+K@qr1hi 83dc6b43bf27ce28da50967cd7dd23c3+67108864+K@qr1hi 3f6a4512b125bca64e1fa3d82f1e638d+67108864+K@qr1hi c0a8af66954841dae178e9417a82b710+67108864+K@qr1hi b3b4fb7120fae8b8f804849e36de9b55+67108864+K@qr1hi 2323ea3c93cc9664f35b6f90493ad01e+67108864+K@qr1hi e0b0f131d6f4669d0eaafbd4d72e0268+67108864+K@qr1hi 4274ff53c12dd5821c9ff6b12c4678f2+67108864+K@qr1hi 5d7af6348037a8161b1f932edcf32fae+67108864+K@qr1hi b7d88946691cc0d0f3c22dc3619b2ef3+67108864+K@qr1hi 799dd7f25556ad3a90604e094b538149+67108864+K@qr1hi 41b4d1c38afbcc48c0d463ad37adbdb7+67108864+K@qr1hi 6adcba8494cb0a6f6563e4c92a5c002a+67108864+K@qr1hi d0b7417a3872a5889cdc66daff4da326+67108864+K@qr1hi 7a5aeeb69132524c3e35cf454683fabe+67108864+K@qr1hi 9b09a732903086533e58e1acefc5df1f+67108864+K@qr1hi cdd07f6573f9e1239ef83ba8a02d6bf4+67108864+K@qr1hi 3d802b3e5b532210397b6992d9d9caff+67108864+K@qr1hi 5ce57221bdb69beb1376479a00dc839c+67108864+K@qr1hi 9173b38ec40c457fa3bd36ef89e562fc+67108864+K@qr1hi 05c712dee07f2115b657bb83d18f77dc+67108864+K@qr1hi 5764176e6aa0e8dd1195eb37c10ff921+67108864+K@qr1hi ce5d4f465c761cdfce6a7075e75e4c8d+67108864+K@qr1hi 2f594f1a5028e5954b14aba3cc7edc5d+67108864+K@qr1hi 9cb165ce899a80dc0aba79290d054f2e+67108864+K@qr1hi 9c5c8f5ad6dd0a23dbdd7bae47d9b77c+67108864+K@qr1hi b7afd688ca053e6cdd44f7c7add74c88+67108864+K@qr1hi 0b6c92b166993dbf4ebe65b130a18531+67108864+K@qr1hi 9c0e13bc1825573446cdb2e0d2a13057+67108864+K@qr1hi a64e372054ceb89494e7da42e1869a19+67108864+K@qr1hi 04b9acc199d01058c413c0b61474fa42+67108864+K@qr1hi 4d3f26dd05c50bbc5dac61ffc45f4a36+67108864+K@qr1hi f7f4400a463b1950a9422d3d798ae4e4+67108864+K@qr1hi 88814bec594bc5207ed70b52da08c964+67108864+K@qr1hi e645deb6e2cb633d175636327432b547+67108864+K@qr1hi 
82249761efba1a759a94533a0f0225ee+67108864+K@qr1hi ff981edca6999dddb04e2b3528be376a+67108864+K@qr1hi d6e5f14509a26363492aab3775b070a7+67108864+K@qr1hi 20395032fda158a8fddd68281559b706+67108864+K@qr1hi f8dce2bf8d30bfa2306715f355e14702+67108864+K@qr1hi 937e9736f130276bd99c5a3cfd419f01+67108864+K@qr1hi c5ce8d646b2e1f8ceda6a88bdaa42354+67108864+K@qr1hi e82f42c6458660ba3e4aba0bf9f2fa83+67108864+K@qr1hi d2940d4c7155a94dfb31caf6951d9a59+67108864+K@qr1hi 165ffca3eae18d12aebfc395f4565547+67108864+K@qr1hi 8500ebaa55b2b377414c1d627e9a1982+67108864+K@qr1hi 594501b0e794179d7a04170d668358a8+67108864+K@qr1hi cd353db66ea7bba1ce4aa9fcd78fbf47+67108864+K@qr1hi 63b0e03e3a11c00f2b36de1384325756+67108864+K@qr1hi ac811ad87a8a5bc4e23130d2cc8ab588+67108864+K@qr1hi bb2c282f29b1ff680e1ecf82ba2902a9+67108864+K@qr1hi 5d3dbfa93769c1cba4406e616cc15d92+67108864+K@qr1hi 4f70517fc34def9e912ca98c127e8568+67108864+K@qr1hi 7da6aeb8a23b001ef8324ac28996f79d+67108864+K@qr1hi bcaba7f5eb10668c62c366527c5b7225+67108864+K@qr1hi 6fe50b949e8ace7e7bbe4a443b15cd89+67108864+K@qr1hi 7f104154192f4f15ef6760cd8ef836b4+67108864+K@qr1hi 9e528cc6e61ac04ab4806b1eca14473b+67108864+K@qr1hi b9af9ac127033b9639a32b4b4d21a033+67108864+K@qr1hi 724d08ad32a6de2dea78c357367f4de8+67108864+K@qr1hi 8afc63f0435da749a1a58544c290a792+67108864+K@qr1hi 7b6ea56cbf9bfce245fab717b02bc5c7+67108864+K@qr1hi c5903ce244515bb870dbad04acd7b482+67108864+K@qr1hi 491f4228b75e5d9af5c10eb4c7960d63+67108864+K@qr1hi 252bcf6db697723c6bcea11f601be4b0+67108864+K@qr1hi 81244819a14415d52b26a340d0b9a367+67108864+K@qr1hi 40336ab12de8a3c192dc4275c65e0b16+67108864+K@qr1hi 991ffe3960f9510b26352f5f925c23ba+67108864+K@qr1hi 0331ed4cfbc01ce07053691a31568c55+67108864+K@qr1hi b7d63f0ae6507c7f828c376ef2861e1c+67108864+K@qr1hi 548c0041481a1795913f62321944a81b+67108864+K@qr1hi b804bd2af77b7a3a32ac9512380d94e2+67108864+K@qr1hi a1702dad1c2354c9c85b02191800d5df+67108864+K@qr1hi 8e314c6cbda431e328e2de30ed0bacf0+67108864+K@qr1hi 8930eda1c1c868067fe4514c86cb0006+67108864+K@qr1hi 
8ab6885d8e7a65fdff1a64fae78f83cb+67108864+K@qr1hi bb9ca8c63316097110e34d49f9b1a551+67108864+K@qr1hi 280e1ceb75a5db7a72d23b0705e2de94+67108864+K@qr1hi 47172b0b71e03d88fe560c68c3ca5b13+67108864+K@qr1hi 3f985d99b0929ab8869c2d6ee78ebe06+67108864+K@qr1hi f99ce1f7543bf5d6b92bbf1d74bf341b+67108864+K@qr1hi 2d1073b508cde7fdb47d3e0045f12ba1+67108864+K@qr1hi 66d40bfdb5d04f954745e47bd958c959+67108864+K@qr1hi 423753509d9fb12a3b99eb463a8e6441+67108864+K@qr1hi 5d7676bf15ff2c73c84da0d6a1b21ec9+67108864+K@qr1hi 17dd674093f3ec22acb79db4d3c958e8+67108864+K@qr1hi 792f960e48650f1ff4397400d61e4868+67108864+K@qr1hi e44a4987cc0df77331131a4b7ee5408e+67108864+K@qr1hi 1d5b68a793bde89b35afd0abfffa1e91+67108864+K@qr1hi 93474190338acbc799a96e095c1ce6e1+67108864+K@qr1hi 19ad3d61a0729bd779608cefcefd3f17+67108864+K@qr1hi 4effea732567c4def12cb91616fcadab+67108864+K@qr1hi 00313b354ef436c75e89b7d9abb83d4f+67108864+K@qr1hi df5e63c2a4f060d462436f512c54858a+67108864+K@qr1hi ce8b016b1c46ebc3cf3872b26a799ec1+67108864+K@qr1hi 87163cb8078652223da89aa3d2f902f3+67108864+K@qr1hi f85e64b456e64bbb679dfaa2c462a033+67108864+K@qr1hi de8050929d7d89303e809d8f27c422dc+67108864+K@qr1hi adf643ec5085bf757fcc59a47813288a+67108864+K@qr1hi 7d2b01ba0f9be07644927cca7c459e43+67108864+K@qr1hi a5dc9a1d620b49be2da2098bef54ab18+67108864+K@qr1hi d24da6679ba48e4cbe29af40b5f36968+67108864+K@qr1hi 7d9f73079ff9aeeda056e21e64cddadc+67108864+K@qr1hi 47e45f31917f41385dd79c9316e1cefc+67108864+K@qr1hi a8f9a8ca17809b7c2f64aba9a0c13b1f+67108864+K@qr1hi d8d534661a3af310b085323d496f6d44+67108864+K@qr1hi 899756d962d283156f681a461c0588ce+67108864+K@qr1hi 0f28d102f6e05dca547d5a019fe85b61+67108864+K@qr1hi ddd31b0c369f60219e0a65a071772018+67108864+K@qr1hi 7ff925b478d6a51806982f1a32ecdb5a+67108864+K@qr1hi b7afe2d94145776bc7fecb7a8385527d+67108864+K@qr1hi c77b5ee8577c6c89533114803b5efc9b+67108864+K@qr1hi 884661029ffb441e16ca87a248bcf394+67108864+K@qr1hi cd22c4e3fd707f54818545004da24a1c+67108864+K@qr1hi a90b6f132d31540d6a1c9a5e426997f7+67108864+K@qr1hi 
c9f699cdb3ab822dd0bd9c345926c86d+67108864+K@qr1hi 4b0abce1b72d97eebb652fb438c1b3be+67108864+K@qr1hi d4568db71967719cda57374a9963a6dc+67108864+K@qr1hi cfb15250fb5eb9ac69a18f64eaeb3e31+67108864+K@qr1hi 4d3ac3e4c5a278b91cda0d890db84e3a+67108864+K@qr1hi 40b137b57f8d83e4d508a1a4b95ac134+67108864+K@qr1hi c86b47517a22f1303bb519a7a244e57d+67108864+K@qr1hi 712681dd526dd49add64af4f168254d0+67108864+K@qr1hi b5abb03c2b4fbd01e8997f8c755bf347+67108864+K@qr1hi 50245663fc28f000c536a9b85ae05d52+67108864+K@qr1hi 89e2b1ece4b8a34700a48bd5625c1ddc+67108864+K@qr1hi a06799afa99163b7ba2471f4bc2da4a2+67108864+K@qr1hi 4cecb752f0015ca1b70a95c1b4497c7b+67108864+K@qr1hi e0b110c9e392c0126787a7b1635f4923+67108864+K@qr1hi f32788adf3c77d67b09abc43f2922012+67108864+K@qr1hi fb6e48fa0feb6ee3d6356beb765a98a4+67108864+K@qr1hi 52a464c2209f6e262dc72e485b5df16c+67108864+K@qr1hi 50ed10e5d3dfedbd18f1fbff62048487+67108864+K@qr1hi b4d649a55f537c628bc337729857a2cd+67108864+K@qr1hi d8d5fa84e7199021ed7f3daa9e1d3c6a+67108864+K@qr1hi 7ea980aec5a6d9bc7ae8353b34145daa+67108864+K@qr1hi f9b989d982bbc5b3587a2a06df2f7bdc+67108864+K@qr1hi 27c8ba6cc8208fa1c220657768e8780e+67108864+K@qr1hi 3ec1187853bc27c62ecc02c7a27f0587+67108864+K@qr1hi 9b34e5a81de59c416255d6e23de498fc+67108864+K@qr1hi 469f6398ee90d5dd398c21ad06b05fe3+67108864+K@qr1hi d0969eae2340c1e1d34d311cc4815fde+67108864+K@qr1hi 2e3e6b8823e39e08d5e697adc2120339+67108864+K@qr1hi 224fc36688f7adb2ec5cc088b09e8463+42902639+K@qr1hi 0:51549666:1000G_omni2.5.b37.vcf.gz 51549666:95:1000G_omni2.5.b37.vcf.gz.md5 51549761:475087:1000G_omni2.5.b37.vcf.idx.gz 52024848:99:1000G_omni2.5.b37.vcf.idx.gz.md5 52024947:45036197:1000G_phase1.indels.b37.vcf.gz 97061144:101:1000G_phase1.indels.b37.vcf.gz.md5 97061245:333605:1000G_phase1.indels.b37.vcf.idx.gz 97394850:105:1000G_phase1.indels.b37.vcf.idx.gz.md5 97394955:550102132:CEUTrio.HiSeq.WGS.b37.bestPractices.phased.b37.vcf.gz 647497087:124:CEUTrio.HiSeq.WGS.b37.bestPractices.phased.b37.vcf.gz.md5 
647497211:3555843:CEUTrio.HiSeq.WGS.b37.bestPractices.phased.b37.vcf.idx.gz 651053054:128:CEUTrio.HiSeq.WGS.b37.bestPractices.phased.b37.vcf.idx.gz.md5 651053182:19868212:Mills_and_1000G_gold_standard.indels.b37.vcf.gz 670921394:118:Mills_and_1000G_gold_standard.indels.b37.vcf.gz.md5 670921512:547962:Mills_and_1000G_gold_standard.indels.b37.vcf.idx.gz 671469474:122:Mills_and_1000G_gold_standard.indels.b37.vcf.idx.gz.md5 671469596:29993649:NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.sites.vcf.gz 701463245:128:NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.sites.vcf.gz.md5 701463373:578447:NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.sites.vcf.idx.gz 702041820:132:NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.sites.vcf.idx.gz.md5 702041952:38839441:NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.vcf.gz 740881393:122:NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.vcf.gz.md5 740881515:605289:NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.vcf.idx.gz 741486804:126:NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.vcf.idx.gz.md5 741486930:6040047539:NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.bam 6781534469:113749:NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.bam.bai.gz 6781648218:124:NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.bam.bai.gz.md5 6781648342:117:NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.bam.md5 6781648459:3928395:NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.vcf.gz 6785576854:120:NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.vcf.gz.md5 6785576974:66113:NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.vcf.idx.gz 6785643087:124:NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.vcf.idx.gz.md5 6785643211:282374229:dbsnp_137.b37.excluding_sites_after_129.vcf.gz 7068017440:117:dbsnp_137.b37.excluding_sites_after_129.vcf.gz.md5 7068017557:3824375:dbsnp_137.b37.excluding_sites_after_129.vcf.idx.gz 7071841932:121:dbsnp_137.b37.excluding_sites_after_129.vcf.idx.gz.md5 7071842053:1022107667:dbsnp_137.b37.vcf.gz 8093949720:91:dbsnp_137.b37.vcf.gz.md5 8093949811:3982568:dbsnp_137.b37.vcf.idx.gz 
8097932379:95:dbsnp_137.b37.vcf.idx.gz.md5 8097932474:59819710:hapmap_3.3.b37.vcf.gz 8157752184:92:hapmap_3.3.b37.vcf.gz.md5 8157752276:1022297:hapmap_3.3.b37.vcf.idx.gz 8158774573:96:hapmap_3.3.b37.vcf.idx.gz.md5 8158774669:2597:human_g1k_v37.dict.gz 8158777266:92:human_g1k_v37.dict.gz.md5 8158777358:1044:human_g1k_v37.fasta.fai.gz 8158778402:97:human_g1k_v37.fasta.fai.gz.md5 8158778499:869925027:human_g1k_v37.fasta.gz 9028703526:93:human_g1k_v37.fasta.gz.md5 9028703619:85:human_g1k_v37.stats.gz 9028703704:93:human_g1k_v37.stats.gz.md5 9028703797:2689:human_g1k_v37_decoy.dict.gz 9028706486:98:human_g1k_v37_decoy.dict.gz.md5 9028706584:1095:human_g1k_v37_decoy.fasta.fai.gz 9028707679:103:human_g1k_v37_decoy.fasta.fai.gz.md5 9028707782:879197576:human_g1k_v37_decoy.fasta.gz 9907905358:99:human_g1k_v37_decoy.fasta.gz.md5 9907905457:91:human_g1k_v37_decoy.stats.gz 9907905548:99:human_g1k_v37_decoy.stats.gz.md5
diff --git a/sdk/python/tests/data/jlake_manifest b/sdk/python/tests/data/jlake_manifest
new file mode 100644 (file)
index 0000000..5da24dd
--- /dev/null
@@ -0,0 +1,20 @@
+./PG0002577-DNA-jlake-germline ec9ae2be7620af8b6efd96809e5e75e0+1917833 0:6148:.DS_Store 6148:4096:._.DS_Store 10244:1904161:PG0002577-DNA.pdf 1914405:76:PersonalGenome_Mac.command 1914481:704:PersonalGenome_Windows.bat 1915185:2648:md5sum.txt
+./PG0002577-DNA-jlake-germline/Assembly 42a43977e2303a4337129331577d122a+10244 0:6148:.DS_Store 6148:4096:._.DS_Store
+./PG0002577-DNA-jlake-germline/Assembly/conf 95747f01cc916acc1f385ea2d018841a+165976 0:61440:dirs.tar 61440:11245:project.conf 72685:61440:project.dirs.tar 134125:31851:run.conf.xml
+./PG0002577-DNA-jlake-germline/Assembly/genome 6762e2e508fc8eb451f7ec0ea0faee33+10244 0:6148:.DS_Store 6148:4096:._.DS_Store
+./PG0002577-DNA-jlake-germline/Assembly/genome/bam 5a854b86da65e1ff3572f17e088633fe+67108864 9c6c77dd7511dada2e631a3b038b5fa1+67108864 54c6025d7dd4cd8ecccfe573499a2176+67108864 c9cfd86fe5df4b83c6ad8dddd901becc+67108864 c9d7be11850c860e2d5cb0c9e1a44915+67108864 3fb42e18373c8710aed8620fa81db3c1+67108864 f419d215df8195df08394865f719fbca+67108864 d263423f327da62e24867f22fc490706+67108864 40ef57add0dfbee66836365ae0fd7d06+67108864 0fea4b759d870ab6decbb632176fbeb5+67108864 c109c9e484cdafee14c53198c0d6b610+67108864 103f902681b7d40dcde1dce0b79b17c8+67108864 b6b93a2b846620c39e38302f1f7ffe10+67108864 e38be01100ab8d220f06b800b7cd62dd+67108864 f5812ce719898a4d2cb06eebcafdacc0+67108864 6f5fc3c25e8abeb9c079ef801d0f9450+67108864 d9ad4e7e17a79487817ae2ad8e534498+67108864 e5a15e06712a2090e74f78e678b490f6+67108864 69d57fee8eb8dd67906c9a7e9e3e5cac+67108864 68395295a64f0d0b75117a2400a4c1ee+67108864 497dbca0f9dd89eeb2e8dd8aed9796fa+67108864 d14e96347585f714ed527fb0e2e78be5+67108864 03bcad873029c5778e59dd4c4109f5d7+67108864 0bb736900002c0c2b2ab31a6f8009fdb+67108864 22509a92aca7813b2363661ab233772f+67108864 c23528b87c5581e6d6ad538ff007e3a8+67108864 784f055bea23e35078c9437eaa233e0e+67108864 76a724595ce0b01bb51c6a948fa2f5a4+67108864 c52e5205c23728d439e90378267ee382+67108864 e4f2400071c39af9661de0662018e984+67108864 66b8a86b349902d359dc6ac9788e0246+67108864 98353a4694dbc5eae4c3960d2cd7baf9+67108864 130d8c29af7d0a5468b854bb1758dd4d+67108864 9741fb84b07c6282bd6b6a002ecdc15e+67108864 7eaa6674cf09efc2adfd14a7b169e357+67108864 4071ca2915995af71dd9da03e8c6c4c1+67108864 888673ec3ca2787511dca770f090c138+67108864 21d21d390663b32c3738bebdd97c32e8+67108864 f575e7b15bc194bf98e346bb4b2788a4+67108864 b5d983d92f990f90622be2232ced743b+67108864 0e6bd01b5b9e49b1969b5d3817499476+67108864 481144b5d5ab8a1ab7235fe64bdc872b+67108864 6c8f5fe6d45e5719fc6270748015db12+67108864 68966f5d47a96aa8731c5950fd1f467d+67108864 4d48317b33738dfa96174bd5a1dcc841+67108864 24f75fafbe546fe366f7a6d465240f8d+67108864 
8711ad611384bd7f0e89c400c90cfb7e+67108864 2ae54914c5bf92a8cbdad755e8135b1b+67108864 e11116820a72cfed92d64bced7172187+67108864 4cf0a16d830cb8c91837b3f24093ca2a+67108864 196a74cb26cd1bd8e540d22e46fa4302+67108864 92982486ba3b6d370cc31ccfddfd8110+67108864 6c5e160bb75c99ed338bdbbe9c968955+67108864 d0a48c19f2a00ad5b3709326c428b3d9+67108864 877890001e22c2d88375892890e634fa+67108864 f3cad932b80e8d20c8f67f4239a5c279+67108864 15d980c196e5d7690de660a3c3400154+67108864 964bb3d46fe32c7b3ff9077c96a32a0d+67108864 60107013728b9a04d9822588613cc1bb+67108864 e265d4a367e48f740f6fc1c881fcb053+67108864 4808ac7dbb6393908ebf47f453e659c2+67108864 1442fbee68d6d39576d7de798d293859+67108864 c9a16c84c579e83b7423358b7141d66f+67108864 36874b0c65f95e43a0656adad0734d8f+67108864 b174dadbcc463d64b80980a4cdd3fe41+67108864 518c5f4c27b2d5d2b42863e2132e0320+67108864 ab658f08b58b199860913a4b55d68d1d+67108864 92d9f01091907ec183981cedf2f26b37+67108864 539a6fe604549facefb68c4cfc7b204f+67108864 31e641b640af914fe6a2e4bcc31c1948+67108864 ba1e9349e65c068170b62f655a300d3b+67108864 131ff6d51b65974c9d366e81cd608d57+67108864 4ff4d286591293acdd3610373bce6f11+67108864 07cf4b86c65559b68689849c85d8cd64+67108864 051cf1cbf95a08df5c18fc2e98f705a5+67108864 2737c7e7ea93ff9a2c5b2cfd76c06022+67108864 60c00403a73e9b0c40d4b137870abe53+67108864 64068170004d472feb843e4448d8d121+67108864 e955ab9f87979be9d09753e843d36b10+67108864 180692a3f95576c265370680a2a2e052+67108864 cd7a1d16bdef3fa9474ca0a5b21264a2+67108864 4ac9f4d038b4be59a9a2808fef0fea43+67108864 c4947ab9755e190b5ebc9ee9bdc609da+67108864 96ff83a8ceb80097fa25b893b88104f0+67108864 e893caeea6fc8b2f39695b48bbc16362+67108864 254628d570c39863ee1c7fd1a0750196+67108864 2e24dd5c0c0696e79e674e85a3f12934+67108864 1990250dc54ed2e708dca7b4f9a41c18+67108864 8e6e40e53a663018974749ceebb1b98a+67108864 d84c21e61a9cb82dc9c2a7674a51657f+67108864 95f61628504580fa779eeeb96c0577cd+67108864 19f4baba83495e69c94ad52d22ccc2e7+67108864 0b9a91c910d6bee96f8c1caf129a10c9+67108864 
0fb342be2b7e50536485fad5fd98280d+67108864 482ccfbf6dc6d3cec1c3a58982f40db4+67108864 b51612ee241fc06eeac1d823abd70858+67108864 14ff4c77a9a151b0062338c1d5b30242+67108864 9c37ab2ca3179d71544b2793cab38918+67108864 3947527126983bccbcc795c2a922dbb3+67108864 df6e54c39b918a1a26a3f557fc61c69c+67108864 94ae725934b1aeee78db89dec7e629b9+67108864 b08756a57d798709bcbb60d448cfe703+67108864 50de90bf9d8cf7fdc8ff54a5a6ff6f5b+67108864 24c451aa9481b72c0f989ed264198730+67108864 b1f79cc8b5729ae7523bd5d0d7c840b0+67108864 9b5f5392fb4d814ae13d5106bec28696+67108864 55d49c99f24cd778295f3637daddbd4f+67108864 e18e2c5dac882f932e1e4e4cf557b572+67108864 453f3aa0d1a9832e2dd99c44b30cd3ed+67108864 57f19b58f8da911138af4351c6aea5a9+67108864 5e3843ce5bfaf6170a16f26b4e4b1aac+67108864 597919fdf8ff48500c264d0c9f7439f5+67108864 b51bb4c6be8618ab377787f180560048+67108864 ff6a60a658e75febf565ca3cecdd90fb+67108864 9c993d598ac6a696591249ca115169b0+67108864 283ecc93e81dbae4a4cc8523e0260fbf+67108864 ad8ad32aec1b8df99ae91a3580908f96+67108864 d3c0d337118c727705fc5de57e05c235+67108864 7fc7f73d3ccd5c6205c80f845aae7ed3+67108864 4e555e6ce17d792bb5982232fcf70405+67108864 320bd68a5462576ad726c129f6941844+67108864 7ffbde2e853e1e9b0fbfdae63be37dab+67108864 ae25a4cc941178b62929aae15831ef8b+67108864 ba24657691267443b2e7bf6b6964e051+67108864 1e9e12b68f26db8c8f7c7cc788945df0+67108864 edf4acc2527cddf0e20d009f4486d7d2+67108864 baefb0589e1516ed42fced78370f8710+67108864 c037629c703f4a2c1df194202cecff0a+67108864 944ceef59fc6dc1e2dac2d8bc71ef6fb+67108864 d955e2eee34d9873d36065870fe6904b+67108864 eb4b9530be56e0bb33b9827279990dd6+67108864 559d40ebfeb35b1d88e797a3e9f8fdd7+67108864 623e4c4f5a5ae8c466a7b52bcf6e830c+67108864 5790327fc52b0d57357ec948331ca387+67108864 232a548bccb18ba351c681ddb0f84d4a+67108864 e4797b5eea62b9980478d01e7092a05c+67108864 6b888368142e02359ae8241daa0ba99c+67108864 57969f6dd6f8f56064c57ed4929d6c09+67108864 10f07bd7b6c3b8bd55e990cc6b65de37+67108864 ce126a25374dfe6e4cd91d27239e3606+67108864 
536c1ff25f7334b3eee12fd61cd4c88c+67108864 07f03c33b0a0ca3d76d31e65aec87a05+67108864 156cc2e702d5706c6dcbfc65937e0ced+67108864 ed19220354dd45366dda185aa6fcdda9+67108864 f8f3c1650d6d9fd1fb81defcef4f5e37+67108864 157836f6e84cb522a520b8e00cbdddb1+67108864 ea34080f4a2c67c8eac2d204adae0214+67108864 ceff9152067f3579158d38f67ccfe3ee+67108864 d42fd7b0f9afa30622f19cdad886e510+67108864 ca252dacf27733bcd1bbb0fee73dbcf1+67108864 74bd8949c0a3ad016dd2fe0dff389217+67108864 c198a863ca7030e39d8876e0f84ac8fe+67108864 6c63203260faceabf16fed782bee1e6a+67108864 d5d3f52e5615f06c301d8ffd43bdcaf1+67108864 921c874804339001b4122f0302cef8cc+67108864 e0c59c1f377fe5cd9d6ba345edb4f519+67108864 fced978955c7d8526a6dbc344eeff8ed+67108864 4da3bdacd3c86355934b49b053f09650+67108864 973880f2f3c56727b95dedcaa4d0a60e+67108864 caf2b7a8550df7b241e371efa2fe692d+67108864 d7cddaecf10aa58a54daa2b7b2a02e11+67108864 2bc6e177da10b99fcd6d091fd23fb155+67108864 de817551ab1a9cd5dd51c6e81e0e44bf+67108864 f137d5fffb9fd4cfcf7aa4926f3e99c0+67108864 9d17da7cf8b872e2a826fff2c5374142+67108864 5830325f09fd13cd12bce3f691aad968+67108864 8215a5983c95e23a63c687b642f13083+67108864 11f76f54f3d0552c10299367a10cdb22+67108864 6efb7fad52edf9859561e9499cf510e0+67108864 a088c12c4f3927e74ce4a4c5f8760621+67108864 e0f969e633f92e756b1094e28839bd5e+67108864 ab0b88f96c9ee1916797ef424b683167+67108864 dbb6ddb82e96ffecd55bf7b7f7a512ed+67108864 9a055029ec6bd836c85dc954fdf7bbc9+67108864 c5f10798cc176c312c605fff373b3a17+67108864 05ed65cc2bb638c886bcf4ffce6dfeac+67108864 c4a3be0b33972b566228dbd5f47d7e5b+67108864 57ef58cc15ea5d8b8ebbe8420cd06b55+67108864 274c1514d474e64f3de02c179a82956f+67108864 810a77d1d7ba6a20e82c8910f5e70763+67108864 42a4f5e30a2f86bf11fe622f7a618ae6+67108864 9ff7eeca6c1879bd22dd86ce89349276+67108864 913bb4a677d4b4c7c131fd3b1cbebf06+67108864 9ba24586a78d824ace0337f321d2c100+67108864 2f892d650527a819ed7d499a7e09a3f3+67108864 b25bda0cfe2c4344d1703fcf3a6d4b8a+67108864 2784662ce216490c73fe67fbd52130fa+67108864 
e79a6367f486e2fae9e54d0020b0db17+67108864 2f24bfab56da9e223c37b44e1f1ce9b2+67108864 b50f659dd903cb6f9c707f699d21af72+67108864 b94162503a41517f5a419ee397340005+67108864 6231562ab94c3be051ea9087fb8401c9+67108864 ef51cf3cd05f7810b6f322adc187ba6d+67108864 5d621ca36e6f35cb4f057b117d372e50+67108864 8b8239bd23a8adc2cee1f8a156d85e85+67108864 66676e37602149cc7d37ccebe484fc03+67108864 f91cb726e6ac09145c242752c4404e1f+67108864 ce73c7682c4fe26b98ee72a730010746+67108864 8f1642675b8f47e6cef4ce6a65e7dddb+67108864 a0c772eee3f139b1831cca60ce49b347+67108864 547699429dc8c3cfcb1e77e7618f2f17+67108864 02df0efc955879c3f2501fa8b0bcc801+67108864 a701fcf88e400f11220ddc2cbd83a73e+67108864 4c1721a042b1a9437c3eef71a58754dd+67108864 0a5fe95394f9f497e66217c117bfeb42+67108864 f23e04ce1d188c78e33f258b45c5ac8d+67108864 f3c7badf1ec40e33b375e62971320278+67108864 d2242648880ab86690ccea9f98275e43+67108864 8b397dfb9224fdfed3859ce86d74ad0e+67108864 09153d492860fc9d412c01785340798a+67108864 c3350ac90cf4d559a7295a730f18d20a+67108864 baebf81afe44cb6b42f5f6e6290ec0fe+67108864 96cd500b282c3113007c697612444603+67108864 a73c14020805ae3c1925ce55eb2e5971+67108864 433ae5411010c6213478f28ad6cab58a+67108864 3e042ee7b88f2d28bfe5b98b44f3be6a+67108864 fd9dde59a76b04cef85e85d61d02e6ad+67108864 d16f4bbcf2a0e8fdf932b54e35695844+67108864 efe1099652662006f509b99df819f1b7+67108864 613fef2dfc96ad3305006cdb8dea0728+67108864 b3eedbfe69eb52fb6bf7d21ea1c3bd5e+67108864 2f1784343b9d2cc4682ee293d051500d+67108864 62ac15c401cabc308f91639667229e0d+67108864 2afc3e34c284f72faded649c0ec3b72c+67108864 1399e4bff224e69402319a61c6db3dd2+67108864 533d737e352255f8f1b65622d92fe9c7+67108864 203eeb4e4410b7a59230eeac6d8e6e71+67108864 c8bacf1552def30bf0a3525bb90d4ee3+67108864 de0d669dd7c9cc7c394eade1f4a11100+67108864 b995714c0553fb41ae5ab417579381f6+67108864 5f6be59b124ec208b9e4d461fe0e5321+67108864 fd8f0a64895e2d28f52b94e980d4a6e3+67108864 776afe2ccc11fc9b9b9bcd1637ef346d+67108864 95b8cdfe70f64a49bc570565107ecf5f+67108864 
26479dc9fe565e63bfff3f5850d77998+67108864 8da85395f1b08cd52869d77a7364249f+67108864 66f76a05e19e0580251ede71e2958e92+67108864 5ff92a30a1aa13ab0a42320b77cfea45+67108864 6f3f8613d13412c9530801a9a3f35c1b+67108864 50dc0b3a14dbeffd71b69058275b2d37+67108864 cf19857fbd6aa6f6c3c258431674a106+67108864 e0b084b9493be695c18f5d3cb42fbaf6+67108864 3aef149ddbecaa8ade6fa0afbc9de9e9+67108864 3854fb0bc82e15b33cb2966f5be46239+67108864 06b43ce1c20a4bd948750f7214e40f4e+67108864 2ee5b68ddb703c975e18319a67621c7d+67108864 2306d047cf9aa09caa1b2a9a0c541896+67108864 af2c70d38e589ebd8c7649a90cb065fa+67108864 71bc4c0a4c2d00cc34988fb0ecd1285c+67108864 0c2c0cbfe580ec70c9cdf1b8ef31d4f9+67108864 cb15da81b8aab0167e184f2ee2b78b2e+67108864 003e15c44f959ec8e46327b05e79d599+67108864 6ff440ea12517ebcc7636b07f840b7ab+67108864 7a40e62a496ee3bb3b5fdadf6d2af3ae+67108864 db1e814bd3ae2f39083a62aff4381f90+67108864 7c68aa83cb27f585291a835b4ac5ced3+67108864 c7a7efb80d3347d4f7a13618ba5aa3f0+67108864 1411ce7884539214a1e92568231f235b+67108864 02a2b2122889b53f45667af2a120d01f+67108864 ad22b2a76908ccde5aa8adc29df538c0+67108864 280e52e831b65105f06a38a4f67b6402+67108864 a5f9439149211f3a7d25fa11de178fd1+67108864 0bb88f24287ad72b8d58c536c2e34663+67108864 417d3b1862cb8df75a7e8a210f15e3a0+67108864 a441aa0f23faba7c3cf66def56e66004+67108864 f55322a307d173bb797924fc5f4afbc3+67108864 5a9a3ad6dd8ae415a0e696dcd40a2b7f+67108864 0bab46dbc9501d08a4625629f4d5b860+67108864 67893ccbe30016c1eabe3d2c8a80692f+67108864 cc8357e24fe32c5cb15b5bc8057b9f4a+67108864 9e9673069303440d7f9fbfbbd4092044+67108864 f0c3f8cddd5792cf5eebe8b79dcc1bbe+67108864 21ce8204bbdb6f016f11fe9ae5f6020a+67108864 0f8a1598ee8c418b9ce7c34c175bf9c1+67108864 d7c5a77e5397635ce1430ed537183d5f+67108864 56b7099cd423ebba5cbbf10d5e0d2e7c+67108864 1c9804af0c110daa81ce966a7fbf41ea+67108864 163197488a7121c0e9a33bc0fde5ec51+67108864 374b8b0e3949983e1ac9fba65a3e4024+67108864 ced30f4e0c46e5659f364248955c1667+67108864 5988fa40843d6a1ca07b957e77c8ec12+67108864 
fe2111482b8ba2c58e2accfb309b3b72+67108864 87a7d5a127d9273d55f5dd0ff4760a13+67108864 a64150d31725405d33e689ab39377efe+67108864 c8abd310e6150bd4afadfe16e7d256ac+67108864 027fe6eb0b09a4cb755d1d62b0a1423f+67108864 a9b655be082bfc1c61e21e0510153776+67108864 aa2a094dca5481091ce7ba08852b0903+67108864 eab0206180b5c06bdc9202aa6d7e0eaf+67108864 d84e88e3bf76cb3a4a44bf98eee1d262+67108864 9d39e5294e994cda2d993e06770d9b7f+67108864 427b9a850025c42f01274fd92bc4ad51+67108864 6155de11d88a0a5e81c04d75e240bf37+67108864 b700a6e431b551630b28368317194aa7+67108864 2d5a90b6e65546928056048116c45dd9+67108864 c9ed6f6d6dbf42ffb3b377973ca7dba7+67108864 08302c1433b0fb80ca8dfff9e6f74681+67108864 92db4d53c6888a14602556bbb79f835c+67108864 c21fba4899004a955ef88e4ab3d38941+67108864 de1b9dbb2f511d2ece9d9837323101f1+67108864 54180d64a3cb966eca0356ffaea49f26+67108864 381354a3022c66550fb4e44d8034a141+67108864 597876a4a4464de636f8a5fbf3e543f5+67108864 c29963efcbdeee5b08f5f1575a1d6092+67108864 fce9d2b920f580ea8779f72fbea8d964+67108864 2effefe2d35c5ba29c9e2e6acc03ed4c+67108864 e04da28dc87c175eb01704e4b51fb4c7+67108864 f4adcd9d6ecdbdb8c2b91ac2ad591d59+67108864 53f8e4d2781414ad30e4479605b589c3+67108864 301c286433798a684f2d721c7d1f5c03+67108864 4f5e4d9c261ec572be8c117135ccd8fb+67108864 d9feb7b8981f846190b4e4f8c3075164+67108864 ff9343ec031079a66cd0a5560e4ae9ac+67108864 c4abd8ab5a655d3bdded0516ee442db1+67108864 db082ef8f2e3ee42677b34ccca86b5e3+67108864 6fc83a81ea15c177c41b2a56a736a098+67108864 c09ed63c14f57b8218470f6b0ba1435b+67108864 3c87cc66c90c9f0d995408df69305209+67108864 9c9e9b96e4d2970f55ff3f5930495d44+67108864 7572a36b461e6c3fee3512d8582aeb61+67108864 948d1d1e7c8ddf237fa910623c963568+67108864 84fea0e98e7718e039bc9d6c1632586c+67108864 9eb6eee60f28e1d075f71d5cc0447bae+67108864 3f36fc2738992c1135a4e80a28b28b75+67108864 db81cde123e9bcd6c291209d385129c4+67108864 e14d1a2b49e62d23b81fc9383322dd19+67108864 c79fa6a3288e27bcedee730a1f0d54f3+67108864 80679d686e1a55b5b22a9d250023c2f0+67108864 
114434b3a95875165370385cd5adfcb3+67108864 83c5ee9bf0da6e2419913d4954d28e35+67108864 8c26ea89fb32a840068c2b049380ac15+67108864 afd1f950225dc30b68260e0a85297e72+67108864 35ca9657c4d2ad957b3d5f9f56a3f2c1+67108864 6bbffec1991d0308e6fefaa7a260097f+67108864 0e9f68eab1ff4f688863e192a374846d+67108864 eb672833bd531bfe24f1246628a3dd8e+67108864 7a3eb4e715458a189579184ebdec2215+67108864 1f34c4808c3503e1993ccd80ce850ed1+67108864 041b3c92294ed8be56bad1009ebde213+67108864 a88c8e6b793f189af93f8c2bae915bc8+67108864 af77c7d4cadc116f4c78f46f9f648edc+67108864 4890bb9d239182d6b11b12f4322b3097+67108864 4707a2b5d2951423b9cb5cdf32141783+67108864 35ba9cc88e7ad5b5c98e43a921f7ff56+67108864 2eb75b8978046345cde58d2176af7170+67108864 add1039d192c6e42526cf2b6afa9bd42+67108864 9a477df1480fd7592fd2ce32c39f7406+67108864 6fcace189ae0cf89e39c98ae8738a85d+67108864 5bf37955e0e45794d0390850401964a0+67108864 446e566fd032785f676ebdd3ba1dfafa+67108864 fdf9c3e975893f7c3df285eaaef9d69c+67108864 9befa5fbe2f43cbcd728b81508369f97+67108864 d862dfb30026a97111e96504a3058e04+67108864 78f0cd0b88f301624b5f104fa30858f5+67108864 4ab8f486d99a3b4fe66c9c348d6fd907+67108864 580601b7db8d4a0c9adbb2e4fbfdd486+67108864 9cff8f97cc34a751dd58feb19e416e37+67108864 63ddfe988e3fc9c4b88365d69996f4e1+67108864 b56ecf4f9b696c9453ee8dc948fe17ad+67108864 81c2eebdfa8ea6950cf57b863175e2a9+67108864 da2e52c5fa244bff3d5c86f7b702bb25+67108864 462e9f5d6031574b2a22ad60f8cab54d+67108864 c445d18f7531e24f0a160a7630b1b8ff+67108864 1974531544580a294bc8ed247b5786a4+67108864 21e93b14a1f1e35264a62d1ad97a3501+67108864 5c634b78ab66b25b93a6c019dd10307e+67108864 40c7314f60f524b551a121a98a3b89e5+67108864 1677007ea98f9147329aa04fd618fe2b+67108864 a79c00d0291a392abe73c0c162649ac3+67108864 15bf1499acf1c24c8b66ddb5c2f7588c+67108864 2eaea836bff8cde4519db6cf2e69b05e+67108864 1e5ec5554dce9eede58caf8933ce3596+67108864 ac1078460e08af47d6c5d2ae1dd3b746+67108864 a6dea03259087ab7207a5f5a1ef679d0+67108864 78ed9f9db2e9643daf166658b952c127+67108864 
c5d12b1cff4bca0c8d15e1bd73dae1d0+67108864 c883939a7cf896bcd2f85d850e71b07d+67108864 fc16e276e1588daaee99de8018df0571+67108864 7a21a92e38106e93ee8ae7a7381329a7+67108864 8eb5b51ffdc62129fe0eb28e9e38b489+67108864 d9c70a717133085d230e29c83b495676+67108864 2eedcd7fe3d79c6a9f2b4aaeaad04164+67108864 da5e919680b6ea8d41b8758e7086133e+67108864 d1cc0d339784a72a4f0d263464e071d8+67108864 124967dc391185cd924e86e2d9ff3745+67108864 e6b68f6b97dc9e2426b699b3140564a9+67108864 6cc20ee8b272deff8980d2b3a4477e90+67108864 dad069a6ba6c47b35483cc9fa3885d53+67108864 b0eda0c60528e29f5b216012d4616b1f+67108864 eb5e3a335f47c063adfef05de7b4667c+67108864 b545b5ad590984a3f478998f5696a72b+67108864 f73ddc8d5914626ddd50bcaae6abdeef+67108864 62793b6bdb743f9448ccd5740b33680f+67108864 1a84b04f18a2bf3cbf38ee71592361c6+67108864 518bbaeeb370f12d449c8d63946966e2+67108864 5457694f602380c630b15f36ec30981b+67108864 b8b72eacb6589d8103d1bd9c05759d9c+67108864 122d43c6dd8f63428580de4e2a3b7e31+67108864 5f5bf06a26037d6e683b088559a7853e+67108864 487e0c8983c7aa7a9660dced20fd75a4+67108864 0c4bab937d54d2b41d64874d05266304+67108864 e585812acfbdcef8b863cc1c55d3f070+67108864 a636fd1e57ba72c9710a20b0615c9c36+67108864 7ec1d0e5a83afbfe0848b1d11a192977+67108864 6206fde59d76ea51aa58dea598f35df7+67108864 5541b1f4b2ea70f386f99fe2bec7ca8e+67108864 2765d0875069650b3ded5bcf1057748b+67108864 76fc47f77e38cd9e600e0eeaf9e30b6b+67108864 c158c77c833392aa67e8499812e5f83e+67108864 b180e70bae15ed646c3f53e3e741651c+67108864 c9270bff3ed8f8b6676299d470ca10b5+67108864 50e72e004bc6fafcc55c95af20b38cb9+67108864 c4b5047e53e0330b9312be395cae7de8+67108864 8471c29a2e2a64144cfcf81102567306+67108864 edce7e124a06247a4eeaa5961def2fc8+67108864 83623c9a99c45b228a04d4099be110ff+67108864 98c5f2deab5676fc460e553c792fd5d5+67108864 583fd68a679773af986ddea8c2bb2323+67108864 2bcdc93ccfb5601210573e985f54dfb1+67108864 b36651e5cd042a6d221dbbb370fdf595+67108864 e815d8da6a24f58b6079500dcb6dde92+67108864 3ecbd8481a063f405a5b87ada486e229+67108864 
559159e2bbe19ec78551d4f04ca71219+67108864 207965435d3dfa7dd6b872205ecee670+67108864 09c39df8dd0ed895f2f06f5fdf331f44+67108864 222acaaf41033f052ace00e4f633f8de+67108864 3bfca4b0dddb38adf015657390aa84b7+67108864 ca141e093060662c7fda108ce8467b9f+67108864 b72c6eda282461590d16f05cc5b6b6be+67108864 ee2d50ded50608024eeb6e2a375775c4+67108864 97aa77b8d241de99d3ca107876e59577+67108864 5fbe7fb61babb4c6ea78353db046577e+67108864 22eeffd26c93bc1832c08664e567f2c5+67108864 ee46575fb609641bfc79f45d0a552263+67108864 af222b7594ef2a21968e8f5a355f602f+67108864 a3196fe7b6d2a992ca31d59eb86a4884+67108864 f86726c23cb938537bda8f12bed90a0b+67108864 92f65da548b38c97fb87baeb414b985a+67108864 6a379105ad86d53af712cf270cf201c2+67108864 a6342eb8f300adce14b3dceb65c48042+67108864 68c0b82d38f8f55f234c8b864f3124aa+67108864 9f836e76ec05bc98614f88104805ce94+67108864 9698568c75af9181b50fb1aaa248a17d+67108864 ba20012679f7e77e55d5c0c8d4c9795e+67108864 ca7637fe43241795a070542007cc0fb1+67108864 73b586d5f610791131e73f5e5e2bbb33+67108864 ebaa2386120e44e5c3b003ed9afb1a1a+67108864 fe94d8e9d03089abdc49b5a91bd0a5f4+67108864 549a252c97994224d3f1878bdb2d3bd2+67108864 b4f2c8c6c2a363005dd59ed295f7a372+67108864 4d3df1f5848bad821d9aa9ba4b906e5d+67108864 29214f6eba324b577f3e57eeed313d8a+67108864 c2b2b768109ee7a185389bb2c03c9463+67108864 430763efd090fd67b9d34648ec769ff3+67108864 0b45e8e8635ce07dbe95511a0f73a58c+67108864 9672ae65f691a9906a30eaaf1f412456+67108864 2790ff8d6a65162c64a8020737c056ca+67108864 71deb0da8ab7b2cb8ac31bbbd2153b7a+67108864 0176831f5ced03159f23d1605f09c0b5+67108864 a214e08c45eb9659412f7e1da66f8393+67108864 082f6320725c4353e1713e5543896ceb+67108864 2ea08c52cc0a421cdb787af6093d6a8f+67108864 293b457808839b5fb1c96b4ad5307f64+67108864 a8bd1439b2bf08516f48867f2649c92b+67108864 7fd29dc64c3dd3945eaff1cb9159fbbb+67108864 e9de19748265de7ae96d9b7d9bc4f021+67108864 2990043a6a02592178b3d515cebbeabb+67108864 3e22d44e8668c64b812e6a69e3fc9ba1+67108864 17a9aedc84771d8b0b2a3a8004f1b5af+67108864 
58015b5a67f8bd5924732d5821c0cf6c+67108864 00b48587ac18be30d2ee3be476f753c7+67108864 8c6e61d5713cbe261f9bda849810b113+67108864 6687b061aedcac8b3c0397ca06cd7454+67108864 a680a002f3165cea16d65e66ae764e6f+67108864 6a5f349fc52033d3e94f98904ab425a7+67108864 51eb722ed0402ac5da6cca70a5ec5c17+67108864 0b71fc77e9ac56ad924716419c4b14b9+67108864 cfd35aa4526e8ace42fe58ae778310ba+67108864 c189e3b91cf3fdedd4f16420da6edada+67108864 90414e8a86c002ad919e3fca22698f24+67108864 4548a56cc0285c88b12cbd149a8c33b9+67108864 57ad0742333b640abaf99d2d76251bbb+67108864 c96c7bfb6e4e5e91eaa95ef4a7f0063b+67108864 0a2432298689a7930310d505a8b72b25+67108864 68bf60dbfd64b9e42a4ffe985ec0ed1c+67108864 82af1dfa30b23d296c8c492ff49d363d+67108864 52012e0f2502a53d1a7e6bd0b53c1f7f+67108864 b7a38a5eb68c133166608d6894c40323+67108864 99a474347fe2aa2f885d3e03b41c5a13+67108864 3a7812b49e18c60e8ffc7e06f3f611c1+67108864 bbf3832ed3621c86917153a27bcd6c96+67108864 f7c80aa5a5c8912c7b374d17e53f8812+67108864 31d27c0506420d4dae49bf3500171123+67108864 5bf3a9a223b785637a976d8a2314d15b+67108864 0e2e4d0f02b414d814bd8f142a63c611+67108864 e0cd118bff0722657e8af9d6f7ab9387+67108864 aafbd2e68aaddc80b1b2c9a58d34d912+67108864 31bf19492fca47885d25ff1f03458eed+67108864 22ec120fd0738b9f641e45b0062eba27+67108864 218c385c10060cb8ce8d1af4df3199f9+67108864 1662e09426794ba9017f0b2ee3bbe056+67108864 62814cac58ffa10896b0e1efe404ab07+67108864 aaf6128b4e32d506c09fc2ff789e4659+67108864 fa9132261ef770571d448da6e820f05c+67108864 7a155a0ecd60a1a3802f9a3dbbdac312+67108864 b5c9b5769ba7f38b516dbad74e4b85f5+67108864 7738f2fa92dbe1dfdb7baf21db826cbd+67108864 07d0c63d6265000dbb3900229e27ff72+67108864 ad08ac30caaa64b0b1c6ae78ef1eeb70+67108864 14373867dd0ea7c9c7ef2f5c8f6ab326+67108864 6bdce1b8e03734102e07a68a1cedafe6+67108864 582c8263766cceff15f0e1aff84998b1+67108864 e71f57598751ea906c68ff46c478c76b+67108864 a4804871f087cd8fe93b888e274ee024+67108864 144967b90aacf70d4265a83656223e35+67108864 4da6c6e79f38ceca986d250f6e932096+67108864 
9bae8b1ff4292019c53888841f72a982+67108864 f0ee2e68deba6b63c248003cb82f71c7+67108864 a5ec67f66515b948dfa49ebf4c85222f+67108864 04c73a8657409844f66619031721cf93+67108864 1da9010012af695023de6802e1f4cd96+67108864 d7e4035ba647ca2aeb4f218a5fba7f65+67108864 ea989512fa8076eb0fd44a028d460924+67108864 c8e3fe48f7b331d0b264f302275818b4+67108864 a1f498f4b71bf600f5416509cfb3e65a+67108864 24b0c787a8d21be9435748465ec26515+67108864 d11ca8b28ae1b8bdba9ea71a78ff73e7+67108864 0017d2c46a2e0250eb5c6d75a9045f92+67108864 bc156095174bc4814efae2ed49367fe8+67108864 7f127376f0cf418c605c5950a4bfec42+67108864 bb9e9b067b0363c6f1502b444c0b4472+67108864 2f6b3a746f5e8cc2254ed9cc08450a3d+67108864 196f05634db6db0b069ba240e6a646cf+67108864 5493c7d92b9710993ae12ae788904d3a+67108864 845f7edc40ff2731224713ec3deb2575+67108864 f050395dfec775d38db5b0d57ea0a581+67108864 4e5b768d18d85067d93a6a519ffd10ef+67108864 5cf888ffe5474f587318b36460ce0ad7+67108864 c3be66ebe0fd039a13cf0fae05774afe+67108864 303023470389e4048aae95d8bb34fe7e+67108864 af06698e1893e2704e03f722959d7831+67108864 57e7124fdd21f6c580c4ac374861a026+67108864 7dbb565e8fbae60e3597eb711921acdd+67108864 3e6550d9f748d441dca47757cf98ef22+67108864 178423073bb92104feabc8f7708164f0+67108864 98eeab6311f4d210e230573b9d5d43aa+67108864 8e3feb1831e7ad494dab2ba877831944+67108864 32ad56f602dcee3ecd0b26b33f439705+67108864 13fc3c90af586e0171cff2c1ced46af5+67108864 15cab69619aca7efbd9dc84dda97c2be+67108864 f734bc52b02ff5e7d00ff35469495084+67108864 2eae852de4af35923e335c11ab9a707f+67108864 fcfcdae7f7368cd3f4cbdf5bbe4c9218+67108864 da45807d889e3a9c7fff8d5184101bfd+67108864 9b877fb32436c4b8b207244e01e1874a+67108864 fb1c79d9d336df91ee9e8f16acab2866+67108864 cc45d4bc6c390fcc65ac41cb478414ea+67108864 abe9501379c619480d0894455896c4bf+67108864 8272118879a5ba38ade3942a7678dfbb+67108864 e94ed5c5ae2b7b30088b5b0f15f7b070+67108864 56586f0d6388bf0a191685aea2884b17+67108864 2f78d776307e364cac16d82b7ce20c3f+67108864 819cbd489af5fa54f388dc255e156012+67108864 
023ea9e60186ddf1cf51b39abd8d9b89+67108864 52aa46153ceef085ac1c81b22f8fc7d6+67108864 e79234af9de91a7a55c5c96097fde9a2+67108864 eaeb9ddcac5b8cb42cd5c311995e5923+67108864 3c98cf812311fbd27ca8562640969aee+67108864 b8b1496bfe192d3863aed28169a1a5a1+67108864 7c0055922279d093c461459d93be5d6c+67108864 89109de3468e0c004a5df2e6ab4aaa5f+67108864 9fdcec9a7189a195b82946f477e8bcf6+67108864 2e924831c2241f3f8a9deeb44013e8d3+67108864 1cf6e8b97738a95881063cff3c1d11d5+67108864 7cc135fab1e7d88c70034bef501ff218+67108864 b1e2ed6915c6103dd5b854cc064271d7+67108864 7734fa966c13cea05fc1d67fec540a83+67108864 983e5f5a2f2632e9ac307f343ab18362+67108864 7d0fc77105fc9c1326e81baedadc2611+67108864 8e623fe4072c4e9ebd146d836c725076+67108864 06494a14aa546919573fde57a86ba94c+67108864 ba0b2cba826fae4c98c807b96fda418e+67108864 ed01c980866649a1a8dee6d43017aac6+67108864 f65de1fbbaa627a0e8388905bb9c2272+67108864 ce6ce58654540a18de19258cccf39b0d+67108864 a496b4869c4b258a443a9ef49073838f+67108864 d5522d98d48cc32f511b54efc4b85076+67108864 d82c6c2cfcdcacbf396f782a66f5a0b5+67108864 e16c8092f0a4ea3b0810f3d428581d36+67108864 a3882f1484c0817d39c5cf4a9e5f5cb6+67108864 c047df254090031ac754fd1518dda335+67108864 adf67c2a1461a1f3d31825563e9f591a+67108864 490c76dad1f70cd18fb91d4ae73227f3+67108864 6b5df83749324e88e4201030f97c203b+67108864 428d3fc9733f6833c773d207bc48b3bb+67108864 ecfcfcd10eeadeb5d551475d6ca99d8d+67108864 c79936bfbe83976274d8841a55edc0a0+67108864 3690834378f8713ddb8b209ca5dbdc60+67108864 7995163ac59ad78033956399183c6f80+67108864 19a11565a6e351a70b1f081490adee4f+67108864 0e629545f3e092533656aae73161ef7a+67108864 1c89e4d1ead0d3962c6a5a476ab7259d+67108864 94bb9ba880fc950bf008fdfe31926d18+67108864 ef92797ef7501d6748c0565527b78e13+67108864 1ceae0b2ee74bfd514655fbff18f185a+67108864 08fb4aa7c2e95ecf51a7b9edb6ca8abf+67108864 0005f0aae91d1b8aafa7cdf811aba9ad+67108864 821b28d84dd2717999becb924400b2a1+67108864 62fc209aa8daf7ed5e7d3e368a959715+67108864 a51feb570bc08c90109f072a00a86e6a+67108864 
3a43bf7c8329e803d5a5b946f3d2eeb2+67108864 5fbaefefd0e33c0561d2d1ad02243cc2+67108864 70125926487b5e14434f4ee652e57f5c+67108864 266db112d5741f7cc0033a9a5d892a02+67108864 6402d4e9e089275626ada0a304569b08+67108864 ac89f489e384afdeca6f675efc065d03+67108864 7d712d4ce185e805bdc1f4824a2c2013+67108864 5365fec63b04a937ce25921cc74fe5f4+67108864 6ad1ceacaac14aa728bc5a0dbbb1cb8e+67108864 34435c0dedd729486c3629f8cf9f577d+67108864 79bdaa7e0686a43759a8bc554afe288f+67108864 976edc9593c14a6bd89b17d64b960e39+67108864 64b9bf319d1625a1ab43bcf702beb1a9+67108864 10f927dec0765488bc0c4e4ad5864fa4+67108864 f49bef52b8d0fb6b617da9c2e7f9670c+67108864 455129b3c82cc5883065979d29eee373+67108864 021ab29d986daf4d3eac5016202f7133+67108864 7a6b5fa82b5504061c240832980b5fb3+67108864 66ef0cabcdb0a0597c1d0cda9ea80ccd+67108864 7162f8592aaef6ba81eaec040513748b+67108864 616a218103d3bb29469d3a114a161b28+67108864 943d9da1352428c411e9dc7481a048bb+67108864 63afb9a885c0eeca44dc27b09bd5e08d+67108864 c4a6f3d00323af98bb03f4036d534f66+67108864 0caf3534294ff0737818091a223b39c1+67108864 8ff5a74aa3014ff730a4b2cc7fa19f8d+67108864 ea60fb9aadfd2b7dd2c2ec8311a78917+67108864 664f1859521ffa626e2180080a86da49+67108864 39c2b35af70b0cbbd185e525b6811d12+67108864 9cd080c53f594249856240fd8fe3befd+67108864 46c00423ce315e452ef072f1fd2f446c+67108864 791ec75ea6e41fb6569b906340c8359a+67108864 d6171b1a4a7cd7e7151181da6e355389+67108864 db9124b010b6d85517bf6c19df1fbcea+67108864 9d342295e2fa66cab7a9bf525e423cad+67108864 ebfb6d5d8da2c885ab89ccaab22addee+67108864 edc6f46d9e98132e8a70a5dba6d0a524+67108864 c3a3a1a69c7d06724eb506231ebaa4ad+67108864 a5efeed9314f0ed8d8cd098d27899fdc+67108864 6357216e884f403729f9ed85a1e156ff+67108864 4e2e7af2debd0ac7334125eb148736ea+67108864 ee9885bbe6fabb1d20ecf5075c3ef1ce+67108864 2e082c8cee844f71be6d3a8dc63a3275+67108864 8204308ba14a9b7cf1cd14e26ae231c9+67108864 49656b0e77291783b56dff27f69054f3+67108864 bce59a39ca6e7171f413c737a80771dc+67108864 90818e71eec72d6ba69f87a2ae552885+67108864 
713752842990e7e98f4c6aaf4174847b+67108864 c9c4f210937340bc0741740dc0bfed2c+67108864 37abdd6893d4b1104ed0aab3031e28d8+67108864 29da5ad57f46c5105f97b87db4e080b4+67108864 e482af928333956a09b440dbddf00088+67108864 08b9e84d1f6841abd8a994210db3c69a+67108864 213b94ad5bbc43168711864b441349ad+67108864 c703506d76acb781ec09c3285fb0381f+67108864 31c7fb70b7eb1832377c81509aac937a+67108864 6bfc148caa4b734b423d889d3b0602b0+67108864 110e00453a504570e90799b4daa042a0+67108864 6bc9e6edf15a1168249ed54795464f41+67108864 0bfe1332f9773d4056009cb1f1a8eab7+67108864 ba552c6d88ca5c6005143c6a7e85e7c0+67108864 11f93451e28ff65ce40de89091b62f8f+67108864 6acdc268644d8ad70e90e68210e8852d+67108864 7eabb04f5237edf30c0152c632ccdbcf+67108864 a038c58f9dda274d06736df758a29436+67108864 9aa9974c9fbc581261e09d59e9224680+67108864 6e5398224bfc952fe566de3ea89076d7+67108864 265eb4465984d2f68fc639657fdde4db+67108864 0b2b8a7e74cc58cb8594cfbf75e5cf7a+67108864 7d19acd58426a5b4d7cd2a8cd4e4ce55+67108864 7ac5458db9dc2cff8f961850d4959771+67108864 eedd1feb836fce4707d7f44ece26b086+67108864 82e86b19ac612bbcc31206c40fa9b883+67108864 41d7129bb2145abcdeaa257b3b31312d+67108864 6e1240f3fda156181fe0a1d52ca5fd05+67108864 3180a5269907a15aa42c4e448724f816+67108864 ddeb2b08aeda71f0655c6cc786f2a85a+67108864 7db290b6db7f6d60386f08bf00518e68+67108864 4379e303cb12df9a9f28a0f86756737c+67108864 5621082de6125cae13913cad7cd546e3+67108864 be1af8242fce966ba091fdcdf5d3d769+67108864 1e714951ba26e3b7a010ba78ac5dc9ca+67108864 68921acf53a6b458cd6e5b8e0ed1a374+67108864 7c6d8b960e16168a32805ee8373f8eb4+67108864 5a3b518f9b1529a7c144d569521b7731+67108864 a0bae5564daf95e14cce3b8842b15535+67108864 4406c2de1a9787f1b589789ce2f4e768+67108864 118f08b4245eb3857a46276e45bab1e9+67108864 3d8d6effccf3b0342af688187dc4f3e6+67108864 d192c8d6741a6adee036f5aba554edef+67108864 965ae2d62cada40f496ac05b403e827b+67108864 81258bfeaf7a9fc15666ae10af305953+67108864 ae23e489f9d9eaaf7d2945855e972c75+67108864 297187be9f6fa75f3445720cf961ac77+67108864 
2bcb1194581501e8c20f00264256ed58+67108864 16bafb9d0cc7730239d180101520734c+67108864 976cc5ddaee712bede5542e7b003b0bd+67108864 bcd02f6a3b00f4550730440a5aabfa9a+67108864 a455a0dd014194ae08007ea6e8ffc203+67108864 6495f63085f1eda582bb0475a44bb34b+67108864 76f39563689b46c281fc5af603fe744c+67108864 3c730aa83b0c01bc36b5cd8d666a35c4+67108864 637a2ec9a7840d248259466fd9025b05+67108864 0fc5ed24a204865033809c785cb7c5cf+67108864 4a3908945ac82bff0c9c133a42a60035+67108864 a2bd743f3304c65fedbdb7904f76c6db+67108864 b4b548164626fb0fb5bd4e1027c65634+67108864 b8ada412683bdaded98f4d772bfc0b72+67108864 b6230517ca29374241efe1fe500f3bf4+67108864 c2219a2c649a50b03e63447d7cf76290+67108864 7d5c44bcd5631650e79c979faa3ff1f5+67108864 e8589cfe03cac2b0958c8d8182005944+67108864 2e2260880ffdf589dd731679669d4162+67108864 ce30ca4d2ee870a4b645641fe46f4bb4+67108864 222aa06cfae2d5fdc4c54e31e0a2f7cf+67108864 7a5e6c5b9e962ad698ef50a3546e50f7+67108864 cbaa5957827447ed2f2548c044c98c2e+67108864 1316d790c4a0c56c96042c27c774d417+67108864 539b1ff5b0cf43d24364c8a2ddada430+67108864 d297a6c4ce2c3b5738afb24bfaf4112c+67108864 9c28f3fe301bb8359609a38272b99166+67108864 b14881938bd2985ee9780c543d79cd0a+67108864 b9131f8ccc3d3fb4d7879d3f6ca6ba8c+67108864 7b9bd91dff75a3da7898c54341a84797+67108864 d729676cb8f2032aa5068c0b6f3edded+67108864 36fd3979f82c521e0ac49591c38a1399+67108864 621f831d2103670dd086fbf74a2baeeb+67108864 434b0edbde1d7c8ccc524754ca511336+67108864 6191761726b291039971013e456860b2+67108864 81f56aa9beee03bf95e2d96e10331e6a+67108864 478af52460b6c572c4586067e621ecb7+67108864 d1b46bdc141e6beb00f573c7aa2eb76a+67108864 95e9acc5d676161bd4011c915d35cebb+67108864 31fb0b0f97de60f84cbbb9c6bae275fd+67108864 68383b28acd96e9a23087138fd3f67c9+67108864 06e7e4d9dd82b4dd307805302f59b6a8+67108864 f01d7a68a50b47c25636a61615fb2dcb+67108864 3d92fe7f228b5db0eb83a955ac765284+67108864 22543080bfb2cafc6276f067344b6c95+67108864 c777be42a3b970dbbfae050a59db63c0+67108864 523ed14e70faffe938d9562bda81b006+67108864 
52e0dd73df65a5c28727088a336ca9f8+67108864 5d3735e054e694a998cd7d656afc498b+67108864 1a8a3f179210a12402a6308381e78a1f+67108864 8406bc8043e9a249e9575354b90420ec+67108864 a8aafb1fc4c208d712afd441c10ce110+67108864 7ef66e1c5a9aa8ddf12e37881a3fad5b+67108864 1cc5c7bebb389cb5d7cb2768aaae7a8d+67108864 de67652071ea4c8bd9fca0485d687406+67108864 bccb14bacc9d2bf516903dafeba159ce+67108864 208b6c5356a84735b523bdf1ee8c3352+67108864 cc6678ac9df8cfd452e5cf84e7382119+67108864 d05d3afe72f85020cbf1c47619e08137+67108864 811a2caffe5295fda0c20cfe07eb9e7c+67108864 91b6e3aad7844631e03a4b4691b7467e+67108864 3b36c286695fd13293960956a1d9ec79+67108864 e7cf08e2fa2b68acefe9c7ed126b1924+67108864 01756f89511027e9f7afdc51155f9c68+67108864 b63bc9636a6bf211183727f9e124888b+67108864 b36a38a8b9a5b2bc0c42ed1f58e75175+67108864 059efb6daa914416834e244e34b2f7ad+67108864 30cdfb63916ff1e609bf2e53895b77d1+67108864 9ccff5d27dbbdcc68dd1805afb4a0015+67108864 9bdbbec05d44b0c1ef1198a474dc0ef2+67108864 e98c4216d9976c63f8a7237d8aac1732+67108864 a8d8205ea2e1356fad587c2e7bc8c8b2+67108864 49d5ecb84c5327966e083984c43d218f+67108864 702343649c578c8961b92373eb5e6324+67108864 81b31e5e42d1e0dc2499686be84a5a0f+67108864 92e94d7be66176d50ef2c7b75df8388b+67108864 e052099368f8175e3cfd319a69b88e5f+67108864 50e2cacd47f698549ee368fe0b86af2f+67108864 067a888f2fcf0e334a59b76efb988f16+67108864 ea973e82d757f6027d79169dec882267+67108864 d6ba6ba9cd7ed076c6ca75a666c1cde0+67108864 f970793e593c3b7b273dd64f3278b06b+67108864 181e4e39f6fe1eff305c7ca124de8df9+67108864 81d3e93e5fed970958ba33baf6514018+67108864 d94d8d656d73e7c22a9fc21d61814021+67108864 cba3f4ddd207d8d6550929d3d6f59e6b+67108864 ec09778d02b57020f1df80e19fc43df7+67108864 31f7f1ac9de231b22b0756f9f90cd1c2+67108864 f7f69f67fc00841c81539220938061ec+67108864 9a4e943bbc8002c7973b34877d3a8832+67108864 8b2dbd591511b111aade4ac6dbe7e9ba+67108864 3f048fe65ab2388790762965f79018a6+67108864 7295219eaa859a1050df9fd769a109a2+67108864 d414303c3465d3c676cbd88bf62751de+67108864 
e2cc7f0c2a6f8f2be16566448187fb9f+67108864 67d498ecf74a5b2bffef378249de6986+67108864 3dc7cde461783f6603ae130c886747b7+67108864 20bb90dab534b0fe6740d0f9a91b69ef+67108864 c341fcf8c17f3f0bc8fb77b1c38a87dc+67108864 4f6b22463d3bbbcd1ec223769a991c35+67108864 49b7d5714200d8fc8065cb7867feedc2+67108864 3fe374cf036f57b052f5bfac5e777536+67108864 849a12126e6e77ef89c6a6fd40b3b50e+67108864 d6597ebb50acf6026937d7e34e0fe5f5+67108864 1db60c0d626a7dd6931ad9a8f012a7a3+67108864 c541492c1dd39a9652a12929de6c132d+67108864 133456ad3ef89c02c15255894c8b4c38+67108864 ac762257261e25124d0f3bff34d87e5a+67108864 902a5877d0ca144be4e64e49ddbc6a84+67108864 4bed9dae329bb5a139487c26f77beadf+67108864 cd57fdb99ce8f02a528e55adf394c3c9+67108864 ddded9688f0f1d644ffdd0713a418434+67108864 c78069eef75782c7ca73f2cd4b222e5b+67108864 770652d2d600ea9467c19cd6b25318f0+67108864 e15caa477a7f2919ac7f50421fd4add1+67108864 4313b9b47b2af2652e84b57faff175cc+67108864 c7d7e55181b1614e5db644302b89d7b2+67108864 be4e2c5abe2b975abbfbbe05481205c9+67108864 0eebb9ad647ca490eecc8fd85f9e3130+67108864 b81284f74640253d18110baf27b2edb1+67108864 1bf994344e46cefef0ef528a65b16616+67108864 5b4e51780ab0d2c620dd4a09607c914e+67108864 a369023a52d53a580c9ac2e5b6414298+67108864 1ca1cc6b116631ef9042b7fcc1a71069+67108864 4c238ef73d518fe846db3da59e7f3371+67108864 9cff0ccf69f6718db3e2a2c814d5e93d+67108864 7c08baa884bceb5a721a2d1c3631303a+67108864 3c4632c93c3d96608d76a1835fb7c53f+67108864 21a1e400a720f29ad5ef0c398447ab96+67108864 e03a0738f567dd01ead1cdf5195482c2+67108864 31b21120078f7cfa2ed8f365e7d4ae67+67108864 72b9f9dfd2dcf67316d397d592515465+67108864 ec2acd07100379e405ee88c69baf8700+67108864 ed0e5cf5845d7b4d029579396edaa93f+67108864 2d6e8325ae138e53a69daf270a6a7143+67108864 165b7b0a9b59a0779078b237a0f43859+67108864 5426931979acf526c7dac829940b4744+67108864 a19f2f3722ba8b743f37ad4fc1de7efd+67108864 7899a1f6dba4a7561613ac8c346be233+67108864 d4af28e803d3acf8baf02343704771e9+67108864 c0acd238a240fe5d887796ebf2c59928+67108864 
81c9046e203b9ea21a3854a08be3b6c7+67108864 19be9dc41ce37336f95cdfdcb9bcb1bc+67108864 592955756e6e716301b545670dee9519+67108864 664791aa59678b7f35d91490ef0e98e9+67108864 871796aec9cfe0957e02a973e6929aae+67108864 a29cbb99c16228863e2b94aeb52a4d9b+67108864 21529a807f3ca8c79de6985f65c229e6+67108864 65d73e789e7e2afb2398350501c94582+67108864 ab0f11f1bbf28776c8a942cc66a9fa24+67108864 f26f4e283ff6b79edb5b518483eb5698+67108864 eac4a53e663ffe1ba44350304e4f80eb+67108864 4bc565e82bb421fb178737d8b35e4de7+67108864 d6c94182c341a2cefb3c471fbca1aa5a+67108864 93053012095770ee0ec3efbbe5250dfa+67108864 861d66ba010c5138d68feb386a8929a6+67108864 c7ef775c4324deba52152a63d56e22ba+67108864 120c437c1605010561ae054991e0f06d+67108864 62fcc121f247bddeb2e4690e8794ce04+67108864 b219871e84e96e5e15caf19df374730e+67108864 7a3b52d2d071fe20ec7e4d1ebc3181c7+67108864 462271a7aace014a37e2278bad547c57+67108864 cac6ec7d98ff4b14bc19090ad1e7b5b8+67108864 dda917590a670289dba64ebf4ec81e7c+67108864 d8ed3cb70550899546ade41043264a91+67108864 5476c343b293ceb581ff79577c026993+67108864 bdd564f80a3ccf32c745be5e1b379712+67108864 674c08db645cb5081699beb789581124+67108864 de7b66c8448497df3b29bd147a6a372e+67108864 ff6d3e917e61990074f704fd680c8795+67108864 188c19e8e02c3d94e78a8e87ffc3d808+67108864 84c8d082ff79b3358a206846466a5bc3+67108864 3908b9e0a2e7cccf8d84e417e9742311+67108864 b03f0a17946803d7ac479ed9e24de52e+67108864 bad1eefd25bde16ef8234917d2200934+67108864 05b1547d6a2c9ccbc8ca098ceb9cd56e+67108864 227b71d3bc344620f24ca2bdd096d1f9+67108864 4e3a911661430007959d4d3c8195683b+67108864 e003b58fda6183184169d6cc75ddce25+67108864 7fad6f2dbdccee6cbbdb08cf1baec185+67108864 5b7acabe0b10aef1faf11f84606ee771+67108864 9fd97f1a0b485c0fec8b1b3719e7c077+67108864 31032fec03d16b41492a4f4be3dd436c+67108864 084d081193ca6c8ffecca056ea8f49c1+67108864 dae3aab6f390dc7e5356e53c9fd96e4d+67108864 f3f9b46cb5fe5960dc39ae0aff326805+67108864 c1c90111b2966da38021e23ef7624d2d+67108864 58d9d76b23af01b0b8a79ed6c8adaeb0+67108864 
1b7ae7ec0e2a8eecf3b104c6a69b4557+67108864 0a32a1d91c8ce9e4139f1175eaaecdc4+67108864 31ca474df7cc3539f1811768bc88b1ab+67108864 1096ff9961700c784e2d8602f32be34d+67108864 85baad3a44ea3d1e052f9f065cbae2ef+67108864 e1fbe64ae53e81697580c7fe706dd637+67108864 00af57013678b23cd6701d2462c7cb45+67108864 66ed4630ae4c7f5650b77019778501d7+67108864 c53eba1250783154d4d6c796519b0743+67108864 2509db5b0048a95a99c8a05141d25a08+67108864 7bca0e2f4ed3f7975da1eb96e5e991b7+67108864 6dbc441cf6a53df1e0ccb2c458147187+67108864 9e68be13f859009d018bc89f013e2a5d+67108864 0b3260c834bcb8b24957ec1bdd834828+67108864 8fc3818ed7233f087dff6035d0d0e68c+67108864 af889dedf0f936ab5cc7751a3d8e273c+67108864 9619e43d50be73c63668dbe0143d7848+67108864 b172946e97fe8377517616b10aa6f5cd+67108864 483a1c541268bda2ae26f5babd1a3e4e+67108864 b849c2251770ba0a27b8cde071a7a8ea+67108864 21ad98835da14f125e0ddf3e5f641d1b+67108864 e746bc97b5689c1f05824139e6268be3+67108864 4bc2175232d8e03caf985f0de1d57ef5+67108864 0770fd6f7d5be477b1cd57357c65f2f3+67108864 215aeaa9a9883a9608751c8426340bfb+67108864 c30527b55a4841c52206b2ac388fd09c+67108864 109da8a7a4f5d899cfc2aec78f10417d+67108864 cc6b5a96b78c9f72186ab904b53bf6cb+67108864 45d85dd5079605ea72b2956e178936b7+67108864 511290d203b744c2f205f8e391565aee+67108864 292c5c76e08184652dc6b016cdf7bd69+67108864 b6b9f3aa332b32172b12227452fb9a0d+67108864 3faf30b6e440a7e63ec5b33026c4e8f2+67108864 287c54f2ededd394a348db4a51f31d73+67108864 86ea3ab570a70fcfb97cad610f30b2ad+67108864 b0ca221dd6800789a2b0e3ea3b3735ce+67108864 bf170401d62007080905447557163bd0+67108864 845e02d0645ac0bcc74f12573af46cbf+67108864 a7074ebcbd3bc6e477d74789565cb2b6+67108864 dfed9499e9f1aabe61841af5db7e32d4+67108864 aac55aad3070c0f1decd3648bb8dc75d+67108864 1e430b86330b7c4f20235ad50934e29d+67108864 8e6952bca44fa90f55d7df7efa35c24b+67108864 c9bd6e4d596358098d46e9c9852ba08a+67108864 f0481b0838851da2d672fecb8bed0ed1+67108864 39351d86071a678a156bd6685830852f+67108864 6f405976b4fd4b9f0d0fc482f11cdbb2+67108864 
b99d1829308454f6a6d4c1f7fa67fc22+67108864 a65479a2abf49e006fff628f524c64d8+67108864 384fc792211710c36090828945f633b3+67108864 1b0ef2edf16035aa787a2234a2773678+67108864 2997683ebe6be9bbdf19777e6dcb7f85+67108864 a97176ce1a49dcead6f98c5d21e21f4f+67108864 fd8b3633b4da408c5348c901ee991c5f+67108864 05846a8b6bac781a0d5f0ae93da95c31+67108864 60bd664d28ab876ab2e4561f45a81413+67108864 4b2333ef5f5d87b933c943f993baf855+67108864 6b6cb6b3c49d63ac5b8c27a7287ffaa5+67108864 5c4c18633f8ca72ce9e4762b8e39c5ed+67108864 cda8a7e76d682bf2724dc36dfd628fd0+67108864 251af4c3c4c0297371c9cbc18f5391b7+67108864 3d9cb1ee432726b6704f4f062242a15f+67108864 d9be281b494fda2b2e097ecee73951ae+67108864 64097a37bcb45de638b1d1f3d02525e8+67108864 ad7c99315b582d3eee02a188986bbb06+67108864 7dbbcd083f126383c604d25a36318240+67108864 953dd0cc0f65581a304a69d1d8c6c074+67108864 6a4717fd23889c9e57e2fc16702b2ddc+67108864 ce2cc9afe4e799d38ac872ada6dd1c76+67108864 4874f67fe3f584f4bceb296e08e902fe+67108864 2eb3f1c36065e4d1356e30f849ef3887+67108864 8224b179ede992888735c59abe61208d+67108864 3bfb71ec8ce2fe06abdc1c072d2109f9+67108864 95b077361de7df58fac636a8c63f084d+67108864 34ce1eabb9b94098ffdde1971958457b+67108864 f6dcbab5c3c046737e2603deab056786+67108864 97381d8b85c75d95e4060e90ed0429cb+67108864 8009035222650ae111fc04889f6a91a0+67108864 7de4cff45fb41d2ab43da4ca4787a1b6+67108864 56e158d2d96c87c61ab07aec79a3f83d+67108864 80446cb5b295a6f943c345d60ca60fc4+67108864 014e252601f01f531ccca2232756c299+67108864 4521c10e47a426b0c8837cc808e20877+67108864 acc6f83179ba69237c125d8fbb85f5fb+67108864 b3c6f417342f1b43da8d644c10d2728f+67108864 f7d991cf5cfb8923681dd841fec2c3a6+67108864 81a497293d206956365b5e5994acd53f+67108864 6bbec259f46456e0ecdbd4cb4b3e38a8+67108864 6920db353f03e0b89a17c6eba721024d+67108864 11e1346974f9c5c99ab4e116c8b6e721+67108864 dc85c5e79ca12b3ce241634db0887a63+67108864 02ba2753b14df08c6065fe3d8067f520+67108864 512295f97d8196f4a3ee96fa7fbf28a8+67108864 167e357fef386e6fba3be79338c6b242+67108864 
be1203699a729599cb62682276d7912f+67108864 57af384e85fbf8f3794f7187dd66da47+67108864 47551bb1052fd5456d8146be45ed0553+67108864 b96a537acec586386129a748be6210f0+67108864 5d9ebd409950ea6daacbf4fc81f0b8d7+67108864 efde5b728f3dcf9f88d791b2796d155f+67108864 2d0b282d815567c51d601a70fcddd68d+67108864 c4ceff02ed6b0513cafc9e3246d1457c+67108864 d84624307b9c2ff7948a79ab9f98646b+67108864 c1a0198f44e5bb6317988e9bdd613650+67108864 41e517c6cfc94a2f01f4f3d8391a9c66+67108864 c88cb112ac786c60539be3892adfa4e8+67108864 5ccbe64ef42d17e7d9a5efb4e555bcbd+67108864 98d2f0a55f84aeb228eac5d2f1bfb880+67108864 c065beef0ef2df2a3e046c13d2f13c21+67108864 6d029e69045be3737979e2711bd50ea7+67108864 384231d48e990757125f8ed75de83b6b+67108864 0a12b9a19ca1cfdb25ffee4cc3b79f33+67108864 5b8cb8cc4670684db3308db01c15e426+67108864 de0a828daef9037a9c79f2c631c1e0c5+67108864 02673dc624408ab64b89325ed9758ccf+67108864 7e3379b52437c0063d65dec9c0d425e4+67108864 fb6874cbe8aad09ac938aec059f8a64e+67108864 6f7e3055a1d27198d59b9db66e7f9fdf+67108864 592c577ce395168ab1e2b7f169f2810d+67108864 8dc6e1551248f7597693d128f0099708+67108864 8e59539a8b9c76e300824cb4c6e77400+67108864 e06cfaba57b978a4a60ed0d92830019c+67108864 fdc786f66bbe0ed72d008a9a93507238+67108864 df834d931b666e9892346816d1e023dd+67108864 5abd6568f3f49075aec50be5deb08b24+67108864 51c3db0547419829724af016a34376f4+67108864 5532f28919c3e73e4b311532f5173885+67108864 9f35d38d94c833e4bbecd64e2a5d820f+67108864 b9a010eb74aaea97ec508e46d2724635+67108864 96c729f98c1cc39b094e65e9912bd47b+67108864 37e46f23c8deef3b89bf4003034d3d0d+67108864 9bdfb8ab69c3f939c089658364e408b7+67108864 36e9ee64d69e5e766673cc0ff3568784+67108864 141e5f5a866f40a332481f8f0b5a02c4+67108864 13759de2988ae2c61bdc0646bd3f20c9+67108864 cbaffeea577a3e3022dd528979dd66a5+67108864 47d71f445f8a913639c2de35748ca1ac+67108864 c96f556870b01298a200c984c1b6a485+67108864 9101f4c9077a4999b25a816c5c7d4577+67108864 ac67846ccbaf979a06869dbe9b339b67+67108864 ae04dd6dc18bccc506d4a6c154ab0984+67108864 
a7b1d2031b7790d6285ec6ceb9bc7caa+67108864 51cc682f1e4ca25e919b805449c99db0+67108864 dd675b1ce02a4cdb5010c23b8b8effc8+67108864 5402750a6d0d6b745bfb62cbc7a0ef41+67108864 abb173de47c177a5513b6eafc8e067fd+67108864 821c634696374641e713d263226f585f+67108864 312745c720c1dc832b990494ec21d328+67108864 ec41d509395490b699cbb5371748dc0e+67108864 571af59f889abd5666ebddc4acbf26e1+67108864 d13ec87ec18d2fa7c8594a7a683b2b87+67108864 c27783a42ed1b39611c5b4e56b898f89+67108864 256e0bd60301b630d4d9ae492fdac4ca+67108864 25e79caf8013afb12230195662f86195+67108864 06a2e7c753f64011e34974e7c2190b41+67108864 534e4134093ef3418c0546c6e4365783+67108864 4f973a9f34db9004c06c99a3e74b3586+67108864 37f3d57d8b0d46fd66be22d20504c3e4+67108864 f36e3a3343c70e8f6754d037b029ceef+67108864 8998b57cb2f3a4d4478b18b1a915ef22+67108864 e9a0de38f943bbf35ee736689b88ca2d+67108864 9f4b3c74f27a057b41ea1409c4da74e3+67108864 06b20eaa64f9d3dab4c529a8272248f8+67108864 6aea7ee9338fd608ce1ca71c8138dd16+67108864 add48912237a5d2310566a14e713fb5f+67108864 58471508500ae0331ebed69b053b3c1f+67108864 0f9c0aedb6cac897b66f462066aad408+67108864 7ae5b8003a518e6e73e0789744698564+67108864 f4997614ed3b49007c5ab44d55f9b2db+67108864 dee630fde59d34a16f13e06d696ff961+67108864 b24b9f7e6ef3ce54460e0c642229d8d5+67108864 cb98440ebce12bccf65c6b0d03dda757+67108864 1657ddc3534304afa69ecf8aba171de8+67108864 8136646433203d0f92305094371a044a+67108864 86980ec972ce5b94fd523b0005ae531e+67108864 25dc54c2c9ef76bf73baef782e9426ef+67108864 426c837fdc0e955984bd4abd2af4bf49+67108864 41fa3f238068fbebfeb9b5158ac624b0+67108864 c8d5838a82ffe2b1d160d9ecdac6dddf+67108864 cb8d6dd2800967ce59054dd528a34523+67108864 9d217fcc11e3c1cc0db31410b9725c2b+67108864 11fa34b137060862b6066b0c38583c64+67108864 47bc8a59acaac90e96cd61fa2c0f7dae+67108864 26c46447138d5f96e00b9a3501c1fdf7+67108864 3d28f5a3dfbdc118eaf758d5ff3e4d06+67108864 ff609548cd6699e0c060fedff5bc0ed4+67108864 9a6590c4f9bc00bbf1da8e0f06a5e152+67108864 7ebaf211e7365e86f04e9a8580c64ed6+67108864 
0ff0d02833920ca107acb6f023f561bf+67108864 ba4f70a5ac053173f79d60ce3b03930f+67108864 055da2c4cee6215dea4d53f147b23004+67108864 b5e1a6ec785aba278cef39e0a6448a44+67108864 5512c19387b1dbf55b0966fea0996859+67108864 8a7d3fe39533cc44993f6f7021b101b4+67108864 b4b285ea1691d70835d477003691f40d+67108864 fb143f5264c383b1f9bb0aecd311040a+67108864 d990e38d43a13dbd04583b2509464048+67108864 66589506c3fdb72c89b4d57d2010f82d+67108864 fd266e7a1f8a3075f258c1884e8aa28c+67108864 148463ea6fed5dda988f4dd6cca2386e+67108864 a486f29b29d8b941513c8c0fb1c6e228+67108864 8cae319912b2b63bac4ca1ff0293cdd6+67108864 c2175b5c46aad00cae0dc874fa78d735+67108864 94fe5a4d987ae23d32ddd942654dbf34+67108864 366fee4ebcc1c4436e09886307d594a7+67108864 145819f02f913ab57b95aa6e687c7b65+67108864 021b8921b8a0be01fcfaf6a8f432ed1e+67108864 44aa8530fcda59a7386ab753d852af90+67108864 84c95c93dba1c2335c5132aea1bfbde9+67108864 ccad6eed58375535d5888a94dfacf051+67108864 8c70c1572583289f4f886b3e7e36f4ac+67108864 1f282e0018a1ad18505c191cddea2cab+67108864 c95e3457eeeda4ebe2db198892985d53+67108864 0cadf623f5acbd4c9bd62125de6a7a84+67108864 88bbe2bd406b21002e577196b7dec646+67108864 01a85e867d8f17994c267ae9f28802de+67108864 254a198d49a7cd9abc7ae6f9472ff938+67108864 765c620ae8b180b36e1dd15b9e46415f+67108864 b3bf18d1866f90046c14b21e1a49aeb6+67108864 64586178bfbf5919d7331c5417f343d5+67108864 b9780d2f2273ff02a535f9aa1d3ebd85+67108864 3e91ea1b91d943edcdc02ec2d2748f41+67108864 9b98df65c1d3b5b33e49b97177ec1b00+67108864 06c841928cb336b39a3fbef2ec27a411+67108864 f8d137eaf5bda678bf7822fa07e1d5f9+67108864 69843b1ca92cf826a802693ef2b2ae44+67108864 992a200bb822df303b43a2351bf302a0+67108864 3d20c7b9643d676eb2d2618d34c98270+67108864 31a88741befd885c16bcadebfd5a8cee+67108864 49ad328a8d37103e54a16dbae4c3055c+67108864 215e0e9cd05d16694b8cccd48dfa5abb+67108864 987fa48895459fc6f33853ab57fb6dc1+67108864 a65ac062423a006d76fcad051d72a65d+67108864 a7f805ac6a34145fc3b8d667ec43d576+67108864 dadabefe4c732ddf56e988a0ce034093+67108864 
00e73b019e926773bd8b44e55ebfe89b+67108864 87869dda7d5df3ccda8050b44416b584+67108864 76c6be6d066a24b1b9b0aba1e3673f70+67108864 2c754bd71ef5bd4f72ade61d4434a7d4+67108864 7550d3c5d929e2433f167307593e468c+67108864 38113aec6f56566d7e9567c3a17f28f7+67108864 f7be1e07c0d71923a78e80fe90404bd3+67108864 7cd4833b1898db8aa453b9004e25c4ba+67108864 c68a382c523a60f0fbb0ab6073ea910e+67108864 0b771a04b9796e9f96c8da6c0e6b0fd9+67108864 8693722335dbf3c7b959c615e0fe6924+67108864 5cb47ebfba5a7db4b42dcc7fca2bb870+67108864 636be78a2c685b4da74e97cbcc124105+67108864 551b759b58ac10312ec353b7b0a267e0+67108864 ceeffeb5315ea9e54ee2050526b23bb8+67108864 c4e5252e8b0c6089c22ff9da672467dc+67108864 ebbf8c423715faf70b3af0077088bc6e+67108864 991c7c2448f265e70b0ac16ca43472e2+67108864 7c47a1a33a8d8e9588fa45f42518cc20+67108864 245c2168cde621737996c3fbd6c470ba+67108864 79047629ccbfef51995de48da7f3725e+67108864 e4bc4d51564c761c1f0fcdf1c80c6699+67108864 f14f733fe67b971cd66fd8b36564e29a+67108864 7eb7a28632a62a963e209bf8bd38f332+67108864 32ac8dcc12935fdd2d9171bcdfb015de+67108864 618f64f3ab4219f2b2923086be90d457+67108864 c2ce069535fe3beca8f628f18f73e653+67108864 39f9feacd8ee8a9165529930e6fa944d+67108864 6b97fba9adeab93c0021133ba08b2b47+67108864 5fad45965eea86e0f32f7631f852ced7+67108864 5c2fd36a54b250d15c4a04d6f4c9ef1a+67108864 46e18f793a73586043daafe0e013799b+67108864 1f5d09b11cd22a8f39d80e6162754274+67108864 a7333383f35cfe90ea57639ecd5e0795+67108864 2092efd510100ab4a93d08b240d4e551+67108864 6c8aa179f44bfd647c47eb5b5e04face+67108864 848379844fc6af50f86090526fe222e0+67108864 aff62043ce5de2aedcb03544e6e5db57+67108864 cd791e50bb80cefe40b2ef1b2ca85526+67108864 2ccde8a4341f0c8775c3539877e53fd8+67108864 7e2c4ca905113146e57920b996f8938b+67108864 c1ab3da1fccde2e04b990b3f54e7e7e0+67108864 2f152fe6cd43ff4bc5f9870189c5fd91+67108864 362763218d47cb2ad1be8827c7b51aea+67108864 9525519796bea462fd67a883c547d60f+67108864 13689b46f56b81757265379951f4bbb3+67108864 b9a0e158c2ff00011baff7d0db7e0e35+67108864 
ba1a9edd255ca7957fa64b6c578f0de4+67108864 9b63e5f3b4d733d33de31dc2a6a79b48+67108864 5ef12f13166b39bed94977b68310fcd5+67108864 a515cd79b3361536326bcf2abe9b48f7+67108864 cf2eb5da8e95bda57de0836808c3b40b+67108864 65570f99a823ad70b69fb603c400f355+67108864 02c5238434e15f45094ca29aeda3c35c+67108864 596b6c24fd2f6414fb2d29b4bea5b104+67108864 3c017435093274bcb039d83568a7131a+67108864 47c1b455a5604fea60e6b906fa91488d+67108864 e074b3d5b5c478986b821cbc88f9dd77+67108864 4408bb38d2012b2cc8d856a83f8d9dc3+67108864 7d5a0460cc1d38fe20c194a673d6461d+67108864 3e79d38393f59cf7eaf7eeeb4b5ee82d+67108864 452a7aefccfad5289e42a3a3f8709ef4+67108864 6d0398bd5641472117545815d7906eb4+67108864 63f43499c4fdd70f9ca016a997777d63+67108864 ea130df56228d6d0e99397a1e5213ba3+67108864 da9b41f4c338e33f0a913e925f5ceaab+67108864 e1c79b8efd129398d6e8dc84daf0c65f+67108864 727b14298f8be46bae6553c7705eaef2+67108864 d7d9955f80c1b8f7f22d75ab64599aa9+67108864 fec64254c1e06da83c769861a48e0abf+67108864 2cc3c9076ca84d0b3d1424ff23f5e18d+67108864 be0315b12077acb758fb6d607ec0d9db+67108864 609a5aed43714ac588ceef16be5cf0bd+67108864 e8528687518544546f15d104e42ab36c+67108864 7590add63fc192d47386d025f96a8bf4+67108864 010654a6ae811cf3c911c34a7c306282+67108864 ff72a1acc0072ed5dfdd967da0830b6b+67108864 df6f44a67ada6387036e07449b2def8c+67108864 14405c4605e26f7114cfee29afc12e8e+67108864 e3f76e3d0906afeb8e3491e7890386ef+67108864 04df6b259de0b2cab861456dcff4707c+67108864 e9c9c7dfa8627d8b69a3d37361965a8b+67108864 a8f2caecea074252e7d7268f161c3ad6+67108864 a3f0b7cf9f5285990bd09a991e9046fb+67108864 5f08577621f017b31fcb40955fc95fdb+67108864 e4f5c8e2127f0e6c3387acb41b84f331+67108864 eaab7103ab17f9ef849287e24857b9c6+67108864 01dfbcc8bc5d7689f5f9b0969b1f50d7+67108864 dd0e627ba4e7cd97cfd07a89f34ab2d3+67108864 628e500b4a1e5739827242184dcc55f0+67108864 c1bd7e3cd8f6cb893c6691a1c132e41d+67108864 a8ce57e72e494f6ff16c01416a715388+67108864 e5e4d21216c2be92faead68dd4a7cbb8+67108864 fba18a980ef743d02623bdac1fdc11bf+67108864 
fd35ea347e664d2bba5a06cc5b5859ae+67108864 c7c3772463ffddc941f864cb561196cf+67108864 e3d4d8d4c0582df9c4d9d7df34eb7254+67108864 0e96910a0a2a49a887f7c4afd7c6111f+67108864 d7d0ad947a996de69cf6d20f34dd0a75+67108864 7513ee25f082164fc1e2dd9303fd6dfa+67108864 ba35f2c1e66448e18ee94a7567e901ca+67108864 ab894212dd52e4048db5db39267af038+67108864 0fe5d7c56c2cf526a0fefff4361cc41b+67108864 6a72fbbf322b4efaffec4c17e16f0337+67108864 7d7ec2f367837d0a99c45f13e2a72ec4+67108864 e531e5a0f983b625b97dbeb6c8c75e39+67108864 141cca3d2ef79428c45e31b8196680ef+67108864 f5797709703765504615673f3a7798a1+67108864 42a7d894d9bb750583981d721fc0b43a+67108864 3538a8dbec2f3bfe2752d45a71e70b89+67108864 c80e07e5d0ca589894bf774edc001f37+67108864 47337a94882b8e782b9f52f5cbe47df7+67108864 989387d2308d63dfa14f1984764daa1d+67108864 ec0611f2664f96a0a146e82efe340723+67108864 fdfe2ea11dcc789a16d09e7f57bd724e+67108864 ed1c5b025df4101ce2d0453c52bcf44d+67108864 c871836dba44d682c2e9bd5068184083+67108864 da3e03f6d48247957ddf1138cffcb069+67108864 f0a4baf3d0c7b390005017316d5f2466+67108864 6c4a4f37c8d1c2e96ebed82c2112f2a5+67108864 b16a1edfe99ff8fac55049dd1838e3a9+67108864 830da62d52b0970e8486e09146d3c380+67108864 4f8285c1c54f0bb0e2a3548a709ca801+67108864 715993ca1791faa1c2318927f99e7cde+67108864 4620f01f02827a655dc211e6d19da234+67108864 ff6e868275e6ad24c573e4c158b5d36f+67108864 6cc04d5bd2dbf95a8bf51c3b4514e756+67108864 c81fcfdc4df6edb054225b043a3d0006+67108864 7eb19468c9690ff9f5ed0606e76f1ecb+67108864 df142d2444521375a0b3419e4ddf289c+67108864 85839ad5cd690b60fc3c9ffde0b16a48+67108864 a48acdc808f3f6871c5b1ddb61b856e7+67108864 0702af55adf78844d8447ae2edc612b8+67108864 6df29acca1f5512741144462c9965066+67108864 2772528da2379d504ee0a2f2f8f6d654+67108864 9a48c99615d21cc3803bab1109d01b74+67108864 c43285f8335705ff5db5c273649c1abc+67108864 95f810aee86d91c03e830b0b07c35802+67108864 fe441d6f0df7600e82f39bb8b783d689+67108864 a5fb5cd4c8bafefe7295d832d54d153f+67108864 fc3d46569b39762465fe0a52a4794e53+67108864 
604abe47fe48c7ee596be8cd8593a48b+67108864 b363fd001c92188e289b26d54a1b2ff6+67108864 fe882c56c35ff21dcc34f96531254c5b+67108864 76ef911b4fdd4f71d93b74bdcbc9c090+67108864 e2ea51150b31146e112e909f64164267+67108864 d2c5b720e097815e664f6c87f0cf4ed2+67108864 97e7507df7dd00de336a42dabfc2c2a3+67108864 4491090d96143ccd1901779e9f00aeab+67108864 931d4a0940ec2eb34fef01cfb8dd9595+67108864 b7b81ce4af050cd0a01a0dc86a794805+67108864 77b4e0d34dbc3684fc6cca5120bc3456+67108864 ae2f71e229dc701bb379bcbfd00fed30+67108864 03e3daacfd7829c88c098033ccab1fa6+67108864 a6304b94717ba7d063489a01fec40e1b+67108864 cd4755b79ef2912a216fcac641af5635+67108864 3add1f551929e2bb8465bf103566afe2+67108864 575f0da37b5e9d2041faa60d5949af03+67108864 ab5a61475c7255b70370fcee5dd9d94f+67108864 30634d01c7f400d31d611cc7a7673d92+67108864 e31a7d521389ce6891f698cf268437b5+67108864 846f9a6630755b7a8b882effb86d2020+67108864 52e1eaf4662a1bed7e63d5310a893a43+67108864 8d09fac511f0037c94037b818268a2b6+67108864 774e3872523f78fdfd6b6ebce2b68c2c+67108864 ea5f1d513ec4f40d70c8f8e0a8726002+67108864 ac296ba33f17bdb4e9159024f8e4cdd1+67108864 c9a195ffbfd1259b22153695b36a2bbd+67108864 38b4931a017a78fcb6fc16b0e72f3ce0+67108864 702d41530330c10a587400342185ed59+67108864 05ab5fde413857fed335ff0deb326a30+67108864 2651b1e0e66b3014f752bede5e011723+67108864 a12615fc13b38ebdca36487bb3591724+67108864 e2992d9c680a9f7e11270af1b133e529+67108864 fae3470df006aae3f2bb2cfbb0953ed3+67108864 701c0290354a64872ad219a9244fb8c2+67108864 b93e0aee1e4f38b7f7fb4e16ff8d1ffc+67108864 8964e1e6e989bcafe111522b00d47713+67108864 51e9e5dab456ab0eb7b59279c4ec06ca+67108864 725c44c44536b8ac3fa0a9ae25d9baa1+67108864 4f39315c0645812f34179bde2133fa19+67108864 7bd82c0b2754702ec71a0634a9dc1d17+67108864 5de934e6e85aca1a204a89c3e99824a2+67108864 7bfe74cb7feff036a0ee21f370712821+67108864 0fc5fcddb3c53f5b53808cd958295800+67108864 d3f20cd29545736bbd57bd50f07b1579+67108864 0c64435faf1ab3845fcbb0e32d82e999+67108864 ea441e811b3520c2822c6a953144413a+67108864 
d2b394a3ab92e0856a230fc11ce9065e+67108864 aa0c5f125034b3dba21fdfa3695d6a01+67108864 a8f0b4118ce8b8cbd6b7774679032512+67108864 339ee569c3993742cf8f7be8d874045d+67108864 1b38e6bebb07ff094362ee79fcbb6989+67108864 7c0e35f1ecceaf8a7588dfc8d3aa076c+67108864 fe15358161d85528453800ca110a449f+67108864 e13a94ecade9c1334056f51001161f0e+67108864 5ca9d755a0db3f62206b87f8489fa84c+67108864 1005fd30a8e26c4c04f5b39911722e7e+67108864 21d7603eb5492c507fca121f14b8d132+67108864 b1c8f98dee262a8a2084e1382454942a+67108864 1be9511623dfe333fe39bd2677811406+67108864 e6439cbf2bd93d45b11141be27fa80fd+67108864 12e41768e400ded7a86e78ed33446b65+67108864 3d99f810b556b5eac4247c3317b8616d+67108864 23632aa22a1a08cfe7a899f568511e87+67108864 42b10f75f25df8d105919717d081a850+67108864 aed75190ef2bd0c0fb2fad3e8a943934+67108864 fc57fc90c814367b5b34429fd52b4ec1+67108864 53da1e5576aacde23c1c86e1624d7b18+67108864 ebaf7a0946e3fc47a636389faed47d09+67108864 7066d5399a64e1dd669506972b53bdf9+67108864 077a6247a557f0f648fb492308f4cd74+67108864 0e0a5887a578b38577464d267e2b6df9+67108864 577478f28f10be523db58f30a56c13cd+67108864 df2840943cf215aea832ffe6252eda34+67108864 da6f29900f7677fe60efb9729da88c66+67108864 fcb41e115ba071a96b0a3b49e8bf0103+67108864 8a753ce795051334874fdae207855829+67108864 b13343e612361dd8e39ae29e41176e5b+67108864 704361fc939d60722e7bf633ae91d53d+67108864 a1cc9ac3b719fd41cfc2bc0d94c1bd2a+67108864 cd40ded00769b3b4bd845841c20c9b34+67108864 7c72ce73e8a5ad438ba8cdf99a65f9e9+67108864 d932108c4d3a8476491092cd581a905f+67108864 910a317116b2dcba43584187fdf33ad2+67108864 71178cdee4f382d77af80021d908457d+67108864 847b3fbb23de4510d6a10f216b1a210a+67108864 3950c35ae2852796a55eeb319f22f6f3+67108864 bfb8801bef84493b831ae34a5f73ca4f+67108864 955a59ef5011ab222a40de7cf3b507a7+67108864 e519cb7c85edabb54261110d8060de80+67108864 da074887680e187ef1db4ba128997cf1+67108864 b18567fc4c64b4aa0c0c0f204a01ff97+67108864 a41418794f481bf2b8409ec6e4267ffe+67108864 eb9b344f48720cefc217da3cce124e8a+67108864 
ff92e78105ed0cb6cc6cdc3270faabf2+67108864 a2d0285e1207083dd58bf9002abc7fc2+67108864 147a9ed33d92f99fb89b685140a732d4+67108864 3f0138f29d10d2a73cdfa2a83d5653e4+67108864 4247f786d38761001e7fcdaa703e9b6b+67108864 e2974a5009436d37873f17608e406605+67108864 d491febe0a56b11ce2278deec0f617b0+67108864 8d9439a2a051307fd164906bd5c6bda2+67108864 8c5cfbbbfb6c3666eb4459a98921c954+67108864 d5ce682bd4f98463fdb3d773f518acae+67108864 e9dd69a37fc45ee1db12d0162fc7e165+67108864 52eeca7caec19ca02a391ce6bfa9be07+67108864 d64c37f9d21f14bf9487d5628a30eca1+67108864 f033519daee269196bfe92d70af67b39+67108864 c1282ca6aceba771a171f726d6a6ac16+67108864 1f2ca1e52852d72e5a57a1b16cbc3f74+67108864 54636fff02ff0270fa05e4d8cf29d7d2+67108864 0d39d0910ead42e48d1961bddf7ff5b2+67108864 44da2d272ba8acd54826bf012eb40e59+67108864 d80fe9f849963e7eeae297a546fcb845+67108864 0b9047663ab22c2a7dead55b79a368ea+67108864 0e0fd7eb8ae183106e9983da4f46532e+67108864 aa6d7435fdb5b00a690e3582f82cd6df+67108864 59b2fd0c4db35ecb13fa5e4dd3160df6+67108864 d1c103ecd75bdae037a7e4697a41333e+67108864 1d8679726c26461224e0a2e5cb0acc5f+67108864 f61e8db26fd4758f7cc84569ab2b028a+67108864 a2cb1b000b5332b21f725178bba5c4b5+67108864 0251aba53ffd3f3ba6da0fe53e80f315+67108864 0adc72a031deea5b21caeb4d49dbfd1a+67108864 da64eff1540d79841d656ca7191aad04+67108864 fb2b14ddb10660379ce55f727527ff2b+67108864 d4338af18b258bb8ce4931ca7ed05064+67108864 b59bccdac8c48e1a690fec35b6f8a8a4+67108864 e44379d4229ff8d166628aa4ad441afc+67108864 a2edd9b7190d04989e9ace2fad760116+67108864 efb094f97d5e1e44ea6534fabc8bc67d+67108864 8cfbbe4f5451f1a9a7667413b67abb00+67108864 8801376aba9e6efec6549b0c3e29c978+67108864 f8fcc193b53dd209a896d007327bee7d+67108864 7de9ca296a062cf33faf9ea8f312ccb3+67108864 8d69bae5e32e7a4ae1f6dd57e0d4bc27+67108864 09cb36b4aac4a4230b0f1933ff032e99+67108864 1b69f061dd6daa99762976d61c707490+67108864 b54b276356bf5e28514a9224be56eb30+67108864 2d95083444f68bd2384ab2482c545a93+67108864 8169dcd201cdabc17a0c651d69ddb5b9+67108864 
8fea21ea4fde472cf5b9222bd2966fb4+67108864 bbf9e95ef3f9cd4eb43f143091ea7d31+67108864 081f91f057e44a5703a96560bb310e23+67108864 b696d01611c9215f4a93fc7684638ef9+67108864 498b72c742dc776c2b0e1525dbe16c2b+67108864 56b958bd8492a7d864ce60438db57311+67108864 333bf76155e916a6e4d72531c12b3b61+67108864 512c2e49f16514c68dd06a242c3006f4+67108864 0fb48df3a292cd784d3a4e1df2efea3b+67108864 b43562e4baf12fda2ef2f88e655d24ca+67108864 13b293acaea889c5672a10096d0b70b6+67108864 87232993894f0684d3071118528acb30+67108864 f7ebc10f8e47b12167c3b18a9b40af4a+67108864 7aba76f27f35b5c0c1ca8750c5895f45+67108864 889d9bb6f270501204b48e052c763e3f+67108864 8b574864a6d5ce579793d4d232bff6b3+67108864 e19cf0756dc6b54f2ad8bf750415772e+67108864 88e85afb7736347d28df444a048af72a+67108864 06bda3da34e66eb63f1699fef8c70047+67108864 a06ad6b51927d11c35f8cfa86980a713+67108864 68b3204eba15d00312166c99fa33eeab+67108864 98b12cada97a8843dae7448ff380298c+67108864 5ce5c167e9f03631d6d30cb6e45a52a2+67108864 348633f0b38db8610b435ce4f53fd049+67108864 8651641cdca73876e3ee249973888a4a+67108864 ba2e2453070570ecee21e549cdba4c4c+67108864 4c14365d045edf5f7db25a6077cd18d1+67108864 199d38530de4d2bbdf644082a5cbfaa3+67108864 ea8e9092d88d7a2ded26dddaea3e35bb+67108864 a981d1b9cc878eb2825e6ac397204ced+67108864 6c8a6a15f249927de8c391bfa9a13887+67108864 5fb6f06f70251fab1474bd5cf827c4fb+67108864 51336a8ac8e66b4cf183a87f83184a58+67108864 5e7324e999bfc640e1e4b8e193c2704f+67108864 ed0372472af294f0a70ec98f4b12eac5+67108864 97dc3c2bf973b650dcc1b14e4e307049+67108864 ac2dbf604592e32443f698243cf21b43+67108864 4f6b487a03217b6e2db48fd609ac3fdf+67108864 19ce146e7d279ace45a056b40c8b0e2c+67108864 7d22fb7947fc9a8f389ae5aa2681b767+67108864 9dcebdba72dfd50659747073415dce98+67108864 10982d30d47b5909b6ae40d073e2b7b8+67108864 bae58060b6dd999976b61149d05b2d9f+67108864 0fd4d4d81968a7e683c1e736af26ca08+67108864 ef25f3edcc83b407d40b406187c7e73a+67108864 befaf13aae2e584e9e3ad1c7a5e98f64+67108864 6880080067e109394df71b712d201f4c+67108864 
485c60ad6add65d4eac035768e43db0f+67108864 902c2519a1d327098a8936486f237c3b+67108864 e0a1ca44d4a9b71efcb95a8690283a77+67108864 5cb09dac3177616dea86b0683c63d379+67108864 5a08f945265debf4ec6cc61e51efa702+67108864 f0179759fd17199bc13902de79374753+67108864 835e93df3ad2cfcc3b6b0b0ba26ecd52+67108864 7ffd15a96469d15bb821573bcb406900+67108864 282105da606f63b2bae3bf91b8703ad1+67108864 0e67f64306d1949d07c301077279a257+67108864 1ff5dc38a6e08ac8737f7b048883ba57+67108864 0c48efc7f081f704b5e81b37d2fcf78d+67108864 48cb575c08d65b849e336a528fd1fdef+67108864 2f89ed035978f3eda7402d3a352f172a+67108864 ee96f8e4c159fecd3167f2d5f89cd1f2+67108864 44c196f7b06576ca183baba37c07ed0b+67108864 c394702750ec25144bbd50864817f412+67108864 ecb5f5dfaa3ecbeb44f850c3f39e75ee+67108864 9189dae667bd9bb7efc65a468657e108+67108864 cf32ab8783731037450e3b3bb1a7e24c+67108864 970130f22be6713fe29ab1b4654508ef+67108864 3eec0cba1a8e599c776331fce72eb135+67108864 40f024255335221fc9b5e35147956717+67108864 0a23f0e47c7aff96458df2d8a4337b43+67108864 aa4dbb73141da37741ef87f25908de08+67108864 b4374046f4aa89c1bc56ab4ff243712a+67108864 5b3b59037994aa980b394d984a107364+67108864 1856eed7f374fef9a6a2d54855646dc0+67108864 c58269b0c2137b24b4ab73d017382af2+67108864 3ef84af204b89170bde51ff5b2251580+67108864 47eca65588f21d57d6826ab968ce0ee1+67108864 c1c8a064e8c5cb4998db2f63fbe38aa8+67108864 7989eaff82ce0c9ce44b07d30004f736+67108864 5c550ab03a78a8e95591466c98e73c35+67108864 a9bbfb746766c189c72e01f9c5f06893+67108864 df809e32f246382bd14dfaba10f796f7+67108864 c08575ebfe83afdb9d99c8b0a20f204d+67108864 342a1fff271f3b35fb476136ed1046aa+67108864 be53838e3da15290c9450dbf615a23a8+67108864 3d15a3114e259ce18e60f7d162b23b49+67108864 3fe7e7a6840c16fe922ee285073a5e34+67108864 0a98281ef6382ece1136ed68f1276e4a+67108864 d8242a4c2212e41de5d3871407813a04+67108864 89161d238c95e16e74ff7e98df4dae3d+67108864 2cd9e980b0c040903d723bcc097873fd+67108864 58c233f19a14fcf85763483343f68ed5+67108864 cd9ea5cb906ebbfe11989f6b2178b1e5+67108864 
1ed5d50de85e8b1aa8a9fe351c17ffb3+67108864 b207e05f95076c307944bcf66e8271b5+67108864 1e16d3914e27d93a30a1fe14900d5bd2+67108864 3dd41cb336b5dba6ce1ec12a12d70838+67108864 0553a8a876bcd08a9e59b672dea3cadf+67108864 3270098cdd3a653802b64b97e2c013d3+67108864 74a4d14216f2e95c3c762fa899810245+67108864 c8f6bfd2f49cb68d2c12bfcddb7a3a14+67108864 d57828eafb6113e1378635fb7d811ef7+67108864 4ea5da07cc79e707cfbd46a05e71f626+67108864 fcf7ed7fd390b222f75879caeddc10bf+67108864 a9b52d652eb5b057507664e55d8c2065+67108864 1952492c2a5e553538b01b8c68754fba+67108864 1876ea6728d7f5135308bfbb2f907f7c+67108864 637eb85d012094cdce43454ac707b186+67108864 6146cbfbbde640b0ba81721ad730518d+67108864 339994a175ee6af23dbbadffabf5abaf+67108864 2512f64ca51feb66de4943bd63de1389+67108864 d45850194dffeeace32b87161df8b637+67108864 70e746b2c7a2785abd140bcefb359d5d+67108864 a675ca70dfc82b1295b8e934777c2963+67108864 89b608bcd8e1ae97b1b54a4ce6534f25+67108864 93fafcf8ad42d73962773d45aa703e48+67108864 b2ee61842cb6035d392cb39a9f9bf3d1+67108864 65201c355aec6c098ecbc096d807a94e+67108864 efc47cc3b58120afaa5b12f9847bd9b9+67108864 815b18871706defdbb32f52f79888b6b+67108864 cb212280920fd14d7c083e244fae8f5a+67108864 0563576707787553605de7c2c3b43f69+67108864 706b06048a3c162df3eb8c76fa4b9a1c+67108864 39ce8aefa48b3e1ed7387d9374ebe607+67108864 b68c296491338234d53ee8d7b80e20fd+67108864 6a604b76699c7b5d1f28c783ba7b785f+67108864 29ef63b9fb1236065d93d649c14a7696+67108864 3c42c2f540d3961241f8ca697ce4a15e+67108864 0f2212e11f9abffbc05b49c7085e6692+67108864 66bdf3d4ba4a002e86e81a0ef235c6e3+67108864 84dc9f05a6292ae7a1a368b39ec5ba61+67108864 3a9889f10bf0f526c8de4d9ad875cd25+67108864 8953d1e682dd25c8df3cd5b659db5d4f+67108864 ab7be59c61570efa5dc2b78cf83676bd+67108864 dba9ac0eb05c49a6cb308074a19206d8+67108864 f06d174cb6ec6a6ce10d0149c62d1a0f+67108864 11d2f482b141766b0ba4f53e59c2188d+67108864 9fb4a1a3ddf95e2c7135947af3114e2e+67108864 275bfb636251eda218715c1aff06e503+67108864 a0c89242b3edf53737070abf6ad87afd+67108864 
8b4763a22fa4a3a9f045d22e4b11270a+67108864 2cca0708700a877794c8590619f2968e+67108864 b9d7eb594832c2b65a93d83ac35a19cd+67108864 d9f5beeed66607e179827b0fc6185139+67108864 f97887998347fd8619bd35cb03da2a4f+67108864 52c088791a443a992cba0ea3fbf12e1c+67108864 78f71f4bb45320072b2fbad6e5725055+67108864 652e98cbe0e04a9dd5d2b6ec2fc7c04e+67108864 550cf40ee7d2487066f44d7925c7b709+67108864 00d1a4f4532f950c85668a7748d38cad+67108864 f085dda612a3afa91c9582b0c5d4aaaa+67108864 fe1b1abfdc6bb203c38909ed0d124709+67108864 7316d6a08a14e2d840d524ec3700e837+67108864 e00f6b79c66e8e1e23f52c97467fb161+67108864 a3a1206eac6c1439baece870f922c446+67108864 fc677aeb5ae37d160ce7f658672d0352+67108864 b306ac3b86b18e3df67c12af3432f0f8+67108864 de6bcace6f2bbafbadab0830f970c581+67108864 30fb4d875646e3ee8b9f284e9761de9f+67108864 c2c9e0be88875d7fe8b93ea622f9b44e+67108864 dec0fac2abc6f1eaa399e5640819a000+67108864 2aaf7d4f5cfec013454efc6b1e702d3b+67108864 5a9d28e7cbf92495289d4719810580bf+67108864 5f618fe336664eb82b35dcd1a3527305+67108864 52b997bc9b934183e5b69029d3c90dc6+67108864 fdd35a04ec9d5938f7ba040422f09b50+67108864 b8eec7e11bfee019bede22b20fe0b59e+67108864 fd03ab91847277757e25c4214545378d+67108864 0cebfabcbdc53c4366f9fdb3c58da369+67108864 37fc9e297586cbe133823b0dcd74151b+67108864 5413d6e80ab9b9a4995238f56387f920+67108864 b65fbffd1cd35708767b822d354fb6c5+67108864 369d2566eabb30c55223261f52561e05+67108864 f6d5f3b3c0a22b93e1dda7a569813098+67108864 2097a5b95515dcab7170d6e45f15644f+67108864 651a39b728d858e73a97317f28f01570+67108864 1fffdd51f56f1df30310ed957e7b6940+67108864 1f3ebeb7f6da888bbc10daefbb389d86+67108864 0446df2210a9bb8aa78424ea8890dbfd+67108864 79ba0e4df90bd1aeb12f1521fb29150d+67108864 5eef6de0afe26994aa2c7cfd31c97c85+67108864 250e64306df679e7a1abca0302c796ec+67108864 64e0ef4c14f7c913b48a2a2ee2610077+67108864 8207b3d6919395a87fd4705d83033dad+67108864 673ff19dd35b2c519c4bcfb158976f56+67108864 ff2885cbdb66c872885e44c43bd8e68d+67108864 5a2e771e4b0338c1dd7647d693da555b+67108864 
ee9f602d74e0fb02bf26d60025517511+67108864 b53d146b9ad8d8470a9ae1f8982f693e+67108864 0ec4753c965090fb02b2d4881cfded65+67108864 258255645bdc327b3f5aff2f5f308b86+67108864 eca1fb054e55cbbb106ec7b77987e4f9+67108864 67ff2f4aa9246afeee983520baa9f28f+67108864 9088128cd4b27cd3e49f2cd16605cf3b+67108864 eef9ef8b7d292d53b512844eeb1f8c14+67108864 82fb940c018616328ad5b1f40573d0d0+67108864 d91badac0cf1090d6b9e0c4835095d41+67108864 8cf02691ad8e84eab3f166cbf21fe456+67108864 d046af96ff4ea58d232fc61a0cec06c7+67108864 90fc539b57ac863cfbaa510a6d7a8831+67108864 c56a89de61ab11e03b57fe0652a9f824+67108864 ac049420424f3a7320ffd25124e73097+67108864 b331df4098dd950ed2127110a77d69cb+67108864 4169576a334d9d6d0ad1acf785de4beb+67108864 8417499360a366a19e803eb4bad782cb+67108864 ce9c12f73a782f7d8c4adb1955f5041b+67108864 67ac94f8d9252daa60fd83fdd485719d+67108864 b6745ef3760ed93c751c2a6ad463c453+67108864 3e8ca95376b731465cfdad3137debd5b+67108864 3597ed23a677c8510948fcffcaa32174+67108864 e183a236e71b541e936697989c953fdf+67108864 adb1b761eab093ccf4381e483767ad87+67108864 61fe4674dea00f7dfffb572981da4eb6+67108864 59d5521497d7004b1734f8f12d50995c+67108864 708ab80ea89529c7a828b27240bae2c9+67108864 d5546b0d36c4618401eb534f8ef9ae67+67108864 146b5cfb47f585add7fb91b3eb792b3f+67108864 98af89fbfc592389f4ee90001b73b7c0+67108864 f1f7e7a38ae792b1676958c38a079302+67108864 dee5b4e934ad4d820084abe03ec82bb0+67108864 233ac5529b4bddd3357ba114cecb8265+67108864 2ea7c26b43543828173d265429e5e9a3+67108864 554279d68a2ba24f37960716668f57c0+67108864 a652774854ea89b90addf1e5558b03db+67108864 2f32839c01a3178754b207ccf2cf774f+67108864 19e1085e4a9428f54516d2c9763fc69b+67108864 b08e52aa55ce36adddc8c827a7b56a90+67108864 ca5a2cfa551dcc4171a8548b2e75737c+67108864 07d0c5d3b9da6f020c49a09ddab88c11+67108864 297c14087d08799e3cc758bebb35a998+67108864 36745bcc5ab5e3b6db6d73d685486632+67108864 145570316045be126fb77dce478c9e7b+67108864 7ae40af44f48b1940ae5f1eaf7149d8e+67108864 a040da7c89a88d383f6351a09881e5c1+67108864 
347090b7962725c04375d35b1426a922+67108864 969ae2ca582911e37549ab48999596f2+67108864 b3d6aff2b8fcca4b97080ec1a393d4ae+67108864 f2cfa7ddb99bb01b7fe0e380bbbeb743+67108864 459a7ea1a6c0b502ae06f87bc8a22d85+67108864 d6c18cc1df3b7973d1fc1dc994e90e7f+67108864 7585f0be7bce0bde7246e93f26d9663f+67108864 e75e85ea0ca03510ac43f793a012f71b+67108864 69b99706e27164ddd6bbebcc34704e2f+67108864 6d54e0c179d902173ec3550561baf014+67108864 6b8d7b1fc0b2db4a2eb4a62026df6f07+67108864 454dcf5308ef0d3cb8d50963536b47cf+67108864 00934415280fbe98d2f428ba54694504+67108864 8ce63e908997f169795175c2aecc2800+67108864 d2adddeec7da9fb3b9eb973b7dd62301+67108864 df334feb04844764f8d6deb418c8809c+67108864 c400cefe791d3885f256df23f156b790+67108864 3fe67e7bc24ff07ee05f1d5010f65e59+67108864 52dea4a8d0b2076971c0d8662f4012c0+67108864 4a361fd451f8f4f645f300880d6fbea1+67108864 a47ed8eebf37a796eaf28fd5b8a448f3+67108864 6333c635f5f8efd608837c9c43a54809+67108864 c5218502430171c40ff8329ea81d6142+67108864 e40b9b1935f2f8242ecd722c87876905+67108864 0fa2bd707cf80dd38937420c88ad07e3+67108864 68a7f964eb4f4f32f42d2fc0e4b4c168+67108864 d7031b0d93a9e6ae52fefb38ea359d0b+67108864 a8cb2892fdaa1ab06152ba266271bf6a+67108864 dde2ae6293a78d523de73934580adb98+67108864 64b31b76d0b3f567cabdb19ec8a6b5bb+67108864 5722cd27250a7b4f387f689eccf1387b+67108864 9a5a56f5ad46ae4cc425b52f5f66b0e9+67108864 2c26af8c2644c3130ce2630c11b55f16+67108864 b15cc2d99d1814ea3269caefe5aeed1f+67108864 845936a2571203663e760fe969cb7cd4+67108864 cfea2e5fd2d21b39a0279c0ad8166b0a+67108864 012754e502a0a7ec6ffe98299592e496+67108864 ca04a3214136dca30208e625288d50c0+67108864 fb015bda0345f425717e8e2da41515e1+67108864 6375587f9cd8b52f461e457d0ba5e6a5+67108864 18ac65bc4f38174709f34a54cb0bfdc3+67108864 fd3afea501ea6dc765eba676c7668d1c+67108864 9e576d0ff9d0f7ef7c55a9142627f48c+67108864 aafb3340fb59fd4e1ecd15b71486cf71+67108864 f8579bc6ec704e334984c5ed7626061d+67108864 67798d6de9a3a161763560a25cd5fdad+67108864 0895eb15394b8f3eb87630ced9c54374+67108864 
e65992f5b348cf08f7f94c83abdbce9b+67108864 6e3a01b14054b64f85bee2b089c82183+67108864 5a02dd377136f74a88f37478d1465e4d+67108864 6d6cecaaf9e0446e484448dd62ee963e+67108864 4e1cbc7a34dc967a034ed3e9cba67b64+67108864 a7cc7e1328300bd8f32504136b084c80+67108864 648ce1fecf63126e094fdfc123f549be+67108864 164e1083eabbad57b7b4f72810a655db+67108864 452a5be2713d4931248299a6c563cef1+67108864 0ef85624c448f6e4ffbf5c69f0df6a63+67108864 636f3cf22e32d186b16700b34b93f34b+67108864 1005aeb33da4e9fc72bb3a0fafd0c3f4+67108864 6ce50685ad27cfbf0b83cbde29adf870+67108864 f8079b077ed19ab55860c83f0a5891d1+67108864 1e4471af55b217dbb89bcbebd476dbc1+67108864 0941ab2b20017001185b8f7d96b3da45+67108864 218c586c39817c072148fc1c9323fccd+67108864 c6ae2f238bec21d75c463888eb9eb34e+67108864 17e43a11af424ed19ec3e37208ccd7c2+67108864 088975b838d40639b140cacb48f24582+67108864 d3499a99641de103b50eec6391c8bb85+67108864 3a3925e9ce09b308b7d686bd35ded3ad+67108864 4be279b24000df6b90f3b21346a95ec0+67108864 dff78a508fe2d378362073a634fc8568+67108864 7e030eed13b4d92570bf27cea056de2b+67108864 e575167c80666518e1a1fcc37e5be977+67108864 59bf806d56b3f1da60d7e1ea1e326732+67108864 7ab1600fd8e0ea33d4a1522ccb4b16ec+67108864 b486802203e725f884e67662d60a4bc9+67108864 e2747727cd64c67213008cb997a182af+67108864 bbcd31beb88d98d982fae985212cd467+67108864 63e43c3dbf8b07f8823d6d6d8dca4e5a+67108864 1aca07c1a84f7370026399e36371059a+67108864 59da7470ed1691daa40159076f00aacc+67108864 2c67c1687441dfe2dd335aeace7960a3+67108864 148352f31cd89f167ebecb37709763e9+67108864 b15ebf827a05848de50be331fb5480ff+67108864 5a5e03fb89bf5dc0e4b2ea691944f772+67108864 8625d91e288b99dccb7b56561ee74876+67108864 3656bca4544924b720b82283a91cb76b+67108864 5fd2f6f1fad7d20c0e973703ac6e3669+67108864 b33f494062f3cb679c36337ffd306507+67108864 707dba27750617a509a96a2000893ef3+67108864 c9db9063eefa8c0c352ef3deb05af1f4+67108864 77ad729701812bfe2cc39ba1750c2051+67108864 632b1bef7f64c435bb783d1ebf397196+67108864 b9137b8556e1d9d3c47c79d756647182+67108864 
146b878dac4ca3bb0a728d395b42a74e+67108864 0e65fdf451b17e75be0f91786077f848+67108864 2926796ae841754b2dd1815cbb707043+67108864 c0cae36e934007991f9d56a0caca8158+67108864 b8ec664a45a543ff21447729cc4799ee+67108864 4d3ea3381baa8a14c71628a95c4b2d82+67108864 a1ee68e9f6381a4c1723d2b221d182fc+67108864 51383ce68788f81c011a8b658f425b8d+67108864 39738b40415353cf36bcaebbddae28b0+67108864 49a9a08e0f163045fb4bdb87d94c7470+67108864 bf896038315fcb8c40a628ea135cdc31+67108864 3e5ca3e8307931fed9854d8bc8d6b04c+67108864 1c7a2e1225976dd6ad6b6c57d6089da9+67108864 8d970c8f25369d3ddee7897a801b3600+67108864 4099f0a3b81e8f24097fd8778a230cd6+67108864 1190633b75127dc6dcf564a81d903afc+67108864 aabcc49889ba529bbdde745f7a4e2a06+67108864 4006b0b18f997427d6b01d7488875b0a+67108864 6c02ecaf633dc323c688d0ac7c6a42cc+67108864 341c3efd284477d4540fd53938af4a40+67108864 070d5b5a0274690de3b95bc09ffb78dd+67108864 ebdc0b5a4395cebc98e0716965c47a3f+67108864 2ef29fc6ddb8118b08ee3960029506bd+67108864 5958bffe4d6506019c1596d8ed1ba6a1+67108864 9c15577caab6a3ebed7ea299e32e4dce+67108864 6cb435f99aaa250079c37629628d5520+67108864 4b3c90f76918b88372745d89eeb9e084+67108864 837c2eb79a671d35aa881aab520bbf4f+67108864 92f0463f8c9dae280f9a162870679502+67108864 95f70fcce64cfabc5b8707b62e6d245f+67108864 64ea34ea306f87277008613ffc46e4f7+67108864 de302fcd65dafb52be9e0488631b0de1+67108864 feb4e1556edf524f8020b14e432fb73d+67108864 f7cd9b1aa21cede7078a6dde4a9a6d91+67108864 061f4e1dc36630d27d4ce11239aba66f+44772532 0:6148:.DS_Store 6148:4096:._.DS_Store 10244:117550693231:PG0002577-DNA.bam 117550703475:8805224:PG0002577-DNA.bam.bai 117559508699:936266194:PG0002577-DNA.bam.fa.gz 118495774893:465904647:PG0002577-DNA.bam.tdf
+./PG0002577-DNA-jlake-germline/Assembly/genome/bam/realigned e3d7a51e4c0ead03e3e3896342f471e0+67108864 42ba06408eb1234fae80cec40e58427c+67108864 1af5af879cc2eb0a208852e0211e7625+67108864 30a07562440868426251ca573f2d9ec1+67108864 e9e0de7a6665939596b778ac72043926+67108864 e0ef7fc66317baaf856a3f9145731d98+67108864 b2ca4708a9098cdafcb7416705031238+67108864 1bbbafc913e7312f709f2cbe02037bef+67108864 1d4e3218c9bf91ee9b6de69f0e5559fa+67108864 8a089574e5a9a8ce2882b06bab082314+67108864 7efed976b32544fa933c38b1c810521a+67108864 cbf8af44acde3ecc1cca2bd98777832f+67108864 19d0c9bccdf28f7cae5dd51cb6f3883b+67108864 3220af4613dd7dde52fbf72208dedf65+67108864 41447742354119644391fa6e8ea27a92+67108864 1b003e733ed594760ede9079ab5f0997+67108864 5283d84d614113b2732c931d01f47ddd+67108864 db29a5faa61e4301c6bf71012ad1d4d5+67108864 ac0387a2bc339dca235c1f0eb1d6e3d9+24178596 0:1226675356:PG0002577-DNA.realigned.bam 1226675356:5462792:PG0002577-DNA.realigned.bam.bai
+./PG0002577-DNA-jlake-germline/Assembly/stats a3d360c03f346f830fe8c247fb6b6969+37754 0:17936:Reads.idx 17936:17936:Reads.idx.bu 35872:1249:coverage.summary.txt 37121:633:dupCount.summary.txt
+./PG0002577-DNA-jlake-germline/Docs 5b614c0ac2f1b7be407433860ee2a553+5625667 0:277564:1_IGS_Deliverable_Layout_gVCF.pdf 277564:304833:2_gVCF_Conventions_CASAVA_Release.pdf 582397:332651:3_Illumina_Annotation_Pipeline_Documentation.pdf 915048:3765366:S1_CASAVA_UG_15011196B.PDF 4680414:903872:S2_CASAVA_QRG_15011197B.PDF 5584286:41381:S3_bam2fastq.pdf
+./PG0002577-DNA-jlake-germline/Genotyping c73ad23d5e2b04a552ba4172a5efb1af+67108864 6bfe5a7cb20aceadc14486a51eaa81b8+67108864 6c7822f824a85de9c6e5a2e1e222777c+4019621 0:138237349:FinalReport_HumanOmni2.5-8v1_PG0002577.txt
+./PG0002577-DNA-jlake-germline/IGV 3ab66c4b117c21a43e1b3e921d32cf49+15934904 0:2898:.igv_session.xml 2898:2898:.mac_igv_session.xml 5796:2526:GID_session.xml 8322:171972:batik-codec.jar 180294:202:igv.bat 180496:43:igv.command 180539:15725768:igv.jar 15906307:42:igv.sh 15906349:1150:illumina.ico 15907499:26167:license_LGPL-2.1.txt 15933666:1238:readme.txt
+./PG0002577-DNA-jlake-germline/Variations c8be77db5bd5c21d538743aca8977c14+67108864 c7fe6cd59645d31b4de811ff627ef4b5+67108864 014529a882403f6d7f425015980848c6+67108864 dd7715f352e78bf2da032fa1ce2b1377+67108864 7967b6e51b1331f996358898955a0baa+67108864 f7491669a7747705bf63780078a8da60+67108864 ae300af114c7a1adbf251db77ef188b8+67108864 a5d6beb9c88ba5abf7003d34ff44bbe2+67108864 f1cc90f7192ffd09197c429d57400f07+67108864 13051c9b1052878f8a49992355c48860+67108864 eb964b9fc0515b5f6248fcc83a6af015+67108864 30bef0e73685998399da830c54ac64b4+67108864 0b3c289822fda2d7837bdc140ef18b62+67108864 ec49a958f1ec1bc94fd40b99d3baa0f9+67108864 39bf6e6bb4bbac8e98e46eee0751cb56+67108864 f6d35577522c66c11d96b0bce2359b87+67108864 3ce124d6ce052da22b79aa82277879a9+67108864 a6aa4f7b73c9bebc05b9f2d71a118dcb+67108864 de2132d64ee96ca4fa5c8f0bcf29529a+67108864 2305a45a0a9a219b0b992612f1954808+67108864 2eb2714435221611c91295d4b4036cc9+3233741 0:1209008791:PG0002577-DNA.genome.block.anno.vcf.gz 1209008791:3563086:PG0002577-DNA.genome.block.anno.vcf.gz.tbi 1212571877:131236358:PG0002577-DNA.snps.vcf.gz 1343808235:1602786:PG0002577-DNA.snps.vcf.gz.tbi
+./PG0002578-DNA-jlake-tumor 70ff27bf1044858b30152546a861370d+1848377 0:6148:.DS_Store 6148:4096:._.DS_Store 10244:1834705:PG0002578-DNA.pdf 1844949:76:PersonalGenome_Mac.command 1845025:704:PersonalGenome_Windows.bat 1845729:2648:md5sum.txt
+./PG0002578-DNA-jlake-tumor/Assembly/conf d6de8e335f14bb99df74e40d37649876+159230 0:61440:dirs.tar 61440:11017:project.conf 72457:61440:project.dirs.tar 133897:25333:run.conf.xml
+./PG0002578-DNA-jlake-tumor/Assembly/genome/bam b27667638ac1b82166eb4158a276203d+67108864 58eeb2e10cd0859bb96886a205bd4aaa+67108864 667e5de545718d41403ace0a1e52572b+67108864 1c5a59fcef82d558aea7f8402652c97f+67108864 f89963d6b7d2dda6c5859ec38fbd39ac+67108864 8446e283f77664263ba79f6c3d84b965+67108864 d3b974ed13fd0e3f4daa88611e9cf97a+67108864 cd6f8cd41438efa36f836963cabc14ee+67108864 dc4c3d32095842b4542cdbf678f8638e+67108864 e4163f2f75001d911c55291c6557fee9+67108864 a152baede8c988a2b79edc26d92859e5+67108864 203df769bf775c7a903427b47cd50058+67108864 2e1fd6d98da402f7057d9e42f31a6855+67108864 15694d0809b5cb0b9b62e278030fb9ba+67108864 c5659c15bf0a35bdd7c9ce9521caaf17+67108864 4b331f97efb322bab7823f60dc33cbaa+67108864 ef5df5e9be8cebe67752c1ec7b699360+67108864 195e68b05f0175fe26abfe17a52133c4+67108864 d1df9508f48008dd0897e986d5e0a095+67108864 cb9bb9701109b42927d21dc0c441c106+67108864 3c544db7889c09ad04a683ab65dbf905+67108864 b3059eeaa6c4be575ef6442d7d686044+67108864 6f7d0657e6dcacb680e2c18c7ea969f4+67108864 fd19754ef54a96294fa0be3c235190d3+67108864 d403cf9deb5d300471cf491270728769+67108864 c11e3a5138e7e73a8d462a817284a578+67108864 31a4eb0282b6a20b1db03ab537f27d90+67108864 d2925bfcd512c54f774b28371321e80a+67108864 b10985d7e9eb98adc7bcd47fd51d74c7+67108864 cd6611da61290e6320c85aa2e64b573c+67108864 790643150532ca3b87fc87b398463bda+67108864 d73db36a115a9d62d3dc624a68263299+67108864 428b2cb5bf196058426fa5cfb0ab1f55+67108864 0882d387d810173e5466f1659f8ac150+67108864 c5602579109a9527c299cf23ff8e90c0+67108864 1e51d370499018e399e9da092061c13c+67108864 e49696b1edd093737fc543f594f55f6e+67108864 19b700f5e309bf97a4bfe5cfbea39768+67108864 bf0f28dc79bce5d2c5f1562558f27447+67108864 4be9d66602af08697c4a1bfc3c31841f+67108864 b6ab46ebedea1f2232120c1d390bf39c+67108864 ce12df5c0931d07646ba92f9c97ddbf5+67108864 d4afb665643b3b448998e68bbc9a91a1+67108864 92ebdbd5ffe74cfc842bd6ae44d11bd5+67108864 57faf1813558e0a6c25dc13bea541fe9+67108864 20851e6a10cfeda7062427e52035dd8c+67108864 
25a70e81b78029f0be4fd632f57aa7be+67108864 419283b83cbd3202499bd4cefb0a1cdb+67108864 1a4a73a0778b404a2436684ef85a97bd+67108864 917f420880916197600f18d403b8173d+67108864 bd83a677cfc43cb03a6abfbc30b43d8a+67108864 f76cf33b00cddbd5e19d18bbf0fbaa2e+67108864 8d580e6c30d80cedf72fd3b60fca4a64+67108864 ba25a6f251eeb30321f807c9429f51c9+67108864 d003d7b82f5778e576a4a6367b5117d0+67108864 f37d3c41b2fdba068359b21666c4c81a+67108864 ae93098d87574a5f0b86413e8cf5bf40+67108864 7c1d4fc584f7adc3b778852776eb00f9+67108864 e1db975356914650ba3e29d13cd125ef+67108864 853f89dad1549ae9b0ba44899d4ccc8e+67108864 8a182ad3a2444ae6d6da7f6f7f4df167+67108864 a473f2377653cf34cc216528c883a12d+67108864 f0614b5c5954fefc1b2e6e2c4eeb5114+67108864 095fab3d4eb93eaf3662e3d307bdca46+67108864 cc5ef7122b2566b67fc5576a8f5f1f21+67108864 5e495a56cc1dd12d2349a71a066d392b+67108864 ac7834fe596d03c10adf8b689f6f151b+67108864 ee09ce2a05fb11431d2682631b702e26+67108864 e8893b94fdcafbee3257c27255512899+67108864 ef566dc982a25f7723106d859971ae15+67108864 bef22febb5bb8d15b8760795bcc70dd6+67108864 736d1b506a8e928e1911c58eeb5de21b+67108864 8e21577e579ed20b42f0feea41d8bade+67108864 51ae0fbd5bec7e4cf8ffef8a32c1ce40+67108864 66c69c19a031d37c5ce3cc256dfc702d+67108864 503d43e6803675b8387234ee64564ac9+67108864 a29454d0d616e51caa2b352a968d7dd9+67108864 f8c68ced290518c6e754b0e2c45c64f2+67108864 1bbcedb7c16ee806a82bd8b4a80ce339+67108864 7742487b63e051606b7f15765025e69f+67108864 ce33f57c6e8405d27c611991f55aea65+67108864 97e7ac624da63ef827ab9622ca1f5013+67108864 25ff51e2abedd04c71e7337e49ddd2cc+67108864 d92bcb904e10595be0dc35838bfd01b3+67108864 b5107a1d8064062dfc20cf915aac6691+67108864 1a35c6f5f345d085c5b1ce13733b6c3a+67108864 aaf44b9e1927563d13da3e9667624594+67108864 eccece5552f7a4aaf887fcf1a9445fab+67108864 e842b55a948fa0b492b69fd2c029f3d1+67108864 67c57be4efda877f9b44f4d1a2069a1f+67108864 6ab8cdae41408d27e4da3682450177a7+67108864 7aa6e963e26b2b987ffcdf223d42be5b+67108864 999b6b6ce0fcea94a4261395eff99038+67108864 
acf4418cf27bd8ab5e40e2d6004ee9dc+67108864 ae3f390d34b4a92c23053c44a39f0980+67108864 b2df89979a416c6703222503a8722b7d+67108864 325a9362025c52c3a93daf3a411ff54e+67108864 b6b885513511313a706768655b770470+67108864 7924347c70e23e71392ac3be1a8dd202+67108864 81b852cfa07cdedbc467204835f346ec+67108864 c14a909737d44c5ddbb31d8bbc2301aa+67108864 8eaaa6ccd27b0433a16379fbd46a46b1+67108864 7b515b1799a09df1c53ba8a42afaaa44+67108864 da78adc26bc2739f4778ec04cf4eb545+67108864 f89e9a83a12f6c20c99e2a8128b24480+67108864 32cba62bd7e4e0d6c669059d9e9e8187+67108864 9c98cde5507452e6e66a92a47f0b1177+67108864 3ec80b900ddc0aa5351ed295c2519506+67108864 3e440a25f00805e345904f712459d05e+67108864 c6d5b9c92034e7a3ae676db0f9440410+67108864 a459a2ef3c7a523d03641d4eb5b19b9a+67108864 38af85e2bfdd36f39e871d8ec77e99f1+67108864 3ef6789bfbe65b73ea91aabaf2814e51+67108864 71d11439333ded47932b26a0c1291ee6+67108864 ace062e48a19df1816c912cf14ec73e3+67108864 96c0bd7cd898216e1de2de81ba431fe5+67108864 b7b0bb102bb512b191d57dbac648a9ac+67108864 1beb107a743ddb783f1df684a770d3b3+67108864 dbb7ccff44d8a1fc3e2c2f8b56feef69+67108864 376856dc7222a5d06fba46edb8d421af+67108864 936cde3ac43bcc6fabc0dcccbd8c0bb7+67108864 fa0064d0ba4b197ea1e043b39214ecf8+67108864 faff642c6445bbff6766b2173caf52d8+67108864 329226588c99b28febbfafcd465a0506+67108864 b1053c66901f8c9ababa599a215f5865+67108864 52a1e0723389874dbe26cda89a7ba074+67108864 651fc0d719d0ecf654fb68fcead40fa7+67108864 11a5aae83b1855f665d1fb649ca77551+67108864 49fe9f4b9374b20cb104ce019597458c+67108864 ee63c03171c5ae71845f9ec36b5d7810+67108864 dd0eaced2856c38ca14f4b4e7cd1283a+67108864 031b5092c185f12485b6fb60f12f1bd9+67108864 52624c84bd220136bd295b728fb39ee7+67108864 619ce6edd5b950b97b0144fd8cc0d846+67108864 2a326f8467fcc97e00feb58a2d282e60+67108864 afe28a3567c3d8e22357e54767a66284+67108864 43ddb4ef932cd779efa7803a8c682ff3+67108864 d714bd4ea327fdc41bd030ba0dacfcb7+67108864 cd0b60019265606af1630fda58d7fe76+67108864 245bf3c257e85ecc61c94b21f199c3ba+67108864 
cd57ef218d5dab7230df2ed458400f4a+67108864 05c0056dc807fae33dec61b84c21e72e+67108864 a5dbee91918ee2fe8ca60b3935a36f66+67108864 1763cd31e4ce10548f0cfefcdd389b27+67108864 f27c37a2d77439dfb851a797df640be1+67108864 5c2afc1352ac67ca338a2bea4daacb9b+67108864 26b65a719765253a2248f1cbce10ae42+67108864 ccd5dc52fb8f92f467e99bba643e962b+67108864 d6f46c28ec827a571d03d44c1b3f0be6+67108864 835810e6e8acfe67f9cc53f12058f3f5+67108864 fc04ac6ac1bc8f85696cbf34d47e067c+67108864 180ef12c3f768a9347368a3d68f83a76+67108864 6e2cd549271844576032f654ac3cade1+67108864 b0063ee8160312761b2d95ecbe61add1+67108864 5601decb3b13f98a1246f4841254d949+67108864 c2ebf8433eaa439e8a7101e8ce735b30+67108864 acb6486284c8ac5ddc3a5cabe10f6ec2+67108864 f2d534128b117a03e6a1b528fd7739bf+67108864 3aebf3617fcf25db123b20011a96e9d6+67108864 dcef10edb820661ba7e708c61789d9ef+67108864 115ec18ea7aa397236ab039332c60ea5+67108864 8fe654a590ccc870c5aefcd848768480+67108864 d343ec7f640ce4a8b1e9bb49d9c16f09+67108864 3a24e80c488cee7f138a7c9357236997+67108864 3f42b92c722326b05303aac018ccf3dd+67108864 b91bc0cc81c1a2886034555e0de77cb9+67108864 b04373680126dcd17847013f0a1cdc5e+67108864 771305894ea402b6275fb079baf1e176+67108864 83b902682a699d2e509f6148be44cc81+67108864 2f2dba45cbc0d0a3b039bbb15e642806+67108864 28c35daa91968426f743e02c1df09ecb+67108864 ae9c053be0004a45a04bd7c684816eeb+67108864 b779e49cfe032ea858f04a174d5f7915+67108864 1cb18ea4dfffb211cf2e1b3b89684cd0+67108864 700a140d8be71a3a4111efa7676ce751+67108864 0e39fc71ea3aef669e46273e879a9cb5+67108864 eb075d86d17bbe570daee899e531a0d6+67108864 1209317403b766b79db6e691aa51d807+67108864 06511508a5f7af30d2b1a25355709ddd+67108864 23415542c3528187dcaa667ab27e910a+67108864 b21c149732221ddbb710146b6e0dc641+67108864 d6cb43cea2c25237ff24c957ed305c4d+67108864 130240a70807018d8353a49ecd9a114a+67108864 02ee615b4caabd6b65be38fecde69e36+67108864 a44590e6dbeee583ec4e2e556fb4ebc1+67108864 6690720a79e990e60bb53330f2381b60+67108864 938b829c29b361dd9df3345183ad8a2e+67108864 
c0979fc234519c48ff714b533b0ac84f+67108864 2925fd6e9204dd1e120fa0b1f81b3d79+67108864 824dacbcbab1e6e4bb3bf19f0998bd07+67108864 31f33dd52e63b4d1915be40521805ac7+67108864 75721f7a0f736b2e89c3660dcdebd669+67108864 8b5ec2f053d27d497a02bbd4430fd680+67108864 ef45bf06c76c11c1e018508ddfbea33e+67108864 46ec11a6412019631b342baf9bd8cb1b+67108864 a936e7b58f6453dea8eeb240f78436f7+67108864 cdc0790f7b0e0fb948d2fd72eadcc1b4+67108864 e4e68a5f78554687704ed9a6e02c587e+67108864 92be6241e5980eeed5c52922e5fb506e+67108864 cbadb73e5d13adc78f1cf5128cf2f071+67108864 a7d0e1b8cfdd33fb776e040a6d6c95b3+67108864 22c7264616a56cb9051f42d601d59e6c+67108864 47518cfef61f5870fc537240738134cc+67108864 57d873bc31e939b64fbde571a81807b4+67108864 e39e5c68e7527446b4569cc06d4173c1+67108864 445a9a6abb15410f43093da9ef20d14a+67108864 c03b8fa6fcfe2a4952410468e684052b+67108864 fee487e4e6e506f97fd349b827a973a2+67108864 2b13ead8d886deaf68c310893dace3d6+67108864 ad5e842788cd9cacaf6380cce4666e4b+67108864 818508d6056786e1705ae6b9c5135752+67108864 53f0b9bc1be7cb1b7a329c22f2ac4b92+67108864 61e1042f92891deb1a3ca198ed66111d+67108864 be5addb257cfa18b00ca9b2e394cb289+67108864 b527844cfeee11e254b667885581f483+67108864 67b373734db9d69ec644c1c7db2bf5f0+67108864 8b23c1262fd32884dcfee4a801af3e5d+67108864 000bbb9236cc0ceb98913aa92fff5e99+67108864 7fc2ffc8c1b6009d078595b74a62cb85+67108864 5de77a68570a727b13666ef7aca9366a+67108864 54bb5594fb272171844bd33cb42ad9b1+67108864 590f8787314bf797bbbcb2787f613835+67108864 fde9b2191cbf07223459ad210a5c428f+67108864 503e56d97295d6e0a12045c0e12002ee+67108864 03a5f9b183dc9569721ed453644ddc18+67108864 2be6fc27fc31381276862d2abe1ac5dd+67108864 5947b0d663c0a52c05875ec5ce61bd84+67108864 4ef80e9e4dae16a5aae51a9501c616ec+67108864 153df94cd79dbb2dc40303907ee33b80+67108864 09104bb5a00744a104ef3400b21f20f0+67108864 2c124e205a229a9e99c2b304be89294c+67108864 854ba0a363dabefa28e8d360250b2ba7+67108864 95ff4f4b90489ddbc14aa33c7c867e06+67108864 f9373ff3a5039c6a5d2257ebef59dc3a+67108864 
7f1abc805d8f6324c34b912af0ed0493+67108864 86ed37c6ba43db6e1b73df095d2767a5+67108864 09b0a46851e882230861ffc4177b4ed2+67108864 1092b56cc1a2172b7084a0d99ec6d0b7+67108864 f70c26b2a62dd5a914208db67d341064+67108864 830ea49cee6d701200b3c08b39be989b+67108864 4255d52ed73e6e02f8bd4c431831adca+67108864 b43115fe604e7741234d9d816a29e89b+67108864 e5ac0b97870c3cadc72b85097df78157+67108864 db9645ed90daada987e1d19ff8ec56ae+67108864 ad29235faaf4367473e2af9fda3d5080+67108864 191eb8c2cd7957b63474c53121e79e70+67108864 8503282417733089b18da8530ccedbb7+67108864 5e0889c82ed4c2a16d34c7ae9b87a5c2+67108864 d45d6abe6f8dee6cd021619e017fd63b+67108864 2892b58d187bcab6e8878b849f43445d+67108864 dc541d6a0af76dfb86a6770bab90e408+67108864 d1d42ce6c4d0499eb502f2b413b84e8f+67108864 b14d93850fc3eeca8d6daab0b577a408+67108864 593d932d2839b798fe3dbdbbf8c3a7ea+67108864 f384a78c5361f354cff3d6abbc08de67+67108864 6cf3d9317a0c58fcaa96a06edbb4c34d+67108864 c2ac341f1ffe67c5f729e0f3318ed272+67108864 08b4299b0bb7afbe68cf2af3aebe1a58+67108864 7dacdb347bcc696d50683b5947de2941+67108864 bf115a0e40b82478001cc3337d05f4a5+67108864 9772c379171a80eaf9167a5229eadb18+67108864 b2999f960a78a00649e7775949e7348f+67108864 3d99cb6768c58bc98dc55785440cebd0+67108864 b41f747b1d5f7428faead1391f9aaca4+67108864 1287df2ffb45e0415a9a368d52f8d508+67108864 7d6e7553dfe6ba49963519eb068713af+67108864 14a8e108d5fdfd7eef34851c695bcba7+67108864 8eb7f420eb13a536f0392adb709885ef+67108864 559e61efc2da01fdb0986679bb1dbc37+67108864 2cec25bda3ad1d892fbee59af1163685+67108864 315616c4097c448b9cd9945744cf6cc7+67108864 6c7fcb4dae7790a43a4cc22204bb6a84+67108864 ed2551b5c8662c3a57dcc0c712e4fcc7+67108864 15b54667bae409ea2d760f93ae26bbd4+67108864 f95a0fa90b01a5ce5e6ade63c331305e+67108864 1cdd6995b63579938be9aec26195d995+67108864 23215d1a26ac2696e27f03418ce138a5+67108864 7b611d9a0f689a1544df8132347a56f6+67108864 8dd426c07fe5fb724dcba9c2a15b2931+67108864 2ce2ba723ce6ebe056066a234bac46ba+67108864 c4e8594a8b7ce23b3da167825c8de7d1+67108864 
4e7e54bc389c33c968c46f1a124610e3+67108864 f4f9aceea1e08862d46dd717d8b4f8a8+67108864 a2aa43ceba29057bc66cdb9a361fb419+67108864 5e886dc968de49b8e6abe6d151d53fd0+67108864 99d00caf3c14fd72f776c383c90ab631+67108864 08544e774a050e67f03e202514d1ac42+67108864 b9d6faa9b8f2811ccb451d2910a6a589+67108864 f977340bde7d733ba35fc998963fa8b4+67108864 135ac1eee41a279e3d000a146adb1fce+67108864 b30fa80fdd9ff4691d796fdac74f0093+67108864 a41417fec9fee6f189d834402a9ebc8a+67108864 4ab7b6f850183e8d1af3dc0f94a1c83f+67108864 6c1635db222313655d1a61cd9264299e+67108864 348a8d7f292e600ee53dc9c0cf48358d+67108864 2c2101c9815e64d46a38a5c24720b233+67108864 be8acf03fe71d9d43c3cf59e77ef3f92+67108864 a82c39cf815336c4d230429c9e53ea30+67108864 7c3c4b4dc62efd8c2f3597e2ca1b4c4b+67108864 15f7175e532a484f4be80d6f7e15b808+67108864 d44d3e0dfa084fc5e888e1503fb2d28f+67108864 3fbea3c0a4c7876ce9358de20795b751+67108864 735b8c3b26ea942e166321f4e0bc5127+67108864 286e2637c88ccf5187dde287a4ca9990+67108864 bbf981a586017a3a8c323d9d0fdf1a43+67108864 5a809f908b75792c8589eb2b23a9b8c3+67108864 d6e897e6dd3ee464553d3c835cafae94+67108864 6ee1aef6e62e295bc7dd79e2ea6e0ccf+67108864 f3baa399c5e8bb8b3db494b0107e5af1+67108864 99e401e71cd20430f26df9cabd414658+67108864 f1ba18255163e40e1a7738d5107fa214+67108864 9be7a2b18f11ada848d5586e353cc2ff+67108864 4f453ae5dfe7744f1f355c8c360cb7e9+67108864 70348d7e74ba8dc70519172645d98b51+67108864 1149a22ebe1be33590c5d531a974e79c+67108864 379397789b87b9c0e44d46dd9a7b0927+67108864 0bc9781da20de560b00f9492bf9d26b1+67108864 965ef8040f95c779b567a3c2c9319e7e+67108864 d86b8775c719b63188321560cca2b076+67108864 6ce9dfda8310aefd648aedf750c23900+67108864 68b06472825519dfb5f743d759708c8a+67108864 9617ea0542a1ebf7496efd4477bb67bc+67108864 6335e90273f14ca152e88a986ec40935+67108864 35f1d34a24d889cd1d86b10b1b233c42+67108864 49ef382f65fe1ec46cb2a8a4f8a960cf+67108864 a3f0fec2599be5ecd19b360b203e5876+67108864 743dc90628f546541daf0cf89710fdde+67108864 d167ec2708343b4b13fba5c2f12bf9b0+67108864 
bd0bb42bd6a7bf267b00e81479ce3de0+67108864 c92c45f21773d3fa66afe741fc40c79b+67108864 b31bcb75bf11f2a25a7829cf0d8050bc+67108864 63b72753cfd53c2cd9ab94bcb5902969+67108864 3ba309561299c6547c52ee967fb315df+67108864 aab873f87359a7dfc85b0fe51f01b5a9+67108864 a150926f927cc3e60d45b8b18f3f57a3+67108864 f195bee4c6bfba67434e6204ba7c082f+67108864 96c83f753f9b3ff62b1d097d7ab5161d+67108864 85ae48853801003271b8b7a113e3c439+67108864 7279992bc5df537682a25710e83cb6e0+67108864 9799fdb7db707cebfc2fb23279d1328b+67108864 28b4a1cb9b7d06db17482190b37eb014+67108864 bc8887be40fda9cec1cd4e405dee6784+67108864 67b3c19c0cfc1f897d082fc32558c0a2+67108864 66b5d0f197a5d622cc8a46ebfb91c831+67108864 aaa8c95f2e065adb74a630951644b69a+67108864 1df2ba81a4082700248ed31b49a78d3c+67108864 c641804541401992e665b6d71b08676e+67108864 43a751e4dd67528826b1c8b48a208899+67108864 12e4cfcbdac91438cae64b2cd7c9780f+67108864 0d3aae2988c4714fa91537613c5e616b+67108864 2f95606f37db2656ced4b009d6905ca0+67108864 a3053b1d8868bcdd108d9bbdf8987667+67108864 f4c6f20e3e6f4e9fbd11325b33c81f28+67108864 079ae4f891032ca44ccad706c3210fc5+67108864 aeb92132131d06d085946802bc63379f+67108864 0c51a101a9015707f28be913c549ea2d+67108864 a3adbdde43e893554b73304849163d31+67108864 1124b485aa2be68ec06067db378caadf+67108864 105a90949ef8a77743739cf6912ddecf+67108864 2d6eb744f7babcbd30e17f709cefc92d+67108864 c6e8581198ad5ba878697e95b4c55dc7+67108864 6633b9c706e53b649cc5d0eb5d7474bf+67108864 a0a8055971c9c2698af9cae2dcfc1de5+67108864 7afa27ead5036742aff637977742bce2+67108864 2a4689ec1a63cf5ccb4ae3754ad143a3+67108864 c50953205baa8180b24c33b3430e1895+67108864 43fa314ee5b342bc7aed72517cdff282+67108864 a3df0148323a0004acfe8387c67cc7d5+67108864 1c29892044dc4854651c73296d0e0c41+67108864 9d75a69bb257970ffaeb4ffbdb890728+67108864 b1b22a2463ee4106c480505247037743+67108864 7072621df84141b650e222049e3c3d4c+67108864 638e4160832bf12ac9eed8dc2d1aa57d+67108864 b88afc09886457810ba20ed4bc9725a3+67108864 45bce56d25f3612a8fa4d15adad97539+67108864 
5421b2945c952a99b69ef6a6a70cf453+67108864 1f6f2dfb6224182059688f5d46e61ff9+67108864 359ac6d0f79d5e29b465fa12547d45f9+67108864 7b7c538226d537326ae3c8a334494425+67108864 86cffb84184aab85f109d155f45a6289+67108864 1913f3176b9441fa5adb2b5feb112a36+67108864 6020e46282904da6b914d6a37756c9ec+67108864 e7fb9206f7d4eafb469b3619a37f64cf+67108864 5c4eb1a7bc507b0c4fd9147bd6fe5b3d+67108864 39511f0f76cd426f1a8b55563a393090+67108864 228589eaa12f741ae3d63964e552ceb9+67108864 e71fd43848d544eea1440595225383bc+67108864 5d9d7af54c2b43e70331e74346de7be6+67108864 11f889041c34528b4f32b717fcef37f4+67108864 41f9c5578d0b11e7d4e96d951407a495+67108864 3164503cb8eed5eda0f929e89b4ba41f+67108864 e8232889c0ebeb8b95efda8a0be0fed5+67108864 f831bb088817b20fc665cb152ffcb7f7+67108864 c317567be4a749a38cee367bcb4650be+67108864 535ff483eea4aae8db6693c6fcbf4c96+67108864 4c3667b429aeab2cbd1c3764808beadc+67108864 f2d7e4f9220a5b144ab935441ee5a4c4+67108864 46376eaaac4261635cc8d4c421ad989c+67108864 8ad8c6ba6994354296efe3b271c83d68+67108864 f3ccadadfcc75bb032d33af3f218136d+67108864 fb7f377672c6ab9a6d7ee640de0708da+67108864 455e5b21427a1e0c1166a77888e3db84+67108864 56c1bd2a46e9c510c0f9df14a5e35005+67108864 729f094efa399f560639698946c924a5+67108864 f3597659df39a5489bf09e9fee9e5f18+67108864 6eb0404e6575741c52f564ed127a9e58+67108864 15b2142e9955e2a7f2597942e891d800+67108864 24a2e5ad926d978f93f1a055bac2ae82+67108864 6da3be627ed06f3302422a3571912798+67108864 faf50ff859c11e516516a396fa90e370+67108864 f30c246988782a97f8fb045f8562cd66+67108864 3dcfac0f2b47e7b54dd42b7593c11b3c+67108864 8c0cca153ed7b0298c272a38659dcd35+67108864 24d100860a2522d686b11e841e6bb3fc+67108864 a0fa91ad649ba8bfbd39edc032164aa4+67108864 4873a584c9bb9b86df2dda047977223e+67108864 71af7580293bbe70550235da201bc5b7+67108864 95246229f010e909205f973c336ef913+67108864 760718000bfa43f87faced458fef8a81+67108864 b2bad6b04dc56bf6d486a389c3a89f69+67108864 f131bbb77c16d5e8bb37241b38aed448+67108864 e93e069d21f6b5c65ad59f54e10954fa+67108864 
8edd8526ceeae5a2f73898ea94a2637d+67108864 89998b502934966809d3c3c5e9fcae09+67108864 2fce7d4eb38c05370d1695cb5fe2b283+67108864 19d601c0232c10ba5dbfd1ad65e26dd2+67108864 9c7b6580cb0a2b603d05117f559453c8+67108864 72726a8bee94ed66806cce11af1e2371+67108864 2b5db38f1eab185a91777d57ee86d2c6+67108864 ec6db4e314d2fcd9740bb0fb6a05204b+67108864 94edf1746c8aa716099d6cb69c065a40+67108864 4836f61b8d29f86f2cc8b7f449587ae9+67108864 436d54ff31569849e9b3b861a8d7a2c2+67108864 7ab27b51efccbb501ec8b0524d862005+67108864 0c23eb2a65db550177f8a03356b7e726+67108864 fae904ea4bded822adba6a676481440a+67108864 07f9c8f81344f0cd051bde9c3bb20398+67108864 57f5fabdd15e3deefe7e8670c015ab49+67108864 27812f4f1bf76cac4467e94bc517e2b0+67108864 eeb1e8dc2b9efb3ebab313e848ba415d+67108864 f2827412ad0b5ca07628a44c28fce28d+67108864 27127550ebc9c8be6cd30f08c20d3e77+67108864 827a8e19e46b575e698a03f2f159929a+67108864 d6da49faa10324de4f345e0ba7eb963f+67108864 48ee9edb8e9be2824651378cd71bcf60+67108864 d42fd051025373f0b395f9ee413e133b+67108864 8f72b8ed9aacb48b4d008fba4da38055+67108864 4c382522d1fb5e5e70a7641b11cbbc5e+67108864 dc40ff7e5dc3419dbf06ce5d89db77b6+67108864 b178799b404d1a0f2405c45f853bb87e+67108864 aad7711a0c588e389e1d898837c77975+67108864 dc92a289df1ee20c2a50c5cb34b9ee9f+67108864 c0b506a6540b190c8de760845d89a20d+67108864 95d57277bef8851c91053f8930557783+67108864 f1fbd42cb8a45b97fd3e1befbebbd141+67108864 343af058b01d8c9cb6deab3c7041be9e+67108864 ff56bd3a487164a446760ba4691ec93e+67108864 4ff72bbf0fbd4429341cd6c0633adfac+67108864 9b08953e5766d8cc5665c73f3066b9d4+67108864 c26eb39cffb14c5b29464552158f6f9e+67108864 2597fd02083990464f66d77950854dad+67108864 da1df4d16834f62ee21f8f414b63264a+67108864 9ee6fbfd042c057d1d700b51a3d6620b+67108864 1728c736a8598ede8333ef2359d3c9bd+67108864 a1a10371aa32905f2ec8ddbcba5fb065+67108864 ba2ad4e1729d11bdce0129d87f2a999b+67108864 c41113602add81f97ffdc93b5c3ee306+67108864 ee449a445b76a3f2590fe5ccc313cd55+67108864 7190f9a48c2b16ad0b93eccd0ea07e5f+67108864 
d825ce686c183120f818411877161caf+67108864 8696a1e8f5542fef24a8e7a1b4eb667d+67108864 eac9cc77117da7eca4e691815d440570+67108864 4eb5252425e2342053e27721ef419033+67108864 bb6ecfd5c6c3404d9a4c1f44e75397cd+67108864 7dd9c32e758ac7a2a6adbcce675ea9b8+67108864 711f558f7ccf54b12910e7f4646d3ed4+67108864 7867a303bc4095bcde227661553682af+67108864 9370f355a3193b46bdecdafa6cbb9c8a+67108864 016b4584b44296de38f1d102937c59d5+67108864 07261c25f8ca01b3a384f590673a3380+67108864 e6994e3638f2e12ed52a104c14d88b1d+67108864 16e57ee081f408f8dc74e909efb63879+67108864 235866ac3b7d83e27bf8f4a6bdb56c02+67108864 25efb32940871da66f4ea5465334825d+67108864 f79d26bb161e83bd2a26b8c35b1d42b4+67108864 2783ea5a5721654486aaa5efbc83fb74+67108864 461881362fa60250adae60d3c49dc131+67108864 f4b68164f31c286249f1e5d2d7237c35+67108864 659eac0ad8a3e7e597458709bb795680+67108864 034ebd61fd1a2c7b8a283fffbdbb6ca4+67108864 50d93209850a3d6bbfe509ef813ecafe+67108864 42a97fd6584b9ca3cbc7dcb74cd405a3+67108864 3d0aa7902822df7f39feae96b5344f1a+67108864 f5dec5ba880a6d6392e4cb7605ffac0d+67108864 567809b2e7769647651efca42fef2e8d+67108864 6b12c06ff734e6e58b9eccf64bf1f911+67108864 7bc4c900e5d59443b0096866a80a700e+67108864 b8add59db3b248dafa75beb2700136cc+67108864 6ce79cc7c998349ff12c651f464b4d42+67108864 6cbe5e5e6b19de4a0b715e0d845d5719+67108864 e59a47b51498f2ce7f276a9a0d63fa66+67108864 4f5ba032de58d21a0cbd20ff0c5ca2cb+67108864 9d5fdbdf3cf4cc4fe2c32e5678417bcb+67108864 606d871a7812d8d61717bb0d6de1d3bb+67108864 a7f59b847684061ae94b5e0d9bf70cb0+67108864 7704eacf857396f10a46a5141067f26a+67108864 bee69de8466c8b7ed1d8729658a40027+67108864 3c3c55ff9c9cd05f1fa4104140fcc901+67108864 4456b7aef7afd6689d3570b66ef0182e+67108864 50ba9fd74ef8d07e23dd96b20e1c1067+67108864 ff2c79e2bcae5c9204cd64cdf372d3b1+67108864 2ce25d963ba37dd5b24c2df65dd1295f+67108864 20b81f528f70c9e5f920e7534afc2bb2+67108864 a0b0e42685869944d3768dc337580428+67108864 fb0d0bc80635bda7b81030998c5bdec3+67108864 bfe86f22ef97ee20388699c8d4e824fc+67108864 
5041aa0dc3a9b4b86f5aca145314477d+67108864 68e608344390a442be79c53395223e36+67108864 86bce8f646770b66dff7cf43751b77fc+67108864 de0b67d42b4b6a7825a3bef956737e5f+67108864 4b88b81edeb09740420ec66d79249c6a+67108864 755132bfea966ea29dec0b5703174ff2+67108864 c80f48e88aae10238b1c28647c7ed739+67108864 e001ea8aa95caab94f82df29fdf27e4f+67108864 8fccfa89d4af86d60e9f97af9936ecf2+67108864 bb01dafd921f67f6cd859d329eaa5463+67108864 931a95676fd8ba998400abeb98ad293a+67108864 3e0b9148a86dc17ee1ed7b5d44f394bd+67108864 63b29d2a9bee8cf79d63ce337feee7a9+67108864 bdc851ade507ac37df5fc3d3d869a996+67108864 fe054d07167887c82d439c841ae352c1+67108864 0107f8ff057ec92321c25060cd4d865d+67108864 ea6bb18766a302faa044cc2357ad1f3e+67108864 487fcc9b756d31eeb5ca6c2006bebf91+67108864 233f7ea448da931b0385a63aa0b3c1a3+67108864 fe1d37c64f7e48c69e4565428bc78857+67108864 4990ba572fc9afa7f325052b51d0a47a+67108864 423266abfb6cd3571e8e66c2b30d972c+67108864 8a83c116090deefd716b983e63ecd5af+67108864 8e9b6bb307f89902d414223e9cbd7d5d+67108864 e18c8fd868d41face78005a8b255bd3b+67108864 4a60796fad1c71dc21c23a74ee658421+67108864 2d27fff66b21eb557946c09e9b930deb+67108864 524dd7eb012fd8b3b3f64b31f43115ce+67108864 512765c1f1685768a0d8bb47ae5225fe+67108864 1e11f5b0124b427d1bf462e7632296b4+67108864 41faf74a022e8b3a583cee0410be3c32+67108864 0b4a883486509fa81c8c19bcc51532e2+67108864 f708c01017b33cdc5f4d373da7fc90ff+67108864 602de9b6714a2d9b55b81670256bdfb3+67108864 87346a79929afc2fb5029dac7a2279ae+67108864 e1251b9b403eba79863fc82fdfbd943b+67108864 189117239220e41f0d02e8d7620a04a6+67108864 736705421734ba02be110da2808cfa81+67108864 295f181993c67d894145c2c071b4301a+67108864 ff84adf713707bb22c1eee95e4e27dce+67108864 d3c2aa31c1d8d790741e828df484fe17+67108864 7aa19cd3f187fa5b7dd623c2c09e4236+67108864 779595e6c757024f5fb2fef69d9b048b+67108864 eb3fc60541f45f8ed8983af5c2bf3b95+67108864 b8fe527fc513cd3f60237c6aa87b0573+67108864 1c9d80f46710684355577738e5a07a61+67108864 98a2d14f30435bd37e68e18fd13879a5+67108864 
dc464b8e7cc454900bd1af2f73167ed1+67108864 3fc4b913ade156db548d8db91e03389c+67108864 677c1763fe8b931e01b5d76c66119051+67108864 33bb1e8bc7548970a4b3f7055fed6cec+67108864 43857b257e194d9a0ba6b68e16d81dbc+67108864 4662626ec479033f2c67f00c10f4f3cc+67108864 82b053bffc39afd1835da5593026c5ed+67108864 f090aa4a714e943af26fbcabfc5a8be8+67108864 6a655ff89d9a9e1f034837de7291fc2c+67108864 f74ae1321c93c6d3d0ac64cee951518f+67108864 d4c8f4b673100f15076763b856e6261e+67108864 fc516957b488877fd52fb5723aaf1d9f+67108864 6729a81638a2518956a3eb5a6a18fd91+67108864 f13eb8762ad4935d6cdf6c872a5afc0e+67108864 8f0d2b097917b4f052e5f9533a55fe58+67108864 fdc3ecfd8eb73392e1fe1aa8edc916bd+67108864 337387bd132aaa183ed5843f272a49f4+67108864 6a9db804b6f2087c186fbc8ff77c79f0+67108864 5fe678e3a9e49f4d8debcc56a4d04dc8+67108864 89f2ad15d704ded3af481430d26c00f3+67108864 f803716b4143c361087ab2a034e9abe6+67108864 4edf94e4dd6c5c6bfb72da385c2b981e+67108864 c5f51291f77b31cf93fff9e294ac9659+67108864 d938cf9124ca3f39eff966a98668b3cb+67108864 aff6467b5d36539c8c6444107f08e959+67108864 da8a0ad7cd02d4f78df8ef8c8b61f8f7+67108864 9d33faec81bf171b5dae27fb380dfd8f+67108864 e4093751a7136d8d17ccb5034aced419+67108864 94fc879d3bcb98f726b2cfe018f3be3b+67108864 474ab1d4763a090f4a361d660544d8a6+67108864 8af4c1580f8c0bed92d3b860238a37f9+67108864 63b45b164b25c4457f672ea2095bb234+67108864 3ebf649e0b24f32487778caa41f24442+67108864 9719bd418db412587e6d66c75c3a6205+67108864 346d35f73e586a9e5e2573b336f194a4+67108864 efc845c053c037d89d9bcc059a025db6+67108864 186c5aa2db96fc6b9393d7d675a11e7f+67108864 631b5eced91f74bbfc206cf5856e812e+67108864 916386facb14f03f9f3251ca7b07476c+67108864 12844c228114136ab850dd20e11ace4e+67108864 ed3255e55123b2ef21f4a7d2f2bdd644+67108864 0e31381b92d55f99ced79c14281ab52a+67108864 fed7f83c927dc4e5b2ba63e21b1736b5+67108864 22ab11bc0606f0d041e34f4c1139a209+67108864 20bbbc8375b015e4a89df74a1566ef47+67108864 7d7752212e5226ae570e03c9b2700fdc+67108864 cd22f0b787d25927062cbee33bbe3371+67108864 
40baadfd1426510073508dc0628f0794+67108864 181e4bf30a8f77da23c8959e1be70fe8+67108864 622a02d725eb682b81dda117da700ed1+67108864 8d9f0c71149e9c1199a951d452ea0d02+67108864 1814f3c34ea4c42495a79205816f169d+67108864 4f4298fbaf1dcc46ed7c3f410a065d45+67108864 960e592059027311a32d1c1e97bd7260+67108864 de9ffdb227add673b59b10bc19796360+67108864 2736cf83c1bdfbbfa742472b5af5e651+67108864 1269c0373263126cf938de481aa9b897+67108864 fe3fbc87e7a23f0160a69c0ed626082d+67108864 e276c8357f973f36b45f51f0c71d5aed+67108864 1fc4414ab833f2b0cb43c5c067aeb39a+67108864 652af913139e3e10555bdfc56ae16b9a+67108864 c3119097a1400ec0897abf917ef8a108+67108864 69a8272c1d1f14078e1d550b611bc985+67108864 23675ce9f0120b2d89289e253e92ba8f+67108864 f959c9a7b0272c49ed1f084208694276+67108864 b7e9bebb22287722cd96cadcc4eea2fe+67108864 15acb83d077045ffc1b8458a48a6398c+67108864 c4f0640ca93090b7a06df55c66110547+67108864 275e73ff5e9266d5d07fd4c9f0b6256c+67108864 2532625da1f8a0a444ef53dd0d196db2+67108864 7f05e653e318b82ff410b83e6bb0b556+67108864 d6e133ca3f00f63184ce80a53fec3107+67108864 b8daa3bb4165f81d53aae24183eb6608+67108864 0a8d711d220316af33b0ec0c1818f403+67108864 f9c34cdb6b97d2c5dd58430d324e7f05+67108864 ddcc459d76f637cec850257ab5fd711f+67108864 a532803027b7545065855b827ca760ba+67108864 c5109e5fbf248ae1145b45ff2e137a86+67108864 088bba73713732e628ed664b45a80878+67108864 720af32dd389f70a969d6e75c8442d05+67108864 fdb232e6af0bd7d28e2b5b07c62e1aa5+67108864 7a50af59afbed238c6bb61dc3ac22675+67108864 e3831c594aff61c21fbdca24228c70f5+67108864 b64ee908abe5dc9b59db0e928833f81e+67108864 7cd627c229001bfe2a2771412125ad9c+67108864 2eb73d895bbaa6e6f704712c5dfca62f+67108864 5fbe4f7adcb52ef8afe2770c3f444d57+67108864 cb00a9a59fe9adae4c19c55d0f67cb23+67108864 21309b59a02bf3956b350c33f5ce016c+67108864 fdc90f4032fbf373b66fd1a1d16cda0a+67108864 1fb14dbb013e05b7df0ec7275dac673e+67108864 a287d100a50408ba94ad3c1956445e44+67108864 bbf5b0e3c7d1f855d43c1b3ae5713cc7+67108864 66364beb6181a393943f61726e06a63f+67108864 
71a199035090172c2c48dfca851fb77b+67108864 c85f3d46426e932c3ae5f20929f1d412+67108864 413053233729159c37ca20b2d325729e+67108864 16ea95b5143fac3d4894db0f6761bc07+67108864 65e3ea0a4c2192ba1b3dde341beb8b8e+67108864 9be1589739885d435b8208ec3a5608e1+67108864 19a2d0b16817c6e11cbfa36f0fa17cc5+67108864 e5b38b84741d4def69309c9c66532c92+67108864 1cef5fb113965f16ff73f0d74ac5f6f2+67108864 57a91e408fc820b8cfec6d2733f5b573+67108864 bbadd120a6bce9d182009445d90bf403+67108864 d498136e6a6b6821df6241c945245a72+67108864 a3a7f8dbc36741da81350f2fdc6f33c6+67108864 0459e23c9c447e5d6cf4a733a04de961+67108864 2dda2d49a05aa1fbc58c3ab040281bfa+67108864 30b7695c33688f894a83f3cce0b67da7+67108864 8689b86276aa67441da2602d13484a16+67108864 a77f3e13c2744bd912ecbc94ad5aade4+67108864 10da7c2337eb393cfd600368862ba662+67108864 ce7e2fde61772e393ed0d7c31e855be3+67108864 096b521bab8c247f74789d070e771807+67108864 1019c2b793990d6aaf7d81bbb2176ee9+67108864 54ed19ddec9c9c553f59ebab3a7d5dbf+67108864 094fdf5c30c3d805c38e769d5ee928cb+67108864 1d7ebd59cc5d0cc9400d6f2fb9e29594+67108864 d5cab35aba572e070cd870a19c0beec1+67108864 e418e25ea0e2d1dda947ff716d916529+67108864 a568b248c881c740a444dabe4a8cba1b+67108864 25688b79ead2563dbaa5d21e512c1aa7+67108864 4fa44475f1dfb2fab6dfa8347f43abd5+67108864 c37860c180fafb91feb9187acd355c2d+67108864 585dfedd5202b1084484ee85dbaa5ed1+67108864 ac00a4785238f69fb5005977d36d02a0+67108864 a97c8edd59cea65c679a80966ca03482+67108864 2b1207bbb44508f0738dfc60881c2e99+67108864 f0f698122fcff73e939f6b6bf22f9cf9+67108864 792a5130188e5504e23bb19dcd748fb5+67108864 8a50dd1f26086f2eba93a7b6e7035b11+67108864 7fba73104b7d27b8ca512746161aa95c+67108864 52f549dd074baf3fef610d2a81e8d1e2+67108864 e8ba8562c268fcd5b2edf5bdad76d319+67108864 6fae9fa89a358ce5c80c339e6b576d78+67108864 48f1f2fc7d52618658bb8f652fddf9a9+67108864 5c6a4bdc74ef2581d3fbf391a8d587ae+67108864 411afdc89fc1435d7ecd1569e474d6a8+67108864 a546bb78c4913f26cc4b1c8ab91ad277+67108864 fa45ef65dc70d14160f1efaa0a8e2687+67108864 
481dfbeb675e469846f0f69cb7d36d73+67108864 1ceb17f51950b94673950f088842f33d+67108864 d1e4769eca2b7eb98a84a87cf6803cf2+67108864 f13d8deca0cbccb51fcd1a4ab40ab0da+67108864 20873c5e390a5552511f8ed98e2d49de+67108864 767eadf955a415988e7e1f83c74f7a9b+67108864 85562d1acff2915f0ff6d45ad87fdef1+67108864 12547b8f30465624e8883e25e870892a+67108864 f2dd149abc688678a5132be30af317b6+67108864 a5190593f6608e3747e929105fd601c8+67108864 101835f2b47c37fa0740525295019838+67108864 107c3a97bcc74c54b565f025068ee9c9+67108864 bd04bae6fa6e5a8fbfa9dccd2e467952+67108864 cf907435f4f5c0446fe211049e211984+67108864 16d183f23f46c6cb4e492c2a496901d1+67108864 c831967b607bcc15f73a8334ea4f1501+67108864 edee1bbe2a9f0126e45526d9bb421861+67108864 011ffbc947b2aeca6a75939ef16e4b1b+67108864 68d847961bed44f8d89bb26f1bd0af35+67108864 b6d56c74fe7d25dcf0c28df056090306+67108864 24da1fd3c8d4b91e3ae907a25d156a54+67108864 4a61fdabde6a07f7e5b2a4cdcc26d013+67108864 b861eeaa168860e759d5c8f24712f7ee+67108864 a266e80c9a70f7d4937384aaeb5ee360+67108864 f8a5c5d06023f4bf75eeaa1eb5bb3374+67108864 f228202d9f58ccdc24c1dca57fa5d77d+67108864 4e61e6dba9ff5a6a4051d48b907be4ba+67108864 2be5bf4cee45ab1c4400a680df60f81b+67108864 dd66f99ff5054cbc8cf54d3c8fefc5e6+67108864 dff0cfbd2ff138d4f9fea60a96fb2df6+67108864 500ca6e6c15d85070900b6fdf3979b76+67108864 6f36698535293c13016b1abd0ff46139+67108864 e9dcd509836c212d2f6d8274891c3c41+67108864 70d0934888c6c587addcc3927bad38a2+67108864 0c50fe89bb20e3e9efffd1a0cab6d084+67108864 d4e349a2a674497462202d061932508a+67108864 6bc71e5e68fc5f83370173073ab4ae1e+67108864 a00a2da32f91785cf75e6482e59892e7+67108864 397510c84c36e9c234e4c280b06862ab+67108864 e0c00e4cab678098571417c9fda1cf87+67108864 124092481a8bcc63fd71fcc8cfa84795+67108864 0e87753f045eeb278f2b257fa53ab510+67108864 8168fbb0188889addbb3e9b68d3b3593+67108864 f6a005c9723b4ddf4cd5bc1811279fc6+67108864 beca840b56f1ee8eab55366750fde36a+67108864 944d4724fdf35af10976624297ff3343+67108864 d82f71afcdf80d43ac70d43d18d039cd+67108864 
7068a4db9fbe9a06e59350f634fd8855+67108864 bda72fa7ed6c0f07f149deae16120901+67108864 b134a0ad3152051241bb63ddb0ffa0ee+67108864 afd1344cd6247583b2e08b42a5b495c0+67108864 6d71dcd93c01a8bdedb1a384cc1b47d9+67108864 525f86b84d3f9602f1308d2c82206019+67108864 3c5edb580b5d56b8fc92b49681e22db6+67108864 8fb6f50d1873a1974b7c5947ad8ffd5b+67108864 fd510dc5ad3116a7b4424a405c9e0333+67108864 a5e463f2679cb6f4bab936e0e372b8ad+67108864 7fa2c65ea82c0bac249684bbe4ee3b33+67108864 d3c23cb68b91c0d1852e133c82881eeb+67108864 0d23ac035da7e96d139f6b6724fe6d94+67108864 e4bdc1e7327eaa9457860179765b1a2a+67108864 3838edbbd807bbe5c75379e6f7ef53c2+67108864 ee09b2bb5466e520d98348f0b4e5a272+67108864 4f5015d61f01512653964ae3d58cf2a3+67108864 c05fd85f363942029b17fabca47b21fc+67108864 93ba1a2da4ef91a9460fd65895b867c4+67108864 d6430bc6ff93632f3c571c57ced084ba+67108864 a59c6864154616b9704d769f4437fe48+67108864 29f7d541b7fd363162189ff4be354ec3+67108864 142b095b33338c13fe6a0cba04ab8982+67108864 9e4fff3f8f5a272c4e55d79e899acfed+67108864 5d782433b0615d0c81f40d45b5ecf4ac+67108864 a1ce4f9cf8db339593655aaf432dc372+67108864 814f4ba561bd3459a7da97a7fca5f3a2+67108864 9a663484b49fd5a5465906760f4677b1+67108864 b4fe2e513982a4efb17bf442ae8abf30+67108864 36ab4fbd80eb1525f5f078c0bad27961+67108864 888389ff68d5b71d3a38d44d2a5f5321+67108864 af94eb7d687c5d973c3ef6826986a377+67108864 7ff861173bc6125ac2729f82fb4f76cf+67108864 f04b119f090e7dae55e982c94dfa5197+67108864 67b095174cd6d4d5a97c4c10926488c6+67108864 54963c6c726ae113457ac8628493bb36+67108864 94b063d6778371dbdf74c81ec6e00705+67108864 efd856b38a4127578ef3b30cf2f8111b+67108864 e9a78aaf8290e786d31b52e3d280f25b+67108864 acf39db50c314e81d77063e1363b0d44+67108864 f5ed13b763e78ee4d4ef45c754ddf2c6+67108864 f0a522930f01ccd16a21c4c836d81486+67108864 638503d44d5f70646c90cd9c1e5ddad4+67108864 32375ece74a773563332b435121f1f69+67108864 206a739575b9f0864abd4f01c57e7a6c+67108864 4b8ce29ba1e51c563b67abf72ba2559b+67108864 5ae2edd2acd049b2bb6d2bf398e0ea9b+67108864 
f142e90252904e409bf460290d3ef7de+67108864 c168023fec9fc0d340f647a31b49d68a+67108864 d46018096ee511db7264de70de2de130+67108864 a4ec756f5ec69838074d7c51e3ce9366+67108864 657b7662f9fa7ce790eb6b6788bf4fdb+67108864 88b268dbcd8cfc68e322a6593a3f85a9+67108864 f6abb1603254a9c704c214208e45ecd8+67108864 53ba009b6a5ccd6e5001c099c97fc3e2+67108864 5304881bcd7ae73781a53a4fdb8f013c+67108864 fcf742e76f89d28953997eb339c4d2ae+67108864 e732f6fc4718c7ab9d6832641ed9850c+67108864 de4c892de00874bc9cd07918393ef6e2+67108864 9abadd8f9c1dcd464f17c45d65e6c4ab+67108864 7f1971a1fd92794421c07d50771b404a+67108864 349595e945b11955f64fd8899611d58e+67108864 d58073ab66e3f676a560748bf9898ba5+67108864 8f8b26f5d44408c19d0a3cab03ef67cc+67108864 38bc3e0c4925c47fef039917090f69a0+67108864 0ca5058c7c829ff871ca3868332b662c+67108864 5ae9155c4b1a25c37ea00d671324c6c4+67108864 7028834108fafc13240ffe2021cf7027+67108864 f2e194ea64bc8a0dbd5a86f085f1e934+67108864 298bffc3352973f28307d993a8973c1d+67108864 5e43691e50baacabf4a3c295649f2b2e+67108864 727520dafcd59f806f52ffaeaff02bc1+67108864 706789ae77dc03640546c8e83e129528+67108864 b5eeaf5cd6a94607ff7247eac7eb6455+67108864 7f8b151b8ffe117beea3b1934f9f419b+67108864 bab5b9d0cfb54f33e79227643fbf0b65+67108864 a59d082d7207bef94b4991215b134a10+67108864 785056e6a4426076b734e608cae86737+67108864 47117a5988bb128528b528e4d541eb1a+67108864 1b7085a666ecc988bd37eccf3e93c4ed+67108864 bfcbe4d5f5d7e4678f198d60996f13b9+67108864 3f4f1303a528c16d0f930da14a16b230+67108864 9e9140f33f9ee9b5fc3907b58eb051c8+67108864 4fcb1c3af4d85fcf0ac1095e14a42bd4+67108864 a6be2e79bff044c13ce21c2aa9c95e08+67108864 9e03214d420e916b21e2324a432dbbe6+67108864 1f0b58dc5d2b1497cd83fb9b1a2176f1+67108864 44c835860f76c96f9e0837e5bae8dd54+67108864 f9d33a3817dc95252c3385a9b620de96+67108864 c9fb1942eb1ca6f2df777a537bd717f4+67108864 e56f8e279cf628f2575b9f0b8df32df6+67108864 febf0b9a281b32a2a20b9611264f93e9+67108864 e6d5ad53ed0acd1ff102f22fe495a7ac+67108864 75435e66e302aba8c7da88b547a2517d+67108864 
3bae3dca82efdf59e588345138b08498+67108864 e2e2c0f5e15558e7dce55f20fb52e254+67108864 f4c5394e402d38d8d4c5725ba7eef920+67108864 2580fe3b582a0b4c17a4ff4bb9ebd673+67108864 ed5992cc4a9cf4daabd5cfcbfeb4b896+67108864 488d4c76a978e3265bdddfcde78fcc82+67108864 04fe93c825d53638d60d38953b372557+67108864 ba6e44ea4f79f61f907e36253c0bf958+67108864 41213b819b4bfff9f17581950193ebe0+67108864 269b6e49b5593ef54b3d49032e4edfd4+67108864 e267c0d9870a736ac13c5a2a01cb23fe+67108864 804a37500b781cfa66441090c323c677+67108864 33195f727565738c7f2e4a70c6920d7c+67108864 e7c94488ea6b2408b8a81d404bab5408+67108864 48b336d69d1188f5ef6523151afee6f3+67108864 6cad6bf178aeb3f6279911a1153b0bce+67108864 9a674b1c710c52a4994a0670610cbf4c+67108864 f6a38265f9473e02057ac4be6bee7170+67108864 833b1f028e71ab42dbecc4d183da4b07+67108864 6839ceaa235277adae36d5587640df0f+67108864 c4835d24adb63f08ca4311469d82587f+67108864 040e2f87dedfc549528ce3de53688dda+67108864 2984c84d1d32b7f52ae49bd838b8c016+67108864 33fce73dee1e203326ffff8fece4816a+67108864 f59f8f10cfa7548df73184ff86acd661+67108864 fa82b6721c15118eee3ee95b41ebc19f+67108864 7b6db4c134dcc09f5a851d81d6fe2fcd+67108864 178ba3074b954c0aca5910dfa78e341c+67108864 f44afa76eca4fac8b8bfaca8b14401fb+67108864 90d717e686fc0cfecc8b92bff5e3dc3a+67108864 4c497db5abb4e6c08762c66b51be2864+67108864 3f5f220f3749bec9664f2a5c22714f0c+67108864 36165d8271554ff80e9ad204d6f41c86+67108864 0cde3c478fe6d14dfc7238f017af453d+67108864 d22407672d47ebf442725a0365a4f9f1+67108864 9ee04e14feb275234673409d2efa97ee+67108864 1431699e15fa4cef7dd54e6ed69d9cd5+67108864 a91b69896d9cfe2be169f4dd1e585b24+67108864 8c35d02a5b3961f7717828239b49d21d+67108864 c4d11536015b05bf84926e0cef77dcbe+67108864 b14f3f000f174c9b63de76d6b7f7e2c3+67108864 2732612254c0b8fec0c533017bf946bb+67108864 87b760ef4b34ac9d5ecf0c32a2d07aea+67108864 7566c2a133f0f8c5dd31d49a6ba8d5f5+67108864 28de06e61335558872188129073eca90+67108864 384ec6e77e4581804ef5ebed60c49806+67108864 460f0d01ee80f6936a36202c52d1367a+67108864 
f475c729d470dba96ad5a355223fa3f8+67108864 75951867b767478687a0ed04031005dd+67108864 988e9032fc26f255d5e65f896f97c386+67108864 c72d6fe3a565167215c4b8725c71d20c+67108864 03f2a74dc6d301e7eb1440e7af0e8aab+67108864 0cad7f4c65c691932c7490ec31f32a2c+67108864 b86eaab28570a18618fcc5551589535a+67108864 5c2e755aa09e7eb494fe2f380cfc18bb+67108864 7b0314633fd0276ba4a7cfb18faf2a63+67108864 805596626907ffea2d90c29d1a8bd183+67108864 bbe42cc13151e29901f2ff81cd16ac10+67108864 96f514038c76fbc50b3325ce0a50e260+67108864 73ec6c69f79cb39a8c6813d6f27082db+67108864 9ab13bcf77c6874f6843b2ce9bb18cb0+67108864 4570482366c5811d199f47a2e92ec727+67108864 dc1c567a96316d293feb2fae90a7b8ee+67108864 7b2bfabe105b5c19e70bc18cf7c62320+67108864 513593f9a7b1b1465c8e20a4daaa8e2f+67108864 6442521c08fd3b7b6772e71404c09d9d+67108864 601eec17c302eef232cdc707534a6c61+67108864 2a7822d5f641502312296d507301ebca+67108864 0805d7819623d6af391ea9e072ce9bc4+67108864 152c643d2ccad0674c38a23368a4a1f9+67108864 b1a4f45eba2a13984d9b095ab5a16e73+67108864 9cf64a5302f8b9c21c01dc5f068bdcd0+67108864 0e82e1e3f2f1390be5ca4560b18bd96e+67108864 6213cb25efa2a9bb524be9a4f273355b+67108864 a04a71578d23bf26517ae21d101f9ce7+67108864 c875cbbd27c30d3e2be026fd11b28011+67108864 1375d33fc6949a826a321ba2c2d21d05+67108864 aec41b74054aac99980f2047d462757d+67108864 e672ff1a260ac97238a25a95747a346e+67108864 1cf87c401a5084c740466f8a724132b0+67108864 771c370c42eb026842a56f533af4b8e5+67108864 5139242530c8feb094f7effef56e7305+67108864 1d4bd13959bb21b28b48363fd367fbbf+67108864 e920e35f2fb67d9b34b012b95d8e2c89+67108864 8facf133a1ded0c098fa41c953f63b89+67108864 6c32393c950f9062a02ca4c13bd858ae+67108864 6b2e28a4fd767ef70b56589f3d2f29f5+67108864 f2b0bc69c431be32a87fc6e1c097e456+67108864 adf88e147e6089df2e1f44472d4523fe+67108864 74aded23685105b0a360e006946a403a+67108864 62b19d7d4c3342d653f780bdeba26e90+67108864 0ec5b45cd3a722baf10c683c3be0f1e9+67108864 9da6829a21b6b75cef6b5c6f17f5e07b+67108864 826ffa05850817239f4fe38d60c01ea5+67108864 
adb1cfacdf09d7041a55433f32d32c4f+67108864 29e8f3c27f97a7ee5f00a15196000f29+67108864 7a5753093a96c622e7bc5bb762bf65c4+67108864 1307b7902955bc0588261c48d684dafe+67108864 88d27c9d828382625103e1b1b75ef6e5+67108864 7345011181afb92c1990ed7656a0eb9e+67108864 22ab24fe279e19a3773a70f42a4b37d2+67108864 27446b06a710072e7439388ef138749d+67108864 27353af04e039140bf5560bfb12b49f0+67108864 2cf867ed85d88a901025580c0d42c163+67108864 a2cc7481a86b8577cd9b1c36d81f9c54+67108864 63fc71a9df52da41f1879db40ae4683c+67108864 cba5ff71d3b6c3499e8c9f9ef99484d7+67108864 621818664122058078b2669d0efb60d9+67108864 544590a66aeff05521be3c0c51009803+67108864 a56212f4d9e0072dd8b2c6eadf7b2d9a+67108864 2b19824bd9c1f9bac0c87163f671c56f+67108864 c039c43ad2190e1be0cf68376bef9e76+67108864 6c616beba20069803ac30b1be56aeae6+67108864 28c4d7d0e0f45cbc9bfac0bf72beca95+67108864 a3b74f9d7d729e0443b2acac559aebe2+67108864 67d7c31bb19b81d190cdf58c92a0762d+67108864 6ba1fa534fb5ebd4382d2f37ab9bc16c+67108864 5c8e4e4404b987be63f37942f38b4d17+67108864 b3fdd9a8d97720199e9fc8bdf5464ce8+67108864 9e41a0725995633c4199fb5c79705146+67108864 d6ed794aaa78d7e338c9b88980798d57+67108864 bec7ca93e898b711f8705ed27815bfed+67108864 73d9523c774903cce44f37b36e350b33+67108864 3bb892044528096fb01358fbcb5689d2+67108864 ba6a6d4afd22b52213df85f79a30d251+67108864 b1b5fddd4d4dfc8de7b6e9edd888f24e+67108864 ad1cfd8c575b1f4df0d9481bb25bfe10+67108864 eb42bc382bad6c31248953175c57fbc3+67108864 f7ff3fbcddfdeff776851bf7f0b91708+67108864 096d0a35baf62c6267a588ec20b0b295+67108864 213f9667d41a694c24edeb6723817425+67108864 a1a3102d7223910a50f343bfa9f567fd+67108864 cb20b98b9cfa88ab1cf0cde7e92a963c+67108864 cfd396b2c4fdb101d220385445bdcc6f+67108864 feec74cb51b17c68e03caeaf520a013d+67108864 3b0eb66cd74ca88137ca5f3ba474c653+67108864 4e564d3d903811897e5970f31e20cff2+67108864 4b811ae252ba5070d60c6dfe5f6374ed+67108864 139ab8e8f46ac3bf236c23de49925413+67108864 e8fe58e8624815a50badebfa4d6bec2a+67108864 3015e8601d6eb6e42e9cdbc64664f32b+67108864 
d77de37630cf3560ad50d02352d0c514+67108864 abc01fbe76ef45bdf2835ef06e4541cc+67108864 4dfb8d4243e6049028dd1fe43fd2180d+67108864 3220ab3a86613f63e0e5d4bc5a02afde+67108864 fd77865c46669253284d781a553b8a57+67108864 851ca8a64dca3edf8eb645a0007aa9c1+67108864 16b554ca3194d8b58cd9be8df8b7a321+67108864 9d0b9cfe67898e7c9f58050a5231e4b0+67108864 757a90985b5b92b16c02642f7c36fd74+67108864 3857a258764c94340920f45da85541fd+67108864 40326fff73456df9b895e6ff137fdccc+67108864 63f1d9a514d48754916bd33722911176+67108864 4820a4a74d6d4be30f4cca1eea2fb507+67108864 636d058d10bf9791c3092a8443915c9a+67108864 916f4f6dd7635b871325c288fbde0475+67108864 e7b2a6379ba0d49e7017bae5d4e5469e+67108864 c17c35e312dba5bd13ffd23cc2c7d939+67108864 7d80a58afa6735d113ffee5228a87775+67108864 99ecaa8b85b99e85427fa76657681647+67108864 602ae0f4abd8be52e76e9749ab8d8766+67108864 546e66b992c13e1f42fb03ebbe21d20e+67108864 46be78022b380d3b1ad9a7d6d48d4ffd+67108864 632596cde5b56aa0ad4e5a364669bf30+67108864 df8e7efd77d29fc70afcdfb355bb7002+67108864 ea0b3c954abe83b54c8c2b82b4b29217+67108864 1b7ff88f8491f39efd6bf74caa1b8ca2+67108864 208469377ac08802354de7a56d216c99+67108864 b467a3def115f956b617a4b5c066bd38+67108864 1208f0cf2e1953ff2d218525a0e5267f+67108864 44d7cc36d50d20860983f5a0869248b5+67108864 ac5c5552be8df8b790863e15bb50f3d5+67108864 0eee3089f9e662f5593a5ebc27b00532+67108864 5debd7be66b6480762a64e42f03aba81+67108864 e6a2bf3e08fd8ae2532b5dfc07e61f4c+67108864 64cb0b5d826b614aef5d95f17da8303b+67108864 3df707ea75c92e036c1185f046d5a2a6+67108864 50a7f1474a09c58d74d64605bddea6ae+67108864 874d107937f315b0b8169346b0630074+67108864 8e67dea16e96a62bc9902954d98a0f66+67108864 8d97c8703fac17ff05d2493233a67db9+67108864 0b21830cbcf73b7742837500b4c815c4+67108864 f2d055d36a926d74b87235c598cb4bf8+67108864 8aaf0b8589579bdc8da894fe167df948+67108864 fb64252c12e1fe1b64762c7f2c476dbf+67108864 3e23aaebfe46253e265f1cdbe55e9ca7+67108864 3ec7f774ce71e3d5b05a1b428357879f+67108864 870c74fbba8c1c5ca855374e34227770+67108864 
c847245ea26999c205496a2edadd1f0e+67108864 c9c01dbd3df4a60e85d49a09c7a78cd6+67108864 1c5409de5ac0dc6c56cbcadc39de2c88+67108864 73081aa5140b4be500081ff8097478ef+67108864 edd831590a9ea992557dfae2ced26ec3+67108864 45d83cbc0c27d34f54e8ab01dac78acd+67108864 29d0340872b053b07c17781af001d700+67108864 6c707bdab47d7f86926f3ca83d46a5b3+67108864 c83a0779a209c7b6d36008b3b5bcb094+67108864 022481f50f42cf63f4931425ce841e6c+67108864 3727c014ebdfa8fd044c11fa7e5fcdc4+67108864 2cc1df6a91333b4b9b1a1752d298d7f5+67108864 6455c34e8cb08007b5b538aa96335cd4+67108864 c611c113c44dbc38f2b7dc0e2c8e789e+67108864 72d91fa8c3fd9ca40fe9ce4abd6aef3d+67108864 269ba993ba237bf6fb38573d129e29be+67108864 c2647496d2d5e294a4f59d0b321e597c+67108864 8652c87dd888540694383e6c37fe61e6+67108864 ba173508a36e70695d67f82bd002ee75+67108864 6b24483f7cba1ee77e92965c96eee803+67108864 7dcc165fac7975c3e390ab595ea84215+67108864 34de3797f59cb379bd7bc54eb07c2159+67108864 2adc45907715d755fd4f422b841609ef+67108864 7a46f799c381c18cb2ff2812b5b845fa+67108864 1986cd180cd8e948cfff091ccbb656ad+67108864 622cd1910b7f81f0c7170da1a3362bca+67108864 3aa879b74c7dc6bb0858636638d2b5e5+67108864 0dbf410b5be606ff739a480dec8161ae+67108864 f4086a5dac293be83bd889b04b7fd312+67108864 1f63c6f777a762cbb96169b2278ada67+67108864 96b5c1defe59dabbe1a380a8bc6056d6+67108864 84b13f87fa6c3ab2d04eb93f3505c98d+67108864 dfdc2aae1c1f49fbc36b5ab9573cb64e+67108864 318d865d048de4b484291b88bf36af44+67108864 651188cc20eb0df1c8945c533fa2892b+67108864 a8256ef0104dad714359c01c8059499f+67108864 5534cf3170d3cca00e8926e811ed31a9+67108864 35097b20dae2e31750cebc3a4046ff7a+67108864 08146f4dbf346719da9351d806363c3a+67108864 dda60404102211f6f8958f1f044e2a4a+67108864 f0353b389211bb2f3787147643e0293e+67108864 42a778507d32f3d16563d23b17c836e3+67108864 770af5bcc4f42e26bfd94f11b2de1111+67108864 f1d6738a1979688177fb392dbc3ea8c8+67108864 40be76d3cc1e1f0700ae7bee5b084b88+67108864 8560b693afb6ec7ab75032819e1a0737+67108864 9ff467396156130788b0de8b3abbc835+67108864 
fb81044f46ee376b4ea7067c0e9474ad+67108864 5aa3aa18a8eb9fb635b218b5ba85713c+67108864 4bf897273b900fc96efaec8b27266257+67108864 601cb544933cb27f9b8322d609c24379+67108864 0f845c76f2cebf936abd4a8acee42076+67108864 771a5507849272544dd28835aee1baa0+67108864 82ad48d9641075f08789f7e9dc4fba26+67108864 30e0fbcc1fd402d6d4ee077dbbed9985+67108864 9c3f4cff8a0292cabccfe492cc978c6f+67108864 6413966c9aa54f96e2426600936d72d3+67108864 cf903ebc0656b705c26d869d455405ee+67108864 0ed47f82adc7a5e5787715e6a43d9fcc+67108864 2a92d60161816494b4ccbe4d727e021f+67108864 6d4c65315c56f53ad4955b2cd402286d+67108864 3c6d8a7ccd14d9aa75d4ad2e74b7bb28+67108864 982dc1ddfea2915a10dde69ec06dfa76+67108864 03b9c546519fb29aaac3a4bee52414b1+67108864 7803ff4e38dfa625745189e39876c155+67108864 844ea317c0f0095e2f51182702c393f5+67108864 2ea4a37141cf743d79fd884b70f4de05+67108864 c7e2a60485c19ffd6ae4cd5b61b1cf48+67108864 791b84800ed2cc0869bf33b7d85cee77+67108864 6f2b1d1f71f26c1f5bb606034db357a7+67108864 ac185d4ac6b4a85d2320662aef29f060+67108864 2c30efc1fb13aa0043decfb8bc1d1463+67108864 b421b08e5745d876953098e2f2baacd4+67108864 307e458977fd32d61445babb4310030b+67108864 3a6d5736dcc1ef9db6c694dd36e6cfa8+67108864 7ee43e9000accce3462428668a696cde+67108864 a15a30673558f6070996b77e5f975285+67108864 d358ddad7d06cf521cf9d6657b51cec2+67108864 eac6f5c1a8e2cb363c89305c7bfdbd6b+67108864 e5a87afb73e69adc46d79f57b192b7da+67108864 bc7cf84f4d954131ac5e2df31b31144a+67108864 5a9e7bcc8bbf16f3d64545f77e446642+67108864 b1bca9126d855cff7680d960f7a67a45+67108864 15eefaf5212822bc3e0e06916681fe4f+67108864 2ec224ffa681bb8ebaf65b984fe5ac2d+67108864 c5a94822c9c89f4f6627d288747f9b66+67108864 52aaeee57527a75a3bbbe911100c0361+67108864 80fa3e2b96c7823d8e9979d67f41e98c+67108864 b361c3601325ae469be51356e5804d72+67108864 78a933d60f6ca62c1df56b9b11a19160+67108864 98c26a9f728291a50055330fa4880f93+67108864 4f5030e434da715a0e5e2b6f85f0fd46+67108864 06f2acf241fc5269f94d85533b7417fa+67108864 8e39b0432f784ab407735d0d9667db20+67108864 
3d6d01636a230edc77fe2d2cfa8c822b+67108864 af54747b06874508412e92815d05fd54+67108864 5f9beb261ee7800d96cf0068707b1419+67108864 8f2cfef40aceb50cd10e37d3693d340e+67108864 e8871ce0905050da9fdd140f96219b34+67108864 1c907eabee3b1eb051aa5bd16ea8c40c+67108864 aa50715b07f837cc3f9e0047ea08ba4d+67108864 d4f89f368239b077b28902df2291ec7d+67108864 513c7bc368665fbbe551708bce375f54+67108864 b5ec7a608fdf46f63b2aace86b22ecbe+67108864 37180c46fdbb4da4ebf7fc85604cb6dc+67108864 5594f89127ce8cd5b5b65b15076d0017+67108864 85ea487408db514720f43e8e61013a9e+67108864 a58bf72af3f393a3bca9f78502f39f09+67108864 3d63ead9a1ca876fcf472a36e437d5d3+67108864 e437dcbb269983af98f515fdff1b8c7d+67108864 76e1b803f60d796a15457fd006290246+67108864 eb4473b76001e5c65ac412099965d200+67108864 18e85dad25031b80d870089342d3fb30+67108864 35a7541d53a64feb7eddeeef2535586f+67108864 c2fbf1f0f1fa1359db0899683894f33d+67108864 6794ee809f5e58111792320303f1a8b2+67108864 d848092b6abefd75c4af817805da1cfc+67108864 2f7471f842739e00be1bdee2c3af96de+67108864 33d3bf343d5703851cf571cc0169acf4+67108864 73d934696c7ef7d873c13a61c6e93610+67108864 eec4ab22ea8289a12fede9936d06c47f+67108864 f280a1f0d2afe437f560e2d37b3d6f98+67108864 d636a57ea0e9b1018424a2acaa6d6a6d+67108864 4025879d6bf913a95dd289efbbc1a289+67108864 d7a116cdc7c52a7e05658d3ed9e24124+67108864 e3221ca9702dc74d06f168840663f73f+67108864 4165ee4fd1dbdca4c8879aed8a2b47a8+67108864 81c0cbde0e9cb9b1b141b48ea1a308cd+67108864 bff9166a75515e5f10af5538dc303212+67108864 45b4ca425e17296da803a51e0fed70bf+67108864 60e983493191b0df26b65c8e7446e9f0+67108864 93590b8affca8daf37b17844b5bf9040+67108864 8621fb9f9c1023d9faaa6eb74fc54a4e+67108864 23275e63a4acd8c7af597fcb76df373e+67108864 f6c082b0018e2c7cb81b97dbc76d5276+67108864 c1de2367ebbb8f821924cc9829caf433+67108864 28d7fe0b01343515c98152458e7469ba+67108864 d30ecd76efca9a45fb6541bf982bb043+67108864 c19fc0b7c6131911ec86f395b32942a1+67108864 fee25c10fa170ef91d6716546c325503+67108864 ba852a7069532ec2b50d30a2b8680369+67108864 
eb6f12e52f9742085af5b111263a8a31+67108864 a3c5a938fe579707f06bbe71a445f57f+67108864 103fef8dfcc65c29b0c4c348d6b7cd5c+67108864 891dce541cc4c9fc0cff89231b7081ad+67108864 63434747d0b4b7677fc028fc842a01f8+67108864 4debf4aae27725e624d95e8d8f092790+67108864 b8b34bd97e512895a509222db622ed34+67108864 37e2b5b9f31dc0ed14f45af72827b850+67108864 fd1093368c508a9ccb7faa058087080d+67108864 3ec967bb33e9b55fcf32b63986399481+67108864 608325c1df26b43099ff3f539eaa8534+67108864 b3bb50a8d7a2af13df953ccd64770037+67108864 85debc01db61ef231b17d5adac609899+67108864 8e088c719257d21ca6c9c14c437ef72c+67108864 fd1c96ccde6743a911fb60a4433fecb7+67108864 a8f426581b54a0591d2bded643fde0f0+67108864 19ceaaaacbd811496d178ab8be3d721a+67108864 603b2fcacaaa2903771c5c0a7be15c6a+67108864 ee44b1f92ff9204befa52460d6ac6dda+67108864 366c305dd4378452ab3e9c7ed7b75e5e+67108864 ee826c5bf9d9d25c12d74cca0cde7cde+67108864 cf9ac2e024028adaed51a54e6dc70cd6+67108864 eca814155c1024562187292140170356+67108864 ccfcc357515eb46b9fbadbe80e8ff20b+67108864 3699ec34d183a068422d10b40aae0631+67108864 8daacf3c17a7273ce86e75fc555ecf70+67108864 b23cdd04b9fa1e61b972c2caef1577a2+67108864 990eeff6f56cbb12ecd6df721cac3115+67108864 2de9439dbf73f13efcdad3bb1909d5d3+67108864 e2cb816e92cff445a13fa9754c2bd2d4+67108864 892fec0f10624ae2eab5e4a833a53e3c+67108864 838ae0ca9da00f5c2621af49ec562448+67108864 7278188b1c82dd51460dc4b8c4557cd5+67108864 3c9d0798f3827f8363ae677bafbe9a69+67108864 910df3804a999d1414eb4806bd3b91fe+67108864 5ec3deace8be01db561690bf2ad6fcd9+67108864 42245551b2225e0185aee98a3bbc96e5+67108864 024d1b37f61012e8b11bce86417c5b66+67108864 bf9d48c28504eae8ad723c1e723e6c1e+67108864 daae1d560067bf2c461707a8063fd284+67108864 f9663b2d732c9e954a7cb728978f15fa+67108864 fa95a4fdcbd2128496b72c4ad304065a+67108864 bede18218d8c3c019b3bd2c873b94543+67108864 5ff434025926d30ae45c2114dc816257+67108864 b2cde4f1fffd89abdeb9f585108ed9e7+67108864 d54e21fbd16a3e8c38055a1c10ff6c1a+67108864 d49e4c6e77ee68fe23d34a679c792535+67108864 
c30788084c79acf5bdfbf2a44a6486c7+67108864 142696943bd0289004de141776a4ebf3+67108864 23253a95877919a9e15b9496d171a990+67108864 a9fc4555af8829d2720a2b308772ff19+67108864 4d97246ede727b2dfdb72568397a127b+67108864 55b1cc3eb96ef940a8872d2a828abf64+67108864 70fa925c5553dd08873444c7d0ef1b8e+67108864 76c3db8722b92364f03f87eeed8fe53d+67108864 43ee24bc7c125362797acd869881fdb2+67108864 6c28ceb03969b041c1c189068f12bab4+67108864 7078ef25b761e30d51aee9116c5c59fd+67108864 88cf2cb45ee2711c1477e290a6cda42b+67108864 6cbf85a79fdeb004196684e63e45de3f+67108864 0dee67d87e5628caad9c3cd19e8bb38c+67108864 5313f75103edb9ab1eef9d0d9e987609+67108864 3d8454608337215314df099fcde636d9+67108864 2864e53673cadb6e1ed4fed72ef6360d+67108864 ef77259b68a64e9db567fcedf9709612+67108864 8bd0df0b2999c64d2866152d868dd4a6+67108864 3853cdaf6af0a85b0cfec54cd0d2e7bf+67108864 26d95f4758780a9eadfb0fa4b9fa9e85+67108864 bfaba46d337bfc45ae3dff8a5eed0d2d+67108864 52098bd03d910f2947747f744a7e7cf6+67108864 4cbb3854d9975b3e3d3105257aba7ef3+67108864 e06499f3a15a3dd2699f6942c2c0620d+67108864 efc4c80437412a4d5b0baf65a3b5550a+67108864 c574766c117b409c08bd12d82229f8c1+67108864 52af621352517bc7b5f7a37bba1e9a7f+67108864 f2258eae9b00f0837441c0e9ca024308+67108864 e57077ada195464b45117e44ce43292f+67108864 50d87f4c461824c081eeea80f9405783+67108864 2b538fca02bbe2e7d32bd0b58921e82f+67108864 2db2d0e983e1e3507de4cd9976a31017+67108864 7fb8c5435dd3eb83ce94e88278123613+67108864 67e61af3278ebd337d0155843024df19+67108864 9b65d127cf41e54ca6eb215806c3c78a+67108864 76b9faf2c24bc448817c1ff94f7e0aad+67108864 c04f7e671684c411d736a4f217e8c285+67108864 c14a4e1dbef1112fe60bbc8515e0d6d6+67108864 851819a0f279d5b0958e5d718c5865f0+67108864 d1986a6720da6e1abe012637b5547d57+67108864 11b0f93f0f5373729978dbc17362ef97+67108864 8c1f042f1c7f465019182de726f02350+67108864 2a455b07fbb041db3cbf54ade540752e+67108864 647e3a6dc6b99ac5866d0d1c4e2e7352+67108864 7cafb10d4d750b6a70b6de5f064eeb0c+67108864 d6ae66882a0ca9bba70d847e66797896+67108864 
0d01a0eee71ef4bee88f9453f632134b+67108864 ae2140209ba681364275a26e8eac217e+67108864 40b675b2b11e70de68407f0f70f1f03f+67108864 130d9d5d51e8cc5096ba667aa9364c97+67108864 380a783cf03f5a3cfeebef66aee5b28d+67108864 e56360b5cda00f91cc3ba4b51633fdb4+67108864 24d4b59098f119b5b9fc5818f16d6aab+67108864 0b593d049f80991ae59cd330123d42be+67108864 30826cd5f49acf23070a1ded8022808b+67108864 cff3def8b5cbd55e5a1679f9c8934923+67108864 2d4a11a94554b1040cf8904396b0680a+67108864 eb9a072fa8e648ff5ef10b288af5fb6c+67108864 8046144d1a921d88683205b893c9aebc+67108864 7730f957a9be36db7abd4962afb3054f+67108864 742a9f4fcde793b644c13fbb4ee3e975+67108864 a35832757441d24aa04836af07b4c3b5+67108864 0ba07a50ae988c3506b7fa07e509f5bb+67108864 b39743548f0a682c65f277764ff9b097+67108864 21098bc8efe18127c66082e6222bf7ba+67108864 6f21d4c116fdd221692ee09036c8e985+67108864 4f3e46fe795367a58ddb917439850237+67108864 3531ce816ce7ff198f668c7119d7e339+67108864 799778902558e13feecdb35008f64335+67108864 773a7ea46671db445dcff7cdb3641fa6+67108864 97e489ed1177fc6db932af81478b9a6d+67108864 987e799850a5a5027ec095af93e53250+67108864 3a531df05dbe8224cec6fec6f411baef+67108864 d2ee610b5ebc5e705a6029c74d86710a+67108864 672a73b64ae5009c06438b755549241a+67108864 1e7185a9cf5ba378826474bccc51370b+67108864 e489af9d0c3ffef982125ac3e016391e+67108864 5dc7dc12e9cf09150aa39798f16dda0c+67108864 764fae2a1a5c53bc9b7725b8d48e17a9+67108864 4fcbea54f6867700008205dee1a55ee1+67108864 2d7af75fae4b8fafc9198432a127d3e7+67108864 71c3f297de8c416025af0cde00cefa6d+67108864 cad383c3f7341c192398820fbe86e665+67108864 d3ee90d3f0e49c76fe7d5ded5fd8a13d+67108864 52b2a629ea9a32e9b3cd3bd153989b6f+67108864 267fdcb1dcfb1b721352462501444d53+67108864 283ac9aded198745e16d3e2172fe59f7+67108864 57025c826ecb06f0321ce3e0ed7b2e4b+67108864 7cbe3a01c92f13807b15ce870a160fec+67108864 de7f59f08ea07551bdf2eac2b5b346ae+67108864 e70e70e3dd37ec0e59e1109ac6f1b3b8+67108864 a64486d04c1bf082e27e323f3edbb6b2+67108864 3af2e24eddb3f079a43222f2c521a4b7+67108864 
4a3ec05b28d6c7540660400f0b5e2132+67108864 d34ae3feefb83c1921cba43e1902a624+67108864 aa0b774639159abcdfd4e87cdbb22501+67108864 bb06b89f2760bae1e0205fa5030827af+67108864 e5f240b2ddec9bf3f9b685bcfe095dfb+67108864 120b67b713bdb4092424f299143ee075+67108864 f9ffd0f5696ed790a2cb0a3bfe7eac57+67108864 6c8d86f09658c53d0b0b30ee42482271+67108864 d9288b957174ccb7fba72fc4ea3d761c+67108864 ca4aa05a80f2a62000d32b9b3bec7e77+67108864 ce9809f906a6c05a0ab0fd87238d083b+67108864 ab0c00cda4e5e7e5ab426c40dff7b705+67108864 ec73f7e70dc8a41524c25bb3a48c34a7+67108864 1f073a660524c25e2613feb6f05eb72f+67108864 09542461e2d26915c2d6ad6d2ce37c22+67108864 2ae249fa2dadd829eb68f9706c6fdd5a+67108864 34a60d677721a4f6106aeda9486731c7+67108864 d5c3016a253edd4be3df2a2d9e205fa6+67108864 aefca039b38116199e66f3ca685ff711+67108864 a952a7a8e82db93aa5ece90dbfeb558c+67108864 df3e8d76a91b95534365ad17c7fd80ca+67108864 683acf860d1b972852697e75e82d4377+67108864 4b48aa46659ccc66948b1948920c8bb8+67108864 75921520ff8976da048e4f48b65713fc+67108864 b1f9b9541ca3df1ff6d644527853a56e+67108864 f482ed10f9bcc8839226c13374dc24fd+67108864 89f6a2018a94e0dc75efebbfb3298ea2+67108864 1116d391c6aa0168db01ae84e4a6974e+67108864 3638536201513086e8a6ab97f6b28013+67108864 e0ae416fbd2edbea670fd4ab33f0dd53+67108864 cab05291139f2f9695f80fd4f56eb9a7+67108864 2074dc1e5b43560a7f0dba097f798599+67108864 499e50cc59a0282465d7f5d7ab00ca3c+67108864 c8d5ea8ac6383e69951ae03b92f4ef6a+67108864 028684dd6cc229bc52bc421784696473+67108864 9a05421a96b3657e72306ac3cc91a14c+67108864 c28927241aa6dbad0408cad3384d4887+67108864 37e3154fa87dbfdfc1508a17d9a539d8+67108864 bd57b88617da02c1c547a534d4cea6ac+67108864 cced51fa202f8410d4b0c0cdbd839e43+67108864 b2a3991e08ed41183e4e3693b3de7871+67108864 6442a997170bbf2f64c2f423c3e58a02+67108864 afd77bcbb63d84d2a59a6ca20921187b+67108864 54eab8b24e4e9fa88256f15c302a77d7+67108864 d3affc2d9a34a998d7d189b9e5a7055d+67108864 c457931914fa11b139fb745f900bcc5f+67108864 51ad6a806b0eb57d4f9e43d713a8eb23+67108864 
42a1e629c0fb3f26eabfd15560f97b32+67108864 f878ee0ae55ae5e0f57611ddcef39b1e+67108864 fcb4593f27accd8efdf42afe593a29a8+67108864 d61ab1521fec675ce644ec3426f49833+67108864 7844dcc6c4a53d645d8d6be11fdde7db+67108864 cab883dc96650743eea243f830ddfa99+67108864 e412e3496b8e288b0f84f3915f78682b+67108864 3c9f0bbfa22aea749ad1003759f90871+67108864 6476e0c1c60207f0c5f82179cbb1eb55+67108864 39803ea71680efa53d93ecf071da128a+67108864 746567896b262ea67fb15702772272ae+67108864 3ebba6eaf8d320520c1ae04c69c51c54+67108864 60ec5ed8a833afb4065e39dbf649ce3b+67108864 378ed3ee18558e18b02e9d864c740192+67108864 496ee9cfb30dae1cb7bb7d7be99b4f9d+67108864 32c5ca216b69a9024cd785f3c6f5727a+67108864 101aaf4e6cc6fea9372116cbc91295dc+67108864 26da7649915f412dc9e353ae77439e16+67108864 81f8dff12b4aeabe610806080b56cdf4+67108864 b305bf18afbd40bd57fdcbfe80a1f6b0+67108864 34dcac449fd85a37e468b5b747d860ec+67108864 dd7296825e900f1a5c48cc77887b6d26+67108864 778bec1f5aec275d7a0747fc2f1edb69+67108864 a1c96dd0819cf0eac95edd0ab5c05a58+67108864 2bd7d0ca76c8f4d0efda3935575cc45c+67108864 1aabfb4e8d4143022443335ff4326f1e+67108864 ab939e65a4d32b305261829f2abb8b2a+67108864 d336dba0e326ae1e24000c00c10a3e3e+67108864 91da82d2cc82f55b859e2233c76e60f3+67108864 5e8665e03aab339a9f7da3c529a2b612+67108864 d13a86b4688790e57a32e87bcc209ec4+67108864 05712bd5603728d3f9016791d46f18a0+67108864 0b9a1a50d053843bcaa8c2c92d2738d3+67108864 a155912e0e45b54521429e739096d7ed+67108864 dedad0d2a05e7f7786e90370e2f4aae7+67108864 bb3b9f9b905ea40b6aa7360ff26c251b+67108864 f1c1dbf5e0ca7bc398ace6d215eb1b0f+67108864 56ce951826892637bcf276bd7e234587+67108864 b1b20e8b062f0a22375754b6f3170876+67108864 942532b6c96f7b51859d39ed123b2509+67108864 c4ead5a64baa6a6fd16c6d9d639ab84c+67108864 d7d4b85f843e2f4a5576fc7d9b6f8c4a+67108864 7eff16af07bdf8470b0b19f843c22ec5+67108864 1dd070d54d64b07d27fb7625a94b47bb+67108864 b33c17ab40dc191636dbbff51c1b0aab+67108864 d8cdd7ca5fdc00505f405c9ae54947d3+67108864 a2a68c3d5ffa4d9660e1644fc784c595+67108864 
6419d072f83937007be14ade52127892+67108864 a21e2c355fc15c5aed0ec8bbdd72ce57+67108864 5a39145dd181812527c28ef26515ec05+67108864 fce2f7b28fac75292ad68c9e158a13e5+67108864 ad8a8fcea664c1f0e73e68f1747a926c+67108864 6a0475795138c21b49a7f64a83830786+67108864 fdb58610320a6c1ac6b7a65621d0e1cf+67108864 7b00d44d90b7e2c303442017f4c90669+67108864 05d5186afda38885eb02558047b4de5a+67108864 1fd3dadea7b7325a1679a3bd64a0677b+67108864 02873b880b7c81807450431f38578adf+67108864 92f94a574cf8f897b0d5cdcf81a36757+67108864 ab23dc595b61727226997bbfc81d0ab5+67108864 d6641c1132d04737d040216305a9dcbc+67108864 0ef5d9a5a279b482fa6b97aa2039e423+67108864 aae63d13b1d9d8405d55c79f38a562d2+67108864 c676127582c2f5324575e7044e8e8e13+67108864 f4828a91ed7679c403f0711910adb8e9+67108864 9dc446c9842db3d526567b1b07a55c4d+67108864 c4192c0f0cb5a42c1bdb49530e3560bd+67108864 98cbdb65458086ad86164661fe3113fd+67108864 4d4c5ad721ba1ec22aa044fb5cec743c+67108864 7ea9aa9309936c10635dd5a5d1b986f3+67108864 7dc4a6d69b3ae0704a25fe6d0fd75272+67108864 f749b92411e4f602907a4c2e0853d914+67108864 89384497e0c6b5ae900d504e6f4e7348+67108864 01a97fdc4c69bb61d00bb0e0f175ee9c+67108864 f2920e0ad57f64b1c2eb48af5387321b+67108864 a8fa1e0e5aaafb37766bd135c818a3a4+67108864 7d7bec841a2c0c4214727cc83787733c+67108864 73d6603ce67a74b1b4d7346ce5442669+67108864 43e728dd53e31a4c6e24ae69e38d5482+67108864 427dd6b12aec18c38c5b8a8276160cf0+67108864 313574f4f22e7dc8b4d3f6d1b5594146+67108864 d0720964a956512960d5c147bf8dd280+67108864 8a35b2b23f9cf7fd81c6739a13c26add+36834981 0:95527142234:PG0002578-DNA.bam 95527142234:8761328:PG0002578-DNA.bam.bai 95535903562:936266194:PG0002578-DNA.bam.fa.gz 96472169756:469864841:PG0002578-DNA.bam.tdf
+./PG0002578-DNA-jlake-tumor/Assembly/genome/bam/realigned 39f862840d200c3dbc166ad29dea6807+67108864 34c3d868d8bd1134dbbde930b5a0161a+67108864 0c44e83c212416b1845cc0b292bafa9c+67108864 3f9bfd80e8eccd4fe3d26f3a74f3de35+67108864 9021a4781b43910f1952000e72100689+67108864 48083a918c1e46c64b9387a334c1faa2+67108864 1a3e290d82c15d770ab030a806d4166b+67108864 16aacdcffb8fb09724e517e76984463a+67108864 c4b69566ee2213c9da499588e27f9b37+67108864 77616a26e1711f0f0af2afe361f07559+67108864 23afc19d408c9e2b918eef3254899dff+67108864 ef820a07ec1446e36dcfb54f4b1bb4d9+67108864 85804989c1a609229f2100aca2a5164b+67108864 056c41fdc0e02e32e6a480ce2c401e78+67108864 d79df4b7582af576371a42cb17de617c+62074987 0:996151435:PG0002578-DNA.realigned.bam 996151435:5447648:PG0002578-DNA.realigned.bam.bai
+./PG0002578-DNA-jlake-tumor/Assembly/stats 6798533a7dfcc926ab0d03ca4b1fe9bb+30924 0:14525:Reads.idx 14525:14525:Reads.idx.bu 29050:1247:coverage.summary.txt 30297:627:dupCount.summary.txt
+./PG0002578-DNA-jlake-tumor/Docs 5b614c0ac2f1b7be407433860ee2a553+5625667 0:277564:1_IGS_Deliverable_Layout_gVCF.pdf 277564:304833:2_gVCF_Conventions_CASAVA_Release.pdf 582397:332651:3_Illumina_Annotation_Pipeline_Documentation.pdf 915048:3765366:S1_CASAVA_UG_15011196B.PDF 4680414:903872:S2_CASAVA_QRG_15011197B.PDF 5584286:41381:S3_bam2fastq.pdf
+./PG0002578-DNA-jlake-tumor/Genotyping 74a29b8642eafbee843c51f46fb526c0+67108864 c659bb8049be3ac13768227a5ac78228+67108864 388f348c50d59fc16bfb566f088e85aa+4019621 0:138237349:FinalReport_HumanOmni2.5-8v1_PG0002578.txt
+./PG0002578-DNA-jlake-tumor/IGV 1ae944d18638508fece1b0a8bfcf8c9e+15934904 0:2898:.igv_session.xml 2898:2898:.mac_igv_session.xml 5796:2526:GID_session.xml 8322:171972:batik-codec.jar 180294:202:igv.bat 180496:43:igv.command 180539:15725768:igv.jar 15906307:42:igv.sh 15906349:1150:illumina.ico 15907499:26167:license_LGPL-2.1.txt 15933666:1238:readme.txt
+./PG0002578-DNA-jlake-tumor/Variations c16493faff63b09072395f9131e38152+67108864 f8f61d884faf2a66f162af422c2749ee+67108864 75b165151b45b7aee32b4690f0644b38+67108864 a33a13617e75d300715e46c57162fc33+67108864 ca46c7a4c725d0efe0568dce26bc5213+67108864 18f8e870d93a271f8a894fc4c4351e60+67108864 adb2c4f3e1e99be24e3ab19cf5db7631+67108864 fefc2d2bfafc240776c7b06ddc8d475b+67108864 adf6ee298b575c59143eda731f83c1cf+67108864 78cf4992604945f97209402eabb2eb7d+67108864 8bf9394ad0435a51e2b2e57a841300c1+67108864 a6f8c5582335c30d06d111ecd79d85bf+67108864 7765bf35e97576b071639d17d9bdd090+67108864 8b9b746b76b18e32ba8047176e9f74d2+67108864 56590e49063b1f814005d9c02000ddfd+67108864 10a639f273764b31451b5e8d95a8e98d+67108864 1b1c8235e66633bb95d2a2a2bede6f3f+67108864 4cc650efa04c38fd0502287a92786737+67108864 4433bac63122a8f9058334633904e988+67108864 3b470c2e8bc8d0ed981ea14353f08eaa+67108864 13de261edeba63d0c330f85bf3544656+67108864 7aee17fb67ed1a622f2bed2b3a50ff78+67108864 8c7ecacdbd99ec2cb3cbd276984bb361+67108864 4bee2d90998217971ef72b2f94054966+12779495 0:1432964600:PG0002578-DNA.genome.block.anno.vcf.gz 1432964600:3650551:PG0002578-DNA.genome.block.anno.vcf.gz.tbi 1436615151:118084986:PG0002578-DNA.snps.vcf.gz 1554700137:1583230:PG0002578-DNA.snps.vcf.gz.tbi
diff --git a/sdk/python/tests/run_test_server.py b/sdk/python/tests/run_test_server.py
new file mode 100644 (file)
index 0000000..739c754
--- /dev/null
@@ -0,0 +1,335 @@
+#!/usr/bin/env python
+
+import argparse
+import os
+import shutil
+import signal
+import subprocess
+import sys
+import tempfile
+import time
+import unittest
+import yaml
+
# Absolute directory containing this file; all repository-relative paths
# below are derived from it.
MY_DIRNAME = os.path.dirname(os.path.realpath(__file__))
if __name__ == '__main__' and os.path.exists(
      os.path.join(MY_DIRNAME, '..', 'arvados', '__init__.py')):
    # We're being launched to support another test suite.
    # Add the Python SDK source to the library path.
    sys.path.insert(1, os.path.dirname(MY_DIRNAME))
+
+import arvados.api
+import arvados.config
+
# Location of the API server source tree, relative to this file.
SERVICES_SRC_DIR = os.path.join(MY_DIRNAME, '../../../services')
# Pid file paths are relative to the API server source directory;
# run() and stop() chdir there before using them.
SERVER_PID_PATH = 'tmp/pids/webrick-test.pid'
WEBSOCKETS_SERVER_PID_PATH = 'tmp/pids/passenger-test.pid'
if 'GOPATH' in os.environ:
    # Prepend every $GOPATH/bin so locally built Go servers
    # (keepstore, keepproxy) are found on PATH.
    gopaths = os.environ['GOPATH'].split(':')
    gobins = [os.path.join(path, 'bin') for path in gopaths]
    os.environ['PATH'] = ':'.join(gobins) + ':' + os.environ['PATH']

# Scratch directory for pid/volume files; depends on the working
# directory at import time (running from the package root vs. tests/).
if os.path.isdir('tests'):
    TEST_TMPDIR = 'tests/tmp'
else:
    TEST_TMPDIR = 'tmp'
+
def find_server_pid(PID_PATH, wait=10):
    """Return the pid recorded in PID_PATH, or None if no live server.

    Polls for up to `wait` seconds.  A pid is "good" when PID_PATH
    exists, contains an integer, and os.kill(pid, 0) succeeds (i.e.
    the process exists and we are allowed to signal it).
    """
    now = time.time()
    timeout = now + wait
    good_pid = False
    while (not good_pid) and (now <= timeout):
        time.sleep(0.2)
        try:
            with open(PID_PATH, 'r') as f:
                server_pid = int(f.read())
            # Signal 0 performs error checking only: no signal is sent.
            good_pid = (os.kill(server_pid, 0) is None)
        except (IOError, OSError, ValueError):
            # Missing/unreadable pid file, malformed contents, or a
            # dead process -- keep polling until the timeout expires.
            good_pid = False
        now = time.time()

    if not good_pid:
        return None

    return server_pid
+
def kill_server_pid(PID_PATH, wait=10):
    """Send SIGTERM to the process recorded in PID_PATH until it exits.

    Repeatedly signals the pid for up to `wait` seconds.  Returns
    silently if the pid file is missing or malformed, or as soon as
    the process no longer exists.
    """
    try:
        now = time.time()
        timeout = now + wait
        with open(PID_PATH, 'r') as f:
            server_pid = int(f.read())
        while now <= timeout:
            os.kill(server_pid, signal.SIGTERM)
            os.getpgid(server_pid) # throw OSError if no such pid
            now = time.time()
            time.sleep(0.1)
    except (IOError, OSError, ValueError):
        # Missing pid file, malformed contents, or the process is
        # already gone: nothing left to do.
        pass
+
def run(websockets=False, reuse_server=False):
    """Start the Rails API test server (plus websockets if requested).

    With reuse_server=True, an already-running server (per its pid
    file) is left in place; otherwise everything is stopped, the test
    database is reloaded, and a fresh server is started.  Sets the
    ARVADOS_API_* environment variables to point at the test server.
    """
    cwd = os.getcwd()
    # All rake/rails commands and pid paths are relative to the API
    # server source directory.
    os.chdir(os.path.join(SERVICES_SRC_DIR, 'api'))

    if websockets:
        pid_file = WEBSOCKETS_SERVER_PID_PATH
    else:
        pid_file = SERVER_PID_PATH

    test_pid = find_server_pid(pid_file, 0)

    if test_pid is None or not reuse_server:
        # do not try to run both server variants at once
        stop()

        # delete cached discovery document
        shutil.rmtree(arvados.http_cache('discovery'))

        # Setup database
        os.environ["RAILS_ENV"] = "test"
        subprocess.call(['bundle', 'exec', 'rake', 'tmp:cache:clear'])
        subprocess.call(['bundle', 'exec', 'rake', 'db:test:load'])
        subprocess.call(['bundle', 'exec', 'rake', 'db:fixtures:load'])

        # Start the Rails server daemonized ('-d') on port 3000.
        subprocess.call(['bundle', 'exec', 'rails', 'server', '-d',
                         '--pid',
                         os.path.join(os.getcwd(), SERVER_PID_PATH),
                         '-p3000'])
        os.environ["ARVADOS_API_HOST"] = "127.0.0.1:3000"

        if websockets:
            # Passenger serves only the websockets endpoint on 3333.
            os.environ["ARVADOS_WEBSOCKETS"] = "ws-only"
            subprocess.call(['bundle', 'exec',
                             'passenger', 'start', '-d', '-p3333',
                             '--pid-file',
                             os.path.join(os.getcwd(), WEBSOCKETS_SERVER_PID_PATH)
                         ])

        # Block until the server has written its pid file and come up.
        pid = find_server_pid(SERVER_PID_PATH)

    os.environ["ARVADOS_API_HOST_INSECURE"] = "true"
    os.environ["ARVADOS_API_TOKEN"] = ""
    os.chdir(cwd)
+
def stop():
    """Stop the API and websockets test servers and clean up.

    Kills any processes recorded in the pid files and removes the
    self-signed TLS certificate and key the server leaves behind.
    """
    cwd = os.getcwd()
    os.chdir(os.path.join(SERVICES_SRC_DIR, 'api'))

    kill_server_pid(WEBSOCKETS_SERVER_PID_PATH, 0)
    kill_server_pid(SERVER_PID_PATH, 0)

    # Best effort: these files only exist if a server actually ran.
    # Catch OSError specifically instead of a bare except, which would
    # also swallow KeyboardInterrupt/SystemExit.
    for cert_file in ('self-signed.pem', 'self-signed.key'):
        try:
            os.unlink(cert_file)
        except OSError:
            pass

    os.chdir(cwd)
+
def _start_keep(n, keep_args):
    """Launch keepstore server number `n`, recording its pid and volume.

    `keep_args` maps extra command-line flags to their values.
    """
    volume_dir = tempfile.mkdtemp()
    pid_path = "{}/keep{}.pid".format(TEST_TMPDIR, n)
    command = ["keepstore",
               "-volumes={}".format(volume_dir),
               "-listen=:{}".format(25107+n),
               "-pid={}".format(pid_path)]
    for flag, value in keep_args.iteritems():
        command.append("{}={}".format(flag, value))

    child = subprocess.Popen(command)

    # Record the pid and the volume directory so _stop_keep can find
    # and remove them later.
    with open(pid_path, 'w') as f:
        f.write(str(child.pid))
    with open("{}/keep{}.volume".format(TEST_TMPDIR, n), 'w') as f:
        f.write(volume_dir)
+
def run_keep(blob_signing_key=None, enforce_permissions=False):
    """Start two keepstore test servers and register them with the API.

    Optionally writes `blob_signing_key` to a file and passes it (and
    the --enforce-permissions flag) through to keepstore.  Requires the
    API test server to be running already, since it registers the new
    keep services and disks via the API.
    """
    stop_keep()

    if not os.path.exists(TEST_TMPDIR):
        os.mkdir(TEST_TMPDIR)

    keep_args = {}
    if blob_signing_key:
        with open(os.path.join(TEST_TMPDIR, "keep.blob_signing_key"), "w") as f:
            keep_args['--permission-key-file'] = f.name
            f.write(blob_signing_key)
    if enforce_permissions:
        keep_args['--enforce-permissions'] = 'true'

    # Two servers so replication-related code paths can be exercised.
    _start_keep(0, keep_args)
    _start_keep(1, keep_args)

    os.environ["ARVADOS_API_HOST"] = "127.0.0.1:3000"
    os.environ["ARVADOS_API_HOST_INSECURE"] = "true"

    # Clear out any keep services/disks left over from earlier runs,
    # then register the two servers started above.
    authorize_with("admin")
    api = arvados.api('v1', cache=False)
    for d in api.keep_services().list().execute()['items']:
        api.keep_services().delete(uuid=d['uuid']).execute()
    for d in api.keep_disks().list().execute()['items']:
        api.keep_disks().delete(uuid=d['uuid']).execute()

    s1 = api.keep_services().create(body={"keep_service": {
                "uuid": "zzzzz-bi6l4-5bo5n1iekkjyz6b",
                "service_host": "localhost",
                "service_port": 25107,
                "service_type": "disk"
                }}).execute()
    s2 = api.keep_services().create(body={"keep_service": {
                "uuid": "zzzzz-bi6l4-2nz60e0ksj7vr3s",
                "service_host": "localhost",
                "service_port": 25108,
                "service_type": "disk"
                }}).execute()
    api.keep_disks().create(body={"keep_disk": {"keep_service_uuid": s1["uuid"] } }).execute()
    api.keep_disks().create(body={"keep_disk": {"keep_service_uuid": s2["uuid"] } }).execute()
+
def _stop_keep(n):
    """Shut down keepstore server `n` and remove its temporary state."""
    kill_server_pid("{}/keep{}.pid".format(TEST_TMPDIR, n), 0)

    # The .volume file records the mkdtemp directory used for storage;
    # remove both the storage and the record of it.
    volume_file = "{}/keep{}.volume".format(TEST_TMPDIR, n)
    if os.path.exists(volume_file):
        with open(volume_file, 'r') as r:
            shutil.rmtree(r.read(), True)
        os.unlink(volume_file)

    key_file = os.path.join(TEST_TMPDIR, "keep.blob_signing_key")
    if os.path.exists(key_file):
        os.remove(key_file)
+
def stop_keep():
    """Stop both keepstore test servers."""
    for server_num in (0, 1):
        _stop_keep(server_num)
+
def run_keep_proxy(auth):
    """Start a keepproxy test server and register it with the API.

    `auth` names an api_client_authorizations fixture whose token the
    proxy will run under.  Requires the API server (and normally the
    keepstore servers) to be running.  Sets ARVADOS_KEEP_PROXY so SDK
    clients route Keep traffic through the proxy.
    """
    stop_keep_proxy()

    if not os.path.exists(TEST_TMPDIR):
        os.mkdir(TEST_TMPDIR)

    os.environ["ARVADOS_API_HOST"] = "127.0.0.1:3000"
    os.environ["ARVADOS_API_HOST_INSECURE"] = "true"
    os.environ["ARVADOS_API_TOKEN"] = fixture("api_client_authorizations")[auth]["api_token"]

    kp0 = subprocess.Popen(["keepproxy",
                            "-pid={}/keepproxy.pid".format(TEST_TMPDIR),
                            "-listen=:{}".format(25101)])

    # Register the proxy as a keep service of type "proxy".
    authorize_with("admin")
    api = arvados.api('v1', cache=False)
    api.keep_services().create(body={"keep_service": {"service_host": "localhost",  "service_port": 25101, "service_type": "proxy"} }).execute()

    os.environ["ARVADOS_KEEP_PROXY"] = "http://localhost:25101"
+
def stop_keep_proxy():
    """Stop the keepproxy test server."""
    pid_path = os.path.join(TEST_TMPDIR, "keepproxy.pid")
    kill_server_pid(pid_path, 0)
+
def fixture(fix):
    """Load the named API server fixture YAML file as a dict.

    Only the content before the "# Test Helper trims the rest of the
    file" marker is parsed, mirroring the Rails test helper's trimming
    behavior.
    """
    path = os.path.join(SERVICES_SRC_DIR, 'api', "test", "fixtures",
                        fix + ".yml")
    with open(path) as f:
        yaml_file = f.read()
    trim_index = yaml_file.find("# Test Helper trims the rest of the file")
    if trim_index != -1:
        yaml_file = yaml_file[0:trim_index]
    # NOTE: fixtures are trusted local files; yaml.load (rather than
    # safe_load) is deliberate in case fixtures use language tags.
    return yaml.load(yaml_file)
+
def authorize_with(token):
    '''token is the symbolic name of the token from the api_client_authorizations fixture'''
    settings = arvados.config.settings()
    settings["ARVADOS_API_TOKEN"] = fixture("api_client_authorizations")[token]["api_token"]
    settings["ARVADOS_API_HOST"] = os.environ.get("ARVADOS_API_HOST")
    settings["ARVADOS_API_HOST_INSECURE"] = "true"
+
class TestCaseWithServers(unittest.TestCase):
    """TestCase to start and stop supporting Arvados servers.

    Define any of MAIN_SERVER, KEEP_SERVER, and/or KEEP_PROXY_SERVER
    class variables as a dictionary of keyword arguments.  If you do,
    setUpClass will start the corresponding servers by passing these
    keyword arguments to the run, run_keep, and/or run_keep_proxy
    functions, respectively.  It will also set Arvados environment
    variables to point to these servers appropriately.  If you don't
    run a Keep or Keep proxy server, setUpClass will set up a
    temporary directory for Keep local storage, and set it as
    KEEP_LOCAL_STORE.

    tearDownClass will stop any servers started, and restore the
    original environment.
    """
    MAIN_SERVER = None
    KEEP_SERVER = None
    KEEP_PROXY_SERVER = None

    @staticmethod
    def _restore_dict(src, dest):
        # Make dest's contents equal src's, in place: dest may be
        # os.environ or the SDK settings dict, whose identity must be
        # preserved.  Snapshot the keys before deleting -- under
        # Python 3, keys() is a live view and mutating during
        # iteration raises RuntimeError.
        for key in list(dest.keys()):
            if key not in src:
                del dest[key]
        dest.update(src)

    @classmethod
    def setUpClass(cls):
        # Save the environment and SDK config so tearDownClass can
        # restore them exactly.
        cls._orig_environ = os.environ.copy()
        cls._orig_config = arvados.config.settings().copy()
        cls._cleanup_funcs = []
        for server_kwargs, start_func, stop_func in (
              (cls.MAIN_SERVER, run, stop),
              (cls.KEEP_SERVER, run_keep, stop_keep),
              (cls.KEEP_PROXY_SERVER, run_keep_proxy, stop_keep_proxy)):
            if server_kwargs is not None:
                start_func(**server_kwargs)
                cls._cleanup_funcs.append(stop_func)
        os.environ.pop('ARVADOS_EXTERNAL_CLIENT', None)
        if cls.KEEP_PROXY_SERVER is None:
            os.environ.pop('ARVADOS_KEEP_PROXY', None)
        if (cls.KEEP_SERVER is None) and (cls.KEEP_PROXY_SERVER is None):
            # No real Keep available: fall back to a local directory
            # store so SDK Keep operations still work.
            cls.local_store = tempfile.mkdtemp()
            os.environ['KEEP_LOCAL_STORE'] = cls.local_store
            cls._cleanup_funcs.append(
                lambda: shutil.rmtree(cls.local_store, ignore_errors=True))
        else:
            os.environ.pop('KEEP_LOCAL_STORE', None)
        # Re-read configuration from the (possibly changed) environment.
        arvados.config.initialize()

    @classmethod
    def tearDownClass(cls):
        for clean_func in cls._cleanup_funcs:
            clean_func()
        cls._restore_dict(cls._orig_environ, os.environ)
        cls._restore_dict(cls._orig_config, arvados.config.settings())
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument('action', type=str, help='''one of "start", "stop", "start_keep", "stop_keep"''')
+    parser.add_argument('--websockets', action='store_true', default=False)
+    parser.add_argument('--reuse', action='store_true', default=False)
+    parser.add_argument('--auth', type=str, help='Print authorization info for given api_client_authorizations fixture')
+    args = parser.parse_args()
+
+    if args.action == 'start':
+        run(websockets=args.websockets, reuse_server=args.reuse)
+        if args.auth is not None:
+            authorize_with(args.auth)
+            print("export ARVADOS_API_HOST={}".format(arvados.config.settings()["ARVADOS_API_HOST"]))
+            print("export ARVADOS_API_TOKEN={}".format(arvados.config.settings()["ARVADOS_API_TOKEN"]))
+            print("export ARVADOS_API_HOST_INSECURE={}".format(arvados.config.settings()["ARVADOS_API_HOST_INSECURE"]))
+    elif args.action == 'stop':
+        stop()
+    elif args.action == 'start_keep':
+        run_keep()
+    elif args.action == 'stop_keep':
+        stop_keep()
+    elif args.action == 'start_keep_proxy':
+        run_keep_proxy("admin")
+    elif args.action == 'stop_keep_proxy':
+        stop_keep_proxy()
+    else:
+        print('Unrecognized action "{}", actions are "start", "stop", "start_keep", "stop_keep"'.format(args.action))
diff --git a/sdk/python/tests/test_api.py b/sdk/python/tests/test_api.py
new file mode 100644 (file)
index 0000000..0d81fdf
--- /dev/null
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+
+import arvados
+import httplib2
+import json
+import mimetypes
+import os
+import run_test_server
+import unittest
+from apiclient import errors as apiclient_errors
+from apiclient import http as apiclient_http
+
+from arvados_testutil import fake_httplib2_response
+
# Ensure the mimetype table is populated so the '.json' lookup in
# ArvadosApiClientTest.ERROR_HEADERS below succeeds.
if not mimetypes.inited:
    mimetypes.init()
+
class ArvadosApiClientTest(unittest.TestCase):
    """Exercise the SDK's apiclient error handling against mocked responses."""
    # Error bodies are served with a JSON content type, like the real
    # API server's responses.
    ERROR_HEADERS = {'Content-Type': mimetypes.types_map['.json']}

    @classmethod
    def api_error_response(cls, code, *errors):
        # Build a (response, body) pair shaped like an API server
        # error payload with the given HTTP status and error strings.
        return (fake_httplib2_response(code, **cls.ERROR_HEADERS),
                json.dumps({'errors': errors,
                            'error_token': '1234567890+12345678'}))

    @classmethod
    def setUpClass(cls):
        # The apiclient library has support for mocking requests for
        # testing, but it doesn't extend to the discovery document
        # itself. For now, bring up an API server that will serve
        # a discovery document.
        # FIXME: Figure out a better way to stub this out.
        run_test_server.run()
        mock_responses = {
            'arvados.humans.delete': (
                fake_httplib2_response(500, **cls.ERROR_HEADERS),
                ""),
            'arvados.humans.get': cls.api_error_response(
                422, "Bad UUID format", "Bad output format"),
            'arvados.humans.list': (None, json.dumps(
                    {'items_available': 0, 'items': []})),
            }
        req_builder = apiclient_http.RequestMockBuilder(mock_responses)
        # Only the discovery request hits the real test server; all
        # other API calls are answered from mock_responses above.
        cls.api = arvados.api('v1', cache=False,
                              host=os.environ['ARVADOS_API_HOST'],
                              token='discovery-doc-only-no-token-needed',
                              insecure=True,
                              requestBuilder=req_builder)

    @classmethod
    def tearDownClass(cls):
        run_test_server.stop()

    def test_basic_list(self):
        answer = self.api.humans().list(
            filters=[['uuid', 'is', None]]).execute()
        self.assertEqual(answer['items_available'], len(answer['items']))

    def test_exceptions_include_errors(self):
        with self.assertRaises(apiclient_errors.HttpError) as err_ctx:
            self.api.humans().get(uuid='xyz-xyz-abcdef').execute()
        err_s = str(err_ctx.exception)
        # The individual error strings from the response body should
        # surface in the exception message.
        for msg in ["Bad UUID format", "Bad output format"]:
            self.assertIn(msg, err_s)

    def test_exceptions_without_errors_have_basic_info(self):
        with self.assertRaises(apiclient_errors.HttpError) as err_ctx:
            self.api.humans().delete(uuid='xyz-xyz-abcdef').execute()
        self.assertIn("500", str(err_ctx.exception))
+
+
# Allow running this module directly: python test_api.py
if __name__ == '__main__':
    unittest.main()
diff --git a/sdk/python/tests/test_arv_ls.py b/sdk/python/tests/test_arv_ls.py
new file mode 100644 (file)
index 0000000..90bbacf
--- /dev/null
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import hashlib
+import io
+import random
+
+import mock
+
+import arvados.errors as arv_error
+import arvados.commands.ls as arv_ls
+import run_test_server
+
class ArvLsTestCase(run_test_server.TestCaseWithServers):
    """Unit tests for arv-ls, driven through a mocked API client."""
    FAKE_UUID = 'zzzzz-4zz18-12345abcde12345'

    def newline_join(self, seq):
        # Join lines and terminate with a final newline, matching
        # manifest/listing output format.
        return '\n'.join(seq) + '\n'

    def random_blocks(self, *sizes):
        # Build a manifest block list: one random md5-shaped locator
        # per requested size.
        return ' '.join('{:032x}+{:d}'.format(
                  random.randint(0, (16 ** 32) - 1), size
                ) for size in sizes)

    def mock_api_for_manifest(self, manifest_lines, uuid=FAKE_UUID):
        # Return a fake collection record plus an API client whose
        # collections().get().execute() yields that record.
        manifest_text = self.newline_join(manifest_lines)
        pdh = '{}+{}'.format(hashlib.md5(manifest_text).hexdigest(),
                             len(manifest_text))
        coll_info = {'uuid': uuid,
                     'portable_data_hash': pdh,
                     'manifest_text': manifest_text}
        api_client = mock.MagicMock(name='mock_api_client')
        api_client.collections().get().execute.return_value = coll_info
        return coll_info, api_client

    def run_ls(self, args, api_client):
        # Capture stdout/stderr so each test can inspect the output.
        self.stdout = io.BytesIO()
        self.stderr = io.BytesIO()
        return arv_ls.main(args, self.stdout, self.stderr, api_client)

    def test_plain_listing(self):
        collection, api_client = self.mock_api_for_manifest(
            ['. {} 0:3:one.txt 3:4:two.txt'.format(self.random_blocks(5, 2)),
             './dir {} 1:5:sub.txt'.format(self.random_blocks(8))])
        self.assertEqual(0, self.run_ls([collection['uuid']], api_client))
        self.assertEqual(
            self.newline_join(['./one.txt', './two.txt', './dir/sub.txt']),
            self.stdout.getvalue())
        self.assertEqual('', self.stderr.getvalue())

    def test_size_listing(self):
        collection, api_client = self.mock_api_for_manifest(
            ['. {} 0:0:0.txt 0:1000:1.txt 1000:2000:2.txt'.format(
                    self.random_blocks(3000))])
        self.assertEqual(0, self.run_ls(['-s', collection['uuid']], api_client))
        self.stdout.seek(0, 0)
        for expected in range(3):
            actual_size, actual_name = self.stdout.readline().split()
            # Expected sizes 0, 1, 2 for files of 0, 1000, and 2000
            # bytes -- presumably -s reports size in KiB, rounded up.
            # TODO confirm against arv_ls implementation.
            self.assertEqual(str(expected), actual_size)
            self.assertEqual('./{}.txt'.format(expected), actual_name)
        self.assertEqual('', self.stdout.read(-1))
        self.assertEqual('', self.stderr.getvalue())

    def test_nonnormalized_manifest(self):
        # Two stream lines naming the same file: the listing should
        # merge them into a single entry (1010 + 2020 bytes -> "3").
        collection, api_client = self.mock_api_for_manifest(
            ['. {} 0:1010:non.txt'.format(self.random_blocks(1010)),
             '. {} 0:2020:non.txt'.format(self.random_blocks(2020))])
        self.assertEqual(0, self.run_ls(['-s', collection['uuid']], api_client))
        self.stdout.seek(0, 0)
        self.assertEqual(['3', './non.txt'], self.stdout.readline().split())
        self.assertEqual('', self.stdout.read(-1))
        self.assertEqual('', self.stderr.getvalue())

    def test_locator_failure(self):
        # A lookup error from the API should produce a nonzero exit
        # status and a message on stderr.
        api_client = mock.MagicMock(name='mock_api_client')
        api_client.collections().get().execute.side_effect = (
            arv_error.NotFoundError)
        self.assertNotEqual(0, self.run_ls([self.FAKE_UUID], api_client))
        self.assertNotEqual('', self.stderr.getvalue())
diff --git a/sdk/python/tests/test_arv_put.py b/sdk/python/tests/test_arv_put.py
new file mode 100644 (file)
index 0000000..001add3
--- /dev/null
@@ -0,0 +1,557 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import apiclient
+import os
+import pwd
+import re
+import shutil
+import subprocess
+import sys
+import tempfile
+import time
+import unittest
+import yaml
+
+from cStringIO import StringIO
+
+import arvados
+import arvados.commands.put as arv_put
+
+from arvados_testutil import ArvadosBaseTestCase
+import run_test_server
+
+class ArvadosPutResumeCacheTest(ArvadosBaseTestCase):
+    """Tests for arv_put.ResumeCache naming, storage, locking, and lifecycle."""
+
+    # Representative arv-put command lines; each should map to its own
+    # stable cache path.
+    CACHE_ARGSET = [
+        [],
+        ['/dev/null'],
+        ['/dev/null', '--filename', 'empty'],
+        ['/tmp'],
+        ['/tmp', '--max-manifest-depth', '0'],
+        ['/tmp', '--max-manifest-depth', '1']
+        ]
+
+    def tearDown(self):
+        super(ArvadosPutResumeCacheTest, self).tearDown()
+        # Tests that create a cache stash it in self.last_cache for cleanup;
+        # tests that don't simply never set the attribute.
+        try:
+            self.last_cache.destroy()
+        except AttributeError:
+            pass
+
+    def cache_path_from_arglist(self, arglist):
+        # Helper: the cache path arv-put would use for this command line.
+        return arv_put.ResumeCache.make_path(arv_put.parse_arguments(arglist))
+
+    def test_cache_names_stable(self):
+        for argset in self.CACHE_ARGSET:
+            self.assertEquals(self.cache_path_from_arglist(argset),
+                              self.cache_path_from_arglist(argset),
+                              "cache name changed for {}".format(argset))
+
+    def test_cache_names_unique(self):
+        results = []
+        for argset in self.CACHE_ARGSET:
+            path = self.cache_path_from_arglist(argset)
+            self.assertNotIn(path, results)
+            results.append(path)
+
+    def test_cache_names_simple(self):
+        # The goal here is to make sure the filename doesn't use characters
+        # reserved by the filesystem.  Feel free to adjust this regexp as
+        # long as it still does that.
+        bad_chars = re.compile(r'[^-\.\w]')
+        for argset in self.CACHE_ARGSET:
+            path = self.cache_path_from_arglist(argset)
+            self.assertFalse(bad_chars.search(os.path.basename(path)),
+                             "path too exotic: {}".format(path))
+
+    def test_cache_names_ignore_argument_order(self):
+        self.assertEquals(
+            self.cache_path_from_arglist(['a', 'b', 'c']),
+            self.cache_path_from_arglist(['c', 'a', 'b']))
+        self.assertEquals(
+            self.cache_path_from_arglist(['-', '--filename', 'stdin']),
+            self.cache_path_from_arglist(['--filename', 'stdin', '-']))
+
+    def test_cache_names_differ_for_similar_paths(self):
+        # This test needs names at / that don't exist on the real filesystem.
+        self.assertNotEqual(
+            self.cache_path_from_arglist(['/_arvputtest1', '/_arvputtest2']),
+            self.cache_path_from_arglist(['/_arvputtest1/_arvputtest2']))
+
+    def test_cache_names_ignore_irrelevant_arguments(self):
+        # Workaround: parse_arguments bails on --filename with a directory.
+        path1 = self.cache_path_from_arglist(['/tmp'])
+        args = arv_put.parse_arguments(['/tmp'])
+        args.filename = 'tmp'
+        path2 = arv_put.ResumeCache.make_path(args)
+        self.assertEquals(path1, path2,
+                         "cache path considered --filename for directory")
+        self.assertEquals(
+            self.cache_path_from_arglist(['-']),
+            self.cache_path_from_arglist(['-', '--max-manifest-depth', '1']),
+            "cache path considered --max-manifest-depth for file")
+
+    def test_cache_names_treat_negative_manifest_depths_identically(self):
+        base_args = ['/tmp', '--max-manifest-depth']
+        self.assertEquals(
+            self.cache_path_from_arglist(base_args + ['-1']),
+            self.cache_path_from_arglist(base_args + ['-2']))
+
+    def test_cache_names_treat_stdin_consistently(self):
+        self.assertEquals(
+            self.cache_path_from_arglist(['-', '--filename', 'test']),
+            self.cache_path_from_arglist(['/dev/stdin', '--filename', 'test']))
+
+    def test_cache_names_identical_for_synonymous_names(self):
+        self.assertEquals(
+            self.cache_path_from_arglist(['.']),
+            self.cache_path_from_arglist([os.path.realpath('.')]))
+        testdir = self.make_tmpdir()
+        looplink = os.path.join(testdir, 'loop')
+        os.symlink(testdir, looplink)
+        self.assertEquals(
+            self.cache_path_from_arglist([testdir]),
+            self.cache_path_from_arglist([looplink]))
+
+    def test_cache_names_different_by_api_host(self):
+        # Temporarily point ARVADOS_API_HOST somewhere else and make sure
+        # the cache path changes; always restore the original setting.
+        config = arvados.config.settings()
+        orig_host = config.get('ARVADOS_API_HOST')
+        try:
+            name1 = self.cache_path_from_arglist(['.'])
+            config['ARVADOS_API_HOST'] = 'x' + (orig_host or 'localhost')
+            self.assertNotEqual(name1, self.cache_path_from_arglist(['.']))
+        finally:
+            if orig_host is None:
+                del config['ARVADOS_API_HOST']
+            else:
+                config['ARVADOS_API_HOST'] = orig_host
+
+    def test_basic_cache_storage(self):
+        thing = ['test', 'list']
+        with tempfile.NamedTemporaryFile() as cachefile:
+            self.last_cache = arv_put.ResumeCache(cachefile.name)
+        self.last_cache.save(thing)
+        self.assertEquals(thing, self.last_cache.load())
+
+    def test_empty_cache(self):
+        with tempfile.NamedTemporaryFile() as cachefile:
+            cache = arv_put.ResumeCache(cachefile.name)
+        self.assertRaises(ValueError, cache.load)
+
+    def test_cache_persistent(self):
+        # Data saved through one ResumeCache instance must be readable
+        # through a new instance opened on the same path.
+        thing = ['test', 'list']
+        path = os.path.join(self.make_tmpdir(), 'cache')
+        cache = arv_put.ResumeCache(path)
+        cache.save(thing)
+        cache.close()
+        self.last_cache = arv_put.ResumeCache(path)
+        self.assertEquals(thing, self.last_cache.load())
+
+    def test_multiple_cache_writes(self):
+        thing = ['short', 'list']
+        with tempfile.NamedTemporaryFile() as cachefile:
+            self.last_cache = arv_put.ResumeCache(cachefile.name)
+        # Start writing an object longer than the one we test, to make
+        # sure the cache file gets truncated.
+        self.last_cache.save(['long', 'long', 'list'])
+        self.last_cache.save(thing)
+        self.assertEquals(thing, self.last_cache.load())
+
+    def test_cache_is_locked(self):
+        with tempfile.NamedTemporaryFile() as cachefile:
+            cache = arv_put.ResumeCache(cachefile.name)
+            self.assertRaises(arv_put.ResumeCacheConflict,
+                              arv_put.ResumeCache, cachefile.name)
+
+    def test_cache_stays_locked(self):
+        # The lock must persist even after the original temp file name
+        # has left the with-block.
+        with tempfile.NamedTemporaryFile() as cachefile:
+            self.last_cache = arv_put.ResumeCache(cachefile.name)
+            path = cachefile.name
+        self.last_cache.save('test')
+        self.assertRaises(arv_put.ResumeCacheConflict,
+                          arv_put.ResumeCache, path)
+
+    def test_destroy_cache(self):
+        cachefile = tempfile.NamedTemporaryFile(delete=False)
+        try:
+            cache = arv_put.ResumeCache(cachefile.name)
+            cache.save('test')
+            cache.destroy()
+            # After destroy, the path should be free to reopen...
+            try:
+                arv_put.ResumeCache(cachefile.name)
+            except arv_put.ResumeCacheConflict:
+                self.fail("could not load cache after destroying it")
+            # ...and the destroyed cache should have no loadable state.
+            self.assertRaises(ValueError, cache.load)
+        finally:
+            if os.path.exists(cachefile.name):
+                os.unlink(cachefile.name)
+
+    def test_restart_cache(self):
+        # restart() discards saved state but keeps the cache locked.
+        path = os.path.join(self.make_tmpdir(), 'cache')
+        cache = arv_put.ResumeCache(path)
+        cache.save('test')
+        cache.restart()
+        self.assertRaises(ValueError, cache.load)
+        self.assertRaises(arv_put.ResumeCacheConflict,
+                          arv_put.ResumeCache, path)
+
+
+class ArvadosPutCollectionWriterTest(run_test_server.TestCaseWithServers,
+                                     ArvadosBaseTestCase):
+    """Tests for ArvPutCollectionWriter state caching, resume, and progress."""
+
+    def setUp(self):
+        super(ArvadosPutCollectionWriterTest, self).setUp()
+        run_test_server.authorize_with('active')
+        # Each test gets a fresh ResumeCache backed by a real file
+        # (delete=False so the cache outlives the with-block).
+        with tempfile.NamedTemporaryFile(delete=False) as cachefile:
+            self.cache = arv_put.ResumeCache(cachefile.name)
+            self.cache_filename = cachefile.name
+
+    def tearDown(self):
+        super(ArvadosPutCollectionWriterTest, self).tearDown()
+        if os.path.exists(self.cache_filename):
+            self.cache.destroy()
+        self.cache.close()
+
+    def test_writer_caches(self):
+        cwriter = arv_put.ArvPutCollectionWriter(self.cache)
+        cwriter.write_file('/dev/null')
+        cwriter.cache_state()
+        self.assertTrue(self.cache.load())
+        self.assertEquals(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:null\n", cwriter.manifest_text())
+
+    def test_writer_works_without_cache(self):
+        cwriter = arv_put.ArvPutCollectionWriter()
+        cwriter.write_file('/dev/null')
+        self.assertEquals(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:null\n", cwriter.manifest_text())
+
+    def test_writer_resumes_from_cache(self):
+        # A writer built via from_cache() must reproduce the cached
+        # writer's manifest exactly.
+        cwriter = arv_put.ArvPutCollectionWriter(self.cache)
+        with self.make_test_file() as testfile:
+            cwriter.write_file(testfile.name, 'test')
+            cwriter.cache_state()
+            new_writer = arv_put.ArvPutCollectionWriter.from_cache(
+                self.cache)
+            self.assertEquals(
+                ". 098f6bcd4621d373cade4e832627b4f6+4 0:4:test\n",
+                new_writer.manifest_text())
+
+    def test_new_writer_from_stale_cache(self):
+        # The cached state refers to a file that no longer exists (the
+        # with-block deleted it); from_cache should still yield a usable
+        # fresh writer.
+        cwriter = arv_put.ArvPutCollectionWriter(self.cache)
+        with self.make_test_file() as testfile:
+            cwriter.write_file(testfile.name, 'test')
+        new_writer = arv_put.ArvPutCollectionWriter.from_cache(self.cache)
+        new_writer.write_file('/dev/null')
+        self.assertEquals(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:null\n", new_writer.manifest_text())
+
+    def test_new_writer_from_empty_cache(self):
+        cwriter = arv_put.ArvPutCollectionWriter.from_cache(self.cache)
+        cwriter.write_file('/dev/null')
+        self.assertEquals(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:null\n", cwriter.manifest_text())
+
+    def test_writer_resumable_after_arbitrary_bytes(self):
+        cwriter = arv_put.ArvPutCollectionWriter(self.cache)
+        # These bytes are intentionally not valid UTF-8.
+        with self.make_test_file('\x00\x07\xe2') as testfile:
+            cwriter.write_file(testfile.name, 'test')
+            cwriter.cache_state()
+            new_writer = arv_put.ArvPutCollectionWriter.from_cache(
+                self.cache)
+        self.assertEquals(cwriter.manifest_text(), new_writer.manifest_text())
+
+    def make_progress_tester(self):
+        # Returns (progression, record_func): record_func appends each
+        # (bytes_written, bytes_expected) callback into progression.
+        progression = []
+        def record_func(written, expected):
+            progression.append((written, expected))
+        return progression, record_func
+
+    def test_progress_reporting(self):
+        # Exercise both the unknown-total (None) and known-total cases.
+        for expect_count in (None, 8):
+            progression, reporter = self.make_progress_tester()
+            cwriter = arv_put.ArvPutCollectionWriter(
+                reporter=reporter, bytes_expected=expect_count)
+            with self.make_test_file() as testfile:
+                cwriter.write_file(testfile.name, 'test')
+            cwriter.finish_current_stream()
+            self.assertIn((4, expect_count), progression)
+
+    def test_resume_progress(self):
+        cwriter = arv_put.ArvPutCollectionWriter(self.cache, bytes_expected=4)
+        with self.make_test_file() as testfile:
+            # Set up a writer with some flushed bytes.
+            cwriter.write_file(testfile.name, 'test')
+            cwriter.finish_current_stream()
+            cwriter.cache_state()
+            new_writer = arv_put.ArvPutCollectionWriter.from_cache(self.cache)
+            self.assertEqual(new_writer.bytes_written, 4)
+
+
+class ArvadosExpectedBytesTest(ArvadosBaseTestCase):
+    """Tests for arv_put.expected_bytes_for() size estimation."""
+
+    # Use this test file's own size as a convenient known quantity.
+    TEST_SIZE = os.path.getsize(__file__)
+
+    def test_expected_bytes_for_file(self):
+        self.assertEquals(self.TEST_SIZE,
+                          arv_put.expected_bytes_for([__file__]))
+
+    def test_expected_bytes_for_tree(self):
+        # A directory's expected bytes are the sum of its files.
+        tree = self.make_tmpdir()
+        shutil.copyfile(__file__, os.path.join(tree, 'one'))
+        shutil.copyfile(__file__, os.path.join(tree, 'two'))
+        self.assertEquals(self.TEST_SIZE * 2,
+                          arv_put.expected_bytes_for([tree]))
+        self.assertEquals(self.TEST_SIZE * 3,
+                          arv_put.expected_bytes_for([tree, __file__]))
+
+    def test_expected_bytes_for_device(self):
+        # Device files have no meaningful size, so the estimate is None --
+        # and any device in the list makes the whole estimate None.
+        self.assertIsNone(arv_put.expected_bytes_for(['/dev/null']))
+        self.assertIsNone(arv_put.expected_bytes_for([__file__, '/dev/null']))
+
+
+class ArvadosPutReportTest(ArvadosBaseTestCase):
+    """Tests for the machine- and human-readable progress formatters."""
+
+    def test_machine_progress(self):
+        # An unknown total is reported as -1 in machine-readable output.
+        for count, total in [(0, 1), (0, None), (1, None), (235, 9283)]:
+            expect = ": {} written {} total\n".format(
+                count, -1 if (total is None) else total)
+            self.assertTrue(
+                arv_put.machine_progress(count, total).endswith(expect))
+
+    def test_known_human_progress(self):
+        # With a known total, human output starts with \r (to overwrite
+        # the previous line) and includes a percentage.
+        for count, total in [(0, 1), (2, 4), (45, 60)]:
+            expect = '{:.1%}'.format(float(count) / total)
+            actual = arv_put.human_progress(count, total)
+            self.assertTrue(actual.startswith('\r'))
+            self.assertIn(expect, actual)
+
+    def test_unknown_human_progress(self):
+        # With no total, the byte count itself must appear in the output.
+        for count in [1, 20, 300, 4000, 50000]:
+            self.assertTrue(re.search(r'\b{}\b'.format(count),
+                                      arv_put.human_progress(count, None)))
+
+
+class ArvadosPutTest(run_test_server.TestCaseWithServers, ArvadosBaseTestCase):
+    """Tests that drive arv_put.main() in-process against a test server."""
+
+    MAIN_SERVER = {}
+    # Syntactically valid UUID that matches no real object.
+    Z_UUID = 'zzzzz-zzzzz-zzzzzzzzzzzzzzz'
+
+    def call_main_with_args(self, args):
+        # Capture main()'s output so tests can inspect it; buffers are
+        # closed in tearDown.
+        self.main_stdout = StringIO()
+        self.main_stderr = StringIO()
+        return arv_put.main(args, self.main_stdout, self.main_stderr)
+
+    def call_main_on_test_file(self):
+        with self.make_test_file() as testfile:
+            path = testfile.name
+            self.call_main_with_args(['--stream', '--no-progress', path])
+        # The test file's content is 'test'; its md5 is the Keep locator
+        # we expect to find in the local store.
+        self.assertTrue(
+            os.path.exists(os.path.join(os.environ['KEEP_LOCAL_STORE'],
+                                        '098f6bcd4621d373cade4e832627b4f6')),
+            "did not find file stream in Keep store")
+
+    def setUp(self):
+        super(ArvadosPutTest, self).setUp()
+        run_test_server.authorize_with('active')
+        arv_put.api_client = None
+
+    def tearDown(self):
+        for outbuf in ['main_stdout', 'main_stderr']:
+            if hasattr(self, outbuf):
+                getattr(self, outbuf).close()
+                delattr(self, outbuf)
+        super(ArvadosPutTest, self).tearDown()
+
+    def test_simple_file_put(self):
+        self.call_main_on_test_file()
+
+    def test_put_with_unwriteable_cache_dir(self):
+        # arv-put should still succeed when the resume-cache directory
+        # cannot be written; restore permissions so cleanup can remove it.
+        orig_cachedir = arv_put.ResumeCache.CACHE_DIR
+        cachedir = self.make_tmpdir()
+        os.chmod(cachedir, 0o0)
+        arv_put.ResumeCache.CACHE_DIR = cachedir
+        try:
+            self.call_main_on_test_file()
+        finally:
+            arv_put.ResumeCache.CACHE_DIR = orig_cachedir
+            os.chmod(cachedir, 0o700)
+
+    def test_put_with_unwritable_cache_subdir(self):
+        # Same as above, but the cache dir itself can't even be created.
+        orig_cachedir = arv_put.ResumeCache.CACHE_DIR
+        cachedir = self.make_tmpdir()
+        os.chmod(cachedir, 0o0)
+        arv_put.ResumeCache.CACHE_DIR = os.path.join(cachedir, 'cachedir')
+        try:
+            self.call_main_on_test_file()
+        finally:
+            arv_put.ResumeCache.CACHE_DIR = orig_cachedir
+            os.chmod(cachedir, 0o700)
+
+    def test_error_name_without_collection(self):
+        # --name only makes sense when saving a collection; with --stream
+        # it must be rejected.
+        self.assertRaises(SystemExit, self.call_main_with_args,
+                          ['--name', 'test without Collection',
+                           '--stream', '/dev/null'])
+
+    def test_error_when_project_not_found(self):
+        self.assertRaises(SystemExit,
+                          self.call_main_with_args,
+                          ['--project-uuid', self.Z_UUID])
+
+    def test_error_bad_project_uuid(self):
+        self.assertRaises(SystemExit,
+                          self.call_main_with_args,
+                          ['--project-uuid', self.Z_UUID, '--stream'])
+
+class ArvPutIntegrationTest(run_test_server.TestCaseWithServers,
+                            ArvadosBaseTestCase):
+    def _getKeepServerConfig():
+        for config_file in ['application.yml', 'application.default.yml']:
+            with open(os.path.join(run_test_server.SERVICES_SRC_DIR,
+                                   "api", "config", config_file)) as f:
+                rails_config = yaml.load(f.read())
+                for config_section in ['test', 'common']:
+                    try:
+                        key = rails_config[config_section]["blob_signing_key"]
+                    except (KeyError, TypeError):
+                        pass
+                    else:
+                        return {'blob_signing_key': key,
+                                'enforce_permissions': True}
+        return {'blog_signing_key': None, 'enforce_permissions': False}
+
+    MAIN_SERVER = {}
+    KEEP_SERVER = _getKeepServerConfig()
+    PROJECT_UUID = run_test_server.fixture('groups')['aproject']['uuid']
+
+    @classmethod
+    def setUpClass(cls):
+        super(ArvPutIntegrationTest, cls).setUpClass()
+        cls.ENVIRON = os.environ.copy()
+        cls.ENVIRON['PYTHONPATH'] = ':'.join(sys.path)
+
+    def setUp(self):
+        super(ArvPutIntegrationTest, self).setUp()
+        arv_put.api_client = None
+
+    def authorize_with(self, token_name):
+        run_test_server.authorize_with(token_name)
+        for v in ["ARVADOS_API_HOST",
+                  "ARVADOS_API_HOST_INSECURE",
+                  "ARVADOS_API_TOKEN"]:
+            self.ENVIRON[v] = arvados.config.settings()[v]
+        arv_put.api_client = arvados.api('v1', cache=False)
+
+    def current_user(self):
+        return arv_put.api_client.users().current().execute()
+
+    def test_check_real_project_found(self):
+        self.authorize_with('active')
+        self.assertTrue(arv_put.desired_project_uuid(arv_put.api_client, self.PROJECT_UUID, 0),
+                        "did not correctly find test fixture project")
+
+    def test_check_error_finding_nonexistent_uuid(self):
+        BAD_UUID = 'zzzzz-zzzzz-zzzzzzzzzzzzzzz'
+        self.authorize_with('active')
+        try:
+            result = arv_put.desired_project_uuid(arv_put.api_client, BAD_UUID,
+                                                  0)
+        except ValueError as error:
+            self.assertIn(BAD_UUID, error.message)
+        else:
+            self.assertFalse(result, "incorrectly found nonexistent project")
+
+    def test_check_error_finding_nonexistent_project(self):
+        BAD_UUID = 'zzzzz-tpzed-zzzzzzzzzzzzzzz'
+        self.authorize_with('active')
+        with self.assertRaises(apiclient.errors.HttpError):
+            result = arv_put.desired_project_uuid(arv_put.api_client, BAD_UUID,
+                                                  0)
+
+    def test_short_put_from_stdin(self):
+        # Have to run this as an integration test since arv-put can't
+        # read from the tests' stdin.
+        # arv-put usually can't stat(os.path.realpath('/dev/stdin')) in this
+        # case, because the /proc entry is already gone by the time it tries.
+        pipe = subprocess.Popen(
+            [sys.executable, arv_put.__file__, '--stream'],
+            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT, env=self.ENVIRON)
+        pipe.stdin.write('stdin test\n')
+        pipe.stdin.close()
+        deadline = time.time() + 5
+        while (pipe.poll() is None) and (time.time() < deadline):
+            time.sleep(.1)
+        returncode = pipe.poll()
+        if returncode is None:
+            pipe.terminate()
+            self.fail("arv-put did not PUT from stdin within 5 seconds")
+        elif returncode != 0:
+            sys.stdout.write(pipe.stdout.read())
+            self.fail("arv-put returned exit code {}".format(returncode))
+        self.assertIn('4a9c8b735dce4b5fa3acf221a0b13628+11', pipe.stdout.read())
+
+    def test_ArvPutSignedManifest(self):
+        # ArvPutSignedManifest runs "arv-put foo" and then attempts to get
+        # the newly created manifest from the API server, testing to confirm
+        # that the block locators in the returned manifest are signed.
+        self.authorize_with('active')
+
+        # Before doing anything, demonstrate that the collection
+        # we're about to create is not present in our test fixture.
+        manifest_uuid = "00b4e9f40ac4dd432ef89749f1c01e74+47"
+        with self.assertRaises(apiclient.errors.HttpError):
+            notfound = arv_put.api_client.collections().get(
+                uuid=manifest_uuid).execute()
+
+        datadir = self.make_tmpdir()
+        with open(os.path.join(datadir, "foo"), "w") as f:
+            f.write("The quick brown fox jumped over the lazy dog")
+        p = subprocess.Popen([sys.executable, arv_put.__file__, datadir],
+                             stdout=subprocess.PIPE, env=self.ENVIRON)
+        (arvout, arverr) = p.communicate()
+        self.assertEqual(arverr, None)
+        self.assertEqual(p.returncode, 0)
+
+        # The manifest text stored in the API server under the same
+        # manifest UUID must use signed locators.
+        c = arv_put.api_client.collections().get(uuid=manifest_uuid).execute()
+        self.assertRegexpMatches(
+            c['manifest_text'],
+            r'^\. 08a008a01d498c404b0c30852b39d3b8\+44\+A[0-9a-f]+@[0-9a-f]+ 0:44:foo\n')
+
+        os.remove(os.path.join(datadir, "foo"))
+        os.rmdir(datadir)
+
+    def run_and_find_collection(self, text, extra_args=[]):
+        self.authorize_with('active')
+        pipe = subprocess.Popen(
+            [sys.executable, arv_put.__file__] + extra_args,
+            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE, env=self.ENVIRON)
+        stdout, stderr = pipe.communicate(text)
+        collection_list = arvados.api('v1', cache=False).collections().list(
+            filters=[['portable_data_hash', '=', stdout.strip()]]).execute().get('items', [])
+        self.assertEqual(1, len(collection_list))
+        return collection_list[0]
+
+    def test_put_collection_with_unnamed_project_link(self):
+        link = self.run_and_find_collection("Test unnamed collection",
+                                      ['--portable-data-hash', '--project-uuid', self.PROJECT_UUID])
+        username = pwd.getpwuid(os.getuid()).pw_name
+        self.assertRegexpMatches(
+            link['name'],
+            r'^Saved at .* by {}@'.format(re.escape(username)))
+
+    def test_put_collection_with_name_and_no_project(self):
+        link_name = 'Test Collection Link in home project'
+        collection = self.run_and_find_collection("Test named collection in home project",
+                                      ['--portable-data-hash', '--name', link_name])
+        self.assertEqual(link_name, collection['name'])
+        my_user_uuid = self.current_user()['uuid']
+        self.assertEqual(my_user_uuid, collection['owner_uuid'])
+
+    def test_put_collection_with_named_project_link(self):
+        link_name = 'Test auto Collection Link'
+        collection = self.run_and_find_collection("Test named collection",
+                                      ['--portable-data-hash',
+                                       '--name', link_name,
+                                       '--project-uuid', self.PROJECT_UUID])
+        self.assertEqual(link_name, collection['name'])
+
+
+# Allow running this test module directly: python test_arv_put.py
+if __name__ == '__main__':
+    unittest.main()
diff --git a/sdk/python/tests/test_collections.py b/sdk/python/tests/test_collections.py
new file mode 100644 (file)
index 0000000..c991154
--- /dev/null
@@ -0,0 +1,810 @@
+# usage example:
+#
+# ARVADOS_API_TOKEN=abc ARVADOS_API_HOST=arvados.local python -m unittest discover
+
+import arvados
+import copy
+import hashlib
+import mock
+import os
+import pprint
+import re
+import tempfile
+import unittest
+
+import run_test_server
+import arvados_testutil as tutil
+
+class TestResumableWriter(arvados.ResumableCollectionWriter):
+    """ResumableCollectionWriter with a tiny block size and a helper to
+    snapshot its internal state for resume tests.
+    """
+
+    KEEP_BLOCK_SIZE = 1024  # PUT to Keep every 1K.
+
+    def current_state(self):
+        # deepcopy so later writes can't mutate the captured snapshot.
+        return self.dump_state(copy.deepcopy)
+
+
+class ArvadosCollectionsTest(run_test_server.TestCaseWithServers,
+                             tutil.ArvadosBaseTestCase):
+    MAIN_SERVER = {}
+
+    @classmethod
+    def setUpClass(cls):
+        # Shared API and Keep clients for all tests in this class; Keep
+        # traffic is redirected to a local store directory.
+        super(ArvadosCollectionsTest, cls).setUpClass()
+        run_test_server.authorize_with('active')
+        cls.api_client = arvados.api('v1')
+        cls.keep_client = arvados.KeepClient(api_client=cls.api_client,
+                                             local_store=cls.local_store)
+
+    def write_foo_bar_baz(self):
+        # Helper: write a known two-stream collection (foo.txt/bar.txt in
+        # '.', baz.txt in './baz') and return its portable data hash.
+        cw = arvados.CollectionWriter(self.api_client)
+        self.assertEqual(cw.current_stream_name(), '.',
+                         'current_stream_name() should be "." now')
+        cw.set_current_file_name('foo.txt')
+        cw.write('foo')
+        self.assertEqual(cw.current_file_name(), 'foo.txt',
+                         'current_file_name() should be foo.txt now')
+        cw.start_new_file('bar.txt')
+        cw.write('bar')
+        cw.start_new_stream('baz')
+        cw.write('baz')
+        # Note: file name set after writing -- names the data just written.
+        cw.set_current_file_name('baz.txt')
+        self.assertEqual(cw.manifest_text(),
+                         ". 3858f62230ac3c915f300c664312c63f+6 0:3:foo.txt 3:3:bar.txt\n" +
+                         "./baz 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz.txt\n",
+                         "wrong manifest: got {}".format(cw.manifest_text()))
+        cw.finish()
+        return cw.portable_data_hash()
+
+    def test_keep_local_store(self):
+        # Round-trip through the local Keep store: put returns the md5
+        # locator, get returns the original data.
+        self.assertEqual(self.keep_client.put('foo'), 'acbd18db4cc2f85cedef654fccc4a4d8+3', 'wrong md5 hash from Keep.put')
+        self.assertEqual(self.keep_client.get('acbd18db4cc2f85cedef654fccc4a4d8+3'), 'foo', 'wrong data from Keep.get')
+
+    def test_local_collection_writer(self):
+        # The foo/bar/baz fixture collection must hash to a fixed locator.
+        self.assertEqual(self.write_foo_bar_baz(),
+                         '23ca013983d6239e98931cc779e68426+114',
+                         'wrong locator hash: ' + self.write_foo_bar_baz())
+
+    def test_local_collection_reader(self):
+        foobarbaz = self.write_foo_bar_baz()
+        # The unknown '+Xzizzle' locator hint should be tolerated/ignored.
+        cr = arvados.CollectionReader(
+            foobarbaz + '+Xzizzle', self.api_client)
+        got = []
+        for s in cr.all_streams():
+            for f in s.all_files():
+                got += [[f.size(), f.stream_name(), f.name(), f.read(2**26)]]
+        expected = [[3, '.', 'foo.txt', 'foo'],
+                    [3, '.', 'bar.txt', 'bar'],
+                    [3, './baz', 'baz.txt', 'baz']]
+        self.assertEqual(got,
+                         expected)
+        # Stream-level reads: zero-length reads and reads past the end
+        # must behave sanely.
+        stream0 = cr.all_streams()[0]
+        self.assertEqual(stream0.readfrom(0, 0),
+                         '',
+                         'reading zero bytes should have returned empty string')
+        self.assertEqual(stream0.readfrom(0, 2**26),
+                         'foobar',
+                         'reading entire stream failed')
+        self.assertEqual(stream0.readfrom(2**26, 0),
+                         '',
+                         'reading zero bytes should have returned empty string')
+
+    def _test_subset(self, collection, expected):
+        # Helper: read collection and check each expected entry
+        # [size, stream_name, file_name, content] against the files found.
+        # Note each ex[0] (size) is compared only when it matches a stream
+        # object s -- see the ex[0] == s test below, which pairs entries to
+        # streams by that first element.
+        cr = arvados.CollectionReader(collection, self.api_client)
+        for s in cr.all_streams():
+            for ex in expected:
+                if ex[0] == s:
+                    f = s.files()[ex[2]]
+                    got = [f.size(), f.stream_name(), f.name(), "".join(f.readall(2**26))]
+                    self.assertEqual(got,
+                                     ex,
+                                     'all_files|as_manifest did not preserve manifest contents: got %s expected %s' % (got, ex))
+
+    def test_collection_manifest_subset(self):
+        # Check files sliced out of shared blocks: whole files, split
+        # files, and zero-length files at interior offsets.
+        foobarbaz = self.write_foo_bar_baz()
+        self._test_subset(foobarbaz,
+                          [[3, '.',     'bar.txt', 'bar'],
+                           [3, '.',     'foo.txt', 'foo'],
+                           [3, './baz', 'baz.txt', 'baz']])
+        self._test_subset((". %s %s 0:3:foo.txt 3:3:bar.txt\n" %
+                           (self.keep_client.put("foo"),
+                            self.keep_client.put("bar"))),
+                          [[3, '.', 'bar.txt', 'bar'],
+                           [3, '.', 'foo.txt', 'foo']])
+        self._test_subset((". %s %s 0:2:fo.txt 2:4:obar.txt\n" %
+                           (self.keep_client.put("foo"),
+                            self.keep_client.put("bar"))),
+                          [[2, '.', 'fo.txt', 'fo'],
+                           [4, '.', 'obar.txt', 'obar']])
+        self._test_subset((". %s %s 0:2:fo.txt 2:0:zero.txt 2:2:ob.txt 4:2:ar.txt\n" %
+                           (self.keep_client.put("foo"),
+                            self.keep_client.put("bar"))),
+                          [[2, '.', 'ar.txt', 'ar'],
+                           [2, '.', 'fo.txt', 'fo'],
+                           [2, '.', 'ob.txt', 'ob'],
+                           [0, '.', 'zero.txt', '']])
+
+    def test_collection_empty_file(self):
+        # A single empty file yields the canonical empty-block manifest.
+        cw = arvados.CollectionWriter(self.api_client)
+        cw.start_new_file('zero.txt')
+        cw.write('')
+
+        self.assertEqual(cw.manifest_text(), ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:zero.txt\n")
+        self.check_manifest_file_sizes(cw.manifest_text(), [0])
+        # Empty files can also be mixed with non-empty ones across streams.
+        cw = arvados.CollectionWriter(self.api_client)
+        cw.start_new_file('zero.txt')
+        cw.write('')
+        cw.start_new_file('one.txt')
+        cw.write('1')
+        cw.start_new_stream('foo')
+        cw.start_new_file('zero.txt')
+        cw.write('')
+        self.check_manifest_file_sizes(cw.manifest_text(), [0,1,0])
+
+    def test_no_implicit_normalize(self):
+        # The writer preserves insertion order ('b' before 'a');
+        # normalization (alphabetical order) happens only on request.
+        cw = arvados.CollectionWriter(self.api_client)
+        cw.start_new_file('b')
+        cw.write('b')
+        cw.start_new_file('a')
+        cw.write('')
+        self.check_manifest_file_sizes(cw.manifest_text(), [1,0])
+        self.check_manifest_file_sizes(
+            arvados.CollectionReader(
+                cw.manifest_text()).manifest_text(normalize=True),
+            [0,1])
+
+    def check_manifest_file_sizes(self, manifest_text, expect_sizes):
+        # Helper: assert the manifest's files have exactly expect_sizes,
+        # in manifest order.
+        cr = arvados.CollectionReader(manifest_text, self.api_client)
+        got_sizes = []
+        for f in cr.all_files():
+            got_sizes += [f.size()]
+        self.assertEqual(got_sizes, expect_sizes, "got wrong file sizes %s, expected %s" % (got_sizes, expect_sizes))
+
+    def test_normalized_collection(self):
+        """Exercise manifest_text(normalize=True) over a range of manifests."""
+        # m1: three streams, same file name -> merged into one concatenated file.
+        m1 = """. 5348b82a029fd9e971a811ce1f71360b+43 0:43:md5sum.txt
+. 085c37f02916da1cad16f93c54d899b7+41 0:41:md5sum.txt
+. 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md5sum.txt
+"""
+        self.assertEqual(arvados.CollectionReader(m1, self.api_client).manifest_text(normalize=True),
+                         """. 5348b82a029fd9e971a811ce1f71360b+43 085c37f02916da1cad16f93c54d899b7+41 8b22da26f9f433dea0a10e5ec66d73ba+43 0:127:md5sum.txt
+""")
+
+        # m2: already normalized -> normalization is a no-op.
+        m2 = """. 204e43b8a1185621ca55a94839582e6f+67108864 b9677abbac956bd3e86b1deb28dfac03+67108864 fc15aff2a762b13f521baf042140acec+67108864 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:227212247:var-GS000016015-ASM.tsv.bz2
+"""
+        self.assertEqual(arvados.CollectionReader(m2, self.api_client).manifest_text(normalize=True), m2)
+
+        # m3: like m1, but the first segment starts at a nonzero offset.
+        m3 = """. 5348b82a029fd9e971a811ce1f71360b+43 3:40:md5sum.txt
+. 085c37f02916da1cad16f93c54d899b7+41 0:41:md5sum.txt
+. 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md5sum.txt
+"""
+        self.assertEqual(arvados.CollectionReader(m3, self.api_client).manifest_text(normalize=True),
+                         """. 5348b82a029fd9e971a811ce1f71360b+43 085c37f02916da1cad16f93c54d899b7+41 8b22da26f9f433dea0a10e5ec66d73ba+43 3:124:md5sum.txt
+""")
+
+        # m4: 'foo/bar' in the root stream and 'bar' in ./foo coalesce into one
+        # ./foo stream with two segments; streams come out sorted by name.
+        m4 = """. 204e43b8a1185621ca55a94839582e6f+67108864 0:3:foo/bar
+./zzz 204e43b8a1185621ca55a94839582e6f+67108864 0:999:zzz
+./foo 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar
+"""
+        self.assertEqual(arvados.CollectionReader(m4, self.api_client).manifest_text(normalize=True),
+                         """./foo 204e43b8a1185621ca55a94839582e6f+67108864 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar 67108864:3:bar
+./zzz 204e43b8a1185621ca55a94839582e6f+67108864 0:999:zzz
+""")
+
+        # m5: adjacent segments of the same block are merged into 0:6.
+        m5 = """. 204e43b8a1185621ca55a94839582e6f+67108864 0:3:foo/bar
+./zzz 204e43b8a1185621ca55a94839582e6f+67108864 0:999:zzz
+./foo 204e43b8a1185621ca55a94839582e6f+67108864 3:3:bar
+"""
+        self.assertEqual(arvados.CollectionReader(m5, self.api_client).manifest_text(normalize=True),
+                         """./foo 204e43b8a1185621ca55a94839582e6f+67108864 0:6:bar
+./zzz 204e43b8a1185621ca55a94839582e6f+67108864 0:999:zzz
+""")
+
+        # m6/m7: large real-world manifests from data files must round-trip
+        # unchanged (they are expected to already be in normalized form).
+        with self.data_file('1000G_ref_manifest') as f6:
+            m6 = f6.read()
+            self.assertEqual(arvados.CollectionReader(m6, self.api_client).manifest_text(normalize=True), m6)
+
+        with self.data_file('jlake_manifest') as f7:
+            m7 = f7.read()
+            self.assertEqual(arvados.CollectionReader(m7, self.api_client).manifest_text(normalize=True), m7)
+
+        # m8: \040-escaped spaces in stream and file names survive normalization.
+        m8 = """./a\\040b\\040c 59ca0efa9f5633cb0371bbc0355478d8+13 0:13:hello\\040world.txt
+"""
+        self.assertEqual(arvados.CollectionReader(m8, self.api_client).manifest_text(normalize=True), m8)
+
+    def test_locators_and_ranges(self):
+        """Exercise arvados.locators_and_ranges block/range arithmetic.
+
+        Input block lists are [locator, block_size, stream_offset] triples;
+        results are [locator, block_size, offset_in_block, length] covering
+        the requested (start, length) range of the stream.
+        """
+        # Six equal-sized 10-byte blocks.
+        blocks2 = [['a', 10, 0],
+                  ['b', 10, 10],
+                  ['c', 10, 20],
+                  ['d', 10, 30],
+                  ['e', 10, 40],
+                  ['f', 10, 50]]
+
+        # Ranges entirely inside one block.
+        self.assertEqual(arvados.locators_and_ranges(blocks2,  2,  2), [['a', 10, 2, 2]])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 12, 2), [['b', 10, 2, 2]])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 22, 2), [['c', 10, 2, 2]])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 32, 2), [['d', 10, 2, 2]])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 42, 2), [['e', 10, 2, 2]])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 52, 2), [['f', 10, 2, 2]])
+        # Past the end or before the start -> empty result.
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 62, 2), [])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, -2, 2), [])
+
+        # Ranges starting exactly on a block boundary.
+        self.assertEqual(arvados.locators_and_ranges(blocks2,  0,  2), [['a', 10, 0, 2]])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 10, 2), [['b', 10, 0, 2]])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 20, 2), [['c', 10, 0, 2]])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 30, 2), [['d', 10, 0, 2]])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 40, 2), [['e', 10, 0, 2]])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 50, 2), [['f', 10, 0, 2]])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 60, 2), [])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, -2, 2), [])
+
+        # Ranges straddling a block boundary split into two segments;
+        # the final one (59, 2) is truncated at end of stream.
+        self.assertEqual(arvados.locators_and_ranges(blocks2,  9,  2), [['a', 10, 9, 1], ['b', 10, 0, 1]])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 19, 2), [['b', 10, 9, 1], ['c', 10, 0, 1]])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 29, 2), [['c', 10, 9, 1], ['d', 10, 0, 1]])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 39, 2), [['d', 10, 9, 1], ['e', 10, 0, 1]])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 49, 2), [['e', 10, 9, 1], ['f', 10, 0, 1]])
+        self.assertEqual(arvados.locators_and_ranges(blocks2, 59, 2), [['f', 10, 9, 1]])
+
+
+        # Odd number of blocks (checks any bisection-style lookup on both sides).
+        blocks3 = [['a', 10, 0],
+                  ['b', 10, 10],
+                  ['c', 10, 20],
+                  ['d', 10, 30],
+                  ['e', 10, 40],
+                  ['f', 10, 50],
+                  ['g', 10, 60]]
+
+        self.assertEqual(arvados.locators_and_ranges(blocks3,  2,  2), [['a', 10, 2, 2]])
+        self.assertEqual(arvados.locators_and_ranges(blocks3, 12, 2), [['b', 10, 2, 2]])
+        self.assertEqual(arvados.locators_and_ranges(blocks3, 22, 2), [['c', 10, 2, 2]])
+        self.assertEqual(arvados.locators_and_ranges(blocks3, 32, 2), [['d', 10, 2, 2]])
+        self.assertEqual(arvados.locators_and_ranges(blocks3, 42, 2), [['e', 10, 2, 2]])
+        self.assertEqual(arvados.locators_and_ranges(blocks3, 52, 2), [['f', 10, 2, 2]])
+        self.assertEqual(arvados.locators_and_ranges(blocks3, 62, 2), [['g', 10, 2, 2]])
+
+
+        # Unequal block sizes: 10 + 15 + 5 = 30 bytes total.
+        blocks = [['a', 10, 0],
+                  ['b', 15, 10],
+                  ['c', 5, 25]]
+        # Zero-length request -> empty result.
+        self.assertEqual(arvados.locators_and_ranges(blocks, 1, 0), [])
+        self.assertEqual(arvados.locators_and_ranges(blocks, 0, 5), [['a', 10, 0, 5]])
+        self.assertEqual(arvados.locators_and_ranges(blocks, 3, 5), [['a', 10, 3, 5]])
+        self.assertEqual(arvados.locators_and_ranges(blocks, 0, 10), [['a', 10, 0, 10]])
+
+        self.assertEqual(arvados.locators_and_ranges(blocks, 0, 11), [['a', 10, 0, 10],
+                                                                      ['b', 15, 0, 1]])
+        self.assertEqual(arvados.locators_and_ranges(blocks, 1, 11), [['a', 10, 1, 9],
+                                                                      ['b', 15, 0, 2]])
+        self.assertEqual(arvados.locators_and_ranges(blocks, 0, 25), [['a', 10, 0, 10],
+                                                                      ['b', 15, 0, 15]])
+
+        # Requests covering (or overshooting) the whole stream.
+        self.assertEqual(arvados.locators_and_ranges(blocks, 0, 30), [['a', 10, 0, 10],
+                                                                      ['b', 15, 0, 15],
+                                                                      ['c', 5, 0, 5]])
+        self.assertEqual(arvados.locators_and_ranges(blocks, 1, 30), [['a', 10, 1, 9],
+                                                                      ['b', 15, 0, 15],
+                                                                      ['c', 5, 0, 5]])
+        self.assertEqual(arvados.locators_and_ranges(blocks, 0, 31), [['a', 10, 0, 10],
+                                                                      ['b', 15, 0, 15],
+                                                                      ['c', 5, 0, 5]])
+
+        self.assertEqual(arvados.locators_and_ranges(blocks, 15, 5), [['b', 15, 5, 5]])
+
+        self.assertEqual(arvados.locators_and_ranges(blocks, 8, 17), [['a', 10, 8, 2],
+                                                                      ['b', 15, 0, 15]])
+
+        self.assertEqual(arvados.locators_and_ranges(blocks, 8, 20), [['a', 10, 8, 2],
+                                                                      ['b', 15, 0, 15],
+                                                                      ['c', 5, 0, 3]])
+
+        self.assertEqual(arvados.locators_and_ranges(blocks, 26, 2), [['c', 5, 1, 2]])
+
+        self.assertEqual(arvados.locators_and_ranges(blocks, 9, 15), [['a', 10, 9, 1],
+                                                                      ['b', 15, 0, 14]])
+        self.assertEqual(arvados.locators_and_ranges(blocks, 10, 15), [['b', 15, 0, 15]])
+        self.assertEqual(arvados.locators_and_ranges(blocks, 11, 15), [['b', 15, 1, 14],
+                                                                       ['c', 5, 0, 1]])
+
+    class MockKeep(object):
+        """Minimal stand-in for a Keep client: serves blocks from a dict.
+
+        'content' maps locator strings to block data; get() is a plain
+        lookup and 'num_retries' is accepted but ignored.
+        """
+        def __init__(self, content, num_retries=0):
+            self.content = content
+
+        def get(self, locator, num_retries=0):
+            return self.content[locator]
+
+    def test_stream_reader(self):
+        """StreamReader.readfrom returns correct slices across block boundaries."""
+        # Three fake blocks of 10, 15 and 5 bytes served by MockKeep.
+        keepblocks = {'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+10': 'abcdefghij',
+                      'bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+15': 'klmnopqrstuvwxy',
+                      'cccccccccccccccccccccccccccccccc+5': 'z0123'}
+        mk = self.MockKeep(keepblocks)
+
+        # One 30-byte file spanning all three blocks.
+        sr = arvados.StreamReader([".", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+10", "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+15", "cccccccccccccccccccccccccccccccc+5", "0:30:foo"], mk)
+
+        content = 'abcdefghijklmnopqrstuvwxyz0123456789'
+
+        # Reads spanning multiple blocks.
+        self.assertEqual(sr.readfrom(0, 30), content[0:30])
+        self.assertEqual(sr.readfrom(2, 30), content[2:30])
+
+        self.assertEqual(sr.readfrom(2, 8), content[2:10])
+        self.assertEqual(sr.readfrom(0, 10), content[0:10])
+
+        # Reads contained within a single block.
+        self.assertEqual(sr.readfrom(0, 5), content[0:5])
+        self.assertEqual(sr.readfrom(5, 5), content[5:10])
+        self.assertEqual(sr.readfrom(10, 5), content[10:15])
+        self.assertEqual(sr.readfrom(15, 5), content[15:20])
+        self.assertEqual(sr.readfrom(20, 5), content[20:25])
+        self.assertEqual(sr.readfrom(25, 5), content[25:30])
+        # Reading past end of stream yields the empty string.
+        self.assertEqual(sr.readfrom(30, 5), '')
+
+    def test_extract_file(self):
+        """Individual files extracted via as_manifest() get minimal manifests."""
+        m1 = """. 5348b82a029fd9e971a811ce1f71360b+43 0:43:md5sum.txt
+. 085c37f02916da1cad16f93c54d899b7+41 0:41:md6sum.txt
+. 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md7sum.txt
+. 085c37f02916da1cad16f93c54d899b7+41 5348b82a029fd9e971a811ce1f71360b+43 8b22da26f9f433dea0a10e5ec66d73ba+43 47:80:md8sum.txt
+. 085c37f02916da1cad16f93c54d899b7+41 5348b82a029fd9e971a811ce1f71360b+43 8b22da26f9f433dea0a10e5ec66d73ba+43 40:80:md9sum.txt
+"""
+
+        m2 = arvados.CollectionReader(m1, self.api_client).manifest_text(normalize=True)
+
+        self.assertEqual(m2,
+                         ". 5348b82a029fd9e971a811ce1f71360b+43 085c37f02916da1cad16f93c54d899b7+41 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md5sum.txt 43:41:md6sum.txt 84:43:md7sum.txt 6:37:md8sum.txt 84:43:md8sum.txt 83:1:md9sum.txt 0:43:md9sum.txt 84:36:md9sum.txt\n")
+        files = arvados.CollectionReader(
+            m2, self.api_client).all_streams()[0].files()
+
+        # Each extracted manifest references only the blocks that file needs.
+        self.assertEqual(files['md5sum.txt'].as_manifest(),
+                         ". 5348b82a029fd9e971a811ce1f71360b+43 0:43:md5sum.txt\n")
+        self.assertEqual(files['md6sum.txt'].as_manifest(),
+                         ". 085c37f02916da1cad16f93c54d899b7+41 0:41:md6sum.txt\n")
+        self.assertEqual(files['md7sum.txt'].as_manifest(),
+                         ". 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md7sum.txt\n")
+        self.assertEqual(files['md9sum.txt'].as_manifest(),
+                         ". 085c37f02916da1cad16f93c54d899b7+41 5348b82a029fd9e971a811ce1f71360b+43 8b22da26f9f433dea0a10e5ec66d73ba+43 40:80:md9sum.txt\n")
+
+    def test_write_directory_tree(self):
+        """write_directory_tree maps subdirectories to separate streams."""
+        cwriter = arvados.CollectionWriter(self.api_client)
+        cwriter.write_directory_tree(self.build_directory_tree(
+                ['basefile', 'subdir/subfile']))
+        self.assertEqual(cwriter.manifest_text(),
+                         """. c5110c5ac93202d8e0f9e381f22bac0f+8 0:8:basefile
+./subdir 1ca4dec89403084bf282ad31e6cf7972+14 0:14:subfile\n""")
+
+    def test_write_named_directory_tree(self):
+        """A stream-name argument prefixes every stream in the manifest."""
+        cwriter = arvados.CollectionWriter(self.api_client)
+        cwriter.write_directory_tree(self.build_directory_tree(
+                ['basefile', 'subdir/subfile']), 'root')
+        self.assertEqual(
+            cwriter.manifest_text(),
+            """./root c5110c5ac93202d8e0f9e381f22bac0f+8 0:8:basefile
+./root/subdir 1ca4dec89403084bf282ad31e6cf7972+14 0:14:subfile\n""")
+
+    def test_write_directory_tree_in_one_stream(self):
+        """max_manifest_depth=0 flattens the tree into a single '.' stream."""
+        cwriter = arvados.CollectionWriter(self.api_client)
+        cwriter.write_directory_tree(self.build_directory_tree(
+                ['basefile', 'subdir/subfile']), max_manifest_depth=0)
+        self.assertEqual(cwriter.manifest_text(),
+                         """. 4ace875ffdc6824a04950f06858f4465+22 0:8:basefile 8:14:subdir/subfile\n""")
+
+    def test_write_directory_tree_with_limited_recursion(self):
+        """max_manifest_depth=1 keeps one stream level; deeper dirs flatten
+        into path-named files within their parent's stream."""
+        cwriter = arvados.CollectionWriter(self.api_client)
+        cwriter.write_directory_tree(
+            self.build_directory_tree(['f1', 'd1/f2', 'd1/d2/f3']),
+            max_manifest_depth=1)
+        self.assertEqual(cwriter.manifest_text(),
+                         """. bd19836ddb62c11c55ab251ccaca5645+2 0:2:f1
+./d1 50170217e5b04312024aa5cd42934494+13 0:8:d2/f3 8:5:f2\n""")
+
+    def test_write_directory_tree_with_zero_recursion(self):
+        """With depth 0, all file data lands in one block; verify its hash
+        by computing it from the expected concatenated content."""
+        cwriter = arvados.CollectionWriter(self.api_client)
+        # Files are concatenated in sorted path order; each file's content is
+        # its own path (as produced by build_directory_tree's fixtures).
+        content = 'd1/d2/f3d1/f2f1'
+        blockhash = hashlib.md5(content).hexdigest() + '+' + str(len(content))
+        cwriter.write_directory_tree(
+            self.build_directory_tree(['f1', 'd1/f2', 'd1/d2/f3']),
+            max_manifest_depth=0)
+        self.assertEqual(
+            cwriter.manifest_text(),
+            ". {} 0:8:d1/d2/f3 8:5:d1/f2 13:2:f1\n".format(blockhash))
+
+    def test_write_one_file(self):
+        """write_file with no name uses the file's basename in the manifest."""
+        cwriter = arvados.CollectionWriter(self.api_client)
+        with self.make_test_file() as testfile:
+            cwriter.write_file(testfile.name)
+            self.assertEqual(
+                cwriter.manifest_text(),
+                ". 098f6bcd4621d373cade4e832627b4f6+4 0:4:{}\n".format(
+                    os.path.basename(testfile.name)))
+
+    def test_write_named_file(self):
+        """An explicit second argument overrides the manifest file name."""
+        cwriter = arvados.CollectionWriter(self.api_client)
+        with self.make_test_file() as testfile:
+            cwriter.write_file(testfile.name, 'foo')
+            self.assertEqual(cwriter.manifest_text(),
+                             ". 098f6bcd4621d373cade4e832627b4f6+4 0:4:foo\n")
+
+    def test_write_multiple_files(self):
+        """Several small files share one block with consecutive segments."""
+        cwriter = arvados.CollectionWriter(self.api_client)
+        for letter in 'ABC':
+            with self.make_test_file(letter) as testfile:
+                cwriter.write_file(testfile.name, letter)
+        self.assertEqual(
+            cwriter.manifest_text(),
+            ". 902fbdd2b1df0c4f70b4a5d23525e932+3 0:1:A 1:1:B 2:1:C\n")
+
+    def test_basic_resume(self):
+        """A writer restored from current_state() produces the same manifest."""
+        cwriter = TestResumableWriter()
+        with self.make_test_file() as testfile:
+            cwriter.write_file(testfile.name, 'test')
+            # Resume while the dependency file still exists and is unchanged.
+            resumed = TestResumableWriter.from_state(cwriter.current_state())
+        self.assertEquals(cwriter.manifest_text(), resumed.manifest_text(),
+                          "resumed CollectionWriter had different manifest")
+
+    def test_resume_fails_when_missing_dependency(self):
+        """Resuming after a source file was deleted raises StaleWriterStateError."""
+        cwriter = TestResumableWriter()
+        with self.make_test_file() as testfile:
+            cwriter.write_file(testfile.name, 'test')
+        # The 'with' block has closed (and removed) the temp file by now.
+        self.assertRaises(arvados.errors.StaleWriterStateError,
+                          TestResumableWriter.from_state,
+                          cwriter.current_state())
+
+    def test_resume_fails_when_dependency_mtime_changed(self):
+        """A changed mtime on a source file invalidates saved writer state."""
+        cwriter = TestResumableWriter()
+        with self.make_test_file() as testfile:
+            cwriter.write_file(testfile.name, 'test')
+            # Rewind the file's timestamps so they no longer match the state.
+            os.utime(testfile.name, (0, 0))
+            self.assertRaises(arvados.errors.StaleWriterStateError,
+                              TestResumableWriter.from_state,
+                              cwriter.current_state())
+
+    def test_resume_fails_when_dependency_is_nonfile(self):
+        """A non-regular-file dependency (/dev/null) cannot be resumed from."""
+        cwriter = TestResumableWriter()
+        cwriter.write_file('/dev/null', 'empty')
+        self.assertRaises(arvados.errors.StaleWriterStateError,
+                          TestResumableWriter.from_state,
+                          cwriter.current_state())
+
+    def test_resume_fails_when_dependency_size_changed(self):
+        """A size change is detected even when the mtime is restored."""
+        cwriter = TestResumableWriter()
+        with self.make_test_file() as testfile:
+            cwriter.write_file(testfile.name, 'test')
+            orig_mtime = os.fstat(testfile.fileno()).st_mtime
+            # Grow the file, then put the original mtime back so only the
+            # size differs from the recorded dependency state.
+            testfile.write('extra')
+            testfile.flush()
+            os.utime(testfile.name, (orig_mtime, orig_mtime))
+            self.assertRaises(arvados.errors.StaleWriterStateError,
+                              TestResumableWriter.from_state,
+                              cwriter.current_state())
+
+    def test_resume_fails_with_expired_locator(self):
+        """Saved state containing an expired signed locator cannot be resumed."""
+        cwriter = TestResumableWriter()
+        state = cwriter.current_state()
+        # Add an expired locator to the state.
+        # (hash+size+A<signature>@<timestamp>; '10000000' is far in the past.)
+        state['_current_stream_locators'].append(''.join([
+                    'a' * 32, '+1+A', 'b' * 40, '@', '10000000']))
+        self.assertRaises(arvados.errors.StaleWriterStateError,
+                          TestResumableWriter.from_state, state)
+
+    def test_arbitrary_objects_not_resumable(self):
+        """write_file rejects an open file object; only paths are resumable."""
+        cwriter = TestResumableWriter()
+        with open('/dev/null') as badfile:
+            self.assertRaises(arvados.errors.AssertionError,
+                              cwriter.write_file, badfile)
+
+    def test_arbitrary_writes_not_resumable(self):
+        """Raw write() calls are rejected by a resumable writer."""
+        cwriter = TestResumableWriter()
+        self.assertRaises(arvados.errors.AssertionError,
+                          cwriter.write, "badtext")
+
+    def test_read_arbitrary_data_with_collection_reader(self):
+        # arv-get relies on this to do "arv-get {keep-locator} -".
+        # A bare block locator (not a manifest) is read straight from Keep.
+        self.write_foo_bar_baz()
+        self.assertEqual(
+            'foobar',
+            arvados.CollectionReader(
+                '3858f62230ac3c915f300c664312c63f+6'
+                ).manifest_text())
+
+
+class CollectionTestMixin(object):
+    """Shared fixtures and API-mocking helpers for collection test cases."""
+    # Canned keep_services response advertising a single keep proxy.
+    PROXY_RESPONSE = {
+        'items_available': 1,
+        'items': [{
+                'uuid': 'zzzzz-bi6l4-mockproxy012345',
+                'owner_uuid': 'zzzzz-tpzed-mockowner012345',
+                'service_host': tutil.TEST_HOST,
+                'service_port': 65535,
+                'service_ssl_flag': True,
+                'service_type': 'proxy',
+                }]}
+    # Collection fixtures loaded from the test server's fixture files.
+    API_COLLECTIONS = run_test_server.fixture('collections')
+    DEFAULT_COLLECTION = API_COLLECTIONS['foo_file']
+    DEFAULT_DATA_HASH = DEFAULT_COLLECTION['portable_data_hash']
+    DEFAULT_MANIFEST = DEFAULT_COLLECTION['manifest_text']
+    DEFAULT_UUID = DEFAULT_COLLECTION['uuid']
+    ALT_COLLECTION = API_COLLECTIONS['bar_file']
+    ALT_DATA_HASH = ALT_COLLECTION['portable_data_hash']
+    ALT_MANIFEST = ALT_COLLECTION['manifest_text']
+
+    def _mock_api_call(self, mock_method, code, body):
+        """Make mock_method().execute() return body (code 200) or raise
+        an ApiError carrying the given HTTP status code."""
+        mock_method = mock_method().execute
+        if code == 200:
+            mock_method.return_value = body
+        else:
+            mock_method.side_effect = arvados.errors.ApiError(
+                tutil.fake_httplib2_response(code), "{}")
+
+    def mock_keep_services(self, api_mock, code, body):
+        """Stub the keep_services().accessible() API endpoint."""
+        self._mock_api_call(api_mock.keep_services().accessible, code, body)
+
+    def api_client_mock(self, code=200):
+        """Return a MagicMock API client with keep services pre-stubbed."""
+        client = mock.MagicMock(name='api_client')
+        self.mock_keep_services(client, code, self.PROXY_RESPONSE)
+        return client
+
+
+@tutil.skip_sleep
+class CollectionReaderTestCase(unittest.TestCase, CollectionTestMixin):
+    """CollectionReader behavior against a fully mocked API client and Keep."""
+    def mock_get_collection(self, api_mock, code, body):
+        """Stub collections().get(); 'body' names a fixture to return."""
+        body = self.API_COLLECTIONS.get(body)
+        self._mock_api_call(api_mock.collections().get, code, body)
+
+    def api_client_mock(self, code=200):
+        """Mixin's mock, additionally serving the 'foo_file' collection."""
+        client = super(CollectionReaderTestCase, self).api_client_mock(code)
+        self.mock_get_collection(client, code, 'foo_file')
+        return client
+
+    def test_init_no_default_retries(self):
+        # Without num_retries, API calls are made with num_retries=0.
+        client = self.api_client_mock(200)
+        reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client)
+        reader.manifest_text()
+        client.collections().get().execute.assert_called_with(num_retries=0)
+
+    def test_uuid_init_success(self):
+        # num_retries given at construction propagates to API calls.
+        client = self.api_client_mock(200)
+        reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client,
+                                          num_retries=3)
+        self.assertEqual(self.DEFAULT_COLLECTION['manifest_text'],
+                         reader.manifest_text())
+        client.collections().get().execute.assert_called_with(num_retries=3)
+
+    def test_uuid_init_failure_raises_api_error(self):
+        client = self.api_client_mock(500)
+        reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client)
+        with self.assertRaises(arvados.errors.ApiError):
+            reader.manifest_text()
+
+    def test_locator_init(self):
+        # A portable data hash resolves via the API without touching Keep.
+        client = self.api_client_mock(200)
+        # Ensure Keep will not return anything if asked.
+        with tutil.mock_get_responses(None, 404):
+            reader = arvados.CollectionReader(self.DEFAULT_DATA_HASH,
+                                              api_client=client)
+            self.assertEqual(self.DEFAULT_MANIFEST, reader.manifest_text())
+
+    def test_locator_init_fallback_to_keep(self):
+        # crunch-job needs this to read manifests that have only ever
+        # been written to Keep.
+        client = self.api_client_mock(200)
+        self.mock_get_collection(client, 404, None)
+        with tutil.mock_get_responses(self.DEFAULT_MANIFEST, 200):
+            reader = arvados.CollectionReader(self.DEFAULT_DATA_HASH,
+                                              api_client=client)
+            self.assertEqual(self.DEFAULT_MANIFEST, reader.manifest_text())
+
+    def test_uuid_init_no_fallback_to_keep(self):
+        # Do not look up a collection UUID in Keep.
+        client = self.api_client_mock(404)
+        reader = arvados.CollectionReader(self.DEFAULT_UUID,
+                                          api_client=client)
+        with tutil.mock_get_responses(self.DEFAULT_MANIFEST, 200):
+            with self.assertRaises(arvados.errors.ApiError):
+                reader.manifest_text()
+
+    def test_try_keep_first_if_permission_hint(self):
+        # To verify that CollectionReader tries Keep first here, we
+        # mock API server to return the wrong data.
+        client = self.api_client_mock(200)
+        with tutil.mock_get_responses(self.ALT_MANIFEST, 200):
+            self.assertEqual(
+                self.ALT_MANIFEST,
+                arvados.CollectionReader(
+                    self.ALT_DATA_HASH + '+Affffffffffffffffffffffffffffffffffffffff@fedcba98',
+                    api_client=client).manifest_text())
+
+    def test_init_num_retries_propagated(self):
+        # More of an integration test...
+        # Two 500s then a 200: with num_retries=3 the read should succeed.
+        client = self.api_client_mock(200)
+        reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client,
+                                          num_retries=3)
+        with tutil.mock_get_responses('foo', 500, 500, 200):
+            self.assertEqual('foo',
+                             ''.join(f.read(9) for f in reader.all_files()))
+
+    def test_read_nonnormalized_manifest_with_collection_reader(self):
+        # client should be able to use CollectionReader on a manifest without normalizing it
+        client = self.api_client_mock(500)
+        nonnormal = ". acbd18db4cc2f85cedef654fccc4a4d8+3+Aabadbadbee@abeebdee 0:3:foo.txt 1:0:bar.txt 0:3:foo.txt\n"
+        reader = arvados.CollectionReader(
+            nonnormal,
+            api_client=client, num_retries=0)
+        # Ensure stripped_manifest() doesn't mangle our manifest in
+        # any way other than stripping hints.
+        self.assertEqual(
+            re.sub('\+[^\d\s\+]+', '', nonnormal),
+            reader.stripped_manifest())
+        # Ensure stripped_manifest() didn't mutate our reader.
+        self.assertEqual(nonnormal, reader.manifest_text())
+        # Ensure the files appear in the order given in the manifest.
+        self.assertEqual(
+            [[6, '.', 'foo.txt'],
+             [0, '.', 'bar.txt']],
+            [[f.size(), f.stream_name(), f.name()]
+             for f in reader.all_streams()[0].all_files()])
+
+    def test_read_empty_collection(self):
+        client = self.api_client_mock(200)
+        self.mock_get_collection(client, 200, 'empty')
+        reader = arvados.CollectionReader('d41d8cd98f00b204e9800998ecf8427e+0',
+                                          api_client=client)
+        self.assertEqual('', reader.manifest_text())
+
+    def test_api_response(self):
+        client = self.api_client_mock()
+        reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client)
+        self.assertEqual(self.DEFAULT_COLLECTION, reader.api_response())
+
+    def test_api_response_with_collection_from_keep(self):
+        # When the manifest came from Keep, there is no API record to report.
+        client = self.api_client_mock()
+        self.mock_get_collection(client, 404, 'foo')
+        with tutil.mock_get_responses(self.DEFAULT_MANIFEST, 200):
+            reader = arvados.CollectionReader(self.DEFAULT_DATA_HASH,
+                                              api_client=client)
+            api_response = reader.api_response()
+        self.assertIsNone(api_response)
+
+    def check_open_file(self, coll_file, stream_name, file_name, file_size):
+        """Assert the object returned by open() is live and well-identified."""
+        self.assertFalse(coll_file.closed, "returned file is not open")
+        self.assertEqual(stream_name, coll_file.stream_name())
+        self.assertEqual(file_name, coll_file.name())
+        self.assertEqual(file_size, coll_file.size())
+
+    def test_open_collection_file_one_argument(self):
+        client = self.api_client_mock(200)
+        reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client)
+        cfile = reader.open('./foo')
+        self.check_open_file(cfile, '.', 'foo', 3)
+
+    def test_open_collection_file_two_arguments(self):
+        client = self.api_client_mock(200)
+        reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client)
+        cfile = reader.open('.', 'foo')
+        self.check_open_file(cfile, '.', 'foo', 3)
+
+    def test_open_deep_file(self):
+        coll_name = 'collection_with_files_in_subdir'
+        client = self.api_client_mock(200)
+        self.mock_get_collection(client, 200, coll_name)
+        reader = arvados.CollectionReader(
+            self.API_COLLECTIONS[coll_name]['uuid'], api_client=client)
+        cfile = reader.open('./subdir2/subdir3/file2_in_subdir3.txt')
+        self.check_open_file(cfile, './subdir2/subdir3', 'file2_in_subdir3.txt',
+                             32)
+
+    def test_open_nonexistent_stream(self):
+        client = self.api_client_mock(200)
+        reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client)
+        self.assertRaises(ValueError, reader.open, './nonexistent', 'foo')
+
+    def test_open_nonexistent_file(self):
+        client = self.api_client_mock(200)
+        reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client)
+        self.assertRaises(ValueError, reader.open, '.', 'nonexistent')
+
+
+@tutil.skip_sleep
+class CollectionWriterTestCase(unittest.TestCase, CollectionTestMixin):
+    """CollectionWriter behavior against mocked Keep PUT responses."""
+    def mock_keep(self, body, *codes, **headers):
+        """Context manager stubbing Keep PUTs; reports 2 stored replicas."""
+        headers.setdefault('x-keep-replicas-stored', 2)
+        return tutil.mock_put_responses(body, *codes, **headers)
+
+    def foo_writer(self, **kwargs):
+        """Return a writer with one file 'foo' containing 'foo' buffered."""
+        api_client = self.api_client_mock()
+        writer = arvados.CollectionWriter(api_client, **kwargs)
+        writer.start_new_file('foo')
+        writer.write('foo')
+        return writer
+
+    def test_write_whole_collection(self):
+        writer = self.foo_writer()
+        with self.mock_keep(self.DEFAULT_DATA_HASH, 200, 200):
+            self.assertEqual(self.DEFAULT_DATA_HASH, writer.finish())
+
+    def test_write_no_default(self):
+        # With no retries, a single Keep failure surfaces as KeepWriteError.
+        writer = self.foo_writer()
+        with self.mock_keep(None, 500):
+            with self.assertRaises(arvados.errors.KeepWriteError):
+                writer.finish()
+
+    def test_write_whole_collection_through_retries(self):
+        # Each of the two PUTs fails twice before succeeding; num_retries=2
+        # is just enough to absorb that.
+        writer = self.foo_writer(num_retries=2)
+        with self.mock_keep(self.DEFAULT_DATA_HASH,
+                            500, 500, 200, 500, 500, 200):
+            self.assertEqual(self.DEFAULT_DATA_HASH, writer.finish())
+
+    def test_flush_data_retries(self):
+        writer = self.foo_writer(num_retries=2)
+        foo_hash = self.DEFAULT_MANIFEST.split()[1]
+        with self.mock_keep(foo_hash, 500, 200):
+            writer.flush_data()
+        self.assertEqual(self.DEFAULT_MANIFEST, writer.manifest_text())
+
+    def test_one_open(self):
+        # open() yields a file-like object; closing it finalizes the file.
+        client = self.api_client_mock()
+        writer = arvados.CollectionWriter(client)
+        with writer.open('out') as out_file:
+            self.assertEqual('.', writer.current_stream_name())
+            self.assertEqual('out', writer.current_file_name())
+            out_file.write('test data')
+            data_loc = hashlib.md5('test data').hexdigest() + '+9'
+        self.assertTrue(out_file.closed, "writer file not closed after context")
+        self.assertRaises(ValueError, out_file.write, 'extra text')
+        with self.mock_keep(data_loc, 200) as keep_mock:
+            self.assertEqual(". {} 0:9:out\n".format(data_loc),
+                             writer.manifest_text())
+
+    def test_open_writelines(self):
+        client = self.api_client_mock()
+        writer = arvados.CollectionWriter(client)
+        with writer.open('six') as out_file:
+            out_file.writelines(['12', '34', '56'])
+            data_loc = hashlib.md5('123456').hexdigest() + '+6'
+        with self.mock_keep(data_loc, 200) as keep_mock:
+            self.assertEqual(". {} 0:6:six\n".format(data_loc),
+                             writer.manifest_text())
+
+    def test_open_flush(self):
+        # flush() mid-file commits the buffered data as its own block, so the
+        # finished file spans two blocks.
+        client = self.api_client_mock()
+        writer = arvados.CollectionWriter(client)
+        with writer.open('flush_test') as out_file:
+            out_file.write('flush1')
+            data_loc1 = hashlib.md5('flush1').hexdigest() + '+6'
+            with self.mock_keep(data_loc1, 200) as keep_mock:
+                out_file.flush()
+            out_file.write('flush2')
+            data_loc2 = hashlib.md5('flush2').hexdigest() + '+6'
+        with self.mock_keep(data_loc2, 200) as keep_mock:
+            self.assertEqual(". {} {} 0:12:flush_test\n".format(data_loc1,
+                                                                data_loc2),
+                             writer.manifest_text())
+
+    def test_two_opens_same_stream(self):
+        # Two files in the same stream share one block.
+        client = self.api_client_mock()
+        writer = arvados.CollectionWriter(client)
+        with writer.open('.', '1') as out_file:
+            out_file.write('1st')
+        with writer.open('.', '2') as out_file:
+            out_file.write('2nd')
+        data_loc = hashlib.md5('1st2nd').hexdigest() + '+6'
+        with self.mock_keep(data_loc, 200) as keep_mock:
+            self.assertEqual(". {} 0:3:1 3:3:2\n".format(data_loc),
+                             writer.manifest_text())
+
+    def test_two_opens_two_streams(self):
+        # Files in different streams get separate blocks and manifest lines.
+        client = self.api_client_mock()
+        writer = arvados.CollectionWriter(client)
+        with writer.open('file') as out_file:
+            out_file.write('file')
+            data_loc1 = hashlib.md5('file').hexdigest() + '+4'
+        with self.mock_keep(data_loc1, 200) as keep_mock:
+            with writer.open('./dir', 'indir') as out_file:
+                out_file.write('indir')
+                data_loc2 = hashlib.md5('indir').hexdigest() + '+5'
+        with self.mock_keep(data_loc2, 200) as keep_mock:
+            expected = ". {} 0:4:file\n./dir {} 0:5:indir\n".format(
+                data_loc1, data_loc2)
+            self.assertEqual(expected, writer.manifest_text())
+
+    def test_dup_open_fails(self):
+        # Opening a second file before closing the first is an error.
+        client = self.api_client_mock()
+        writer = arvados.CollectionWriter(client)
+        file1 = writer.open('one')
+        self.assertRaises(arvados.errors.AssertionError, writer.open, 'two')
+
+
+# Allow running this test module directly.
+if __name__ == '__main__':
+    unittest.main()
diff --git a/sdk/python/tests/test_keep_client.py b/sdk/python/tests/test_keep_client.py
new file mode 100644 (file)
index 0000000..982e4b4
--- /dev/null
@@ -0,0 +1,523 @@
+import hashlib
+import mock
+import os
+import re
+import socket
+import unittest
+import urlparse
+
+import arvados
+import arvados.retry
+import arvados_testutil as tutil
+import run_test_server
+
class KeepTestCase(run_test_server.TestCaseWithServers):
    """Round-trip put/get tests against a running Keep server."""
    MAIN_SERVER = {}
    KEEP_SERVER = {}

    @classmethod
    def setUpClass(cls):
        super(KeepTestCase, cls).setUpClass()
        # Every test in this class shares one admin-authorized client.
        run_test_server.authorize_with("admin")
        cls.api_client = arvados.api('v1')
        cls.keep_client = arvados.KeepClient(api_client=cls.api_client,
                                             proxy='', local_store='')

    def test_KeepBasicRWTest(self):
        locator = self.keep_client.put('foo')
        self.assertRegexpMatches(
            locator,
            '^acbd18db4cc2f85cedef654fccc4a4d8\+3',
            'wrong md5 hash from Keep.put("foo"): ' + locator)
        self.assertEqual(self.keep_client.get(locator), 'foo',
                         'wrong content from Keep.get(md5("foo"))')

    def test_KeepBinaryRWTest(self):
        payload = '\xff\xfe\xf7\x00\x01\x02'
        locator = self.keep_client.put(payload)
        self.assertRegexpMatches(
            locator,
            '^7fc7c53b45e53926ba52821140fef396\+6',
            ('wrong locator from Keep.put(<binarydata>):' + locator))
        self.assertEqual(self.keep_client.get(locator), payload,
                         'wrong content from Keep.get(md5(<binarydata>))')

    def test_KeepLongBinaryRWTest(self):
        # 8 bytes repeated 2**23 times = one 64 MiB block (same data as
        # doubling the string 23 times).
        payload = '\xff\xfe\xfd\xfc\x00\x01\x02\x03' * (2 ** 23)
        locator = self.keep_client.put(payload)
        self.assertRegexpMatches(
            locator,
            '^84d90fc0d8175dd5dcfab04b999bc956\+67108864',
            ('wrong locator from Keep.put(<binarydata>): ' + locator))
        self.assertEqual(self.keep_client.get(locator), payload,
                         'wrong content from Keep.get(md5(<binarydata>))')

    def test_KeepSingleCopyRWTest(self):
        payload = '\xff\xfe\xfd\xfc\x00\x01\x02\x03'
        locator = self.keep_client.put(payload, copies=1)
        self.assertRegexpMatches(
            locator,
            '^c902006bc98a3eb4a3663b65ab4a6fab\+8',
            ('wrong locator from Keep.put(<binarydata>): ' + locator))
        self.assertEqual(self.keep_client.get(locator), payload,
                         'wrong content from Keep.get(md5(<binarydata>))')

    def test_KeepEmptyCollectionTest(self):
        # The zero-length block is a legal object with the well-known
        # md5-of-empty-string locator.
        locator = self.keep_client.put('', copies=1)
        self.assertRegexpMatches(
            locator,
            '^d41d8cd98f00b204e9800998ecf8427e\+0',
            ('wrong locator from Keep.put(""): ' + locator))
+
+
class KeepPermissionTestCase(run_test_server.TestCaseWithServers):
    """Exercise signed-locator enforcement on a Keep server."""
    MAIN_SERVER = {}
    KEEP_SERVER = {'blob_signing_key': 'abcdefghijk0123456789',
                   'enforce_permissions': True}

    def test_KeepBasicRWTest(self):
        run_test_server.authorize_with('active')
        keep_client = arvados.KeepClient()
        # PUT returns a signed locator, and the signer can read it back.
        signed_foo = keep_client.put('foo')
        self.assertRegexpMatches(
            signed_foo,
            r'^acbd18db4cc2f85cedef654fccc4a4d8\+3\+A[a-f0-9]+@[a-f0-9]+$',
            'invalid locator from Keep.put("foo"): ' + signed_foo)
        self.assertEqual(keep_client.get(signed_foo), 'foo',
                         'wrong content from Keep.get(md5("foo"))')

        # GET with an unsigned locator => NotFound
        signed_bar = keep_client.put('bar')
        unsigned_bar = "37b51d194a7513e45b56f6524f2d51f2+3"
        self.assertRegexpMatches(
            signed_bar,
            r'^37b51d194a7513e45b56f6524f2d51f2\+3\+A[a-f0-9]+@[a-f0-9]+$',
            'invalid locator from Keep.put("bar"): ' + signed_bar)
        self.assertRaises(arvados.errors.NotFoundError,
                          keep_client.get, unsigned_bar)

        # GET from a different user => NotFound
        run_test_server.authorize_with('spectator')
        self.assertRaises(arvados.errors.NotFoundError,
                          arvados.Keep.get, signed_bar)

        # Unauthenticated GET => NotFound, whether or not the locator
        # is signed.
        keep_client.api_token = ''
        for locator in [signed_bar, unsigned_bar]:
            self.assertRaises(arvados.errors.NotFoundError,
                              keep_client.get, locator)
+
+
class KeepOptionalPermission(run_test_server.TestCaseWithServers):
    """Keep with signing enabled but enforcement disabled.

    This starts Keep with a permission key (so signatures are generated
    on PUT) but without --enforce-permissions (so they are not required
    on GET).  All four combinations must therefore succeed:

    * authenticated request, signed locator
    * authenticated request, unsigned locator
    * unauthenticated request, signed locator
    * unauthenticated request, unsigned locator
    """
    MAIN_SERVER = {}
    KEEP_SERVER = {'blob_signing_key': 'abcdefghijk0123456789',
                   'enforce_permissions': False}

    @classmethod
    def setUpClass(cls):
        super(KeepOptionalPermission, cls).setUpClass()
        run_test_server.authorize_with("admin")
        cls.api_client = arvados.api('v1')

    def setUp(self):
        super(KeepOptionalPermission, self).setUp()
        self.keep_client = arvados.KeepClient(api_client=self.api_client,
                                              proxy='', local_store='')

    def _put_foo_and_check(self):
        # A signing key is configured, so PUT always signs the locator.
        signed = self.keep_client.put('foo')
        self.assertRegexpMatches(
            signed,
            r'^acbd18db4cc2f85cedef654fccc4a4d8\+3\+A[a-f0-9]+@[a-f0-9]+$',
            'invalid locator from Keep.put("foo"): ' + signed)
        return signed

    def _assert_gets_foo(self, locator):
        # Shared read-back check for every auth/signature combination.
        self.assertEqual(self.keep_client.get(locator), 'foo',
                         'wrong content from Keep.get(md5("foo"))')

    def test_KeepAuthenticatedSignedTest(self):
        self._assert_gets_foo(self._put_foo_and_check())

    def test_KeepAuthenticatedUnsignedTest(self):
        self._put_foo_and_check()
        self._assert_gets_foo("acbd18db4cc2f85cedef654fccc4a4d8")

    def test_KeepUnauthenticatedSignedTest(self):
        # Signed GETs work even when enforcement is off.
        signed = self._put_foo_and_check()
        self.keep_client.api_token = ''
        self._assert_gets_foo(signed)

    def test_KeepUnauthenticatedUnsignedTest(self):
        # GETs need not be authenticated when enforcement is off.
        self._put_foo_and_check()
        self.keep_client.api_token = ''
        self._assert_gets_foo("acbd18db4cc2f85cedef654fccc4a4d8")
+
+
class KeepProxyTestCase(run_test_server.TestCaseWithServers):
    """Reads and writes routed through a keepproxy server."""
    MAIN_SERVER = {}
    KEEP_SERVER = {}
    KEEP_PROXY_SERVER = {'auth': 'admin'}

    @classmethod
    def setUpClass(cls):
        super(KeepProxyTestCase, cls).setUpClass()
        cls.api_client = arvados.api('v1')

    def tearDown(self):
        # Don't leak the external-client flag into later test cases.
        arvados.config.settings().pop('ARVADOS_EXTERNAL_CLIENT', None)
        super(KeepProxyTestCase, self).tearDown()

    def test_KeepProxyTest1(self):
        # ARVADOS_KEEP_PROXY is set by setUpClass, so this client talks
        # to the proxy directly.
        proxy_client = arvados.KeepClient(api_client=self.api_client,
                                          local_store='')
        locator = proxy_client.put('baz')
        self.assertRegexpMatches(
            locator,
            '^73feffa4b7f6bb68e44cf984c85f6e88\+3',
            'wrong md5 hash from Keep.put("baz"): ' + locator)
        self.assertEqual(proxy_client.get(locator), 'baz',
                         'wrong content from Keep.get(md5("baz"))')
        self.assertTrue(proxy_client.using_proxy)

    def test_KeepProxyTest2(self):
        # Don't configure the proxy explicitly; setting the
        # X-External-Client header should make the API server direct us
        # to the proxy.
        arvados.config.settings()['ARVADOS_EXTERNAL_CLIENT'] = 'true'
        proxy_client = arvados.KeepClient(api_client=self.api_client,
                                          proxy='', local_store='')
        locator = proxy_client.put('baz2')
        self.assertRegexpMatches(
            locator,
            '^91f372a266fe2bf2823cb8ec7fda31ce\+4',
            'wrong md5 hash from Keep.put("baz2"): ' + locator)
        self.assertEqual(proxy_client.get(locator), 'baz2',
                         'wrong content from Keep.get(md5("baz2"))')
        self.assertTrue(proxy_client.using_proxy)
+
+
+class KeepClientServiceTestCase(unittest.TestCase):
+    def mock_keep_services(self, *services):
+        api_client = mock.MagicMock(name='api_client')
+        api_client.keep_services().accessible().execute.return_value = {
+            'items_available': len(services),
+            'items': [{
+                    'uuid': 'zzzzz-bi6l4-{:015x}'.format(index),
+                    'owner_uuid': 'zzzzz-tpzed-000000000000000',
+                    'service_host': host,
+                    'service_port': port,
+                    'service_ssl_flag': ssl,
+                    'service_type': servtype,
+                    } for index, (host, port, ssl, servtype)
+                      in enumerate(services)],
+            }
+        return api_client
+
+    def mock_n_keep_disks(self, service_count):
+        return self.mock_keep_services(
+            *[("keep0x{:x}".format(index), 80, False, 'disk')
+              for index in range(service_count)])
+
+    def get_service_roots(self, *services):
+        api_client = self.mock_keep_services(*services)
+        keep_client = arvados.KeepClient(api_client=api_client)
+        services = keep_client.weighted_service_roots('000000')
+        return [urlparse.urlparse(url) for url in sorted(services)]
+
+    def test_ssl_flag_respected_in_roots(self):
+        services = self.get_service_roots(('keep', 10, False, 'disk'),
+                                          ('keep', 20, True, 'disk'))
+        self.assertEqual(10, services[0].port)
+        self.assertEqual('http', services[0].scheme)
+        self.assertEqual(20, services[1].port)
+        self.assertEqual('https', services[1].scheme)
+
+    def test_correct_ports_with_ipv6_addresses(self):
+        service = self.get_service_roots(('100::1', 10, True, 'proxy'))[0]
+        self.assertEqual('100::1', service.hostname)
+        self.assertEqual(10, service.port)
+
+    # test_get_timeout and test_put_timeout test that
+    # KeepClient.get and KeepClient.put use the appropriate timeouts
+    # when connected directly to a Keep server (i.e. non-proxy timeout)
+
+    def test_get_timeout(self):
+        api_client = self.mock_keep_services(('keep', 10, False, 'disk'))
+        keep_client = arvados.KeepClient(api_client=api_client)
+        force_timeout = [socket.timeout("timed out")]
+        with mock.patch('requests.get', side_effect=force_timeout) as mock_request:
+            with self.assertRaises(arvados.errors.KeepReadError):
+                keep_client.get('ffffffffffffffffffffffffffffffff')
+            self.assertTrue(mock_request.called)
+            self.assertEqual(
+                arvados.KeepClient.DEFAULT_TIMEOUT,
+                mock_request.call_args[1]['timeout'])
+
+    def test_put_timeout(self):
+        api_client = self.mock_keep_services(('keep', 10, False, 'disk'))
+        keep_client = arvados.KeepClient(api_client=api_client)
+        force_timeout = [socket.timeout("timed out")]
+        with mock.patch('requests.put', side_effect=force_timeout) as mock_request:
+            with self.assertRaises(arvados.errors.KeepWriteError):
+                keep_client.put('foo')
+            self.assertTrue(mock_request.called)
+            self.assertEqual(
+                arvados.KeepClient.DEFAULT_TIMEOUT,
+                mock_request.call_args[1]['timeout'])
+
+    def test_proxy_get_timeout(self):
+        # Force a timeout, verifying that the requests.get or
+        # requests.put method was called with the proxy_timeout
+        # setting rather than the default timeout.
+        api_client = self.mock_keep_services(('keep', 10, False, 'proxy'))
+        keep_client = arvados.KeepClient(api_client=api_client)
+        force_timeout = [socket.timeout("timed out")]
+        with mock.patch('requests.get', side_effect=force_timeout) as mock_request:
+            with self.assertRaises(arvados.errors.KeepReadError):
+                keep_client.get('ffffffffffffffffffffffffffffffff')
+            self.assertTrue(mock_request.called)
+            self.assertEqual(
+                arvados.KeepClient.DEFAULT_PROXY_TIMEOUT,
+                mock_request.call_args[1]['timeout'])
+
+    def test_proxy_put_timeout(self):
+        # Force a timeout, verifying that the requests.get or
+        # requests.put method was called with the proxy_timeout
+        # setting rather than the default timeout.
+        api_client = self.mock_keep_services(('keep', 10, False, 'proxy'))
+        keep_client = arvados.KeepClient(api_client=api_client)
+        force_timeout = [socket.timeout("timed out")]
+        with mock.patch('requests.put', side_effect=force_timeout) as mock_request:
+            with self.assertRaises(arvados.errors.KeepWriteError):
+                keep_client.put('foo')
+            self.assertTrue(mock_request.called)
+            self.assertEqual(
+                arvados.KeepClient.DEFAULT_PROXY_TIMEOUT,
+                mock_request.call_args[1]['timeout'])
+
+    def test_probe_order_reference_set(self):
+        # expected_order[i] is the probe order for
+        # hash=md5(sprintf("%064x",i)) where there are 16 services
+        # with uuid sprintf("anything-%015x",j) with j in 0..15. E.g.,
+        # the first probe for the block consisting of 64 "0"
+        # characters is the service whose uuid is
+        # "zzzzz-bi6l4-000000000000003", so expected_order[0][0]=='3'.
+        expected_order = [
+            list('3eab2d5fc9681074'),
+            list('097dba52e648f1c3'),
+            list('c5b4e023f8a7d691'),
+            list('9d81c02e76a3bf54'),
+            ]
+        hashes = [
+            hashlib.md5("{:064x}".format(x)).hexdigest()
+            for x in range(len(expected_order))]
+        api_client = self.mock_n_keep_disks(16)
+        keep_client = arvados.KeepClient(api_client=api_client)
+        for i, hash in enumerate(hashes):
+            roots = keep_client.weighted_service_roots(hash)
+            got_order = [
+                re.search(r'//\[?keep0x([0-9a-f]+)', root).group(1)
+                for root in roots]
+            self.assertEqual(expected_order[i], got_order)
+
+    def test_probe_waste_adding_one_server(self):
+        hashes = [
+            hashlib.md5("{:064x}".format(x)).hexdigest() for x in range(100)]
+        initial_services = 12
+        api_client = self.mock_n_keep_disks(initial_services)
+        keep_client = arvados.KeepClient(api_client=api_client)
+        probes_before = [
+            keep_client.weighted_service_roots(hash) for hash in hashes]
+        for added_services in range(1, 12):
+            api_client = self.mock_n_keep_disks(initial_services+added_services)
+            keep_client = arvados.KeepClient(api_client=api_client)
+            total_penalty = 0
+            for hash_index in range(len(hashes)):
+                probe_after = keep_client.weighted_service_roots(
+                    hashes[hash_index])
+                penalty = probe_after.index(probes_before[hash_index][0])
+                self.assertLessEqual(penalty, added_services)
+                total_penalty += penalty
+            # Average penalty per block should not exceed
+            # N(added)/N(orig) by more than 20%, and should get closer
+            # to the ideal as we add data points.
+            expect_penalty = (
+                added_services *
+                len(hashes) / initial_services)
+            max_penalty = (
+                expect_penalty *
+                (120 - added_services)/100)
+            min_penalty = (
+                expect_penalty * 8/10)
+            self.assertTrue(
+                min_penalty <= total_penalty <= max_penalty,
+                "With {}+{} services, {} blocks, penalty {} but expected {}..{}".format(
+                    initial_services,
+                    added_services,
+                    len(hashes),
+                    total_penalty,
+                    min_penalty,
+                    max_penalty))
+
+
class KeepClientRetryTestMixin(object):
    """Shared machinery for testing KeepClient retry behavior.

    Testing with a local Keep store won't exercise retries.  Instead:

    * Create a client with one proxy specified (pointed at a black
      hole), so there's no need to instantiate an API client, and all
      HTTP requests come from one place.
    * Mock httplib's request method to provide simulated responses.

    This lets us test the retry logic extensively without relying on
    any supporting servers, and prevents side effects in case something
    hiccups.  To use this mixin, define DEFAULT_EXPECT,
    DEFAULT_EXCEPTION, and run_method().

    Test classes must define TEST_PATCHER to a method that mocks out
    appropriate methods in the client.
    """

    PROXY_ADDR = 'http://[%s]:65535/' % (tutil.TEST_HOST,)
    TEST_DATA = 'testdata'
    TEST_LOCATOR = 'ef654c40ab4f1747fc699915d4f70902+8'

    def setUp(self):
        self.client_kwargs = {'proxy': self.PROXY_ADDR, 'local_store': ''}

    def new_client(self, **caller_kwargs):
        # Caller keyword arguments override the defaults from setUp().
        merged = dict(self.client_kwargs, **caller_kwargs)
        return arvados.KeepClient(**merged)

    def run_method(self, *args, **kwargs):
        raise NotImplementedError("test subclasses must define run_method")

    def check_success(self, expected=None, *args, **kwargs):
        want = self.DEFAULT_EXPECT if expected is None else expected
        self.assertEqual(want, self.run_method(*args, **kwargs))

    def check_exception(self, error_class=None, *args, **kwargs):
        want_error = (self.DEFAULT_EXCEPTION if error_class is None
                      else error_class)
        self.assertRaises(want_error, self.run_method, *args, **kwargs)

    def test_immediate_success(self):
        with self.TEST_PATCHER(self.DEFAULT_EXPECT, 200):
            self.check_success()

    def test_retry_then_success(self):
        with self.TEST_PATCHER(self.DEFAULT_EXPECT, 500, 200):
            self.check_success(num_retries=3)

    def test_no_default_retry(self):
        # Without num_retries, a transient 500 is fatal.
        with self.TEST_PATCHER(self.DEFAULT_EXPECT, 500, 200):
            self.check_exception()

    def test_no_retry_after_permanent_error(self):
        # 403 is permanent; retries must not paper over it.
        with self.TEST_PATCHER(self.DEFAULT_EXPECT, 403, 200):
            self.check_exception(num_retries=3)

    def test_error_after_retries_exhausted(self):
        with self.TEST_PATCHER(self.DEFAULT_EXPECT, 500, 500, 200):
            self.check_exception(num_retries=1)

    def test_num_retries_instance_fallback(self):
        # num_retries given at construction time applies to every call.
        self.client_kwargs['num_retries'] = 3
        with self.TEST_PATCHER(self.DEFAULT_EXPECT, 500, 200):
            self.check_success()
+
+
@tutil.skip_sleep
class KeepClientRetryGetTestCase(KeepClientRetryTestMixin, unittest.TestCase):
    """Retry behavior of KeepClient.get()."""
    DEFAULT_EXPECT = KeepClientRetryTestMixin.TEST_DATA
    DEFAULT_EXCEPTION = arvados.errors.KeepReadError
    HINTED_LOCATOR = KeepClientRetryTestMixin.TEST_LOCATOR + '+K@xyzzy'
    TEST_PATCHER = staticmethod(tutil.mock_get_responses)

    def run_method(self, locator=KeepClientRetryTestMixin.TEST_LOCATOR,
                   *args, **kwargs):
        return self.new_client().get(locator, *args, **kwargs)

    def test_specific_exception_when_not_found(self):
        # A definitive 404 should surface as NotFoundError.
        with tutil.mock_get_responses(self.DEFAULT_EXPECT, 404, 200):
            self.check_exception(arvados.errors.NotFoundError, num_retries=3)

    def test_general_exception_with_mixed_errors(self):
        # get should raise a NotFoundError only when a high threshold
        # of servers positively report the block missing.  This rigs up
        # 50/50 disagreement between two servers and checks the result
        # is the generic KeepReadError, not NotFoundError.
        client = self.new_client()
        with tutil.mock_get_responses(self.DEFAULT_EXPECT, 404, 500):
            with self.assertRaises(arvados.errors.KeepReadError) as exc_check:
                client.get(self.HINTED_LOCATOR)
            self.assertNotIsInstance(
                exc_check.exception, arvados.errors.NotFoundError,
                "mixed errors raised NotFoundError")

    def test_hint_server_can_succeed_without_retries(self):
        with tutil.mock_get_responses(self.DEFAULT_EXPECT, 404, 200, 500):
            self.check_success(locator=self.HINTED_LOCATOR)

    def test_try_next_server_after_timeout(self):
        # First server times out; the client should move on and succeed.
        responses = iter([
            socket.timeout("timed out"),
            tutil.fake_requests_response(200, self.DEFAULT_EXPECT)])
        with mock.patch('requests.get', side_effect=responses):
            self.check_success(locator=self.HINTED_LOCATOR)

    def test_retry_data_with_wrong_checksum(self):
        # Corrupt data from one server should trigger a retry elsewhere.
        responses = (tutil.fake_requests_response(200, body)
                     for body in ['baddata', self.TEST_DATA])
        with mock.patch('requests.get', side_effect=responses):
            self.check_success(locator=self.HINTED_LOCATOR)
+
+
@tutil.skip_sleep
class KeepClientRetryPutTestCase(KeepClientRetryTestMixin, unittest.TestCase):
    """Retry behavior of KeepClient.put()."""
    DEFAULT_EXPECT = KeepClientRetryTestMixin.TEST_LOCATOR
    DEFAULT_EXCEPTION = arvados.errors.KeepWriteError
    TEST_PATCHER = staticmethod(tutil.mock_put_responses)

    def run_method(self, data=KeepClientRetryTestMixin.TEST_DATA,
                   copies=1, *args, **kwargs):
        return self.new_client().put(data, copies, *args, **kwargs)

    def test_do_not_send_multiple_copies_to_same_server(self):
        # With only one server available, asking for two copies must
        # fail rather than writing both copies to the same server.
        with tutil.mock_put_responses(self.DEFAULT_EXPECT, 200):
            self.check_exception(copies=2, num_retries=3)
diff --git a/sdk/python/tests/test_keep_locator.py b/sdk/python/tests/test_keep_locator.py
new file mode 100644 (file)
index 0000000..a7e9cb1
--- /dev/null
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import datetime
+import itertools
+import random
+import unittest
+
+from arvados.keep import KeepLocator
+
class ArvadosKeepLocatorTest(unittest.TestCase):
    """Tests for parsing and re-serializing Keep locator strings.

    Fix: use assertEqual instead of the deprecated assertEquals alias.
    """
    DEFAULT_TEST_COUNT = 10

    def numstrs(fmtstr, base, exponent):
        # Class-body helper (not a method): builds a generator method
        # that yields random numbers in [0, base**exponent] formatted
        # with fmtstr.
        def genstrs(self, count=None):
            # NOTE(review): count=0 falls back to the default; assumed
            # intentional since 0 strings would be a useless request.
            return (fmtstr.format(random.randint(0, base ** exponent))
                    for c in xrange(count or self.DEFAULT_TEST_COUNT))
        return genstrs

    # Random generators for each locator component.
    checksums = numstrs('{:032x}', 16, 32)
    sizes = numstrs('{:d}', 2, 26)
    signatures = numstrs('{:040x}', 16, 40)
    timestamps = numstrs('{:08x}', 16, 8)

    def base_locators(self, count=DEFAULT_TEST_COUNT):
        """Yield `count` bare "<checksum>+<size>" locators."""
        return ('+'.join(pair) for pair in
                itertools.izip(self.checksums(count), self.sizes(count)))

    def perm_hints(self, count=DEFAULT_TEST_COUNT):
        """Yield `count` "A<signature>@<timestamp>" permission hints."""
        for sig, ts in itertools.izip(self.signatures(count),
                                      self.timestamps(count)):
            yield 'A{}@{}'.format(sig, ts)

    def test_good_locators_returned(self):
        # Valid locators must round-trip through KeepLocator unchanged.
        for hint_gens in [(), (self.sizes(),),
                          (self.sizes(), self.perm_hints())]:
            for loc_data in itertools.izip(self.checksums(), *hint_gens):
                locator = '+'.join(loc_data)
                self.assertEqual(locator, str(KeepLocator(locator)))

    def test_nonchecksum_rejected(self):
        # Strings that don't begin with a 32-hex-digit checksum are bad.
        for badstr in ['', 'badbadbad', '8f9e68d957b504a29ba76c526c3145dj',
                       '+8f9e68d957b504a29ba76c526c3145d9',
                       '3+8f9e68d957b504a29ba76c526c3145d9']:
            self.assertRaises(ValueError, KeepLocator, badstr)

    def test_unknown_hints_accepted(self):
        # Unknown but well-formed hints (capital letter + word chars)
        # must be preserved.
        base = next(self.base_locators(1))
        for weirdhint in ['Zfoo', 'Ybar234', 'Xa@b_c-372', 'W99']:
            locator = '+'.join([base, weirdhint])
            self.assertEqual(locator, str(KeepLocator(locator)))

    def test_bad_hints_rejected(self):
        base = next(self.base_locators(1))
        for badhint in ['', 'A', 'lowercase', '+32']:
            self.assertRaises(ValueError, KeepLocator,
                              '+'.join([base, badhint]))

    def test_multiple_locator_hints_accepted(self):
        base = next(self.base_locators(1))
        for loc_hints in itertools.permutations(['Kab1cd', 'Kef2gh', 'Kij3kl']):
            locator = '+'.join((base,) + loc_hints)
            self.assertEqual(locator, str(KeepLocator(locator)))

    def test_expiry_passed(self):
        base = next(self.base_locators(1))
        signature = next(self.signatures(1))
        dt1980 = datetime.datetime(1980, 1, 1)
        dt2000 = datetime.datetime(2000, 2, 2)
        dt2080 = datetime.datetime(2080, 3, 3)
        # A locator with no permission hint never expires.
        locator = KeepLocator(base)
        self.assertFalse(locator.permission_expired())
        self.assertFalse(locator.permission_expired(dt1980))
        self.assertFalse(locator.permission_expired(dt2080))
        # Timestamped to 1987-01-05 18:48:32.
        locator = KeepLocator('{}+A{}@20000000'.format(base, signature))
        self.assertTrue(locator.permission_expired())
        self.assertTrue(locator.permission_expired(dt2000))
        self.assertFalse(locator.permission_expired(dt1980))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/sdk/python/tests/test_pipeline_template.py b/sdk/python/tests/test_pipeline_template.py
new file mode 100644 (file)
index 0000000..54539b0
--- /dev/null
@@ -0,0 +1,60 @@
+# usage example:
+#
+# ARVADOS_API_TOKEN=abc ARVADOS_API_HOST=arvados.local python -m unittest discover
+
+import unittest
+import arvados
+import apiclient
+import run_test_server
+
class PipelineTemplateTest(unittest.TestCase):
    """End-to-end CRUD test for pipeline templates against a live API server.

    Fixes: use assertIn instead of assertEqual('uuid' in x, True), and
    drop the unused geterror_response binding.
    """

    def setUp(self):
        run_test_server.run()

    def runTest(self):
        run_test_server.authorize_with("admin")
        # Create a template and check we get a well-formed UUID back.
        pt_uuid = arvados.api('v1', cache=False).pipeline_templates().create(
            body={'name':__file__}
            ).execute()['uuid']
        self.assertEqual(len(pt_uuid), 27,
                         'Unexpected format of pipeline template UUID ("%s")'
                         % pt_uuid)
        # Components deliberately mix value types to check round-trip
        # fidelity through the API server.
        components = {
            'x': 'x',
            '-x-': [1,2,{'foo':'bar'}],
            'Boggis': {'Bunce': '[\'Bean\']'},
            'SpassBox': True,
            'spass_box': False,
            'spass-box': [True, 'Maybe', False]
            }
        update_response = arvados.api('v1', cache=False).pipeline_templates().update(
            uuid=pt_uuid,
            body={'components':components}
            ).execute()
        self.assertIn('uuid', update_response,
                      'update() response did not include a uuid')
        self.assertEqual(update_response['uuid'], pt_uuid,
                         'update() response has a different uuid (%s, not %s)'
                         % (update_response['uuid'], pt_uuid))
        self.assertEqual(update_response['name'], __file__,
                         'update() response has a different name (%s, not %s)'
                         % (update_response['name'], __file__))
        get_response = arvados.api('v1', cache=False).pipeline_templates().get(
            uuid=pt_uuid
            ).execute()
        self.assertEqual(get_response['components'], components,
                         'components got munged by server (%s -> %s)'
                         % (components, update_response['components']))
        delete_response = arvados.api('v1', cache=False).pipeline_templates().delete(
            uuid=pt_uuid
            ).execute()
        self.assertEqual(delete_response['uuid'], pt_uuid,
                         'delete() response has wrong uuid (%s, not %s)'
                         % (delete_response['uuid'], pt_uuid))
        # get() after delete() must fail.
        with self.assertRaises(apiclient.errors.HttpError):
            arvados.api('v1', cache=False).pipeline_templates().get(
                uuid=pt_uuid
                ).execute()

    def tearDown(self):
        run_test_server.stop()
diff --git a/sdk/python/tests/test_retry.py b/sdk/python/tests/test_retry.py
new file mode 100644 (file)
index 0000000..0c1110c
--- /dev/null
@@ -0,0 +1,225 @@
+#!/usr/bin/env python
+
+import itertools
+import unittest
+
+import arvados.errors as arv_error
+import arvados.retry as arv_retry
+import mock
+
+from arvados_testutil import fake_requests_response
+
+class RetryLoopTestMixin(object):
+    @staticmethod
+    def loop_success(result):
+        # During the tests, we use integers that look like HTTP status
+        # codes as loop results.  Then we define simplified HTTP
+        # heuristics here to decide whether the result is success (True),
+        # permanent failure (False), or temporary failure (None).
+        if result < 400:
+            return True
+        elif result < 500:
+            return False
+        else:
+            return None
+
+    def run_loop(self, num_retries, *results, **kwargs):
+        responses = itertools.chain(results, itertools.repeat(None))
+        retrier = arv_retry.RetryLoop(num_retries, self.loop_success,
+                                      **kwargs)
+        for tries_left, response in itertools.izip(retrier, responses):
+            retrier.save_result(response)
+        return retrier
+
+    def check_result(self, retrier, expect_success, last_code):
+        self.assertIs(retrier.success(), expect_success,
+                      "loop success flag is incorrect")
+        self.assertEqual(last_code, retrier.last_result())
+
+
+class RetryLoopTestCase(unittest.TestCase, RetryLoopTestMixin):
+    def test_zero_retries_and_success(self):
+        retrier = self.run_loop(0, 200)
+        self.check_result(retrier, True, 200)
+
+    def test_zero_retries_and_tempfail(self):
+        retrier = self.run_loop(0, 500, 501)
+        self.check_result(retrier, None, 500)
+
+    def test_zero_retries_and_permfail(self):
+        retrier = self.run_loop(0, 400, 201)
+        self.check_result(retrier, False, 400)
+
+    def test_one_retry_with_immediate_success(self):
+        retrier = self.run_loop(1, 200, 201)
+        self.check_result(retrier, True, 200)
+
+    def test_one_retry_with_delayed_success(self):
+        retrier = self.run_loop(1, 500, 201)
+        self.check_result(retrier, True, 201)
+
+    def test_one_retry_with_no_success(self):
+        retrier = self.run_loop(1, 500, 501, 502)
+        self.check_result(retrier, None, 501)
+
+    def test_one_retry_but_permfail(self):
+        retrier = self.run_loop(1, 400, 201)
+        self.check_result(retrier, False, 400)
+
+    def test_two_retries_with_immediate_success(self):
+        retrier = self.run_loop(2, 200, 201, 202)
+        self.check_result(retrier, True, 200)
+
+    def test_two_retries_with_success_after_one(self):
+        retrier = self.run_loop(2, 500, 201, 502)
+        self.check_result(retrier, True, 201)
+
+    def test_two_retries_with_success_after_two(self):
+        retrier = self.run_loop(2, 500, 501, 202, 503)
+        self.check_result(retrier, True, 202)
+
+    def test_two_retries_with_no_success(self):
+        retrier = self.run_loop(2, 500, 501, 502, 503)
+        self.check_result(retrier, None, 502)
+
+    def test_two_retries_with_permfail(self):
+        retrier = self.run_loop(2, 500, 401, 202)
+        self.check_result(retrier, False, 401)
+
+    def test_save_result_before_start_is_error(self):
+        retrier = arv_retry.RetryLoop(0)
+        self.assertRaises(arv_error.AssertionError, retrier.save_result, 1)
+
+    def test_save_result_after_end_is_error(self):
+        retrier = arv_retry.RetryLoop(0)
+        for count in retrier:
+            pass
+        self.assertRaises(arv_error.AssertionError, retrier.save_result, 1)
+
+
+@mock.patch('time.time', side_effect=itertools.count())
+@mock.patch('time.sleep')
+class RetryLoopBackoffTestCase(unittest.TestCase, RetryLoopTestMixin):
+    def run_loop(self, num_retries, *results, **kwargs):
+        kwargs.setdefault('backoff_start', 8)
+        return super(RetryLoopBackoffTestCase, self).run_loop(
+            num_retries, *results, **kwargs)
+
+    def check_backoff(self, sleep_mock, sleep_count, multiplier=1):
+        # Figure out how much time we actually spent sleeping.
+        sleep_times = [arglist[0][0] for arglist in sleep_mock.call_args_list
+                       if arglist[0][0] > 0]
+        self.assertEqual(sleep_count, len(sleep_times),
+                         "loop did not back off correctly")
+        last_wait = 0
+        for this_wait in sleep_times:
+            self.assertGreater(this_wait, last_wait * multiplier,
+                               "loop did not grow backoff times correctly")
+            last_wait = this_wait
+
+    def test_no_backoff_with_no_retries(self, sleep_mock, time_mock):
+        self.run_loop(0, 500, 201)
+        self.check_backoff(sleep_mock, 0)
+
+    def test_no_backoff_after_success(self, sleep_mock, time_mock):
+        self.run_loop(1, 200, 501)
+        self.check_backoff(sleep_mock, 0)
+
+    def test_no_backoff_after_permfail(self, sleep_mock, time_mock):
+        self.run_loop(1, 400, 201)
+        self.check_backoff(sleep_mock, 0)
+
+    def test_backoff_before_success(self, sleep_mock, time_mock):
+        self.run_loop(5, 500, 501, 502, 203, 504)
+        self.check_backoff(sleep_mock, 3)
+
+    def test_backoff_before_permfail(self, sleep_mock, time_mock):
+        self.run_loop(5, 500, 501, 502, 403, 504)
+        self.check_backoff(sleep_mock, 3)
+
+    def test_backoff_all_tempfail(self, sleep_mock, time_mock):
+        self.run_loop(3, 500, 501, 502, 503, 504)
+        self.check_backoff(sleep_mock, 3)
+
+    def test_backoff_multiplier(self, sleep_mock, time_mock):
+        self.run_loop(5, 500, 501, 502, 503, 504, 505,
+                      backoff_start=5, backoff_growth=10)
+        self.check_backoff(sleep_mock, 5, 9)
+
+
+class CheckHTTPResponseSuccessTestCase(unittest.TestCase):
+    def results_map(self, *codes):
+        for code in codes:
+            response = fake_requests_response(code, None)
+            yield code, arv_retry.check_http_response_success(response)
+
+    def check(assert_name):
+        def check_method(self, expected, *codes):
+            assert_func = getattr(self, assert_name)
+            for code, actual in self.results_map(*codes):
+                assert_func(expected, actual,
+                            "{} status flagged {}".format(code, actual))
+                if assert_name != 'assertIs':
+                    self.assertTrue(
+                        actual is True or actual is False or actual is None,
+                        "{} status returned {}".format(code, actual))
+        return check_method
+
+    check_is = check('assertIs')
+    check_is_not = check('assertIsNot')
+
+    def test_obvious_successes(self):
+        self.check_is(True, *range(200, 207))
+
+    def test_obvious_stops(self):
+        self.check_is(False, 424, 426, 428, 431,
+                      *range(400, 408) + range(410, 420))
+
+    def test_obvious_retries(self):
+        self.check_is(None, 500, 502, 503, 504)
+
+    def test_4xx_retries(self):
+        self.check_is(None, 408, 409, 422, 423)
+
+    def test_5xx_failures(self):
+        self.check_is(False, 501, *range(505, 512))
+
+    def test_1xx_not_retried(self):
+        self.check_is_not(None, 100, 101)
+
+    def test_redirects_not_retried(self):
+        self.check_is_not(None, *range(300, 309))
+
+    def test_wacky_code_retries(self):
+        self.check_is(None, 0, 99, 600, -200)
+
+
+class RetryMethodTestCase(unittest.TestCase):
+    class Tester(object):
+        def __init__(self):
+            self.num_retries = 1
+
+        @arv_retry.retry_method
+        def check(self, a, num_retries=None, z=0):
+            return (a, num_retries, z)
+
+
+    def test_positional_arg_passed(self):
+        self.assertEqual((3, 2, 0), self.Tester().check(3, 2))
+
+    def test_keyword_arg_passed(self):
+        self.assertEqual((4, 3, 0), self.Tester().check(num_retries=3, a=4))
+
+    def test_not_specified(self):
+        self.assertEqual((0, 1, 0), self.Tester().check(0))
+
+    def test_not_specified_with_other_kwargs(self):
+        self.assertEqual((1, 1, 1), self.Tester().check(1, z=1))
+
+    def test_bad_call(self):
+        with self.assertRaises(TypeError):
+            self.Tester().check(num_retries=2)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/sdk/python/tests/test_sdk.py b/sdk/python/tests/test_sdk.py
new file mode 100644 (file)
index 0000000..3436a07
--- /dev/null
@@ -0,0 +1,43 @@
+import mock
+import os
+import unittest
+
+import arvados
+import arvados.collection
+
+class TestSDK(unittest.TestCase):
+
+    @mock.patch('arvados.api')
+    @mock.patch('arvados.current_task')
+    @mock.patch('arvados.current_job')
+    def test_one_task_per_input_file_normalize(self, mock_job, mock_task, mock_api):
+        # This manifest will be reduced from three lines to one when it is
+        # normalized.
+        nonnormalized_manifest = """. 5348b82a029fd9e971a811ce1f71360b+43 0:43:md5sum.txt
+. 085c37f02916da1cad16f93c54d899b7+41 0:41:md5sum.txt
+. 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md5sum.txt
+"""
+        dummy_hash = 'ffffffffffffffffffffffffffffffff+0'
+
+        mock_job.return_value = {
+            'uuid': 'none',
+            'script_parameters': {
+                'input': dummy_hash
+            }
+        }
+        mock_task.return_value = {
+            'uuid': 'none',
+            'sequence': 0,
+        }
+        # mock the API client to return a collection with a nonnormalized manifest.
+        mock_api('v1').collections().get().execute.return_value = {
+            'uuid': 'zzzzz-4zz18-mockcollection0',
+            'portable_data_hash': dummy_hash,
+            'manifest_text': nonnormalized_manifest,
+        }
+
+        # Because one_task_per_input_file normalizes this collection,
+        # it should now create only one job task and not three.
+        arvados.job_setup.one_task_per_input_file(and_end_task=False)
+        mock_api('v1').job_tasks().create().execute.assert_called_once_with()
+
diff --git a/sdk/python/tests/test_stream.py b/sdk/python/tests/test_stream.py
new file mode 100644 (file)
index 0000000..08a3d28
--- /dev/null
@@ -0,0 +1,277 @@
+#!/usr/bin/env python
+
+import bz2
+import gzip
+import io
+import mock
+import os
+import unittest
+
+import arvados
+from arvados import StreamReader, StreamFileReader
+
+import arvados_testutil as tutil
+import run_test_server
+
+class StreamFileReaderTestCase(unittest.TestCase):
+    def make_count_reader(self):
+        stream = tutil.MockStreamReader('.', '01234', '34567', '67890')
+        return StreamFileReader(stream, [[1, 3, 0], [6, 3, 3], [11, 3, 6]],
+                                'count.txt')
+
+    def test_read_returns_first_block(self):
+        # read() calls will be aligned on block boundaries - see #3663.
+        sfile = self.make_count_reader()
+        self.assertEqual('123', sfile.read(10))
+
+    def test_small_read(self):
+        sfile = self.make_count_reader()
+        self.assertEqual('12', sfile.read(2))
+
+    def test_successive_reads(self):
+        sfile = self.make_count_reader()
+        for expect in ['123', '456', '789', '']:
+            self.assertEqual(expect, sfile.read(10))
+
+    def test_readfrom_spans_blocks(self):
+        sfile = self.make_count_reader()
+        self.assertEqual('6789', sfile.readfrom(5, 12))
+
+    def test_small_readfrom_spanning_blocks(self):
+        sfile = self.make_count_reader()
+        self.assertEqual('2345', sfile.readfrom(1, 4))
+
+    def test_readall(self):
+        sfile = self.make_count_reader()
+        self.assertEqual('123456789', ''.join(sfile.readall()))
+
+    def test_one_arg_seek(self):
+        self.test_relative_seek([])
+
+    def test_absolute_seek(self, args=[os.SEEK_SET]):
+        sfile = self.make_count_reader()
+        sfile.seek(6, *args)
+        self.assertEqual('78', sfile.read(2))
+        sfile.seek(4, *args)
+        self.assertEqual('56', sfile.read(2))
+
+    def test_relative_seek(self, args=[os.SEEK_CUR]):
+        sfile = self.make_count_reader()
+        self.assertEqual('12', sfile.read(2))
+        sfile.seek(2, *args)
+        self.assertEqual('56', sfile.read(2))
+
+    def test_end_seek(self):
+        sfile = self.make_count_reader()
+        sfile.seek(-6, os.SEEK_END)
+        self.assertEqual('45', sfile.read(2))
+
+    def test_seek_min_zero(self):
+        sfile = self.make_count_reader()
+        sfile.seek(-2, os.SEEK_SET)
+        self.assertEqual(0, sfile.tell())
+
+    def test_seek_max_size(self):
+        sfile = self.make_count_reader()
+        sfile.seek(2, os.SEEK_END)
+        self.assertEqual(9, sfile.tell())
+
+    def test_size(self):
+        self.assertEqual(9, self.make_count_reader().size())
+
+    def test_tell_after_block_read(self):
+        sfile = self.make_count_reader()
+        sfile.read(5)
+        self.assertEqual(3, sfile.tell())
+
+    def test_tell_after_small_read(self):
+        sfile = self.make_count_reader()
+        sfile.read(1)
+        self.assertEqual(1, sfile.tell())
+
+    def test_no_read_after_close(self):
+        sfile = self.make_count_reader()
+        sfile.close()
+        self.assertRaises(ValueError, sfile.read, 2)
+
+    def test_context(self):
+        with self.make_count_reader() as sfile:
+            self.assertFalse(sfile.closed, "reader is closed inside context")
+            self.assertEqual('12', sfile.read(2))
+        self.assertTrue(sfile.closed, "reader is open after context")
+
+    def make_newlines_reader(self):
+        stream = tutil.MockStreamReader('.', 'one\ntwo\n\nth', 'ree\nfour\n\n')
+        return StreamFileReader(stream, [[0, 11, 0], [11, 10, 11]], 'count.txt')
+
+    def check_lines(self, actual):
+        self.assertEqual(['one\n', 'two\n', '\n', 'three\n', 'four\n', '\n'],
+                         actual)
+
+    def test_readline(self):
+        reader = self.make_newlines_reader()
+        actual = []
+        while True:
+            data = reader.readline()
+            if not data:
+                break
+            actual.append(data)
+        self.check_lines(actual)
+
+    def test_readlines(self):
+        self.check_lines(self.make_newlines_reader().readlines())
+
+    def test_iteration(self):
+        self.check_lines(list(iter(self.make_newlines_reader())))
+
+    def test_readline_size(self):
+        reader = self.make_newlines_reader()
+        self.assertEqual('on', reader.readline(2))
+        self.assertEqual('e\n', reader.readline(4))
+        self.assertEqual('two\n', reader.readline(6))
+        self.assertEqual('\n', reader.readline(8))
+        self.assertEqual('thre', reader.readline(4))
+
+    def test_readlines_sizehint(self):
+        result = self.make_newlines_reader().readlines(8)
+        self.assertEqual(['one\n', 'two\n'], result[:2])
+        self.assertNotIn('three\n', result)
+
+    def test_name_attribute(self):
+        # Test both .name and .name() (for backward compatibility)
+        stream = tutil.MockStreamReader()
+        sfile = StreamFileReader(stream, [[0, 0, 0]], 'nametest')
+        self.assertEqual('nametest', sfile.name)
+        self.assertEqual('nametest', sfile.name())
+
+    def check_decompression(self, compress_ext, compress_func):
+        test_text = 'decompression\ntest\n'
+        test_data = compress_func(test_text)
+        stream = tutil.MockStreamReader('.', test_data)
+        reader = StreamFileReader(stream, [[0, len(test_data), 0]],
+                                  'test.' + compress_ext)
+        self.assertEqual(test_text, ''.join(reader.readall_decompressed()))
+
+    @staticmethod
+    def gzip_compress(data):
+        compressed_data = io.BytesIO()
+        with gzip.GzipFile(fileobj=compressed_data, mode='wb') as gzip_file:
+            gzip_file.write(data)
+        return compressed_data.getvalue()
+
+    def test_no_decompression(self):
+        self.check_decompression('log', lambda s: s)
+
+    def test_gzip_decompression(self):
+        self.check_decompression('gz', self.gzip_compress)
+
+    def test_bz2_decompression(self):
+        self.check_decompression('bz2', bz2.compress)
+
+
+class StreamRetryTestMixin(object):
+    # Define reader_for(coll_name, **kwargs)
+    # and read_for_test(reader, size, **kwargs).
+    API_COLLECTIONS = run_test_server.fixture('collections')
+
+    def keep_client(self):
+        return arvados.KeepClient(proxy='http://[%s]:1' % (tutil.TEST_HOST,),
+                                  local_store='')
+
+    def manifest_for(self, coll_name):
+        return self.API_COLLECTIONS[coll_name]['manifest_text']
+
+    @tutil.skip_sleep
+    def test_success_without_retries(self):
+        reader = self.reader_for('bar_file')
+        with tutil.mock_get_responses('bar', 200):
+            self.assertEqual('bar', self.read_for_test(reader, 3))
+
+    @tutil.skip_sleep
+    def test_read_no_default_retry(self):
+        reader = self.reader_for('user_agreement')
+        with tutil.mock_get_responses('', 500):
+            with self.assertRaises(arvados.errors.KeepReadError):
+                self.read_for_test(reader, 10)
+
+    @tutil.skip_sleep
+    def test_read_with_instance_retries(self):
+        reader = self.reader_for('foo_file', num_retries=3)
+        with tutil.mock_get_responses('foo', 500, 200):
+            self.assertEqual('foo', self.read_for_test(reader, 3))
+
+    @tutil.skip_sleep
+    def test_read_with_method_retries(self):
+        reader = self.reader_for('foo_file')
+        with tutil.mock_get_responses('foo', 500, 200):
+            self.assertEqual('foo',
+                             self.read_for_test(reader, 3, num_retries=3))
+
+    @tutil.skip_sleep
+    def test_read_instance_retries_exhausted(self):
+        reader = self.reader_for('bar_file', num_retries=3)
+        with tutil.mock_get_responses('bar', 500, 500, 500, 500, 200):
+            with self.assertRaises(arvados.errors.KeepReadError):
+                self.read_for_test(reader, 3)
+
+    @tutil.skip_sleep
+    def test_read_method_retries_exhausted(self):
+        reader = self.reader_for('bar_file')
+        with tutil.mock_get_responses('bar', 500, 500, 500, 500, 200):
+            with self.assertRaises(arvados.errors.KeepReadError):
+                self.read_for_test(reader, 3, num_retries=3)
+
+    @tutil.skip_sleep
+    def test_method_retries_take_precedence(self):
+        reader = self.reader_for('user_agreement', num_retries=10)
+        with tutil.mock_get_responses('', 500, 500, 500, 200):
+            with self.assertRaises(arvados.errors.KeepReadError):
+                self.read_for_test(reader, 10, num_retries=1)
+
+
+class StreamReaderTestCase(unittest.TestCase, StreamRetryTestMixin):
+    def reader_for(self, coll_name, **kwargs):
+        return StreamReader(self.manifest_for(coll_name).split(),
+                            self.keep_client(), **kwargs)
+
+    def read_for_test(self, reader, byte_count, **kwargs):
+        return reader.readfrom(0, byte_count, **kwargs)
+
+    def test_manifest_text_without_keep_client(self):
+        mtext = self.manifest_for('multilevel_collection_1')
+        for line in mtext.rstrip('\n').split('\n'):
+            reader = StreamReader(line.split())
+            self.assertEqual(line + '\n', reader.manifest_text())
+
+
+class StreamFileReadTestCase(unittest.TestCase, StreamRetryTestMixin):
+    def reader_for(self, coll_name, **kwargs):
+        return StreamReader(self.manifest_for(coll_name).split(),
+                            self.keep_client(), **kwargs).all_files()[0]
+
+    def read_for_test(self, reader, byte_count, **kwargs):
+        return reader.read(byte_count, **kwargs)
+
+
+class StreamFileReadFromTestCase(StreamFileReadTestCase):
+    def read_for_test(self, reader, byte_count, **kwargs):
+        return reader.readfrom(0, byte_count, **kwargs)
+
+
+class StreamFileReadAllTestCase(StreamFileReadTestCase):
+    def read_for_test(self, reader, byte_count, **kwargs):
+        return ''.join(reader.readall(**kwargs))
+
+
+class StreamFileReadAllDecompressedTestCase(StreamFileReadTestCase):
+    def read_for_test(self, reader, byte_count, **kwargs):
+        return ''.join(reader.readall_decompressed(**kwargs))
+
+
+class StreamFileReadlinesTestCase(StreamFileReadTestCase):
+    def read_for_test(self, reader, byte_count, **kwargs):
+        return ''.join(reader.readlines(**kwargs))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/sdk/python/tests/test_util.py b/sdk/python/tests/test_util.py
new file mode 100644 (file)
index 0000000..f9e5d8c
--- /dev/null
@@ -0,0 +1,22 @@
+import unittest
+import os
+import arvados.util
+
+class MkdirDashPTest(unittest.TestCase):
+    def setUp(self):
+        try:
+            os.path.mkdir('./tmp')
+        except:
+            pass
+    def tearDown(self):
+        try:
+            os.unlink('./tmp/bar')
+            os.rmdir('./tmp/foo')
+            os.rmdir('./tmp')
+        except:
+            pass
+    def runTest(self):
+        arvados.util.mkdir_dash_p('./tmp/foo')
+        with open('./tmp/bar', 'wb') as f:
+            f.write('bar')
+        self.assertRaises(OSError, arvados.util.mkdir_dash_p, './tmp/bar')
diff --git a/sdk/python/tests/test_websockets.py b/sdk/python/tests/test_websockets.py
new file mode 100644 (file)
index 0000000..032ac51
--- /dev/null
@@ -0,0 +1,52 @@
+import run_test_server
+import unittest
+import arvados
+import arvados.events
+import threading
+
+class EventTestBase(object):
+    def on_event(self, ev):
+        if self.state == 1:
+            self.assertEqual(200, ev['status'])
+            self.state = 2
+            self.subscribed.set()
+        elif self.state == 2:
+            self.assertEqual(self.h[u'uuid'], ev[u'object_uuid'])
+            self.state = 3
+            self.done.set()
+        elif self.state == 3:
+            self.fail()
+
+    def runTest(self):
+        self.ws = None
+        self.state = 1
+        self.subscribed = threading.Event()
+        self.done = threading.Event()
+
+        run_test_server.authorize_with("admin")
+        api = arvados.api('v1', cache=False)
+        self.ws = arvados.events.subscribe(arvados.api('v1', cache=False), [['object_uuid', 'is_a', 'arvados#human']], self.on_event, poll_fallback=2)
+        self.assertIsInstance(self.ws, self.WS_TYPE)
+        self.subscribed.wait(10)
+        self.h = api.humans().create(body={}).execute()
+        self.done.wait(10)
+        self.assertEqual(3, self.state)
+
+class WebsocketTest(run_test_server.TestCaseWithServers, EventTestBase):
+    MAIN_SERVER = {'websockets': True}
+    WS_TYPE = arvados.events.EventClient
+
+    def tearDown(self):
+        if self.ws:
+            self.ws.close()
+        super(WebsocketTest, self).tearDown()
+
+
+class PollClientTest(run_test_server.TestCaseWithServers, EventTestBase):
+    MAIN_SERVER = {}
+    WS_TYPE = arvados.events.PollClient
+
+    def tearDown(self):
+        if self.ws:
+            self.ws.close()
+        super(PollClientTest, self).tearDown()
diff --git a/sdk/ruby/.gitignore b/sdk/ruby/.gitignore
new file mode 100644 (file)
index 0000000..1a58eb0
--- /dev/null
@@ -0,0 +1,2 @@
+Gemfile.lock
+arvados*gem
diff --git a/sdk/ruby/Gemfile b/sdk/ruby/Gemfile
new file mode 100644 (file)
index 0000000..8f441da
--- /dev/null
@@ -0,0 +1,4 @@
+source 'https://rubygems.org'
+gemspec
+gem 'rake'
+gem 'minitest', '>= 5.0.0'
diff --git a/sdk/ruby/README b/sdk/ruby/README
new file mode 100644 (file)
index 0000000..f72a3d1
--- /dev/null
@@ -0,0 +1,28 @@
+
+This directory contains the Ruby SDK.
+
+## Installation instructions
+
+You can build the gem with the following command:
+
+  gem build arvados.gemspec
+
+and install it like this:
+
+  gem install ./arvados-0.1.0.gem
+
+## Code example
+
+#!/usr/bin/env ruby
+
+ENV['ARVADOS_API_HOST'] = 'arvados.local'
+ENV['ARVADOS_API_TOKEN'] = 'qwertyuiopasdfghjklzxcvbnm1234567890abcdefghijklmn'
+
+require 'arvados'
+arv = Arvados.new( { :suppress_ssl_warnings => false } )
+
+pt_list = arv.pipeline_template.list(where:{})
+puts pt_list[:items].first.inspect
+
+pt = arv.pipeline_template.get(uuid:"9zb4a-p5p6p-fkkbrl98u3pk87m")
+puts pt.inspect
diff --git a/sdk/ruby/Rakefile b/sdk/ruby/Rakefile
new file mode 100644 (file)
index 0000000..cf4652f
--- /dev/null
@@ -0,0 +1,8 @@
+require 'rake/testtask'
+
+Rake::TestTask.new do |t|
+  t.libs << 'test'
+end
+
+desc 'Run tests'
+task default: :test
diff --git a/sdk/ruby/arvados.gemspec b/sdk/ruby/arvados.gemspec
new file mode 100644 (file)
index 0000000..5361e03
--- /dev/null
@@ -0,0 +1,27 @@
+if not File.exists?('/usr/bin/git') then
+  STDERR.puts "\nGit binary not found, aborting. Please install git and run gem build from a checked out copy of the git repository.\n\n"
+  exit
+end
+
+git_timestamp, git_hash = `git log -n1 --first-parent --format=%ct:%H .`.chomp.split(":")
+git_timestamp = Time.at(git_timestamp.to_i).utc
+
+Gem::Specification.new do |s|
+  s.name        = 'arvados'
+  s.version     = "0.1.#{git_timestamp.strftime('%Y%m%d%H%M%S')}"
+  s.date        = git_timestamp.strftime("%Y-%m-%d")
+  s.summary     = "Arvados client library"
+  s.description = "Arvados client library, git commit #{git_hash}"
+  s.authors     = ["Arvados Authors"]
+  s.email       = 'gem-dev@curoverse.com'
+  s.licenses    = ['Apache License, Version 2.0']
+  s.files       = ["lib/arvados.rb", "lib/arvados/keep.rb"]
+  s.required_ruby_version = '>= 2.1.0'
+  s.add_dependency('google-api-client', '~> 0.6.3', '>= 0.6.3')
+  s.add_dependency('activesupport', '>= 3.2.13')
+  s.add_dependency('json', '~> 1.7', '>= 1.7.7')
+  s.add_dependency('andand', '~> 1.3', '>= 1.3.3')
+  s.add_runtime_dependency('jwt', '>= 0.1.5', '< 1.0.0')
+  s.homepage    =
+    'https://arvados.org'
+end
diff --git a/sdk/ruby/lib/arvados.rb b/sdk/ruby/lib/arvados.rb
new file mode 100644 (file)
index 0000000..a6ebc36
--- /dev/null
@@ -0,0 +1,292 @@
+require 'rubygems'
+require 'google/api_client'
+require 'active_support/inflector'
+require 'json'
+require 'fileutils'
+require 'andand'
+
+ActiveSupport::Inflector.inflections do |inflect|
+  inflect.irregular 'specimen', 'specimens'
+  inflect.irregular 'human', 'humans'
+end
+
+module Kernel
+  def suppress_warnings
+    original_verbosity = $VERBOSE
+    $VERBOSE = nil
+    result = yield
+    $VERBOSE = original_verbosity
+    return result
+  end
+end
+
+class Arvados
+
+  class TransactionFailedError < StandardError
+  end
+
+  @@config = nil
+  @@debuglevel = 0
+  class << self
+    attr_accessor :debuglevel
+  end
+
+  def initialize(opts={})
+    @application_version ||= 0.0
+    @application_name ||= File.split($0).last
+
+    @arvados_api_version = opts[:api_version] || 'v1'
+
+    @arvados_api_host = opts[:api_host] ||
+      config['ARVADOS_API_HOST'] or
+      raise "#{$0}: no :api_host or ENV[ARVADOS_API_HOST] provided."
+    @arvados_api_token = opts[:api_token] ||
+      config['ARVADOS_API_TOKEN'] or
+      raise "#{$0}: no :api_token or ENV[ARVADOS_API_TOKEN] provided."
+
+    if (opts[:suppress_ssl_warnings] or
+        %w(1 true yes).index(config['ARVADOS_API_HOST_INSECURE'].
+                             andand.downcase))
+      suppress_warnings do
+        OpenSSL::SSL.const_set 'VERIFY_PEER', OpenSSL::SSL::VERIFY_NONE
+      end
+    end
+
+    # Define a class and an Arvados instance method for each Arvados
+    # resource. After this, self.job will return Arvados::Job;
+    # self.job.new() and self.job.find() will do what you want.
+    _arvados = self
+    namespace_class = Arvados.const_set "A#{self.object_id}", Class.new
+    self.arvados_api.schemas.each do |classname, schema|
+      next if classname.match /List$/
+      klass = Class.new(Arvados::Model) do
+        def self.arvados
+          @arvados
+        end
+        def self.api_models_sym
+          @api_models_sym
+        end
+        def self.api_model_sym
+          @api_model_sym
+        end
+      end
+
+      # Define the resource methods (create, get, update, delete, ...)
+      self.
+        arvados_api.
+        send(classname.underscore.split('/').last.pluralize.to_sym).
+        discovered_methods.
+        each do |method|
+        class << klass; self; end.class_eval do
+          define_method method.name do |*params|
+            self.api_exec method, *params
+          end
+        end
+      end
+
+      # Give the new class access to the API
+      klass.instance_eval do
+        @arvados = _arvados
+        # TODO: Pull these from the discovery document instead.
+        @api_models_sym = classname.underscore.split('/').last.pluralize.to_sym
+        @api_model_sym = classname.underscore.split('/').last.to_sym
+      end
+
+      # Create the new class in namespace_class so it doesn't
+      # interfere with classes created by other Arvados objects. The
+      # result looks like Arvados::A26949680::Job.
+      namespace_class.const_set classname, klass
+
+      self.class.class_eval do
+        define_method classname.underscore do
+          klass
+        end
+      end
+    end
+  end
+
+  class Google::APIClient
+    def discovery_document(api, version)
+      api = api.to_s
+      discovery_uri = self.discovery_uri(api, version)
+      discovery_uri_hash = Digest::MD5.hexdigest(discovery_uri)
+      return @discovery_documents[discovery_uri_hash] ||=
+        begin
+          # fetch new API discovery doc if stale
+          cached_doc = File.expand_path "~/.cache/arvados/discovery-#{discovery_uri_hash}.json" rescue nil
+          if cached_doc.nil? or not File.exist?(cached_doc) or (Time.now - File.mtime(cached_doc)) > 86400
+            response = self.execute!(:http_method => :get,
+                                     :uri => discovery_uri,
+                                     :authenticated => false)
+            begin
+              FileUtils.makedirs(File.dirname cached_doc)
+              File.open(cached_doc, 'w') do |f|
+                f.puts response.body
+              end
+            rescue
+              return JSON.load response.body
+            end
+          end
+
+          File.open(cached_doc) { |f| JSON.load f }
+        end
+    end
+  end
+
+  def client
+    @client ||= Google::APIClient.
+      new(:host => @arvados_api_host,
+          :application_name => @application_name,
+          :application_version => @application_version.to_s)
+  end
+
+  def arvados_api
+    @arvados_api ||= self.client.discovered_api('arvados', @arvados_api_version)
+  end
+
+  def self.debuglog(message, verbosity=1)
+    $stderr.puts "#{File.split($0).last} #{$$}: #{message}" if @@debuglevel >= verbosity
+  end
+
+  def debuglog *args
+    self.class.debuglog *args
+  end
+
+  def config(config_file_path="~/.config/arvados/settings.conf")
+    return @@config if @@config
+
+    # Initialize config settings with environment variables.
+    config = {}
+    config['ARVADOS_API_HOST']          = ENV['ARVADOS_API_HOST']
+    config['ARVADOS_API_TOKEN']         = ENV['ARVADOS_API_TOKEN']
+    config['ARVADOS_API_HOST_INSECURE'] = ENV['ARVADOS_API_HOST_INSECURE']
+
+    if config['ARVADOS_API_HOST'] and config['ARVADOS_API_TOKEN']
+      # Environment variables take precedence over the config file, so
+      # there is no point reading the config file. If the environment
+      # specifies a _HOST without asking for _INSECURE, we certainly
+      # shouldn't give the config file a chance to create a
+      # system-wide _INSECURE state for this user.
+      #
+      # Note: If we start using additional configuration settings from
+      # this file in the future, we might have to read the file anyway
+      # instead of returning here.
+      return (@@config = config)
+    end
+
+    begin
+      expanded_path = File.expand_path config_file_path
+      if File.exist? expanded_path
+        # Load settings from the config file.
+        lineno = 0
+        File.open(expanded_path).each do |line|
+          lineno = lineno + 1
+          # skip comments and blank lines
+          next if line.match('^\s*#') or not line.match('\S')
+          var, val = line.chomp.split('=', 2)
+          var.strip!
+          val.strip!
+          # allow environment settings to override config files.
+          if !var.empty? and val
+            config[var] ||= val
+          else
+            debuglog "#{expanded_path}: #{lineno}: could not parse `#{line}'", 0
+          end
+        end
+      end
+    rescue StandardError => e
+      debuglog "Ignoring error reading #{config_file_path}: #{e}", 0
+    end
+
+    @@config = config
+  end
+
+  class Model
+    def self.arvados_api
+      arvados.arvados_api
+    end
+    def self.client
+      arvados.client
+    end
+    def self.debuglog(*args)
+      arvados.class.debuglog *args
+    end
+    def debuglog(*args)
+      self.class.arvados.class.debuglog *args
+    end
+    def self.api_exec(method, parameters={})
+      api_method = arvados_api.send(api_models_sym).send(method.name.to_sym)
+      parameters.each do |k,v|
+        parameters[k] = v.to_json if v.is_a? Array or v.is_a? Hash
+      end
+      # Look for objects expected by request.properties.(key).$ref and
+      # move them from parameters (query string) to request body.
+      body = nil
+      method.discovery_document['request'].
+        andand['properties'].
+        andand.each do |k,v|
+        if v.is_a? Hash and v['$ref']
+          body ||= {}
+          body[k] = parameters.delete k.to_sym
+        end
+      end
+      result = client.
+        execute(:api_method => api_method,
+                :authenticated => false,
+                :parameters => parameters,
+                :body_object => body,
+                :headers => {
+                  authorization: 'OAuth2 '+arvados.config['ARVADOS_API_TOKEN']
+                })
+      resp = JSON.parse result.body, :symbolize_names => true
+      if resp[:errors]
+        raise Arvados::TransactionFailedError.new(resp[:errors])
+      elsif resp[:uuid] and resp[:etag]
+        self.new(resp)
+      elsif resp[:items].is_a? Array
+        resp.merge(items: resp[:items].collect do |i|
+                     self.new(i)
+                   end)
+      else
+        resp
+      end
+    end
+
+    def []=(x,y)
+      @attributes_to_update[x] = y
+      @attributes[x] = y
+    end
+    def [](x)
+      if @attributes[x].is_a? Hash or @attributes[x].is_a? Array
+        # We won't be notified via []= if these change, so we'll just
+        # assume they are going to get changed, and submit them if
+        # save() is called.
+        @attributes_to_update[x] = @attributes[x]
+      end
+      @attributes[x]
+    end
+    def save
+      @attributes_to_update.keys.each do |k|
+        @attributes_to_update[k] = @attributes[k]
+      end
+      j = self.class.api_exec :update, {
+        :uuid => @attributes[:uuid],
+        self.class.api_model_sym => @attributes_to_update.to_json
+      }
+      unless j.respond_to? :[] and j[:uuid]
+        debuglog "Failed to save #{self.to_s}: #{j[:errors] rescue nil}", 0
+        nil
+      else
+        @attributes_to_update = {}
+        @attributes = j
+      end
+    end
+
+    protected
+
+    def initialize(j)
+      @attributes_to_update = {}
+      @attributes = j
+    end
+  end
+end
diff --git a/sdk/ruby/lib/arvados/keep.rb b/sdk/ruby/lib/arvados/keep.rb
new file mode 100644 (file)
index 0000000..acf8099
--- /dev/null
@@ -0,0 +1,190 @@
+module Keep
+  class Locator
+    # A Locator is used to parse and manipulate Keep locator strings.
+    #
+    # Locators obey the following syntax:
+    #
+    #   locator      ::= address hint*
+    #   address      ::= digest size-hint
+    #   digest       ::= <32 hexadecimal digits>
+    #   size-hint    ::= "+" [0-9]+
+    #   hint         ::= "+" hint-type hint-content
+    #   hint-type    ::= [A-Z]
+    #   hint-content ::= [A-Za-z0-9@_-]+
+    #
+    # Individual hints may have their own required format:
+    #
+    #   sign-hint      ::= "+A" <40 lowercase hex digits> "@" sign-timestamp
+    #   sign-timestamp ::= <8 lowercase hex digits>
+    attr_reader :hash, :hints, :size
+
+    def initialize(hasharg, sizearg, hintarg)
+      @hash = hasharg
+      @size = sizearg
+      @hints = hintarg
+    end
+
+    # Locator.parse returns a Locator object parsed from the string tok.
+    # Returns nil if tok could not be parsed as a valid locator.
+    def self.parse(tok)
+      begin
+        Locator.parse!(tok)
+      rescue ArgumentError => e
+        nil
+      end
+    end
+
+    # Locator.parse! returns a Locator object parsed from the string tok,
+    # raising an ArgumentError if tok cannot be parsed.
+    def self.parse!(tok)
+      if tok.nil? or tok.empty?
+        raise ArgumentError.new "locator is nil or empty"
+      end
+
+      m = /^([[:xdigit:]]{32})(\+([[:digit:]]+))?(\+([[:upper:]][[:alnum:]+@_-]*))?$/.match(tok.strip)
+      unless m
+        raise ArgumentError.new "not a valid locator #{tok}"
+      end
+
+      tokhash, _, toksize, _, trailer = m[1..5]
+      tokhints = []
+      if trailer
+        trailer.split('+').each do |hint|
+          if hint =~ /^[[:upper:]][[:alnum:]@_-]+$/
+            tokhints.push(hint)
+          else
+            raise ArgumentError.new "unknown hint #{hint}"
+          end
+        end
+      end
+
+      Locator.new(tokhash, toksize, tokhints)
+    end
+
+    # Returns the signature hint supplied with this locator,
+    # or nil if the locator was not signed.
+    def signature
+      @hints.grep(/^A/).first
+    end
+
+    # Returns an unsigned Locator.
+    def without_signature
+      Locator.new(@hash, @size, @hints.reject { |o| o.start_with?("A") })
+    end
+
+    def strip_hints
+      Locator.new(@hash, @size, [])
+    end
+
+    def strip_hints!
+      @hints = []
+      self
+    end
+
+    def to_s
+      if @size
+        [ @hash, @size, *@hints ].join('+')
+      else
+        [ @hash, *@hints ].join('+')
+      end
+    end
+  end
+
+  class Manifest
+    # Class to parse a manifest text and provide common views of that data.
+    def initialize(manifest_text)
+      @text = manifest_text
+      @files = nil
+    end
+
+    def each_line
+      return to_enum(__method__) unless block_given?
+      @text.each_line do |line|
+        tokens = line.split
+        next if tokens.empty?
+        stream_name = unescape(tokens.shift)
+        blocks = []
+        while loc = Locator.parse(tokens.first)
+          blocks << loc
+          tokens.shift
+        end
+        yield [stream_name, blocks, tokens.map { |s| unescape(s) }]
+      end
+    end
+
+    def unescape(s)
+      # Parse backslash escapes in a Keep manifest stream or file name.
+      s.gsub(/\\(\\|[0-7]{3})/) do |_|
+        case $1
+        when '\\'
+          '\\'
+        else
+          $1.to_i(8).chr
+        end
+      end
+    end
+
+    def each_file_spec(speclist)
+      return to_enum(__method__, speclist) unless block_given?
+      speclist.each do |filespec|
+        start_pos, filesize, filename = filespec.split(':', 3)
+        yield [start_pos.to_i, filesize.to_i, filename]
+      end
+    end
+
+    def files
+      if @files.nil?
+        file_sizes = Hash.new(0)
+        each_line do |streamname, blocklist, filelist|
+          each_file_spec(filelist) do |_, filesize, filename|
+            file_sizes[[streamname, filename]] += filesize
+          end
+        end
+        @files = file_sizes.each_pair.map do |(streamname, filename), size|
+          [streamname, filename, size]
+        end
+      end
+      @files
+    end
+
+    def files_count(stop_after=nil)
+      # Return the number of files represented in this manifest.
+      # If stop_after is provided, files_count will read the manifest
+      # incrementally, and return immediately when it counts that number of
+      # files.  This can help you avoid parsing the entire manifest if you
+      # just want to check if a small number of files are specified.
+      if stop_after.nil? or not @files.nil?
+        return files.size
+      end
+      seen_files = {}
+      each_line do |streamname, blocklist, filelist|
+        each_file_spec(filelist) do |_, _, filename|
+          seen_files[[streamname, filename]] = true
+          return stop_after if (seen_files.size >= stop_after)
+        end
+      end
+      seen_files.size
+    end
+
+    def exact_file_count?(want_count)
+      files_count(want_count + 1) == want_count
+    end
+
+    def minimum_file_count?(want_count)
+      files_count(want_count) >= want_count
+    end
+
+    def has_file?(want_stream, want_file=nil)
+      if want_file.nil?
+        want_stream, want_file = File.split(want_stream)
+      end
+      each_line do |stream_name, _, filelist|
+        if (stream_name == want_stream) and
+            each_file_spec(filelist).any? { |_, _, name| name == want_file }
+          return true
+        end
+      end
+      false
+    end
+  end
+end
diff --git a/sdk/ruby/test/test_big_request.rb b/sdk/ruby/test/test_big_request.rb
new file mode 100644 (file)
index 0000000..84700fd
--- /dev/null
@@ -0,0 +1,31 @@
+require 'minitest/autorun'
+require 'arvados'
+require 'digest/md5'
+
+class TestBigRequest < Minitest::Test
+  def boring_manifest nblocks
+    x = '.'
+    (0..nblocks).each do |z|
+      x += ' d41d8cd98f00b204e9800998ecf8427e+0'
+    end
+    x += " 0:0:foo.txt\n"
+    x
+  end
+
+  def test_create_manifest nblocks=1
+    skip "Test needs an API server to run against"
+    manifest_text = boring_manifest nblocks
+    uuid = Digest::MD5.hexdigest(manifest_text) + '+' + manifest_text.size.to_s
+    c = Arvados.new.collection.create(collection: {
+                                        uuid: uuid,
+                                        manifest_text: manifest_text,
+                                      })
+    assert_equal uuid, c[:portable_data_hash]
+  end
+
+  def test_create_big_manifest
+    # This ensures that manifest_text is passed in the request body:
+    # it's too large to fit in the query string.
+    test_create_manifest 9999
+  end
+end
diff --git a/sdk/ruby/test/test_keep_manifest.rb b/sdk/ruby/test/test_keep_manifest.rb
new file mode 100644 (file)
index 0000000..64c8ea3
--- /dev/null
@@ -0,0 +1,156 @@
+require "minitest/autorun"
+require "arvados/keep"
+
+def random_block(size=nil)
+  sprintf("%032x+%d", rand(16 ** 32), size || rand(64 * 1024 * 1024))
+end
+
+class ManifestTest < Minitest::Test
+  SIMPLEST_MANIFEST = ". #{random_block(9)} 0:9:simple.txt\n"
+  MULTIBLOCK_FILE_MANIFEST =
+    [". #{random_block(8)} 0:4:repfile 4:4:uniqfile",
+     "./s1 #{random_block(6)} 0:3:repfile 3:3:uniqfile",
+     ". #{random_block(8)} 0:7:uniqfile2 7:1:repfile\n"].join("\n")
+  MULTILEVEL_MANIFEST =
+    [". #{random_block(9)} 0:3:file1 3:3:file2 6:3:file3\n",
+     "./dir1 #{random_block(9)} 0:3:file1 3:3:file2 6:3:file3\n",
+     "./dir1/subdir #{random_block(9)} 0:3:file1 3:3:file2 6:3:file3\n",
+     "./dir2 #{random_block(9)} 0:3:file1 3:3:file2 6:3:file3\n"].join("")
+
+  def check_stream(stream, exp_name, exp_blocks, exp_files)
+    assert_equal(exp_name, stream.first)
+    assert_equal(exp_blocks, stream[1].map(&:to_s))
+    assert_equal(exp_files, stream.last)
+  end
+
+  def test_simple_each_line_array
+    manifest = Keep::Manifest.new(SIMPLEST_MANIFEST)
+    stream_name, block_s, file = SIMPLEST_MANIFEST.strip.split
+    stream_a = manifest.each_line.to_a
+    assert_equal(1, stream_a.size, "wrong number of streams")
+    check_stream(stream_a.first, stream_name, [block_s], [file])
+  end
+
+  def test_simple_each_line_block
+    manifest = Keep::Manifest.new(SIMPLEST_MANIFEST)
+    result = []
+    manifest.each_line do |stream, blocks, files|
+      result << files
+    end
+    assert_equal([[SIMPLEST_MANIFEST.split.last]], result,
+                 "wrong result from each_line block")
+  end
+
+  def test_multilevel_each_line
+    manifest = Keep::Manifest.new(MULTILEVEL_MANIFEST)
+    seen = []
+    manifest.each_line do |stream, blocks, files|
+      refute(seen.include?(stream),
+             "each_line already yielded stream #{stream}")
+      seen << stream
+      assert_equal(3, files.size, "wrong file count for stream #{stream}")
+    end
+    assert_equal(4, seen.size, "wrong number of streams")
+  end
+
+  def test_empty_each_line
+    assert_empty(Keep::Manifest.new("").each_line.to_a)
+  end
+
+  def test_empty_line_within_manifest
+    block_s = random_block
+    manifest = Keep::Manifest.
+      new([". #{block_s} 0:1:file1 1:2:file2\n",
+           "\n",
+           ". #{block_s} 3:3:file3 6:4:file4\n"].join(""))
+    streams = manifest.each_line.to_a
+    assert_equal(2, streams.size)
+    check_stream(streams[0], ".", [block_s], ["0:1:file1", "1:2:file2"])
+    check_stream(streams[1], ".", [block_s], ["3:3:file3", "6:4:file4"])
+  end
+
+  def test_backslash_escape_parsing
+    m_text = "./dir\\040name #{random_block} 0:0:file\\\\name\\011\\here.txt\n"
+    manifest = Keep::Manifest.new(m_text)
+    streams = manifest.each_line.to_a
+    assert_equal(1, streams.size, "wrong number of streams with whitespace")
+    assert_equal("./dir name", streams.first.first,
+                 "wrong stream name with whitespace")
+    assert_equal(["0:0:file\\name\t\\here.txt"], streams.first.last,
+                 "wrong filename(s) with whitespace")
+  end
+
+  def test_simple_files
+    manifest = Keep::Manifest.new(SIMPLEST_MANIFEST)
+    assert_equal([[".", "simple.txt", 9]], manifest.files)
+  end
+
+  def test_multilevel_files
+    manifest = Keep::Manifest.new(MULTILEVEL_MANIFEST)
+    seen = Hash.new { |this, key| this[key] = [] }
+    manifest.files.each do |stream, basename, size|
+      refute(seen[stream].include?(basename),
+             "each_file repeated #{stream}/#{basename}")
+      seen[stream] << basename
+      assert_equal(3, size, "wrong size for #{stream}/#{basename}")
+    end
+    seen.each_pair do |stream, basenames|
+      assert_equal(%w(file1 file2 file3), basenames.sort,
+                   "wrong file list for #{stream}")
+    end
+  end
+
+  def test_files_with_colons_in_names
+    manifest = Keep::Manifest.new(". #{random_block(9)} 0:9:file:test.txt\n")
+    assert_equal([[".", "file:test.txt", 9]], manifest.files)
+  end
+
+  def test_files_spanning_multiple_blocks
+    manifest = Keep::Manifest.new(MULTIBLOCK_FILE_MANIFEST)
+    assert_equal([[".", "repfile", 5],
+                  [".", "uniqfile", 4],
+                  [".", "uniqfile2", 7],
+                  ["./s1", "repfile", 3],
+                  ["./s1", "uniqfile", 3]],
+                 manifest.files.sort)
+  end
+
+  def test_minimum_file_count_simple
+    manifest = Keep::Manifest.new(SIMPLEST_MANIFEST)
+    assert(manifest.minimum_file_count?(1), "real minimum file count false")
+    refute(manifest.minimum_file_count?(2), "fake minimum file count true")
+  end
+
+  def test_minimum_file_count_multiblock
+    manifest = Keep::Manifest.new(MULTIBLOCK_FILE_MANIFEST)
+    assert(manifest.minimum_file_count?(2), "low minimum file count false")
+    assert(manifest.minimum_file_count?(5), "real minimum file count false")
+    refute(manifest.minimum_file_count?(6), "fake minimum file count true")
+  end
+
+  def test_exact_file_count_simple
+    manifest = Keep::Manifest.new(SIMPLEST_MANIFEST)
+    assert(manifest.exact_file_count?(1), "exact file count false")
+    refute(manifest.exact_file_count?(0), "-1 file count true")
+    refute(manifest.exact_file_count?(2), "+1 file count true")
+  end
+
+  def test_exact_file_count_multiblock
+    manifest = Keep::Manifest.new(MULTIBLOCK_FILE_MANIFEST)
+    assert(manifest.exact_file_count?(5), "exact file count false")
+    refute(manifest.exact_file_count?(4), "-1 file count true")
+    refute(manifest.exact_file_count?(6), "+1 file count true")
+  end
+
+  def test_has_file
+    manifest = Keep::Manifest.new(MULTIBLOCK_FILE_MANIFEST)
+    assert(manifest.has_file?("./repfile"), "one-arg repfile not found")
+    assert(manifest.has_file?(".", "repfile"), "two-arg repfile not found")
+    assert(manifest.has_file?("./s1/repfile"), "one-arg s1/repfile not found")
+    assert(manifest.has_file?("./s1", "repfile"), "two-arg s1/repfile not found")
+    refute(manifest.has_file?("./s1/uniqfile2"), "one-arg missing file found")
+    refute(manifest.has_file?("./s1", "uniqfile2"), "two-arg missing file found")
+    refute(manifest.has_file?("./s2/repfile"), "one-arg missing stream found")
+    refute(manifest.has_file?("./s2", "repfile"), "two-arg missing stream found")
+  end
+end
diff --git a/services/api/.gitignore b/services/api/.gitignore
new file mode 100644 (file)
index 0000000..c1d5219
--- /dev/null
@@ -0,0 +1,29 @@
+# Ignore the default SQLite database.
+/db/*.sqlite3
+
+# Ignore all logfiles and tempfiles.
+/log/*.log
+/tmp
+
+# Sensitive files and local configuration
+/config/database.yml
+/config/initializers/omniauth.rb
+/config/application.yml
+
+# asset cache
+/public/assets/
+
+/config/environments/development.rb
+/config/environments/production.rb
+/config/environments/test.rb
+
+# Capistrano files are coming from another repo
+/Capfile*
+/config/deploy*
+
+# SimpleCov reports
+/coverage
+
+# Dev/test SSL certificates
+/self-signed.key
+/self-signed.pem
diff --git a/services/api/Gemfile b/services/api/Gemfile
new file mode 100644 (file)
index 0000000..a7da122
--- /dev/null
@@ -0,0 +1,81 @@
+source 'https://rubygems.org'
+
+gem 'rails', '~> 3.2.0'
+
+# Bundle edge Rails instead:
+# gem 'rails',     :git => 'git://github.com/rails/rails.git'
+
+group :test, :development do
+  gem 'factory_girl_rails'
+  gem 'database_cleaner'
+  # Note: "require: false" here tells bundler not to automatically
+  # 'require' the packages during application startup. Installation is
+  # still mandatory.
+  gem 'simplecov', '~> 0.7.1', require: false
+  gem 'simplecov-rcov', require: false
+end
+
+# This might not be needed in :test and :development, but we load it
+# anyway to make sure it always gets in Gemfile.lock and to help
+# reveal install problems sooner rather than later.
+gem 'pg'
+
+# Start using multi_json once we are on Rails 3.2;
+# Rails 3.1 has a dependency on multi_json < 1.3.0 but we need version 1.3.4 to
+# fix bug https://github.com/collectiveidea/json_spec/issues/27
+gem 'multi_json'
+gem 'oj'
+
+# Gems used only for assets and not required
+# in production environments by default.
+group :assets do
+  gem 'sass-rails',   '>= 3.2.0'
+  gem 'coffee-rails', '~> 3.2.0'
+
+  # See https://github.com/sstephenson/execjs#readme for more supported runtimes
+  gem 'therubyracer'
+
+  gem 'uglifier', '>= 1.0.3'
+end
+
+gem 'jquery-rails'
+
+# To use ActiveModel has_secure_password
+# gem 'bcrypt-ruby', '~> 3.0.0'
+
+# Use unicorn as the web server
+# gem 'unicorn'
+
+# Deploy with Capistrano
+# gem 'capistrano'
+
+# To use debugger
+# gem 'ruby-debug'
+
+gem 'rvm-capistrano', :group => :test
+
+gem 'acts_as_api'
+
+gem 'passenger', :group => :production
+
+gem 'omniauth', '1.1.1'
+gem 'omniauth-oauth2', '1.1.1'
+
+gem 'andand'
+
+gem 'test_after_commit', :group => :test
+
+gem 'google-api-client', '~> 0.6.3'
+gem 'trollop'
+gem 'faye-websocket'
+
+gem 'themes_for_rails'
+
+gem 'arvados', '>= 0.1.20140919104705'
+gem 'arvados-cli', '>= 0.1.20141202211726'
+
+# pg_power lets us use partial indexes in schema.rb in Rails 3
+gem 'pg_power'
+
+gem 'puma'
+gem 'sshkey'
diff --git a/services/api/Gemfile.lock b/services/api/Gemfile.lock
new file mode 100644 (file)
index 0000000..c2b2351
--- /dev/null
@@ -0,0 +1,252 @@
+GEM
+  remote: https://rubygems.org/
+  specs:
+    actionmailer (3.2.17)
+      actionpack (= 3.2.17)
+      mail (~> 2.5.4)
+    actionpack (3.2.17)
+      activemodel (= 3.2.17)
+      activesupport (= 3.2.17)
+      builder (~> 3.0.0)
+      erubis (~> 2.7.0)
+      journey (~> 1.0.4)
+      rack (~> 1.4.5)
+      rack-cache (~> 1.2)
+      rack-test (~> 0.6.1)
+      sprockets (~> 2.2.1)
+    activemodel (3.2.17)
+      activesupport (= 3.2.17)
+      builder (~> 3.0.0)
+    activerecord (3.2.17)
+      activemodel (= 3.2.17)
+      activesupport (= 3.2.17)
+      arel (~> 3.0.2)
+      tzinfo (~> 0.3.29)
+    activeresource (3.2.17)
+      activemodel (= 3.2.17)
+      activesupport (= 3.2.17)
+    activesupport (3.2.17)
+      i18n (~> 0.6, >= 0.6.4)
+      multi_json (~> 1.0)
+    acts_as_api (0.4.2)
+      activemodel (>= 3.0.0)
+      activesupport (>= 3.0.0)
+      rack (>= 1.1.0)
+    addressable (2.3.6)
+    andand (1.3.3)
+    arel (3.0.3)
+    arvados (0.1.20141114230720)
+      activesupport (>= 3.2.13)
+      andand (~> 1.3, >= 1.3.3)
+      google-api-client (~> 0.6.3, >= 0.6.3)
+      json (~> 1.7, >= 1.7.7)
+      jwt (>= 0.1.5, < 1.0.0)
+    arvados-cli (0.1.20141209151444)
+      activesupport (~> 3.2, >= 3.2.13)
+      andand (~> 1.3, >= 1.3.3)
+      arvados (~> 0.1, >= 0.1.0)
+      curb (~> 0.8)
+      google-api-client (~> 0.6.3, >= 0.6.3)
+      json (~> 1.7, >= 1.7.7)
+      jwt (>= 0.1.5, < 1.0.0)
+      oj (~> 2.0, >= 2.0.3)
+      trollop (~> 2.0)
+    autoparse (0.3.3)
+      addressable (>= 2.3.1)
+      extlib (>= 0.9.15)
+      multi_json (>= 1.0.0)
+    builder (3.0.4)
+    capistrano (2.15.5)
+      highline
+      net-scp (>= 1.0.0)
+      net-sftp (>= 2.0.0)
+      net-ssh (>= 2.0.14)
+      net-ssh-gateway (>= 1.1.0)
+    coffee-rails (3.2.1)
+      coffee-script (>= 2.2.0)
+      railties (~> 3.2.0.beta)
+    coffee-script (2.2.0)
+      coffee-script-source
+      execjs
+    coffee-script-source (1.7.0)
+    curb (0.8.6)
+    daemon_controller (1.2.0)
+    database_cleaner (1.2.0)
+    erubis (2.7.0)
+    eventmachine (1.0.3)
+    execjs (2.0.2)
+    extlib (0.9.16)
+    factory_girl (4.4.0)
+      activesupport (>= 3.0.0)
+    factory_girl_rails (4.4.1)
+      factory_girl (~> 4.4.0)
+      railties (>= 3.0.0)
+    faraday (0.8.9)
+      multipart-post (~> 1.2.0)
+    faye-websocket (0.7.2)
+      eventmachine (>= 0.12.0)
+      websocket-driver (>= 0.3.1)
+    google-api-client (0.6.4)
+      addressable (>= 2.3.2)
+      autoparse (>= 0.3.3)
+      extlib (>= 0.9.15)
+      faraday (~> 0.8.4)
+      jwt (>= 0.1.5)
+      launchy (>= 2.1.1)
+      multi_json (>= 1.0.0)
+      signet (~> 0.4.5)
+      uuidtools (>= 2.1.0)
+    hashie (1.2.0)
+    highline (1.6.21)
+    hike (1.2.3)
+    httpauth (0.2.1)
+    i18n (0.6.11)
+    journey (1.0.4)
+    jquery-rails (3.1.0)
+      railties (>= 3.0, < 5.0)
+      thor (>= 0.14, < 2.0)
+    json (1.8.1)
+    jwt (0.1.13)
+      multi_json (>= 1.5)
+    launchy (2.4.3)
+      addressable (~> 2.3)
+    libv8 (3.16.14.3)
+    mail (2.5.4)
+      mime-types (~> 1.16)
+      treetop (~> 1.4.8)
+    mime-types (1.25.1)
+    multi_json (1.10.1)
+    multipart-post (1.2.0)
+    net-scp (1.2.0)
+      net-ssh (>= 2.6.5)
+    net-sftp (2.1.2)
+      net-ssh (>= 2.6.5)
+    net-ssh (2.8.0)
+    net-ssh-gateway (1.2.0)
+      net-ssh (>= 2.6.5)
+    oauth2 (0.8.1)
+      faraday (~> 0.8)
+      httpauth (~> 0.1)
+      jwt (~> 0.1.4)
+      multi_json (~> 1.0)
+      rack (~> 1.2)
+    oj (2.11.1)
+    omniauth (1.1.1)
+      hashie (~> 1.2)
+      rack
+    omniauth-oauth2 (1.1.1)
+      oauth2 (~> 0.8.0)
+      omniauth (~> 1.0)
+    passenger (4.0.41)
+      daemon_controller (>= 1.2.0)
+      rack
+      rake (>= 0.8.1)
+    pg (0.17.1)
+    pg_power (1.6.4)
+      pg
+      rails (~> 3.1)
+    polyglot (0.3.4)
+    puma (2.8.2)
+      rack (>= 1.1, < 2.0)
+    rack (1.4.5)
+    rack-cache (1.2)
+      rack (>= 0.4)
+    rack-ssl (1.3.4)
+      rack
+    rack-test (0.6.2)
+      rack (>= 1.0)
+    rails (3.2.17)
+      actionmailer (= 3.2.17)
+      actionpack (= 3.2.17)
+      activerecord (= 3.2.17)
+      activeresource (= 3.2.17)
+      activesupport (= 3.2.17)
+      bundler (~> 1.0)
+      railties (= 3.2.17)
+    railties (3.2.17)
+      actionpack (= 3.2.17)
+      activesupport (= 3.2.17)
+      rack-ssl (~> 1.3.2)
+      rake (>= 0.8.7)
+      rdoc (~> 3.4)
+      thor (>= 0.14.6, < 2.0)
+    rake (10.2.2)
+    rdoc (3.12.2)
+      json (~> 1.4)
+    ref (1.0.5)
+    rvm-capistrano (1.5.1)
+      capistrano (~> 2.15.4)
+    sass (3.3.4)
+    sass-rails (3.2.6)
+      railties (~> 3.2.0)
+      sass (>= 3.1.10)
+      tilt (~> 1.3)
+    signet (0.4.5)
+      addressable (>= 2.2.3)
+      faraday (~> 0.8.1)
+      jwt (>= 0.1.5)
+      multi_json (>= 1.0.0)
+    simplecov (0.7.1)
+      multi_json (~> 1.0)
+      simplecov-html (~> 0.7.1)
+    simplecov-html (0.7.1)
+    simplecov-rcov (0.2.3)
+      simplecov (>= 0.4.1)
+    sprockets (2.2.2)
+      hike (~> 1.2)
+      multi_json (~> 1.0)
+      rack (~> 1.0)
+      tilt (~> 1.1, != 1.3.0)
+    sshkey (1.6.1)
+    test_after_commit (0.2.3)
+    themes_for_rails (0.5.1)
+      rails (>= 3.0.0)
+    therubyracer (0.12.1)
+      libv8 (~> 3.16.14.0)
+      ref
+    thor (0.19.1)
+    tilt (1.4.1)
+    treetop (1.4.15)
+      polyglot
+      polyglot (>= 0.3.1)
+    trollop (2.0)
+    tzinfo (0.3.39)
+    uglifier (2.5.0)
+      execjs (>= 0.3.0)
+      json (>= 1.8.0)
+    uuidtools (2.1.5)
+    websocket-driver (0.3.2)
+
+PLATFORMS
+  ruby
+
+DEPENDENCIES
+  acts_as_api
+  andand
+  arvados (>= 0.1.20140919104705)
+  arvados-cli (>= 0.1.20141202211726)
+  coffee-rails (~> 3.2.0)
+  database_cleaner
+  factory_girl_rails
+  faye-websocket
+  google-api-client (~> 0.6.3)
+  jquery-rails
+  multi_json
+  oj
+  omniauth (= 1.1.1)
+  omniauth-oauth2 (= 1.1.1)
+  passenger
+  pg
+  pg_power
+  puma
+  rails (~> 3.2.0)
+  rvm-capistrano
+  sass-rails (>= 3.2.0)
+  simplecov (~> 0.7.1)
+  simplecov-rcov
+  sshkey
+  test_after_commit
+  themes_for_rails
+  therubyracer
+  trollop
+  uglifier (>= 1.0.3)
diff --git a/services/api/README b/services/api/README
new file mode 100644 (file)
index 0000000..7c36f23
--- /dev/null
@@ -0,0 +1,261 @@
+== Welcome to Rails
+
+Rails is a web-application framework that includes everything needed to create
+database-backed web applications according to the Model-View-Control pattern.
+
+This pattern splits the view (also called the presentation) into "dumb"
+templates that are primarily responsible for inserting pre-built data in between
+HTML tags. The model contains the "smart" domain objects (such as Account,
+Product, Person, Post) that hold all the business logic and know how to
+persist themselves to a database. The controller handles the incoming requests
+(such as Save New Account, Update Product, Show Post) by manipulating the model
+and directing data to the view.
+
+In Rails, the model is handled by what's called an object-relational mapping
+layer entitled Active Record. This layer allows you to present the data from
+database rows as objects and embellish these data objects with business logic
+methods. You can read more about Active Record in
+link:files/vendor/rails/activerecord/README.html.
+
+The controller and view are handled by the Action Pack, which handles both
+layers by its two parts: Action View and Action Controller. These two layers
+are bundled in a single package due to their heavy interdependence. This is
+unlike the relationship between the Active Record and Action Pack that is much
+more separate. Each of these packages can be used independently outside of
+Rails. You can read more about Action Pack in
+link:files/vendor/rails/actionpack/README.html.
+
+
+== Getting Started
+
+1. At the command prompt, create a new Rails application:
+       <tt>rails new myapp</tt> (where <tt>myapp</tt> is the application name)
+
+2. Change directory to <tt>myapp</tt> and start the web server:
+       <tt>cd myapp; rails server</tt> (run with --help for options)
+
+3. Go to http://localhost:3000/ and you'll see:
+       "Welcome aboard: You're riding Ruby on Rails!"
+
+4. Follow the guidelines to start developing your application. You can find
+the following resources handy:
+
+* The Getting Started Guide: http://guides.rubyonrails.org/getting_started.html
+* Ruby on Rails Tutorial Book: http://www.railstutorial.org/
+
+
+== Debugging Rails
+
+Sometimes your application goes wrong. Fortunately there are a lot of tools that
+will help you debug it and get it back on the rails.
+
+First area to check is the application log files. Have "tail -f" commands
+running on the server.log and development.log. Rails will automatically display
+debugging and runtime information to these files. Debugging info will also be
+shown in the browser on requests from 127.0.0.1.
+
+You can also log your own messages directly into the log file from your code
+using the Ruby logger class from inside your controllers. Example:
+
+  class WeblogController < ActionController::Base
+    def destroy
+      @weblog = Weblog.find(params[:id])
+      @weblog.destroy
+      logger.info("#{Time.now} Destroyed Weblog ID ##{@weblog.id}!")
+    end
+  end
+
+The result will be a message in your log file along the lines of:
+
+  Mon Oct 08 14:22:29 +1000 2007 Destroyed Weblog ID #1!
+
+More information on how to use the logger is at http://www.ruby-doc.org/core/
+
+Also, Ruby documentation can be found at http://www.ruby-lang.org/. There are
+several books available online as well:
+
+* Programming Ruby: http://www.ruby-doc.org/docs/ProgrammingRuby/ (Pickaxe)
+* Learn to Program: http://pine.fm/LearnToProgram/ (a beginners guide)
+
+These two books will bring you up to speed on the Ruby language and also on
+programming in general.
+
+
+== Debugger
+
+Debugger support is available through the debugger command when you start your
+Mongrel or WEBrick server with --debugger. This means that you can break out of
+execution at any point in the code, investigate and change the model, and then,
+resume execution! You need to install ruby-debug to run the server in debugging
+mode. With gems, use <tt>sudo gem install ruby-debug</tt>. Example:
+
+  class WeblogController < ActionController::Base
+    def index
+      @posts = Post.all
+      debugger
+    end
+  end
+
+So the controller will accept the action, run the first line, then present you
+with an IRB prompt in the server window. Here you can do things like:
+
+  >> @posts.inspect
+  => "[#<Post:0x14a6be8
+          @attributes={"title"=>nil, "body"=>nil, "id"=>"1"}>,
+       #<Post:0x14a6620
+          @attributes={"title"=>"Rails", "body"=>"Only ten..", "id"=>"2"}>]"
+  >> @posts.first.title = "hello from a debugger"
+  => "hello from a debugger"
+
+...and even better, you can examine how your runtime objects actually work:
+
+  >> f = @posts.first
+  => #<Post:0x13630c4 @attributes={"title"=>nil, "body"=>nil, "id"=>"1"}>
+  >> f.
+  Display all 152 possibilities? (y or n)
+
+Finally, when you're ready to resume execution, you can enter "cont".
+
+
+== Console
+
+The console is a Ruby shell, which allows you to interact with your
+application's domain model. Here you'll have all parts of the application
+configured, just like it is when the application is running. You can inspect
+domain models, change values, and save to the database. Starting the script
+without arguments will launch it in the development environment.
+
+To start the console, run <tt>rails console</tt> from the application
+directory.
+
+Options:
+
+* Passing the <tt>-s, --sandbox</tt> argument will rollback any modifications
+  made to the database.
+* Passing an environment name as an argument will load the corresponding
+  environment. Example: <tt>rails console production</tt>.
+
+To reload your controllers and models after launching the console run
+<tt>reload!</tt>
+
+More information about irb can be found at:
+link:http://www.rubycentral.org/pickaxe/irb.html
+
+
+== dbconsole
+
+You can go to the command line of your database directly through <tt>rails
+dbconsole</tt>. You would be connected to the database with the credentials
+defined in database.yml. Starting the script without arguments will connect you
+to the development database. Passing an argument will connect you to a different
+database, like <tt>rails dbconsole production</tt>. Currently works for MySQL,
+PostgreSQL and SQLite 3.
+
+== Description of Contents
+
+The default directory structure of a generated Ruby on Rails application:
+
+  |-- app
+  |   |-- assets
+  |       |-- images
+  |       |-- javascripts
+  |       `-- stylesheets
+  |   |-- controllers
+  |   |-- helpers
+  |   |-- mailers
+  |   |-- models
+  |   `-- views
+  |       `-- layouts
+  |-- config
+  |   |-- environments
+  |   |-- initializers
+  |   `-- locales
+  |-- db
+  |-- doc
+  |-- lib
+  |   `-- tasks
+  |-- log
+  |-- public
+  |-- script
+  |-- test
+  |   |-- fixtures
+  |   |-- functional
+  |   |-- integration
+  |   |-- performance
+  |   `-- unit
+  |-- tmp
+  |   |-- cache
+  |   |-- pids
+  |   |-- sessions
+  |   `-- sockets
+  `-- vendor
+      |-- assets
+          `-- stylesheets
+      `-- plugins
+
+app
+  Holds all the code that's specific to this particular application.
+
+app/assets
+  Contains subdirectories for images, stylesheets, and JavaScript files.
+
+app/controllers
+  Holds controllers that should be named like weblogs_controller.rb for
+  automated URL mapping. All controllers should descend from
+  ApplicationController which itself descends from ActionController::Base.
+
+app/models
+  Holds models that should be named like post.rb. Models descend from
+  ActiveRecord::Base by default.
+
+app/views
+  Holds the template files for the view that should be named like
+  weblogs/index.html.erb for the WeblogsController#index action. All views use
+  eRuby syntax by default.
+
+app/views/layouts
+  Holds the template files for layouts to be used with views. This models the
+  common header/footer method of wrapping views. In your views, define a layout
+  using the <tt>layout :default</tt> and create a file named default.html.erb.
+  Inside default.html.erb, call <% yield %> to render the view using this
+  layout.
+
+app/helpers
+  Holds view helpers that should be named like weblogs_helper.rb. These are
+  generated for you automatically when using generators for controllers.
+  Helpers can be used to wrap functionality for your views into methods.
+
+config
+  Configuration files for the Rails environment, the routing map, the database,
+  and other dependencies.
+
+db
+  Contains the database schema in schema.rb. db/migrate contains all the
+  sequence of Migrations for your schema.
+
+doc
+  This directory is where your application documentation will be stored when
+  generated using <tt>rake doc:app</tt>
+
+lib
+  Application specific libraries. Basically, any kind of custom code that
+  doesn't belong under controllers, models, or helpers. This directory is in
+  the load path.
+
+public
+  The directory available for the web server. Also contains the dispatchers and the
+  default HTML files. This should be set as the DOCUMENT_ROOT of your web
+  server.
+
+script
+  Helper scripts for automation and generation.
+
+test
+  Unit and functional tests along with fixtures. When using the rails generate
+  command, template test files will be generated for you and placed in this
+  directory.
+
+vendor
+  External libraries that the application depends on. Also includes the plugins
+  subdirectory. If the app has frozen rails, those gems also go here, under
+  vendor/rails/. This directory is in the load path.
diff --git a/services/api/Rakefile b/services/api/Rakefile
new file mode 100644 (file)
index 0000000..223f5ca
--- /dev/null
@@ -0,0 +1,13 @@
+#!/usr/bin/env rake
+# Add your own tasks in files placed in lib/tasks ending in .rake,
+# for example lib/tasks/capistrano.rake, and they will automatically be available to Rake.
+
+require File.expand_path('../config/application', __FILE__)
+
+begin
+  ok = PgPower
+rescue
+  abort "Hm, pg_power is missing. Make sure you use 'bundle exec rake ...'"
+end
+
+Server::Application.load_tasks
diff --git a/services/api/app/assets/images/logo.png b/services/api/app/assets/images/logo.png
new file mode 100644 (file)
index 0000000..4db96ef
Binary files /dev/null and b/services/api/app/assets/images/logo.png differ
diff --git a/services/api/app/assets/images/rails.png b/services/api/app/assets/images/rails.png
new file mode 100644 (file)
index 0000000..d5edc04
Binary files /dev/null and b/services/api/app/assets/images/rails.png differ
diff --git a/services/api/app/assets/stylesheets/api_client_authorizations.css.scss b/services/api/app/assets/stylesheets/api_client_authorizations.css.scss
new file mode 100644 (file)
index 0000000..fd2c9d8
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the ApiClientAuthorizations controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/api_clients.css.scss b/services/api/app/assets/stylesheets/api_clients.css.scss
new file mode 100644 (file)
index 0000000..bd62734
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the ApiClients controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/application.css b/services/api/app/assets/stylesheets/application.css
new file mode 100644 (file)
index 0000000..6d2c3ef
--- /dev/null
@@ -0,0 +1,179 @@
+/*
+ * This is a manifest file that'll automatically include all the stylesheets available in this directory
+ * and any sub-directories. You're free to add application-wide styles to this file and they'll appear at
+ * the top of the compiled file, but it's generally better to create a new file per style scope.
+ *= require_self
+ *= require_tree .
+*/
+
+.contain-align-left {
+    text-align: left;
+}
+
+body {
+    margin: 0;
+}
+body > div {
+    margin: 2px;
+}
+div#footer {
+    font-family: Verdana,Arial,sans-serif;
+    font-size: 12px;
+    margin-top: 24px;
+    border-top: 1px solid #ccc;
+}
+div#footer, div#footer a {
+    color: #777;
+}
+div#header {
+    margin: 0;
+    padding: .5em 1em;
+    background: #000;
+    font-weight: bold;
+    font-size: 18px;
+    font-family: Verdana,Arial,sans-serif;
+    vertical-align: middle;
+    color: #ddd;
+}
+div#header > div {
+    display: inline-block;
+    font-size: 12px;
+    line-height: 18px;
+}
+div#header > .apptitle {
+    font-size: 18px;
+}
+div#header a.logout {
+    color: #fff;
+    font-weight: normal;
+}
+div#header button {
+    font-size: 12px;
+}
+div#header span.beta {
+    opacity: 0.5;
+}
+div#header span.beta > span {
+    border-top: 1px solid #fff;
+    border-bottom: 1px solid #fff;
+    font-size: 0.8em;
+}
+img.curoverse-logo {
+    width: 221px;
+    height: 44px;
+}
+#intropage {
+    font-family: Verdana,Arial,sans-serif;
+}
+#errorpage {
+    font-family: Verdana,Arial,sans-serif;
+}
+
+div.full-page-tab-set > ul > li {
+    font-size: 14px;
+}
+.titlebanner p {
+    font-size: 16px;
+}
+p {
+    font-size: 12px;
+}
+.small-text {
+    font-size: 12px;
+}
+.autoui-icon-float-left {
+    float: left;
+    margin-right: .3em;
+}
+.autoui-pad {
+    padding: 0 1em;
+}
+table.datatablesme {
+    border: 0;
+    border-collapse: collapse;
+    width: 100%;
+}
+.loadinggif {
+    background: #fff url(/images/ajax-loader-16-fff-aad.gif) no-repeat;
+}
+.clientprogressgif {
+    /* warning: depends on 24px outer container. */
+    position: absolute;
+    left: 4px;
+    top: 4px;
+    width: 16px;
+    height: 16px;
+}
+.counttable {
+    width: 100%;
+    display: table;
+    border-collapse: collapse;
+    margin-bottom: 0.5em;
+}
+.counttable > div {
+    display: table-row;
+}
+.counttable > div > div {
+    display: table-cell;
+    text-align: center;
+    background: #ccf;
+    padding: 0 2px;
+    font-size: 0.8em;
+}
+.counttable > div > div.counter {
+    font-size: 2em;
+    padding: 4px 2px 0 2px;
+}
+table.admin_table {
+    border-collapse: collapse;
+}
+table.admin_table tbody tr {
+    height: 2.5em;
+}
+table.admin_table th,table.admin_table td {
+    text-align: left;
+    border: 1px solid #bbb;
+    padding: 3px;
+}
+table.admin_table tbody tr:hover {
+    background: #ff8;
+}
+table.admin_table tbody tr:hover td {
+    background: transparent;
+}
+
+div.helptopics {
+    position: fixed;
+}
+div.helptopics ul {
+    padding: 0;
+    margin-left: 1em;
+    list-style-type: none;
+}
+div.helptopics ul li {
+    margin: 0 0 1em 0;
+}
+div.helpcontent li {
+    margin-bottom: .5em;
+}
+
+div.preview {
+    color: red;
+    font-weight: bold;
+    text-align: center;
+}
+
+.sudo-warning {
+    padding: 4px 10px;
+    background: #ffdd00;
+    color: red;
+    -webkit-border-radius: 3px;
+    -moz-border-radius: 3px;
+    border-radius: 3px
+}
+
+div#header a.sudo-logout {
+    color: #000;
+    font-weight: bold;
+}
+
diff --git a/services/api/app/assets/stylesheets/authorized_keys.css.scss b/services/api/app/assets/stylesheets/authorized_keys.css.scss
new file mode 100644 (file)
index 0000000..7298460
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the AuthorizedKeys controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/collections.css.scss b/services/api/app/assets/stylesheets/collections.css.scss
new file mode 100644 (file)
index 0000000..c21e96d
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the Collections controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/commit_ancestors.css.scss b/services/api/app/assets/stylesheets/commit_ancestors.css.scss
new file mode 100644 (file)
index 0000000..2077eb4
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the commit_ancestors controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/commits.css.scss b/services/api/app/assets/stylesheets/commits.css.scss
new file mode 100644 (file)
index 0000000..e3673f4
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the commits controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/groups.css.scss b/services/api/app/assets/stylesheets/groups.css.scss
new file mode 100644 (file)
index 0000000..6795636
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the Groups controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/humans.css.scss b/services/api/app/assets/stylesheets/humans.css.scss
new file mode 100644 (file)
index 0000000..f8afcdf
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the Humans controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/job_tasks.css.scss b/services/api/app/assets/stylesheets/job_tasks.css.scss
new file mode 100644 (file)
index 0000000..ab340e9
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the JobTasks controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/jobs.css.scss b/services/api/app/assets/stylesheets/jobs.css.scss
new file mode 100644 (file)
index 0000000..e485745
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the Jobs controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/keep_disks.css.scss b/services/api/app/assets/stylesheets/keep_disks.css.scss
new file mode 100644 (file)
index 0000000..1f7780b
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the KeepDisks controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/links.css.scss b/services/api/app/assets/stylesheets/links.css.scss
new file mode 100644 (file)
index 0000000..81ec94b
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the links controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/logs.css.scss b/services/api/app/assets/stylesheets/logs.css.scss
new file mode 100644 (file)
index 0000000..4aaccac
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the Logs controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/nodes.css b/services/api/app/assets/stylesheets/nodes.css
new file mode 100644 (file)
index 0000000..79b7293
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+  Place all the styles related to the matching controller here.
+  They will automatically be included in application.css.
+*/
+.node-status {
+    /* unknown status - might be bad */
+    background: #ff8888;
+}
+.node-status-running .node-status {
+    background: #88ff88;
+}
+.node-status-missing .node-status {
+    background: #ff8888;
+}
+.node-status-terminated .node-status {
+    background: #ffffff;
+}
+
+.node-slurm-state {
+    /* unknown status - might be bad */
+    background: #ff8888;
+}
+.node-status-missing .node-slurm-state {
+    background: #ffffff;
+}
+.node-status-terminated .node-slurm-state {
+    background: #ffffff;
+}
+.node-status-running .node-slurm-state-alloc {
+    background: #88ff88;
+}
+.node-status-running .node-slurm-state-idle {
+    background: #ffbbbb;
+}
+.node-status-running .node-slurm-state-down {
+    background: #ff8888;
+}
diff --git a/services/api/app/assets/stylesheets/nodes.css.scss b/services/api/app/assets/stylesheets/nodes.css.scss
new file mode 100644 (file)
index 0000000..7210602
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the Nodes controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/pipeline_instances.css.scss b/services/api/app/assets/stylesheets/pipeline_instances.css.scss
new file mode 100644 (file)
index 0000000..aa58dcc
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the PipelineInstances controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/pipeline_templates.css.scss b/services/api/app/assets/stylesheets/pipeline_templates.css.scss
new file mode 100644 (file)
index 0000000..35d2946
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the PipelineTemplates controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/repositories.css.scss b/services/api/app/assets/stylesheets/repositories.css.scss
new file mode 100644 (file)
index 0000000..85e38d2
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the Repositories controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/scaffolds.css.scss b/services/api/app/assets/stylesheets/scaffolds.css.scss
new file mode 100644 (file)
index 0000000..6ec6a8f
--- /dev/null
@@ -0,0 +1,69 @@
+body {
+  background-color: #fff;
+  color: #333;
+  font-family: verdana, arial, helvetica, sans-serif;
+  font-size: 13px;
+  line-height: 18px;
+}
+
+p, ol, ul, td {
+  font-family: verdana, arial, helvetica, sans-serif;
+  font-size: 13px;
+  line-height: 18px;
+}
+
+pre {
+  background-color: #eee;
+  padding: 10px;
+  font-size: 11px;
+}
+
+a {
+  color: #000;
+  &:visited {
+    color: #666;
+  }
+  &:hover {
+    color: #fff;
+    background-color: #000;
+  }
+}
+
+div {
+  &.field, &.actions {
+    margin-bottom: 10px;
+  }
+}
+
+#notice {
+  color: green;
+}
+
+.field_with_errors {
+  padding: 2px;
+  background-color: red;
+  display: table;
+}
+
+#error_explanation {
+  width: 450px;
+  border: 2px solid red;
+  padding: 7px;
+  padding-bottom: 0;
+  margin-bottom: 20px;
+  background-color: #f0f0f0;
+  h2 {
+    text-align: left;
+    font-weight: bold;
+    padding: 5px 5px 5px 15px;
+    font-size: 12px;
+    margin: -7px;
+    margin-bottom: 0px;
+    background-color: #c00;
+    color: #fff;
+  }
+  ul li {
+    font-size: 12px;
+    list-style: square;
+  }
+}
diff --git a/services/api/app/assets/stylesheets/specimens.css.scss b/services/api/app/assets/stylesheets/specimens.css.scss
new file mode 100644 (file)
index 0000000..460e42e
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the Specimens controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/traits.css.scss b/services/api/app/assets/stylesheets/traits.css.scss
new file mode 100644 (file)
index 0000000..5f30857
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the Traits controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/assets/stylesheets/virtual_machines.css.scss b/services/api/app/assets/stylesheets/virtual_machines.css.scss
new file mode 100644 (file)
index 0000000..5532eb9
--- /dev/null
@@ -0,0 +1,3 @@
+// Place all the styles related to the VirtualMachines controller here.
+// They will automatically be included in application.css.
+// You can use Sass (SCSS) here: http://sass-lang.com/
diff --git a/services/api/app/controllers/application_controller.rb b/services/api/app/controllers/application_controller.rb
new file mode 100644 (file)
index 0000000..54d5adb
--- /dev/null
@@ -0,0 +1,526 @@
+module ApiTemplateOverride
+  def allowed_to_render?(fieldset, field, model, options)
+    return false if !super
+    if options[:select]
+      options[:select].include? field.to_s
+    else
+      true
+    end
+  end
+end
+
+class ActsAsApi::ApiTemplate
+  prepend ApiTemplateOverride
+end
+
+require 'load_param'
+require 'record_filters'
+
+class ApplicationController < ActionController::Base
+  include CurrentApiClient
+  include ThemesForRails::ActionController
+  include LoadParam
+  include RecordFilters
+
+  respond_to :json
+  protect_from_forgery
+
+  ERROR_ACTIONS = [:render_error, :render_not_found]
+
+  before_filter :set_cors_headers
+  before_filter :respond_with_json_by_default
+  before_filter :remote_ip
+  before_filter :load_read_auths
+  before_filter :require_auth_scope, except: ERROR_ACTIONS
+
+  before_filter :catch_redirect_hint
+  before_filter(:find_object_by_uuid,
+                except: [:index, :create] + ERROR_ACTIONS)
+  before_filter :load_required_parameters
+  before_filter :load_limit_offset_order_params, only: [:index, :contents]
+  before_filter :load_where_param, only: [:index, :contents]
+  before_filter :load_filters_param, only: [:index, :contents]
+  before_filter :find_objects_for_index, :only => :index
+  before_filter :reload_object_before_update, :only => :update
+  before_filter(:render_404_if_no_object,
+                except: [:index, :create] + ERROR_ACTIONS)
+
+  theme :select_theme
+
+  attr_accessor :resource_attrs
+
+  begin
+    rescue_from(Exception,
+                ArvadosModel::PermissionDeniedError,
+                :with => :render_error)
+    rescue_from(ActiveRecord::RecordNotFound,
+                ActionController::RoutingError,
+                ActionController::UnknownController,
+                AbstractController::ActionNotFound,
+                :with => :render_not_found)
+  end
+
+  def default_url_options
+    if Rails.configuration.host
+      {:host => Rails.configuration.host}
+    else
+      {}
+    end
+  end
+
+  def index
+    @objects.uniq!(&:id) if @select.nil? or @select.include? "id"
+    if params[:eager] and params[:eager] != '0' and params[:eager] != 0 and params[:eager] != ''
+      @objects.each(&:eager_load_associations)
+    end
+    render_list
+  end
+
+  def show
+    render json: @object.as_api_response(nil, select: @select)
+  end
+
+  def create
+    @object = model_class.new resource_attrs
+
+    if @object.respond_to? :name and params[:ensure_unique_name]
+      # Record the original name.  See below.
+      name_stem = @object.name
+      counter = 1
+    end
+
+    begin
+      @object.save!
+    rescue ActiveRecord::RecordNotUnique => rn
+      raise unless params[:ensure_unique_name]
+
+      # Dig into the error to determine if it is specifically calling out a
+      # (owner_uuid, name) uniqueness violation.  In this specific case, and
+      # the client requested a unique name with ensure_unique_name==true,
+      # update the name field and try to save again.  Loop as necessary to
+      # discover a unique name.  It is necessary to handle name choosing at
+      # this level (as opposed to the client) to ensure that record creation
+      # never fails due to a race condition.
+      raise unless rn.original_exception.is_a? PG::UniqueViolation
+
+      # Unfortunately ActiveRecord doesn't abstract out any of the
+      # necessary information to figure out if this the error is actually
+      # the specific case where we want to apply the ensure_unique_name
+      # behavior, so the following code is specialized to Postgres.
+      err = rn.original_exception
+      detail = err.result.error_field(PG::Result::PG_DIAG_MESSAGE_DETAIL)
+      raise unless /^Key \(owner_uuid, name\)=\([a-z0-9]{5}-[a-z0-9]{5}-[a-z0-9]{15}, .*?\) already exists\./.match detail
+
+      # OK, this exception really is just a unique name constraint
+      # violation, and we've been asked to ensure_unique_name.
+      counter += 1
+      @object.uuid = nil
+      @object.name = "#{name_stem} (#{counter})"
+      redo
+    end while false
+    show
+  end
+
+  def update
+    attrs_to_update = resource_attrs.reject { |k,v|
+      [:kind, :etag, :href].index k
+    }
+    @object.update_attributes! attrs_to_update
+    show
+  end
+
+  def destroy
+    @object.destroy
+    show
+  end
+
+  def catch_redirect_hint
+    if !current_user
+      if params.has_key?('redirect_to') then
+        session[:redirect_to] = params[:redirect_to]
+      end
+    end
+  end
+
+  def render_404_if_no_object
+    render_not_found "Object not found" if !@object
+  end
+
+  def render_error(e)
+    logger.error e.inspect
+    if e.respond_to? :backtrace and e.backtrace
+      logger.error e.backtrace.collect { |x| x + "\n" }.join('')
+    end
+    if (@object.respond_to? :errors and
+        @object.errors.andand.full_messages.andand.any?)
+      errors = @object.errors.full_messages
+      logger.error errors.inspect
+    else
+      errors = [e.inspect]
+    end
+    status = e.respond_to?(:http_status) ? e.http_status : 422
+    send_error(*errors, status: status)
+  end
+
+  def render_not_found(e=ActionController::RoutingError.new("Path not found"))
+    logger.error e.inspect
+    send_error("Path not found", status: 404)
+  end
+
+  protected
+
+  def send_error(*args)
+    if args.last.is_a? Hash
+      err = args.pop
+    else
+      err = {}
+    end
+    err[:errors] ||= args
+    err[:error_token] = [Time.now.utc.to_i, "%08x" % rand(16 ** 8)].join("+")
+    status = err.delete(:status) || 422
+    logger.error "Error #{err[:error_token]}: #{status}"
+    render json: err, status: status
+  end
+
+  def find_objects_for_index
+    @objects ||= model_class.readable_by(*@read_users)
+    apply_where_limit_order_params
+  end
+
+  def apply_filters model_class=nil
+    model_class ||= self.model_class
+    ft = record_filters @filters, model_class
+    if ft[:cond_out].any?
+      @objects = @objects.where('(' + ft[:cond_out].join(') AND (') + ')',
+                                *ft[:param_out])
+    end
+  end
+
+  def apply_where_limit_order_params *args
+    apply_filters *args
+
+    ar_table_name = @objects.table_name
+    if @where.is_a? Hash and @where.any?
+      conditions = ['1=1']
+      @where.each do |attr,value|
+        if attr.to_s == 'any'
+          if value.is_a?(Array) and
+              value.length == 2 and
+              value[0] == 'contains' then
+            ilikes = []
+            model_class.searchable_columns('ilike').each do |column|
+              # Including owner_uuid in an "any column" search will
+              # probably just return a lot of false positives.
+              next if column == 'owner_uuid'
+              ilikes << "#{ar_table_name}.#{column} ilike ?"
+              conditions << "%#{value[1]}%"
+            end
+            if ilikes.any?
+              conditions[0] << ' and (' + ilikes.join(' or ') + ')'
+            end
+          end
+        elsif attr.to_s.match(/^[a-z][_a-z0-9]+$/) and
+            model_class.columns.collect(&:name).index(attr.to_s)
+          if value.nil?
+            conditions[0] << " and #{ar_table_name}.#{attr} is ?"
+            conditions << nil
+          elsif value.is_a? Array
+            if value[0] == 'contains' and value.length == 2
+              conditions[0] << " and #{ar_table_name}.#{attr} like ?"
+              conditions << "%#{value[1]}%"
+            else
+              conditions[0] << " and #{ar_table_name}.#{attr} in (?)"
+              conditions << value
+            end
+          elsif value.is_a? String or value.is_a? Fixnum or value == true or value == false
+            conditions[0] << " and #{ar_table_name}.#{attr}=?"
+            conditions << value
+          elsif value.is_a? Hash
+            # Not quite the same thing as "equal?" but better than nothing?
+            value.each do |k,v|
+              if v.is_a? String
+                conditions[0] << " and #{ar_table_name}.#{attr} ilike ?"
+                conditions << "%#{k}%#{v}%"
+              end
+            end
+          end
+        end
+      end
+      if conditions.length > 1
+        conditions[0].sub!(/^1=1 and /, '')
+        @objects = @objects.
+          where(*conditions)
+      end
+    end
+
+    if @select
+      unless action_name.in? %w(create update destroy)
+        # Map attribute names in @select to real column names, resolve
+        # those to fully-qualified SQL column names, and pass the
+        # resulting string to the select method.
+        api_column_map = model_class.attributes_required_columns
+        columns_list = @select.
+          flat_map { |attr| api_column_map[attr] }.
+          uniq.
+          map { |s| "#{table_name}.#{ActiveRecord::Base.connection.quote_column_name s}" }
+        @objects = @objects.select(columns_list.join(", "))
+      end
+
+      # This information helps clients understand what they're seeing
+      # (Workbench always expects it), but they can't select it explicitly
+      # because it's not an SQL column.  Always add it.
+      # (This is harmless, given that clients can deduce what they're
+      # looking at by the returned UUID anyway.)
+      @select |= ["kind"]
+    end
+    @objects = @objects.order(@orders.join ", ") if @orders.any?
+    @objects = @objects.limit(@limit)
+    @objects = @objects.offset(@offset)
+    @objects = @objects.uniq(@distinct) if not @distinct.nil?
+  end
+
+  def resource_attrs
+    return @attrs if @attrs
+    @attrs = params[resource_name]
+    if @attrs.is_a? String
+      @attrs = Oj.load @attrs, symbol_keys: true
+    end
+    unless @attrs.is_a? Hash
+      message = "No #{resource_name}"
+      if resource_name.index('_')
+        message << " (or #{resource_name.camelcase(:lower)})"
+      end
+      message << " hash provided with request"
+      raise ArgumentError.new(message)
+    end
+    %w(created_at modified_by_client_uuid modified_by_user_uuid modified_at).each do |x|
+      @attrs.delete x.to_sym
+    end
+    @attrs = @attrs.symbolize_keys if @attrs.is_a? HashWithIndifferentAccess
+    @attrs
+  end
+
+  # Authentication
+  def load_read_auths
+    @read_auths = []
+    if current_api_client_authorization
+      @read_auths << current_api_client_authorization
+    end
+    # Load reader tokens if this is a read request.
+    # If there are too many reader tokens, assume the request is malicious
+    # and ignore it.
+    if request.get? and params[:reader_tokens] and
+        params[:reader_tokens].size < 100
+      @read_auths += ApiClientAuthorization
+        .includes(:user)
+        .where('api_token IN (?) AND
+                (expires_at IS NULL OR expires_at > CURRENT_TIMESTAMP)',
+               params[:reader_tokens])
+        .all
+    end
+    @read_auths.select! { |auth| auth.scopes_allow_request? request }
+    @read_users = @read_auths.map { |auth| auth.user }.uniq
+  end
+
+  def require_login
+    if not current_user
+      respond_to do |format|
+        format.json { send_error("Not logged in", status: 401) }
+        format.html { redirect_to '/auth/joshid' }
+      end
+      false
+    end
+  end
+
+  def admin_required
+    unless current_user and current_user.is_admin
+      send_error("Forbidden", status: 403)
+    end
+  end
+
+  def require_auth_scope
+    if @read_auths.empty?
+      if require_login != false
+        send_error("Forbidden", status: 403)
+      end
+      false
+    end
+  end
+
+  def set_cors_headers
+    response.headers['Access-Control-Allow-Origin'] = '*'
+    response.headers['Access-Control-Allow-Methods'] = 'GET, HEAD, PUT, POST, DELETE'
+    response.headers['Access-Control-Allow-Headers'] = 'Authorization'
+    response.headers['Access-Control-Max-Age'] = '86486400'
+  end
+
+  def respond_with_json_by_default
+    html_index = request.accepts.index(Mime::HTML)
+    if html_index.nil? or request.accepts[0...html_index].include?(Mime::JSON)
+      request.format = :json
+    end
+  end
+
+  def model_class
+    controller_name.classify.constantize
+  end
+
+  def resource_name             # params[] key used by client
+    controller_name.singularize
+  end
+
+  def table_name
+    controller_name
+  end
+
+  def find_object_by_uuid
+    if params[:id] and params[:id].match /\D/
+      params[:uuid] = params.delete :id
+    end
+    @where = { uuid: params[:uuid] }
+    @offset = 0
+    @limit = 1
+    @orders = []
+    @filters = []
+    @objects = nil
+    find_objects_for_index
+    @object = @objects.first
+  end
+
+  def reload_object_before_update
+    # This is necessary to prevent an ActiveRecord::ReadOnlyRecord
+    # error when updating an object which was retrieved using a join.
+    if @object.andand.readonly?
+      @object = model_class.find_by_uuid(@objects.first.uuid)
+    end
+  end
+
+  def load_json_value(hash, key, must_be_class=nil)
+    if hash[key].is_a? String
+      hash[key] = Oj.load(hash[key], symbol_keys: false)
+      if must_be_class and !hash[key].is_a? must_be_class
+        raise TypeError.new("parameter #{key.to_s} must be a #{must_be_class.to_s}")
+      end
+    end
+  end
+
+  def self.accept_attribute_as_json(attr, must_be_class=nil)
+    before_filter lambda { accept_attribute_as_json attr, must_be_class }
+  end
+  accept_attribute_as_json :properties, Hash
+  accept_attribute_as_json :info, Hash
+  def accept_attribute_as_json(attr, must_be_class)
+    if params[resource_name] and resource_attrs.is_a? Hash
+      if resource_attrs[attr].is_a? Hash
+        # Convert symbol keys to strings (in hashes provided by
+        # resource_attrs)
+        resource_attrs[attr] = resource_attrs[attr].
+          with_indifferent_access.to_hash
+      else
+        load_json_value(resource_attrs, attr, must_be_class)
+      end
+    end
+  end
+
+  def self.accept_param_as_json(key, must_be_class=nil)
+    prepend_before_filter lambda { load_json_value(params, key, must_be_class) }
+  end
+  accept_param_as_json :reader_tokens, Array
+
+  def render_list
+    @object_list = {
+      :kind  => "arvados##{(@response_resource_name || resource_name).camelize(:lower)}List",
+      :etag => "",
+      :self_link => "",
+      :offset => @offset,
+      :limit => @limit,
+      :items => @objects.as_api_response(nil, {select: @select})
+    }
+    if @objects.respond_to? :except
+      @object_list[:items_available] = @objects.
+        except(:limit).except(:offset).
+        count(:id, distinct: true)
+    end
+    render json: @object_list
+  end
+
+  def remote_ip
+    # Caveat: this is highly dependent on the proxy setup. YMMV.
+    if request.headers.has_key?('HTTP_X_REAL_IP') then
+      # We're behind a reverse proxy
+      @remote_ip = request.headers['HTTP_X_REAL_IP']
+    else
+      # Hopefully, we are not!
+      @remote_ip = request.env['REMOTE_ADDR']
+    end
+  end
+
+  def load_required_parameters
+    (self.class.send "_#{params[:action]}_requires_parameters" rescue {}).
+      each do |key, info|
+      if info[:required] and not params.include?(key)
+        raise ArgumentError.new("#{key} parameter is required")
+      elsif info[:type] == 'boolean'
+        # Make sure params[key] is either true or false -- not a
+        # string, not nil, etc.
+        if not params.include?(key)
+          params[key] = info[:default]
+        elsif [false, 'false', '0', 0].include? params[key]
+          params[key] = false
+        elsif [true, 'true', '1', 1].include? params[key]
+          params[key] = true
+        else
+          raise TypeError.new("#{key} parameter must be a boolean, true or false")
+        end
+      end
+    end
+    true
+  end
+
+  def self._create_requires_parameters
+    {
+      ensure_unique_name: {
+        type: "boolean",
+        description: "Adjust name to ensure uniqueness instead of returning an error on (owner_uuid, name) collision.",
+        location: "query",
+        required: false,
+        default: false
+      }
+    }
+  end
+
+  def self._index_requires_parameters
+    {
+      filters: { type: 'array', required: false },
+      where: { type: 'object', required: false },
+      order: { type: 'array', required: false },
+      select: { type: 'array', required: false },
+      distinct: { type: 'boolean', required: false },
+      limit: { type: 'integer', required: false, default: DEFAULT_LIMIT },
+      offset: { type: 'integer', required: false, default: 0 },
+    }
+  end
+
+  def client_accepts_plain_text_stream
+    (request.headers['Accept'].split(' ') &
+     ['text/plain', '*/*']).count > 0
+  end
+
+  def render *opts
+    if opts.first
+      response = opts.first[:json]
+      if response.is_a?(Hash) &&
+          params[:_profile] &&
+          Thread.current[:request_starttime]
+        response[:_profile] = {
+          request_time: Time.now - Thread.current[:request_starttime]
+        }
+      end
+    end
+    super *opts
+  end
+
+  def select_theme
+    return Rails.configuration.arvados_theme
+  end
+end
diff --git a/services/api/app/controllers/arvados/v1/api_client_authorizations_controller.rb b/services/api/app/controllers/arvados/v1/api_client_authorizations_controller.rb
new file mode 100644 (file)
index 0000000..f365a7f
--- /dev/null
@@ -0,0 +1,89 @@
+# Manages ApiClientAuthorization records (API tokens).  Access is
+# restricted to trusted API clients; create_system_auth additionally
+# requires an admin token.
+class Arvados::V1::ApiClientAuthorizationsController < ApplicationController
+  accept_attribute_as_json :scopes, Array
+  before_filter :current_api_client_is_trusted
+  before_filter :admin_required, :only => :create_system_auth
+  skip_before_filter :render_404_if_no_object, :only => :create_system_auth
+
+  # Parameter spec for create_system_auth (discovery document).
+  def self._create_system_auth_requires_parameters
+    {
+      api_client_id: {type: 'integer', required: false},
+      scopes: {type: 'array', required: false}
+    }
+  end
+  # Create a token owned by the system user, optionally tied to a
+  # specific api_client_id and limited to the given scopes (default:
+  # all scopes).
+  def create_system_auth
+    @object = ApiClientAuthorization.
+      new(user_id: system_user.id,
+          api_client_id: params[:api_client_id] || current_api_client.andand.id,
+          created_by_ip_address: remote_ip,
+          scopes: Oj.load(params[:scopes] || '["all"]'))
+    @object.save!
+    show
+  end
+
+  def create
+    # Note: the user could specify an owner_uuid for a different user, which on
+    # the surface appears to be a security hole.  However, the record will be
+    # rejected before being saved to the database by the ApiClientAuthorization
+    # model which enforces that user_id == current user or the user is an
+    # admin.
+
+    if resource_attrs[:owner_uuid]
+      # The model has an owner_id attribute instead of owner_uuid, but
+      # we can't expect the client to know the local numeric ID. We
+      # translate UUID to numeric ID here.
+      resource_attrs[:user_id] =
+        User.where(uuid: resource_attrs.delete(:owner_uuid)).first.andand.id
+    elsif not resource_attrs[:user_id]
+      resource_attrs[:user_id] = current_user.id
+    end
+    resource_attrs[:api_client_id] = Thread.current[:api_client].id
+    super
+  end
+
+  protected
+
+  # Newest tokens first by default.
+  def default_orders
+    ["#{table_name}.created_at desc"]
+  end
+
+  def find_objects_for_index
+    # Here we are deliberately less helpful about searching for client
+    # authorizations.  We look up tokens belonging to the current user
+    # and filter by exact matches on api_token and scopes.
+    wanted_scopes = []
+    if @filters
+      wanted_scopes.concat(@filters.map { |attr, operator, operand|
+        ((attr == 'scopes') and (operator == '=')) ? operand : nil
+      })
+      @filters.select! { |attr, operator, operand|
+        (attr == 'uuid') and (operator == '=')
+      }
+    end
+    if @where
+      wanted_scopes << @where['scopes']
+      @where.select! { |attr, val| attr == 'uuid' }
+    end
+    @objects = model_class.
+      includes(:user, :api_client).
+      where('user_id=?', current_user.id)
+    super
+    # Scope comparison is order-insensitive: sort both sides first.
+    wanted_scopes.compact.each do |scope_list|
+      sorted_scopes = scope_list.sort
+      @objects = @objects.select { |auth| auth.scopes.sort == sorted_scopes }
+    end
+  end
+
+  def find_object_by_uuid
+    # Again, to make things easier for the client and our own routing,
+    # here we look for the api_token key in a "uuid" (POST) or "id"
+    # (GET) parameter.
+    @object = model_class.where('api_token=?', params[:uuid] || params[:id]).first
+  end
+
+  # Reject (403) requests from API clients not flagged as trusted,
+  # since tokens are sensitive credentials.
+  def current_api_client_is_trusted
+    unless Thread.current[:api_client].andand.is_trusted
+      send_error('Forbidden: this API client cannot manipulate other clients\' access tokens.',
+                 status: 403)
+    end
+  end
+end
diff --git a/services/api/app/controllers/arvados/v1/api_clients_controller.rb b/services/api/app/controllers/arvados/v1/api_clients_controller.rb
new file mode 100644 (file)
index 0000000..d8f52b5
--- /dev/null
@@ -0,0 +1,3 @@
+# Admin-only management of ApiClient records; all actions are
+# inherited from ApplicationController.
+class Arvados::V1::ApiClientsController < ApplicationController
+  before_filter :admin_required
+end
diff --git a/services/api/app/controllers/arvados/v1/authorized_keys_controller.rb b/services/api/app/controllers/arvados/v1/authorized_keys_controller.rb
new file mode 100644 (file)
index 0000000..d9d2010
--- /dev/null
@@ -0,0 +1,2 @@
+# AuthorizedKeys uses the default CRUD behavior inherited from
+# ApplicationController; no overrides are needed.
+class Arvados::V1::AuthorizedKeysController < ApplicationController
+end
diff --git a/services/api/app/controllers/arvados/v1/collections_controller.rb b/services/api/app/controllers/arvados/v1/collections_controller.rb
new file mode 100644 (file)
index 0000000..54f0982
--- /dev/null
@@ -0,0 +1,192 @@
+require "arvados/keep"
+
+# Collections controller.  Accepts either a collection UUID or a
+# content address (portable data hash / Keep locator) wherever an id is
+# expected, and provides provenance/used_by graph queries.
+class Arvados::V1::CollectionsController < ApplicationController
+  def create
+    # If the client supplied a Keep locator in the uuid attribute,
+    # store it as the portable_data_hash instead.
+    if resource_attrs[:uuid] and (loc = Keep::Locator.parse(resource_attrs[:uuid]))
+      resource_attrs[:portable_data_hash] = loc.to_s
+      resource_attrs.delete :uuid
+    end
+    super
+  end
+
+  def find_object_by_uuid
+    if loc = Keep::Locator.parse(params[:id])
+      loc.strip_hints!
+      # Lookup by content address: build a synthetic response hash (not
+      # a Collection model) for any readable collection with this
+      # portable_data_hash.
+      if c = Collection.readable_by(*@read_users).where({ portable_data_hash: loc.to_s }).limit(1).first
+        @object = {
+          uuid: c.portable_data_hash,
+          portable_data_hash: c.portable_data_hash,
+          manifest_text: c.signed_manifest_text,
+        }
+      end
+    else
+      super
+    end
+    true
+  end
+
+  def show
+    if @object.is_a? Collection
+      super
+    else
+      # @object is the synthetic hash built by find_object_by_uuid.
+      render json: @object
+    end
+  end
+
+  def index
+    super
+  end
+
+  # Recursively scan sp (a model, hash, array, or string) and yield
+  # every collection reference found: (hash, nil) for a content-address
+  # match, (nil, uuid) for a collection UUID match.
+  def find_collections(visited, sp, &b)
+    case sp
+    when ArvadosModel
+      sp.class.columns.each do |c|
+        find_collections(visited, sp[c.name.to_sym], &b) if c.name != "log"
+      end
+    when Hash
+      sp.each do |k, v|
+        find_collections(visited, v, &b)
+      end
+    when Array
+      sp.each do |v|
+        find_collections(visited, v, &b)
+      end
+    when String
+      if m = /[a-f0-9]{32}\+\d+/.match(sp)
+        yield m[0], nil
+      elsif m = Collection.uuid_regex.match(sp)
+        yield nil, m[0]
+      end
+    end
+  end
+
+  # Walk the provenance graph from uuid (a portable data hash, a
+  # collection UUID, or a job UUID), following jobs and provenance
+  # links upstream (:search_up) or downstream (:search_down).  Results
+  # accumulate in visited, which doubles as the cycle-prevention set.
+  def search_edges(visited, uuid, direction)
+    if uuid.nil? or uuid.empty? or visited[uuid]
+      return
+    end
+
+    if loc = Keep::Locator.parse(uuid)
+      loc.strip_hints!
+      return if visited[loc.to_s]
+    end
+
+    logger.debug "visiting #{uuid}"
+
+    if loc
+      # uuid is a portable_data_hash
+      collections = Collection.readable_by(*@read_users).where(portable_data_hash: loc.to_s)
+      c = collections.limit(2).all
+      if c.size == 1
+        visited[loc.to_s] = c[0]
+      elsif c.size > 1
+        # Multiple collections share this hash: prefer a named one for
+        # display and summarize how many more exist.
+        name = collections.limit(1).where("name <> ''").first
+        if name
+          visited[loc.to_s] = {
+            portable_data_hash: c[0].portable_data_hash,
+            name: "#{name.name} + #{collections.count-1} more"
+          }
+        else
+          visited[loc.to_s] = {
+            portable_data_hash: c[0].portable_data_hash,
+            name: loc.to_s
+          }
+        end
+      end
+
+      if direction == :search_up
+        # Search upstream for jobs where this locator is the output of some job
+        Job.readable_by(*@read_users).where(output: loc.to_s).each do |job|
+          search_edges(visited, job.uuid, :search_up)
+        end
+
+        Job.readable_by(*@read_users).where(log: loc.to_s).each do |job|
+          search_edges(visited, job.uuid, :search_up)
+        end
+      elsif direction == :search_down
+        if loc.to_s == "d41d8cd98f00b204e9800998ecf8427e+0"
+          # Special case, don't follow the empty collection.
+          return
+        end
+
+        # Search downstream for jobs where this locator is in script_parameters
+        Job.readable_by(*@read_users).where(["jobs.script_parameters like ?", "%#{loc.to_s}%"]).each do |job|
+          search_edges(visited, job.uuid, :search_down)
+        end
+
+        Job.readable_by(*@read_users).where(["jobs.docker_image_locator = ?", "#{loc.to_s}"]).each do |job|
+          search_edges(visited, job.uuid, :search_down)
+        end
+      end
+    else
+      # uuid is a regular Arvados UUID
+      rsc = ArvadosModel::resource_class_for_uuid uuid
+      if rsc == Job
+        Job.readable_by(*@read_users).where(uuid: uuid).each do |job|
+          visited[uuid] = job.as_api_response
+          if direction == :search_up
+            # Follow upstream collections referenced in the script parameters
+            find_collections(visited, job) do |hash, uuid|
+              search_edges(visited, hash, :search_up) if hash
+              search_edges(visited, uuid, :search_up) if uuid
+            end
+          elsif direction == :search_down
+            # Follow downstream job output
+            search_edges(visited, job.output, direction)
+          end
+        end
+      elsif rsc == Collection
+        if c = Collection.readable_by(*@read_users).where(uuid: uuid).limit(1).first
+          search_edges(visited, c.portable_data_hash, direction)
+          visited[c.portable_data_hash] = c.as_api_response
+        end
+      elsif rsc != nil
+        rsc.where(uuid: uuid).each do |r|
+          visited[uuid] = r.as_api_response
+        end
+      end
+    end
+
+    if direction == :search_up
+      # Search for provenance links pointing to the current uuid
+      Link.readable_by(*@read_users).
+        where(head_uuid: uuid, link_class: "provenance").
+        each do |link|
+        visited[link.uuid] = link.as_api_response
+        search_edges(visited, link.tail_uuid, direction)
+      end
+    elsif direction == :search_down
+      # Search for provenance links emanating from the current uuid
+      # NOTE(review): uses current_user here but *@read_users in the
+      # :search_up branch above -- confirm the difference is intended.
+      Link.readable_by(current_user).
+        where(tail_uuid: uuid, link_class: "provenance").
+        each do |link|
+        visited[link.uuid] = link.as_api_response
+        search_edges(visited, link.head_uuid, direction)
+      end
+    end
+  end
+
+  # Upstream provenance graph for the collection.
+  def provenance
+    visited = {}
+    search_edges(visited, @object[:portable_data_hash], :search_up)
+    search_edges(visited, @object[:uuid], :search_up)
+    render json: visited
+  end
+
+  # Downstream usage graph for the collection.
+  def used_by
+    visited = {}
+    search_edges(visited, @object[:uuid], :search_down)
+    search_edges(visited, @object[:portable_data_hash], :search_down)
+    render json: visited
+  end
+
+  protected
+
+  def load_limit_offset_order_params *args
+    if action_name == 'index'
+      # Omit manifest_text from index results unless expressly selected.
+      @select ||= model_class.api_accessible_attributes(:user).
+        map { |attr_spec| attr_spec.first.to_s } - ["manifest_text"]
+    end
+    super
+  end
+end
diff --git a/services/api/app/controllers/arvados/v1/groups_controller.rb b/services/api/app/controllers/arvados/v1/groups_controller.rb
new file mode 100644 (file)
index 0000000..b88f254
--- /dev/null
@@ -0,0 +1,124 @@
+# Groups controller, including the cross-model "contents" listing for
+# projects and the per-user "Home" pseudo-project.
+class Arvados::V1::GroupsController < ApplicationController
+
+  # contents accepts the standard index parameters plus an optional
+  # scoping uuid (a project group or user).
+  def self._contents_requires_parameters
+    _index_requires_parameters.
+      merge({
+              uuid: {
+                type: 'string', required: false, default: nil
+              },
+              # include_linked returns name links, which are obsolete, so
+              # remove it when clients have been migrated.
+              include_linked: {
+                type: 'boolean', required: false, default: false
+              },
+            })
+  end
+
+  # For the contents action: no uuid means "all contents"; a user uuid
+  # selects that user's "Home" pseudo-project; anything else falls
+  # through to the standard 404 behavior.
+  def render_404_if_no_object
+    if params[:action] == 'contents'
+      if !params[:uuid]
+        # OK!
+        @object = nil
+        true
+      elsif @object
+        # Project group
+        true
+      elsif (@object = User.where(uuid: params[:uuid]).first)
+        # "Home" pseudo-project
+        true
+      else
+        super
+      end
+    else
+      super
+    end
+  end
+
+  def contents
+    # Set @objects:
+    # include_linked returns name links, which are obsolete, so
+    # remove it when clients have been migrated.
+    load_searchable_objects(owner_uuid: @object.andand.uuid,
+                            include_linked: params[:include_linked])
+    sql = 'link_class=? and head_uuid in (?)'
+    sql_params = ['name', @objects.collect(&:uuid)]
+    if @object
+      sql += ' and tail_uuid=?'
+      sql_params << @object.uuid
+    end
+    @links = Link.where sql, *sql_params
+    @object_list = {
+      :kind  => "arvados#objectList",
+      :etag => "",
+      :self_link => "",
+      :links => @links.as_api_response(nil),
+      :offset => @offset,
+      :limit => @limit,
+      :items_available => @items_available,
+      :items => @objects.as_api_response(nil)
+    }
+    render json: @object_list
+  end
+
+  protected
+
+  # Gather readable objects of several model classes into one paged
+  # result set (@objects), accumulating @items_available across classes
+  # and applying the caller's @limit/@offset to the combined list.
+  def load_searchable_objects opts
+    all_objects = []
+    @items_available = 0
+
+    # Trick apply_where_limit_order_params into applying suitable
+    # per-table values. *_all are the real ones we'll apply to the
+    # aggregate set.
+    limit_all = @limit
+    offset_all = @offset
+    # save the orders from the current request as determined by load_param,
+    # but otherwise discard them because we're going to be getting objects
+    # from many models
+    request_orders = @orders.clone
+    @orders = []
+
+    [Group,
+     Job, PipelineInstance, PipelineTemplate,
+     Collection,
+     Human, Specimen, Trait].each do |klass|
+      @objects = klass.readable_by(*@read_users)
+      if klass == Group
+        @objects = @objects.where(group_class: 'project')
+      end
+      if opts[:owner_uuid]
+        conds = []
+        cond_params = []
+        conds << "#{klass.table_name}.owner_uuid = ?"
+        cond_params << opts[:owner_uuid]
+        if conds.any?
+          cond_sql = '(' + conds.join(') OR (') + ')'
+          @objects = @objects.where(cond_sql, *cond_params)
+        end
+      end
+
+      # If the currently requested orders specifically match the table_name for the current klass, apply the order
+      request_order = request_orders && request_orders.find{ |r| r =~ /^#{klass.table_name}\./i }
+      if request_order
+        @objects = @objects.order(request_order)
+      else
+        # default to created_at desc, ignoring any currently requested ordering because it doesn't apply to this klass
+        @objects = @objects.order("#{klass.table_name}.created_at desc")
+      end
+
+      @limit = limit_all - all_objects.count
+      apply_where_limit_order_params klass
+      klass_items_available = @objects.
+        except(:limit).except(:offset).
+        count(:id, distinct: true)
+      @items_available += klass_items_available
+      @offset = [@offset - klass_items_available, 0].max
+
+      all_objects += @objects.to_a
+    end
+
+    # Restore the caller's paging parameters for the aggregate result.
+    @objects = all_objects
+    @limit = limit_all
+    @offset = offset_all
+  end
+
+end
diff --git a/services/api/app/controllers/arvados/v1/humans_controller.rb b/services/api/app/controllers/arvados/v1/humans_controller.rb
new file mode 100644 (file)
index 0000000..61cfe69
--- /dev/null
@@ -0,0 +1,2 @@
+# Humans uses the default CRUD behavior inherited from
+# ApplicationController; no overrides are needed.
+class Arvados::V1::HumansController < ApplicationController
+end
diff --git a/services/api/app/controllers/arvados/v1/job_tasks_controller.rb b/services/api/app/controllers/arvados/v1/job_tasks_controller.rb
new file mode 100644 (file)
index 0000000..434550c
--- /dev/null
@@ -0,0 +1,3 @@
+# JobTasks controller: standard CRUD, with the parameters attribute
+# accepted as a JSON-encoded Hash.
+class Arvados::V1::JobTasksController < ApplicationController
+  accept_attribute_as_json :parameters, Hash
+end
diff --git a/services/api/app/controllers/arvados/v1/jobs_controller.rb b/services/api/app/controllers/arvados/v1/jobs_controller.rb
new file mode 100644 (file)
index 0000000..bd6cbd0
--- /dev/null
@@ -0,0 +1,278 @@
+# Jobs controller: job creation with optional reuse of equivalent
+# completed jobs, queue inspection, and job-specific filter handling
+# ("in git", "in docker" operators).
+class Arvados::V1::JobsController < ApplicationController
+  accept_attribute_as_json :script_parameters, Hash
+  accept_attribute_as_json :runtime_constraints, Hash
+  accept_attribute_as_json :tasks_summary, Hash
+  skip_before_filter :find_object_by_uuid, :only => [:queue, :queue_size]
+  skip_before_filter :render_404_if_no_object, :only => [:queue, :queue_size]
+
+  # Create a job.  When find_or_create is set, first search for an
+  # existing reusable job matching the given filters (or, for older
+  # clients, filters synthesized from the creation attributes) and
+  # return it instead of creating a duplicate.
+  def create
+    [:repository, :script, :script_version, :script_parameters].each do |r|
+      if !resource_attrs[r]
+        return send_error("#{r} attribute must be specified",
+                          status: :unprocessable_entity)
+      end
+    end
+
+    # We used to ask for the minimum_, exclude_, and no_reuse params
+    # in the job resource. Now we advertise them as flags that alter
+    # the behavior of the create action.
+    [:minimum_script_version, :exclude_script_versions].each do |attr|
+      if resource_attrs.has_key? attr
+        params[attr] = resource_attrs.delete attr
+      end
+    end
+    if resource_attrs.has_key? :no_reuse
+      params[:find_or_create] = !resource_attrs.delete(:no_reuse)
+    end
+
+    if params[:find_or_create]
+      return if false.equal?(load_filters_param)
+      if @filters.empty?  # Translate older creation parameters into filters.
+        @filters =
+          [["repository", "=", resource_attrs[:repository]],
+           ["script", "=", resource_attrs[:script]],
+           ["script_version", "in git",
+            params[:minimum_script_version] || resource_attrs[:script_version]],
+           ["script_version", "not in git", params[:exclude_script_versions]],
+          ].reject { |filter| filter.last.nil? or filter.last.empty? }
+        if image_search = resource_attrs[:runtime_constraints].andand["docker_image"]
+          if image_tag = resource_attrs[:runtime_constraints]["docker_image_tag"]
+            image_search += ":#{image_tag}"
+          end
+          @filters.append(["docker_image_locator", "in docker", image_search])
+        else
+          @filters.append(["docker_image_locator", "=", nil])
+        end
+        if sdk_version = resource_attrs[:runtime_constraints].andand["arvados_sdk_version"]
+          @filters.append(["arvados_sdk_version", "in git", sdk_version])
+        end
+        begin
+          load_job_specific_filters
+        rescue ArgumentError => error
+          return send_error(error.message)
+        end
+      end
+
+      # Check specified filters for some reasonableness.
+      filter_names = @filters.map { |f| f.first }.uniq
+      ["repository", "script"].each do |req_filter|
+        if not filter_names.include?(req_filter)
+          return send_error("#{req_filter} filter required")
+        end
+      end
+
+      # Search for a reusable Job, and return it if found.
+      @objects = Job.readable_by(current_user)
+      apply_filters
+      @object = nil
+      incomplete_job = nil
+      @objects.each do |j|
+        if j.nondeterministic != true and
+            ["Queued", "Running", "Complete"].include?(j.state) and
+            j.script_parameters == resource_attrs[:script_parameters]
+          if j.state != "Complete" && j.owner_uuid == current_user.uuid
+            # We'll use this if we don't find a job that has completed
+            incomplete_job ||= j
+          else
+            if Collection.readable_by(current_user).find_by_portable_data_hash(j.output)
+              # Record the first job in the list
+              if !@object
+                @object = j
+              end
+              # Ensure that all candidate jobs actually did produce the same output
+              if @object.output != j.output
+                @object = nil
+                break
+              end
+            end
+          end
+        end
+        # NOTE(review): this ||= and early return are inside the each
+        # loop, so the first incomplete candidate ends the search before
+        # later completed jobs are considered -- confirm intended.
+        @object ||= incomplete_job
+        if @object
+          return show
+        end
+      end
+    end
+
+    super
+  end
+
+  # Cancel a queued or running job.
+  def cancel
+    reload_object_before_update
+    @object.update_attributes! state: Job::Cancelled
+    show
+  end
+
+  # Lock the job on behalf of the current user (e.g. a dispatcher
+  # claiming it before running).
+  def lock
+    @object.lock current_user.uuid
+    show
+  end
+
+  # Streams human-readable status lines for a job; while the job is
+  # queued, emits a queue-position/cluster summary every
+  # Q_UPDATE_INTERVAL seconds.
+  class LogStreamer
+    Q_UPDATE_INTERVAL = 12
+    def initialize(job, opts={})
+      @job = job
+      @opts = opts
+    end
+    def each
+      if @job.finished_at
+        yield "#{@job.uuid} finished at #{@job.finished_at}\n"
+        return
+      end
+      while not @job.started_at
+        # send a summary (job queue + available nodes) to the client
+        # every few seconds while waiting for the job to start
+        last_ack_at ||= Time.now - Q_UPDATE_INTERVAL - 1
+        if Time.now - last_ack_at >= Q_UPDATE_INTERVAL
+          nodes_in_state = {idle: 0, alloc: 0}
+          ActiveRecord::Base.uncached do
+            Node.where('hostname is not ?', nil).collect do |n|
+              if n.info[:slurm_state]
+                nodes_in_state[n.info[:slurm_state]] ||= 0
+                nodes_in_state[n.info[:slurm_state]] += 1
+              end
+            end
+          end
+          job_queue = Job.queue
+          n_queued_before_me = 0
+          job_queue.each do |j|
+            break if j.uuid == @job.uuid
+            n_queued_before_me += 1
+          end
+          yield "#{Time.now}" \
+            " job #{@job.uuid}" \
+            " queue_position #{n_queued_before_me}" \
+            " queue_size #{job_queue.size}" \
+            " nodes_idle #{nodes_in_state[:idle]}" \
+            " nodes_alloc #{nodes_in_state[:alloc]}\n"
+          last_ack_at = Time.now
+        end
+        sleep 3
+        ActiveRecord::Base.uncached do
+          @job.reload
+        end
+      end
+    end
+  end
+
+  # List jobs in the Queued state, highest priority first.
+  def queue
+    params[:order] ||= ['priority desc', 'created_at']
+    load_limit_offset_order_params
+    load_where_param
+    @where.merge!({state: Job::Queued})
+    return if false.equal?(load_filters_param)
+    find_objects_for_index
+    index
+  end
+
+  def queue_size
+    # Users may not be allowed to see all the jobs in the queue, so provide a
+    # method to get just the queue size in order to get a gist of how busy the
+    # cluster is.
+    render :json => {:queue_size => Job.queue.size}
+  end
+
+  # Extend the standard create parameters with job-reuse flags.
+  def self._create_requires_parameters
+    (super rescue {}).
+      merge({
+              find_or_create: {
+                type: 'boolean', required: false, default: false
+              },
+              filters: {
+                type: 'array', required: false
+              },
+              minimum_script_version: {
+                type: 'string', required: false
+              },
+              exclude_script_versions: {
+                type: 'array', required: false
+              },
+            })
+  end
+
+  def self._queue_requires_parameters
+    self._index_requires_parameters
+  end
+
+  protected
+
+  def load_job_specific_filters
+    # Convert Job-specific @filters entries into general SQL filters.
+    script_info = {"repository" => nil, "script" => nil}
+    git_filters = Hash.new do |hash, key|
+      hash[key] = {"max_version" => "HEAD", "exclude_versions" => []}
+    end
+    @filters.select! do |(attr, operator, operand)|
+      if (script_info.has_key? attr) and (operator == "=")
+        if script_info[attr].nil?
+          script_info[attr] = operand
+        elsif script_info[attr] != operand
+          raise ArgumentError.new("incompatible #{attr} filters")
+        end
+      end
+      case operator
+      when "in git"
+        git_filters[attr]["min_version"] = operand
+        false
+      when "not in git"
+        git_filters[attr]["exclude_versions"] += Array.wrap(operand)
+        false
+      when "in docker", "not in docker"
+        image_hashes = Array.wrap(operand).flat_map do |search_term|
+          image_search, image_tag = search_term.split(':', 2)
+          Collection.
+            find_all_for_docker_image(image_search, image_tag, @read_users).
+            map(&:portable_data_hash)
+        end
+        @filters << [attr, operator.sub(/ docker$/, ""), image_hashes]
+        false
+      else
+        true
+      end
+    end
+
+    # Build a real script_version filter from any "not? in git" filters.
+    git_filters.each_pair do |attr, filter|
+      case attr
+      when "script_version"
+        script_info.each_pair do |key, value|
+          if value.nil?
+            raise ArgumentError.new("script_version filter needs #{key} filter")
+          end
+        end
+        filter["repository"] = script_info["repository"]
+        begin
+          filter["max_version"] = resource_attrs[:script_version]
+        rescue
+          # Using HEAD, set earlier by the hash default, is fine.
+        end
+      when "arvados_sdk_version"
+        filter["repository"] = "arvados"
+      else
+        raise ArgumentError.new("unknown attribute for git filter: #{attr}")
+      end
+      version_range = Commit.find_commit_range(current_user,
+                                               filter["repository"],
+                                               filter["min_version"],
+                                               filter["max_version"],
+                                               filter["exclude_versions"])
+      if version_range.nil?
+        raise ArgumentError.
+          new("error searching #{filter['repository']} from " +
+              "'#{filter['min_version']}' to '#{filter['max_version']}', " +
+              "excluding #{filter['exclude_versions']}")
+      end
+      @filters.append([attr, "in", version_range])
+    end
+  end
+
+  # Like super, but also translates Job-specific filters; sends an
+  # error response and returns false on bad filter arguments.
+  def load_filters_param
+    begin
+      super
+      load_job_specific_filters
+    rescue ArgumentError => error
+      send_error(error.message)
+      false
+    end
+  end
+end
diff --git a/services/api/app/controllers/arvados/v1/keep_disks_controller.rb b/services/api/app/controllers/arvados/v1/keep_disks_controller.rb
new file mode 100644 (file)
index 0000000..e8ccf23
--- /dev/null
@@ -0,0 +1,50 @@
+# Keep disk registry.  Keep nodes authenticate pings with a
+# ping_secret rather than a scoped user token.
+class Arvados::V1::KeepDisksController < ApplicationController
+  skip_before_filter :require_auth_scope, :only => :ping
+
+  # Parameter spec for the ping action (discovery document).
+  def self._ping_requires_parameters
+    {
+      uuid: {required: false},
+      ping_secret: {required: true},
+      node_uuid: {required: false},
+      filesystem_uuid: {required: false},
+      service_host: {required: false},
+      service_port: {required: true},
+      service_ssl_flag: {required: true}
+    }
+  end
+
+  # Keepalive from a keep disk; defaults service_host to the caller's
+  # address and runs as the system user.
+  def ping
+    params[:service_host] ||= request.env['REMOTE_ADDR']
+    act_as_system_user do
+      if not @object.ping params
+        return render_not_found "object not found"
+      end
+      # Render the :superuser view (i.e., include the ping_secret) even
+      # if !current_user.is_admin. This is safe because @object.ping's
+      # success implies the ping_secret was already known by the client.
+      render json: @object.as_api_response(:superuser)
+    end
+  end
+
+  def find_objects_for_index
+    # all users can list all keep disks
+    @objects = model_class.where('1=1')
+    super
+  end
+
+  def find_object_by_uuid
+    @object = KeepDisk.where(uuid: (params[:id] || params[:uuid])).first
+    if !@object && current_user.andand.is_admin
+      # Create a new KeepDisk and ping it.
+      @object = KeepDisk.new(filesystem_uuid: params[:filesystem_uuid])
+      @object.save!
+
+      # In the first ping from this new filesystem_uuid, we can't
+      # expect the keep node to know the ping_secret so we made sure
+      # we got an admin token. Here we add ping_secret to params so
+      # KeepNode.ping() understands this update is properly
+      # authenticated.
+      params[:ping_secret] = @object.ping_secret
+    end
+  end
+end
diff --git a/services/api/app/controllers/arvados/v1/keep_services_controller.rb b/services/api/app/controllers/arvados/v1/keep_services_controller.rb
new file mode 100644 (file)
index 0000000..fc2ee93
--- /dev/null
@@ -0,0 +1,21 @@
+# Keep service registry, plus the "accessible" action that tells a
+# client which services it should talk to.
+class Arvados::V1::KeepServicesController < ApplicationController
+
+  skip_before_filter :find_object_by_uuid, only: :accessible
+  skip_before_filter :render_404_if_no_object, only: :accessible
+
+  def find_objects_for_index
+    # all users can list all keep services
+    @objects = model_class.where('1=1')
+    super
+  end
+
+  # Proxy services for external clients (X-External-Client: 1), disk
+  # services otherwise.
+  def accessible
+    if request.headers['X-External-Client'] == '1'
+      @objects = model_class.where('service_type=?', 'proxy')
+    else
+      @objects = model_class.where('service_type=?', 'disk')
+    end
+    render_list
+  end
+
+end
diff --git a/services/api/app/controllers/arvados/v1/links_controller.rb b/services/api/app/controllers/arvados/v1/links_controller.rb
new file mode 100644 (file)
index 0000000..798217d
--- /dev/null
@@ -0,0 +1,98 @@
+# Links controller, including permission-link listing and
+# backward-compatibility translation of obsolete head_kind/tail_kind
+# query clauses into 'is_a' filters.
+class Arvados::V1::LinksController < ApplicationController
+
+  # Send a 422 and return nil if kind is given but does not match the
+  # resource class implied by uuid; otherwise return true.
+  def check_uuid_kind uuid, kind
+    if kind and ArvadosModel::resource_class_for_uuid(uuid).andand.kind != kind
+      send_error("'#{kind}' does not match uuid '#{uuid}', expected '#{ArvadosModel::resource_class_for_uuid(uuid).andand.kind}'",
+                 status: 422)
+      nil
+    else
+      true
+    end
+  end
+
+  def create
+    return if ! check_uuid_kind resource_attrs[:head_uuid], resource_attrs[:head_kind]
+    return if ! check_uuid_kind resource_attrs[:tail_uuid], resource_attrs[:tail_kind]
+
+    # head_kind/tail_kind are implied by the uuids; drop them.
+    resource_attrs.delete :head_kind
+    resource_attrs.delete :tail_kind
+    super
+  end
+
+  # List all permission links pointing at the given object; only
+  # allowed for users who can manage that object.
+  def get_permissions
+    if current_user.can?(manage: @object)
+      # find all links and return them
+      @objects = Link.where(link_class: "permission",
+                            head_uuid: params[:uuid])
+      @offset = 0
+      @limit = @objects.count
+      render_list
+    else
+      render :json => { errors: ['Forbidden'] }.to_json, status: 403
+    end
+  end
+
+  protected
+
+  def find_object_by_uuid
+    if action_name == 'get_permissions'
+      # get_permissions accepts a UUID for any kind of object.
+      @object = ArvadosModel::resource_class_for_uuid(params[:uuid])
+        .readable_by(*@read_users)
+        .where(uuid: params[:uuid])
+        .first
+    else
+      super
+      if @object.nil?
+        # Normally group permission links are not readable_by users.
+        # Make an exception for users with permission to manage the group.
+        # FIXME: Solve this more generally - see the controller tests.
+        link = Link.find_by_uuid(params[:uuid])
+        if (not link.nil?) and
+            (link.link_class == "permission") and
+            (@read_users.any? { |u| u.can?(manage: link.head_uuid) })
+          @object = link
+        end
+      end
+    end
+  end
+
+  # Overrides ApplicationController load_where_param
+  def load_where_param
+    super
+
+    # head_kind and tail_kind columns are now virtual,
+    # equivalent functionality is now provided by
+    # 'is_a', so fix up any old-style 'where' clauses.
+    if @where
+      @filters ||= []
+      if @where[:head_kind]
+        @filters << ['head_uuid', 'is_a', @where[:head_kind]]
+        @where.delete :head_kind
+      end
+      if @where[:tail_kind]
+        @filters << ['tail_uuid', 'is_a', @where[:tail_kind]]
+        @where.delete :tail_kind
+      end
+    end
+  end
+
+  # Overrides ApplicationController load_filters_param
+  def load_filters_param
+    super
+
+    # head_kind and tail_kind columns are now virtual,
+    # equivalent functionality is now provided by
+    # 'is_a', so fix up any old-style 'filter' clauses.
+    @filters = @filters.map do |k|
+      if k[0] == 'head_kind' and k[1] == '='
+        ['head_uuid', 'is_a', k[2]]
+      elsif k[0] == 'tail_kind' and k[1] == '='
+        ['tail_uuid', 'is_a', k[2]]
+      else
+        k
+      end
+    end
+  end
+
+end
diff --git a/services/api/app/controllers/arvados/v1/logs_controller.rb b/services/api/app/controllers/arvados/v1/logs_controller.rb
new file mode 100644 (file)
index 0000000..925eee5
--- /dev/null
@@ -0,0 +1,34 @@
+# Logs controller: translates obsolete object_kind query clauses into
+# 'is_a' filters for backward compatibility.
+class Arvados::V1::LogsController < ApplicationController
+  # Overrides ApplicationController load_where_param
+  def load_where_param
+    super
+
+    # the object_kind column is now virtual;
+    # equivalent functionality is now provided by
+    # 'is_a', so fix up any old-style 'where' clauses.
+    if @where
+      @filters ||= []
+      if @where[:object_kind]
+        @filters << ['object_uuid', 'is_a', @where[:object_kind]]
+        @where.delete :object_kind
+      end
+    end
+  end
+
+  # Overrides ApplicationController load_filters_param
+  def load_filters_param
+    super
+
+    # the object_kind column is now virtual;
+    # equivalent functionality is now provided by
+    # 'is_a', so fix up any old-style 'filter' clauses.
+    @filters = @filters.map do |k|
+      if k[0] == 'object_kind' and k[1] == '='
+        ['object_uuid', 'is_a', k[2]]
+      else
+        k
+      end
+    end
+  end
+
+end
diff --git a/services/api/app/controllers/arvados/v1/nodes_controller.rb b/services/api/app/controllers/arvados/v1/nodes_controller.rb
new file mode 100644 (file)
index 0000000..efee982
--- /dev/null
@@ -0,0 +1,59 @@
+class Arvados::V1::NodesController < ApplicationController
+  skip_before_filter :require_auth_scope, :only => :ping
+  skip_before_filter :find_object_by_uuid, :only => :ping
+  skip_before_filter :render_404_if_no_object, :only => :ping
+
+  def update
+    if resource_attrs[:job_uuid]
+      @object.job_readable = readable_job_uuids(resource_attrs[:job_uuid]).any?
+    end
+    super
+  end
+
+  def self._ping_requires_parameters
+    { ping_secret: {required: true} }
+  end
+
+  def ping
+    act_as_system_user do
+      @object = Node.where(uuid: (params[:id] || params[:uuid])).first
+      if !@object
+        return render_not_found
+      end
+      ping_data = {
+        ip: params[:local_ipv4] || request.env['REMOTE_ADDR'],
+        ec2_instance_id: params[:instance_id]
+      }
+      [:ping_secret, :total_cpu_cores, :total_ram_mb, :total_scratch_mb]
+        .each do |key|
+        ping_data[key] = params[key] if params[key]
+      end
+      @object.ping(ping_data)
+      if @object.info['ping_secret'] == params[:ping_secret]
+        render json: @object.as_api_response(:superuser)
+      else
+        raise "Invalid ping_secret after ping"
+      end
+    end
+  end
+
+  def find_objects_for_index
+    if !current_user.andand.is_admin && current_user.andand.is_active
+      # active non-admin users can list nodes that are (or were
+      # recently) working
+      @objects = model_class.where('last_ping_at >= ?', Time.now - 1.hours)
+    end
+    super
+    job_uuids = @objects.map { |n| n[:job_uuid] }.compact
+    assoc_jobs = readable_job_uuids(job_uuids)
+    @objects.each do |node|
+      node.job_readable = assoc_jobs.include?(node[:job_uuid])
+    end
+  end
+
+  protected
+
+  def readable_job_uuids(*uuids)
+    Job.readable_by(*@read_users).select(:uuid).where(uuid: uuids).map(&:uuid)
+  end
+end
diff --git a/services/api/app/controllers/arvados/v1/pipeline_instances_controller.rb b/services/api/app/controllers/arvados/v1/pipeline_instances_controller.rb
new file mode 100644 (file)
index 0000000..614af68
--- /dev/null
@@ -0,0 +1,5 @@
+class Arvados::V1::PipelineInstancesController < ApplicationController
+  accept_attribute_as_json :components, Hash
+  accept_attribute_as_json :properties, Hash
+  accept_attribute_as_json :components_summary, Hash
+end
diff --git a/services/api/app/controllers/arvados/v1/pipeline_templates_controller.rb b/services/api/app/controllers/arvados/v1/pipeline_templates_controller.rb
new file mode 100644 (file)
index 0000000..a2c6d0b
--- /dev/null
@@ -0,0 +1,3 @@
+class Arvados::V1::PipelineTemplatesController < ApplicationController
+  accept_attribute_as_json :components, Hash
+end
diff --git a/services/api/app/controllers/arvados/v1/repositories_controller.rb b/services/api/app/controllers/arvados/v1/repositories_controller.rb
new file mode 100644 (file)
index 0000000..0452c52
--- /dev/null
@@ -0,0 +1,80 @@
+class Arvados::V1::RepositoriesController < ApplicationController
+  skip_before_filter :find_object_by_uuid, :only => :get_all_permissions
+  skip_before_filter :render_404_if_no_object, :only => :get_all_permissions
+  before_filter :admin_required, :only => :get_all_permissions
+  def get_all_permissions
+    @users = {}
+    User.includes(:authorized_keys).all.each do |u|
+      @users[u.uuid] = u
+    end
+    admins = @users.select { |k,v| v.is_admin }
+    @user_aks = {}
+    @repo_info = {}
+    @repos = Repository.includes(:permissions).all
+    @repos.each do |repo|
+      gitolite_permissions = ''
+      perms = []
+      repo.permissions.each do |perm|
+        if ArvadosModel::resource_class_for_uuid(perm.tail_uuid) == Group
+          @users.each do |user_uuid, user|
+            user.group_permissions.each do |group_uuid, perm_mask|
+              if perm_mask[:manage]
+                perms << {name: 'can_manage', user_uuid: user_uuid}
+              elsif perm_mask[:write]
+                perms << {name: 'can_write', user_uuid: user_uuid}
+              elsif perm_mask[:read]
+                perms << {name: 'can_read', user_uuid: user_uuid}
+              end
+            end
+          end
+        else
+          perms << {name: perm.name, user_uuid: perm.tail_uuid}
+        end
+      end
+      # Owner of the repository, and all admins, can RW
+      ([repo.owner_uuid] + admins.keys).each do |user_uuid|
+        perms << {name: 'can_write', user_uuid: user_uuid}
+      end
+      perms.each do |perm|
+        user_uuid = perm[:user_uuid]
+        @user_aks[user_uuid] = @users[user_uuid].andand.authorized_keys.andand.
+          collect do |ak|
+          {
+            public_key: ak.public_key,
+            authorized_key_uuid: ak.uuid
+          }
+        end || []
+        if @user_aks[user_uuid].any?
+          @repo_info[repo.uuid] ||= {
+            uuid: repo.uuid,
+            name: repo.name,
+            push_url: repo.push_url,
+            fetch_url: repo.fetch_url,
+            user_permissions: {}
+          }
+          ri = (@repo_info[repo.uuid][:user_permissions][user_uuid] ||= {})
+          ri[perm[:name]] = true
+        end
+      end
+    end
+    @repo_info.values.each do |repo_users|
+      repo_users[:user_permissions].each do |user_uuid,perms|
+        if perms['can_manage']
+          perms[:gitolite_permissions] = 'RW'
+          perms['can_write'] = true
+          perms['can_read'] = true
+        elsif perms['can_write']
+          perms[:gitolite_permissions] = 'RW'
+          perms['can_read'] = true
+        elsif perms['can_read']
+          perms[:gitolite_permissions] = 'R'
+        end
+      end
+    end
+    render json: {
+      kind: 'arvados#RepositoryPermissionSnapshot',
+      repositories: @repo_info.values,
+      user_keys: @user_aks
+    }
+  end
+end
diff --git a/services/api/app/controllers/arvados/v1/schema_controller.rb b/services/api/app/controllers/arvados/v1/schema_controller.rb
new file mode 100644 (file)
index 0000000..2f7af3c
--- /dev/null
@@ -0,0 +1,396 @@
+class Arvados::V1::SchemaController < ApplicationController
+  skip_before_filter :find_objects_for_index
+  skip_before_filter :find_object_by_uuid
+  skip_before_filter :render_404_if_no_object
+  skip_before_filter :require_auth_scope
+
+  def index
+    expires_in 24.hours, public: true
+    discovery = Rails.cache.fetch 'arvados_v1_rest_discovery' do
+      Rails.application.eager_load!
+      discovery = {
+        kind: "discovery#restDescription",
+        discoveryVersion: "v1",
+        id: "arvados:v1",
+        name: "arvados",
+        version: "v1",
+        revision: "20131114",
+        source_version: (Rails.application.config.source_version ? Rails.application.config.source_version : "No version information available") + (Rails.application.config.local_modified ? Rails.application.config.local_modified.to_s : ''),
+        generatedAt: Time.now.iso8601,
+        title: "Arvados API",
+        description: "The API to interact with Arvados.",
+        documentationLink: "http://doc.arvados.org/api/index.html",
+        protocol: "rest",
+        baseUrl: root_url + "arvados/v1/",
+        basePath: "/arvados/v1/",
+        rootUrl: root_url,
+        servicePath: "arvados/v1/",
+        batchPath: "batch",
+        defaultTrashLifetime: Rails.application.config.default_trash_lifetime,
+        parameters: {
+          alt: {
+            type: "string",
+            description: "Data format for the response.",
+            default: "json",
+            enum: [
+                   "json"
+                  ],
+            enumDescriptions: [
+                               "Responses with Content-Type of application/json"
+                              ],
+            location: "query"
+          },
+          fields: {
+            type: "string",
+            description: "Selector specifying which fields to include in a partial response.",
+            location: "query"
+          },
+          key: {
+            type: "string",
+            description: "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
+            location: "query"
+          },
+          oauth_token: {
+            type: "string",
+            description: "OAuth 2.0 token for the current user.",
+            location: "query"
+          }
+        },
+        auth: {
+          oauth2: {
+            scopes: {
+              "https://api.curoverse.com/auth/arvados" => {
+                description: "View and manage objects"
+              },
+              "https://api.curoverse.com/auth/arvados.readonly" => {
+                description: "View objects"
+              }
+            }
+          }
+        },
+        schemas: {},
+        resources: {}
+      }
+
+      if Rails.application.config.websocket_address
+        discovery[:websocketUrl] = Rails.application.config.websocket_address
+      elsif ENV['ARVADOS_WEBSOCKETS']
+        discovery[:websocketUrl] = (root_url.sub /^http/, 'ws') + "websocket"
+      end
+
+      ActiveRecord::Base.descendants.reject(&:abstract_class?).each do |k|
+        begin
+          ctl_class = "Arvados::V1::#{k.to_s.pluralize}Controller".constantize
+        rescue
+          # No controller -> no discovery.
+          next
+        end
+        object_properties = {}
+        k.columns.
+          select { |col| col.name != 'id' }.
+          collect do |col|
+          if k.serialized_attributes.has_key? col.name
+            object_properties[col.name] = {
+              type: k.serialized_attributes[col.name].object_class.to_s
+            }
+          else
+            object_properties[col.name] = {
+              type: col.type
+            }
+          end
+        end
+        discovery[:schemas][k.to_s + 'List'] = {
+          id: k.to_s + 'List',
+          description: k.to_s + ' list',
+          type: "object",
+          properties: {
+            kind: {
+              type: "string",
+              description: "Object type. Always arvados##{k.to_s.camelcase(:lower)}List.",
+              default: "arvados##{k.to_s.camelcase(:lower)}List"
+            },
+            etag: {
+              type: "string",
+              description: "List version."
+            },
+            items: {
+              type: "array",
+              description: "The list of #{k.to_s.pluralize}.",
+              items: {
+                "$ref" => k.to_s
+              }
+            },
+            next_link: {
+              type: "string",
+              description: "A link to the next page of #{k.to_s.pluralize}."
+            },
+            next_page_token: {
+              type: "string",
+              description: "The page token for the next page of #{k.to_s.pluralize}."
+            },
+            selfLink: {
+              type: "string",
+              description: "A link back to this list."
+            }
+          }
+        }
+        discovery[:schemas][k.to_s] = {
+          id: k.to_s,
+          description: k.to_s,
+          type: "object",
+          uuidPrefix: (k.respond_to?(:uuid_prefix) ? k.uuid_prefix : nil),
+          properties: {
+            uuid: {
+              type: "string",
+              description: "Object ID."
+            },
+            etag: {
+              type: "string",
+              description: "Object version."
+            }
+          }.merge(object_properties)
+        }
+        discovery[:resources][k.to_s.underscore.pluralize] = {
+          methods: {
+            get: {
+              id: "arvados.#{k.to_s.underscore.pluralize}.get",
+              path: "#{k.to_s.underscore.pluralize}/{uuid}",
+              httpMethod: "GET",
+              description: "Gets a #{k.to_s}'s metadata by UUID.",
+              parameters: {
+                uuid: {
+                  type: "string",
+                  description: "The UUID of the #{k.to_s} in question.",
+                  required: true,
+                  location: "path"
+                }
+              },
+              parameterOrder: [
+                               "uuid"
+                              ],
+              response: {
+                "$ref" => k.to_s
+              },
+              scopes: [
+                       "https://api.curoverse.com/auth/arvados",
+                       "https://api.curoverse.com/auth/arvados.readonly"
+                      ]
+            },
+            list: {
+              id: "arvados.#{k.to_s.underscore.pluralize}.list",
+              path: k.to_s.underscore.pluralize,
+              httpMethod: "GET",
+              description:
+                 %|List #{k.to_s.pluralize}.
+
+                   The <code>list</code> method returns a
+                   <a href="/api/resources.html">resource list</a> of
+                   matching #{k.to_s.pluralize}. For example:
+
+                   <pre>
+                   {
+                    "kind":"arvados##{k.to_s.camelcase(:lower)}List",
+                    "etag":"",
+                    "self_link":"",
+                    "next_page_token":"",
+                    "next_link":"",
+                    "items":[
+                       ...
+                    ],
+                    "items_available":745,
+                    "_profile":{
+                     "request_time":0.157236317
+                    }
+                    </pre>|,
+              parameters: {
+                limit: {
+                  type: "integer",
+                  description: "Maximum number of #{k.to_s.underscore.pluralize} to return.",
+                  default: "100",
+                  format: "int32",
+                  minimum: "0",
+                  location: "query",
+                },
+                offset: {
+                  type: "integer",
+                  description: "Number of #{k.to_s.underscore.pluralize} to skip before first returned record.",
+                  default: "0",
+                  format: "int32",
+                  minimum: "0",
+                  location: "query",
+                  },
+                filters: {
+                  type: "array",
+                  description: "Conditions for filtering #{k.to_s.underscore.pluralize}.",
+                  location: "query"
+                },
+                where: {
+                  type: "object",
+                  description: "Conditions for filtering #{k.to_s.underscore.pluralize}. (Deprecated. Use filters instead.)",
+                  location: "query"
+                },
+                order: {
+                  type: "string",
+                  description: "Order in which to return matching #{k.to_s.underscore.pluralize}.",
+                  location: "query"
+                },
+                select: {
+                  type: "array",
+                  description: "Select which fields to return",
+                  location: "query"
+                },
+                distinct: {
+                  type: "boolean",
+                  description: "Return each distinct object",
+                  location: "query"
+                }
+              },
+              response: {
+                "$ref" => "#{k.to_s}List"
+              },
+              scopes: [
+                       "https://api.curoverse.com/auth/arvados",
+                       "https://api.curoverse.com/auth/arvados.readonly"
+                      ]
+            },
+            create: {
+              id: "arvados.#{k.to_s.underscore.pluralize}.create",
+              path: "#{k.to_s.underscore.pluralize}",
+              httpMethod: "POST",
+              description: "Create a new #{k.to_s}.",
+              parameters: {},
+              request: {
+                required: true,
+                properties: {
+                  k.to_s.underscore => {
+                    "$ref" => k.to_s
+                  }
+                }
+              },
+              response: {
+                "$ref" => k.to_s
+              },
+              scopes: [
+                       "https://api.curoverse.com/auth/arvados"
+                      ]
+            },
+            update: {
+              id: "arvados.#{k.to_s.underscore.pluralize}.update",
+              path: "#{k.to_s.underscore.pluralize}/{uuid}",
+              httpMethod: "PUT",
+              description: "Update attributes of an existing #{k.to_s}.",
+              parameters: {
+                uuid: {
+                  type: "string",
+                  description: "The UUID of the #{k.to_s} in question.",
+                  required: true,
+                  location: "path"
+                }
+              },
+              request: {
+                required: true,
+                properties: {
+                  k.to_s.underscore => {
+                    "$ref" => k.to_s
+                  }
+                }
+              },
+              response: {
+                "$ref" => k.to_s
+              },
+              scopes: [
+                       "https://api.curoverse.com/auth/arvados"
+                      ]
+            },
+            delete: {
+              id: "arvados.#{k.to_s.underscore.pluralize}.delete",
+              path: "#{k.to_s.underscore.pluralize}/{uuid}",
+              httpMethod: "DELETE",
+              description: "Delete an existing #{k.to_s}.",
+              parameters: {
+                uuid: {
+                  type: "string",
+                  description: "The UUID of the #{k.to_s} in question.",
+                  required: true,
+                  location: "path"
+                }
+              },
+              response: {
+                "$ref" => k.to_s
+              },
+              scopes: [
+                       "https://api.curoverse.com/auth/arvados"
+                      ]
+            }
+          }
+        }
+        # Check for Rails routes that don't match the usual actions
+        # listed above
+        d_methods = discovery[:resources][k.to_s.underscore.pluralize][:methods]
+        Rails.application.routes.routes.each do |route|
+          action = route.defaults[:action]
+          httpMethod = ['GET', 'POST', 'PUT', 'DELETE'].map { |method|
+            method if route.verb.match(method)
+          }.compact.first
+          if httpMethod and
+              route.defaults[:controller] == 'arvados/v1/' + k.to_s.underscore.pluralize and
+              ctl_class.action_methods.include? action
+            if !d_methods[action.to_sym]
+              method = {
+                id: "arvados.#{k.to_s.underscore.pluralize}.#{action}",
+                path: route.path.spec.to_s.sub('/arvados/v1/','').sub('(.:format)','').sub(/:(uu)?id/,'{uuid}'),
+                httpMethod: httpMethod,
+                description: "#{action} #{k.to_s.underscore.pluralize}",
+                parameters: {},
+                response: {
+                  "$ref" => (action == 'index' ? "#{k.to_s}List" : k.to_s)
+                },
+                scopes: [
+                         "https://api.curoverse.com/auth/arvados"
+                        ]
+              }
+              route.segment_keys.each do |key|
+                if key != :format
+                  key = :uuid if key == :id
+                  method[:parameters][key] = {
+                    type: "string",
+                    description: "",
+                    required: true,
+                    location: "path"
+                  }
+                end
+              end
+            else
+              # We already built a generic method description, but we
+              # might find some more required parameters through
+              # introspection.
+              method = d_methods[action.to_sym]
+            end
+            if ctl_class.respond_to? "_#{action}_requires_parameters".to_sym
+              ctl_class.send("_#{action}_requires_parameters".to_sym).each do |k, v|
+                if v.is_a? Hash
+                  method[:parameters][k] = v
+                else
+                  method[:parameters][k] = {}
+                end
+                if !method[:parameters][k][:default].nil?
+                  # The Java SDK is sensitive to all values being strings
+                  method[:parameters][k][:default] = method[:parameters][k][:default].to_s
+                end
+                method[:parameters][k][:type] ||= 'string'
+                method[:parameters][k][:description] ||= ''
+                method[:parameters][k][:location] = (route.segment_keys.include?(k) ? 'path' : 'query')
+                if method[:parameters][k][:required].nil?
+                  method[:parameters][k][:required] = v != false
+                end
+              end
+            end
+            d_methods[action.to_sym] = method
+          end
+        end
+      end
+      discovery
+    end
+    render json: discovery
+  end
+end
diff --git a/services/api/app/controllers/arvados/v1/specimens_controller.rb b/services/api/app/controllers/arvados/v1/specimens_controller.rb
new file mode 100644 (file)
index 0000000..24d3830
--- /dev/null
@@ -0,0 +1,2 @@
+class Arvados::V1::SpecimensController < ApplicationController
+end
diff --git a/services/api/app/controllers/arvados/v1/traits_controller.rb b/services/api/app/controllers/arvados/v1/traits_controller.rb
new file mode 100644 (file)
index 0000000..c4bdc5b
--- /dev/null
@@ -0,0 +1,2 @@
+class Arvados::V1::TraitsController < ApplicationController
+end
diff --git a/services/api/app/controllers/arvados/v1/user_agreements_controller.rb b/services/api/app/controllers/arvados/v1/user_agreements_controller.rb
new file mode 100644 (file)
index 0000000..32adde9
--- /dev/null
@@ -0,0 +1,84 @@
+class Arvados::V1::UserAgreementsController < ApplicationController
+  before_filter :admin_required, except: [:index, :sign, :signatures]
+  skip_before_filter :find_object_by_uuid, only: [:sign, :signatures]
+  skip_before_filter :render_404_if_no_object, only: [:sign, :signatures]
+
+  def model_class
+    Link
+  end
+
+  def table_name
+    'links'
+  end
+
+  def index
+    if not current_user.is_invited
+      # New users cannot see user agreements until/unless invited to
+      # use this installation.
+      @objects = []
+    else
+      current_user_uuid = current_user.uuid
+      act_as_system_user do
+        uuids = Link.where("owner_uuid = ? and link_class = ? and name = ? and tail_uuid = ? and head_uuid like ?",
+                           system_user_uuid,
+                           'signature',
+                           'require',
+                           system_user_uuid,
+                           Collection.uuid_like_pattern).
+          collect &:head_uuid
+        @objects = Collection.where('uuid in (?)', uuids)
+      end
+    end
+    @response_resource_name = 'collection'
+    super
+  end
+
+  def signatures
+    current_user_uuid = (current_user.andand.is_admin && params[:uuid]) ||
+      current_user.uuid
+    act_as_system_user do
+      @objects = Link.where("owner_uuid = ? and link_class = ? and name = ? and tail_uuid = ? and head_uuid like ?",
+                            system_user_uuid,
+                            'signature',
+                            'click',
+                            current_user_uuid,
+                            Collection.uuid_like_pattern)
+    end
+    @response_resource_name = 'link'
+    render_list
+  end
+
+  def sign
+    current_user_uuid = current_user.uuid
+    act_as_system_user do
+      @object = Link.create(link_class: 'signature',
+                            name: 'click',
+                            tail_uuid: current_user_uuid,
+                            head_uuid: params[:uuid])
+    end
+    show
+  end
+
+  def create
+    usage_error
+  end
+  
+  def new
+    usage_error
+  end
+
+  def update
+    usage_error
+  end
+
+  def destroy
+    usage_error
+  end
+
+  protected
+  def usage_error
+    raise ArgumentError.new \
+    "Manage user agreements via Collections and Links instead."
+  end
+  
+end
diff --git a/services/api/app/controllers/arvados/v1/users_controller.rb b/services/api/app/controllers/arvados/v1/users_controller.rb
new file mode 100644 (file)
index 0000000..50ee3b0
--- /dev/null
@@ -0,0 +1,153 @@
+class Arvados::V1::UsersController < ApplicationController
+  accept_attribute_as_json :prefs, Hash
+
+  skip_before_filter :find_object_by_uuid, only:
+    [:activate, :current, :system, :setup]
+  skip_before_filter :render_404_if_no_object, only:
+    [:activate, :current, :system, :setup]
+  before_filter :admin_required, only: [:setup, :unsetup]
+
+  def current
+    @object = current_user
+    show
+  end
+  def system
+    @object = system_user
+    show
+  end
+
+  def activate
+    if current_user.andand.is_admin && params[:uuid]
+      @object = User.find params[:uuid]
+    else
+      @object = current_user
+    end
+    if not @object.is_active
+      if not (current_user.is_admin or @object.is_invited)
+        logger.warn "User #{@object.uuid} called users.activate " +
+          "but is not invited"
+        raise ArgumentError.new "Cannot activate without being invited."
+      end
+      act_as_system_user do
+        required_uuids = Link.where("owner_uuid = ? and link_class = ? and name = ? and tail_uuid = ? and head_uuid like ?",
+                                    system_user_uuid,
+                                    'signature',
+                                    'require',
+                                    system_user_uuid,
+                                    Collection.uuid_like_pattern).
+          collect(&:head_uuid)
+        signed_uuids = Link.where(owner_uuid: system_user_uuid,
+                                  link_class: 'signature',
+                                  name: 'click',
+                                  tail_uuid: @object.uuid,
+                                  head_uuid: required_uuids).
+          collect(&:head_uuid)
+        todo_uuids = required_uuids - signed_uuids
+        if todo_uuids.empty?
+          @object.update_attributes is_active: true
+          logger.info "User #{@object.uuid} activated"
+        else
+          logger.warn "User #{@object.uuid} called users.activate " +
+            "before signing agreements #{todo_uuids.inspect}"
+          raise ArvadosModel::PermissionDeniedError.new \
+          "Cannot activate without user agreements #{todo_uuids.inspect}."
+        end
+      end
+    end
+    show
+  end
+
+  # create user object and all the needed links
+  def setup
+    @object = nil
+    if params[:uuid]
+      @object = User.find_by_uuid params[:uuid]
+      if !@object
+        return render_404_if_no_object
+      end
+      object_found = true
+    else
+      if !params[:user]
+        raise ArgumentError.new "Required uuid or user"
+      else
+        if params[:user]['uuid']
+          @object = User.find_by_uuid params[:user]['uuid']
+          if @object
+            object_found = true
+          end
+        end
+
+        if !@object
+          if !params[:user]['email']
+            raise ArgumentError.new "Require user email"
+          end
+
+          if !params[:openid_prefix]
+            raise ArgumentError.new "Required openid_prefix parameter is missing."
+          end
+
+          @object = model_class.create! resource_attrs
+        end
+      end
+    end
+
+    if object_found
+      @response = @object.setup_repo_vm_links params[:repo_name],
+                    params[:vm_uuid], params[:openid_prefix]
+    else
+      @response = User.setup @object, params[:openid_prefix],
+                    params[:repo_name], params[:vm_uuid]
+    end
+
+    # setup succeeded. send email to user
+    if params[:send_notification_email] == true || params[:send_notification_email] == 'true'
+      UserNotifier.account_is_setup(@object).deliver
+    end
+
+    render json: { kind: "arvados#HashList", items: @response.as_api_response(nil) }
+  end
+
+  # delete user agreements, vm, repository, login links; set state to inactive
+  def unsetup
+    reload_object_before_update
+    @object.unsetup
+    show
+  end
+
+  protected
+
+  def self._setup_requires_parameters
+    {
+      user: {
+        type: 'object', required: false
+      },
+      openid_prefix: {
+        type: 'string', required: false
+      },
+      repo_name: {
+        type: 'string', required: false
+      },
+      vm_uuid: {
+        type: 'string', required: false
+      },
+      send_notification_email: {
+        type: 'boolean', required: false, default: false
+      },
+    }
+  end
+
+  def apply_filters
+    return super if @read_users.any? &:is_admin
+    if params[:uuid] != current_user.andand.uuid
+      # Non-admin index/show returns very basic information about readable users.
+      safe_attrs = ["uuid", "is_active", "email", "first_name", "last_name"]
+      if @select
+        @select = @select & safe_attrs
+      else
+        @select = safe_attrs
+      end
+      @filters += [['is_active', '=', true]]
+    end
+    super
+  end
+end
diff --git a/services/api/app/controllers/arvados/v1/virtual_machines_controller.rb b/services/api/app/controllers/arvados/v1/virtual_machines_controller.rb
new file mode 100644 (file)
index 0000000..e176348
--- /dev/null
@@ -0,0 +1,43 @@
+class Arvados::V1::VirtualMachinesController < ApplicationController
+  skip_before_filter :find_object_by_uuid, :only => :get_all_logins
+  skip_before_filter :render_404_if_no_object, :only => :get_all_logins
+  before_filter(:admin_required,
+                :only => [:logins, :get_all_logins])
+
+  def logins
+    get_all_logins
+  end
+
+  def get_all_logins
+    @users = {}
+    User.includes(:authorized_keys).all.each do |u|
+      @users[u.uuid] = u
+    end
+    @response = []
+    @vms = VirtualMachine.includes(:login_permissions)
+    if @object
+      @vms = @vms.where('uuid=?', @object.uuid)
+    else
+      @vms = @vms.all
+    end
+    @vms.each do |vm|
+      vm.login_permissions.each do |perm|
+        user_uuid = perm.tail_uuid
+        @users[user_uuid].andand.authorized_keys.andand.each do |ak|
+          username = perm.properties.andand['username']
+          if username
+            @response << {
+              username: username,
+              hostname: vm.hostname,
+              public_key: ak.public_key,
+              user_uuid: user_uuid,
+              virtual_machine_uuid: vm.uuid,
+              authorized_key_uuid: ak.uuid
+            }
+          end
+        end
+      end
+    end
+    render json: { kind: "arvados#HashList", items: @response }
+  end
+end
diff --git a/services/api/app/controllers/database_controller.rb b/services/api/app/controllers/database_controller.rb
new file mode 100644 (file)
index 0000000..04c0e79
--- /dev/null
@@ -0,0 +1,73 @@
+class DatabaseController < ApplicationController
+  skip_before_filter :find_object_by_uuid
+  skip_before_filter :render_404_if_no_object
+  before_filter :admin_required
+  def reset
+    raise ArvadosModel::PermissionDeniedError unless Rails.env == 'test'
+
+    # Sanity check: If someone has actually logged in here, this might
+    # not really be a throwaway database. Client test suites should
+    # use @example.com email addresses when creating user records, so
+    # we can tell they're not valuable.
+    user_uuids = User.
+      where('email is null or email not like ?', '%@example.com').
+      collect &:uuid
+    fixture_uuids =
+      YAML::load_file(File.expand_path('../../../test/fixtures/users.yml',
+                                       __FILE__)).
+      values.collect { |u| u['uuid'] }
+    unexpected_uuids = user_uuids - fixture_uuids
+    if unexpected_uuids.any?
+      logger.error("Running in test environment, but non-fixture users exist: " +
+                   "#{unexpected_uuids}")
+      raise ArvadosModel::PermissionDeniedError
+    end
+
+    require 'active_record/fixtures'
+
+    # What kinds of fixtures do we have?
+    fixturesets = Dir.glob(Rails.root.join('test', 'fixtures', '*.yml')).
+      collect { |yml| yml.match(/([^\/]*)\.yml$/)[1] }
+
+    table_names = '"' + ActiveRecord::Base.connection.tables.join('","') + '"'
+
+    attempts_left = 20
+    begin
+      ActiveRecord::Base.transaction do
+        # Avoid deadlock by locking all tables before doing anything
+        # drastic.
+        ActiveRecord::Base.connection.execute \
+        "LOCK TABLE #{table_names} IN ACCESS EXCLUSIVE MODE"
+
+        # Delete existing fixtures (and everything else) from fixture
+        # tables
+        fixturesets.each do |x|
+          x.classify.constantize.unscoped.delete_all
+        end
+
+        # create_fixtures() is a no-op for cached fixture sets, so
+        # uncache them all.
+        ActiveRecord::Fixtures.reset_cache
+        ActiveRecord::Fixtures.
+          create_fixtures(Rails.root.join('test', 'fixtures'), fixturesets)
+
+        # Dump cache of permissions etc.
+        Rails.cache.clear
+        ActiveRecord::Base.connection.clear_query_cache
+
+        # Reload database seeds
+        DatabaseSeeds.install
+      end
+    rescue ActiveRecord::StatementInvalid => e
+      if "#{e.inspect}" =~ /deadlock detected/i and (attempts_left -= 1) > 0
+        logger.info "Waiting for lock -- #{e.inspect}"
+        sleep 0.5
+        retry
+      end
+      raise
+    end
+
+    # Done.
+    render json: {success: true}
+  end
+end
diff --git a/services/api/app/controllers/static_controller.rb b/services/api/app/controllers/static_controller.rb
new file mode 100644 (file)
index 0000000..9c66f01
--- /dev/null
@@ -0,0 +1,27 @@
+class StaticController < ApplicationController
+  respond_to :json, :html
+
+  skip_before_filter :find_object_by_uuid
+  skip_before_filter :render_404_if_no_object
+  skip_before_filter :require_auth_scope, only: [:home, :empty, :login_failure]
+
+  def home
+    respond_to do |f|
+      f.html do
+        if Rails.configuration.workbench_address
+          redirect_to Rails.configuration.workbench_address
+        else
+          render_not_found "Oops, this is an API endpoint. You probably want to point your browser to an Arvados Workbench site instead."
+        end
+      end
+      f.json do
+        render_not_found "Path not found."
+      end
+    end
+  end
+
+  def empty
+    render text: "-"
+  end
+
+end
diff --git a/services/api/app/controllers/user_sessions_controller.rb b/services/api/app/controllers/user_sessions_controller.rb
new file mode 100644 (file)
index 0000000..256a67b
--- /dev/null
@@ -0,0 +1,149 @@
+class UserSessionsController < ApplicationController
+  before_filter :require_auth_scope, :only => [ :destroy ]
+
+  skip_before_filter :set_cors_headers
+  skip_before_filter :find_object_by_uuid
+  skip_before_filter :render_404_if_no_object
+
+  respond_to :html
+
+  # omniauth callback method
+  def create
+    omniauth = env['omniauth.auth']
+
+    identity_url_ok = (omniauth['info']['identity_url'].length > 0) rescue false
+    unless identity_url_ok
+      # Whoa. This should never happen.
+      logger.error "UserSessionsController.create: omniauth object missing/invalid"
+      logger.error "omniauth.pretty_inspect():\n\n#{omniauth.pretty_inspect()}"
+
+      return redirect_to login_failure_url
+    end
+
+    user = User.find_by_identity_url(omniauth['info']['identity_url'])
+    if not user
+      # Check for permission to log in to an existing User record with
+      # a different identity_url
+      Link.where("link_class = ? and name = ? and tail_uuid = ? and head_uuid like ?",
+                 'permission',
+                 'can_login',
+                 omniauth['info']['email'],
+                 User.uuid_like_pattern).each do |link|
+        if prefix = link.properties['identity_url_prefix']
+          if prefix == omniauth['info']['identity_url'][0..prefix.size-1]
+            user = User.find_by_uuid(link.head_uuid)
+            break if user
+          end
+        end
+      end
+    end
+    if not user
+      # New user registration
+      user = User.new(:email => omniauth['info']['email'],
+                      :first_name => omniauth['info']['first_name'],
+                      :last_name => omniauth['info']['last_name'],
+                      :identity_url => omniauth['info']['identity_url'],
+                      :is_active => Rails.configuration.new_users_are_active,
+                      :owner_uuid => system_user_uuid)
+      act_as_system_user do
+        user.save or raise Exception.new(user.errors.messages)
+      end
+    else
+      user.email = omniauth['info']['email']
+      user.first_name = omniauth['info']['first_name']
+      user.last_name = omniauth['info']['last_name']
+      if user.identity_url.nil?
+        # First login to a pre-activated account
+        user.identity_url = omniauth['info']['identity_url']
+      end
+    end
+
+    # For the benefit of functional and integration tests:
+    @user = user
+
+    # prevent ArvadosModel#before_create and _update from throwing
+    # "unauthorized":
+    Thread.current[:user] = user
+
+    user.save or raise Exception.new(user.errors.messages)
+
+    omniauth.delete('extra')
+
+    # Give the authenticated user a cookie for direct API access
+    session[:user_id] = user.id
+    session[:api_client_uuid] = nil
+    session[:api_client_trusted] = true # full permission to see user's secrets
+
+    @redirect_to = root_path
+    if params.has_key?(:return_to)
+      return send_api_token_to(params[:return_to], user)
+    end
+    redirect_to @redirect_to
+  end
+
+  # Omniauth failure callback
+  def failure
+    flash[:notice] = params[:message]
+  end
+
+  # logout - Clear our rack session BUT essentially redirect to the provider
+  # to clean up the Devise session from there too !
+  def logout
+    session[:user_id] = nil
+
+    flash[:notice] = 'You have logged off'
+    return_to = params[:return_to] || root_url
+    redirect_to "#{CUSTOM_PROVIDER_URL}/users/sign_out?redirect_uri=#{CGI.escape return_to}"
+  end
+
+  # login - Just bounce to /auth/joshid. The only purpose of this function is
+  # to save the return_to parameter (if it exists; see the application
+  # controller). /auth/joshid bypasses the application controller.
+  def login
+    auth_provider = if params[:auth_provider] then "auth_provider=#{CGI.escape(params[:auth_provider])}" else "" end
+
+    if current_user and params[:return_to]
+      # Already logged in; just need to send a token to the requesting
+      # API client.
+      #
+      # FIXME: if current_user has never authorized this app before,
+      # ask for confirmation here!
+
+      send_api_token_to(params[:return_to], current_user)
+    elsif params[:return_to]
+      redirect_to "/auth/joshid?return_to=#{CGI.escape(params[:return_to])}&#{auth_provider}"
+    else
+      redirect_to "/auth/joshid?#{auth_provider}"
+    end
+  end
+
+  def send_api_token_to(callback_url, user)
+    # Give the API client a token for making API calls on behalf of
+    # the authenticated user
+
+    # Stub: automatically register all new API clients
+    api_client_url_prefix = callback_url.match(%r{^.*?://[^/]+})[0] + '/'
+    act_as_system_user do
+      @api_client = ApiClient.find_or_create_by_url_prefix api_client_url_prefix
+    end
+
+    api_client_auth = ApiClientAuthorization.
+      new(user: user,
+          api_client: @api_client,
+          created_by_ip_address: remote_ip,
+          scopes: ["all"])
+    api_client_auth.save!
+
+    if callback_url.index('?')
+      callback_url += '&'
+    else
+      callback_url += '?'
+    end
+    callback_url += 'api_token=' + api_client_auth.api_token
+    redirect_to callback_url
+  end
+
+  def cross_origin_forbidden
+    send_error 'Forbidden', status: 403
+  end
+end
diff --git a/services/api/app/helpers/api_client_authorizations_helper.rb b/services/api/app/helpers/api_client_authorizations_helper.rb
new file mode 100644 (file)
index 0000000..98ddddc
--- /dev/null
@@ -0,0 +1,2 @@
+module ApiClientAuthorizationsHelper
+end
diff --git a/services/api/app/helpers/api_clients_helper.rb b/services/api/app/helpers/api_clients_helper.rb
new file mode 100644 (file)
index 0000000..2432bfe
--- /dev/null
@@ -0,0 +1,2 @@
+module ApiClientsHelper
+end
diff --git a/services/api/app/helpers/application_helper.rb b/services/api/app/helpers/application_helper.rb
new file mode 100644 (file)
index 0000000..c5999b3
--- /dev/null
@@ -0,0 +1,3 @@
+module ApplicationHelper
+  include CurrentApiClient
+end
diff --git a/services/api/app/helpers/authorized_keys_helper.rb b/services/api/app/helpers/authorized_keys_helper.rb
new file mode 100644 (file)
index 0000000..9a486f2
--- /dev/null
@@ -0,0 +1,2 @@
+module AuthorizedKeysHelper
+end
diff --git a/services/api/app/helpers/collections_helper.rb b/services/api/app/helpers/collections_helper.rb
new file mode 100644 (file)
index 0000000..3017985
--- /dev/null
@@ -0,0 +1,2 @@
+module CollectionsHelper
+end
diff --git a/services/api/app/helpers/commit_ancestors_helper.rb b/services/api/app/helpers/commit_ancestors_helper.rb
new file mode 100644 (file)
index 0000000..de40939
--- /dev/null
@@ -0,0 +1,2 @@
+module CommitAncestorsHelper
+end
diff --git a/services/api/app/helpers/commits_helper.rb b/services/api/app/helpers/commits_helper.rb
new file mode 100644 (file)
index 0000000..60b037d
--- /dev/null
@@ -0,0 +1,2 @@
+module CommitsHelper
+end
diff --git a/services/api/app/helpers/groups_helper.rb b/services/api/app/helpers/groups_helper.rb
new file mode 100644 (file)
index 0000000..c091b2f
--- /dev/null
@@ -0,0 +1,2 @@
+module GroupsHelper
+end
diff --git a/services/api/app/helpers/humans_helper.rb b/services/api/app/helpers/humans_helper.rb
new file mode 100644 (file)
index 0000000..ca84af0
--- /dev/null
@@ -0,0 +1,2 @@
+module HumansHelper
+end
diff --git a/services/api/app/helpers/job_tasks_helper.rb b/services/api/app/helpers/job_tasks_helper.rb
new file mode 100644 (file)
index 0000000..b08a1ae
--- /dev/null
@@ -0,0 +1,2 @@
+module JobTasksHelper
+end
diff --git a/services/api/app/helpers/jobs_helper.rb b/services/api/app/helpers/jobs_helper.rb
new file mode 100644 (file)
index 0000000..44c7bf6
--- /dev/null
@@ -0,0 +1,2 @@
+module JobsHelper
+end
diff --git a/services/api/app/helpers/keep_disks_helper.rb b/services/api/app/helpers/keep_disks_helper.rb
new file mode 100644 (file)
index 0000000..9cf6b4a
--- /dev/null
@@ -0,0 +1,2 @@
+module KeepDisksHelper
+end
diff --git a/services/api/app/helpers/links_helper.rb b/services/api/app/helpers/links_helper.rb
new file mode 100644 (file)
index 0000000..f6bc988
--- /dev/null
@@ -0,0 +1,2 @@
+module LinksHelper
+end
diff --git a/services/api/app/helpers/logs_helper.rb b/services/api/app/helpers/logs_helper.rb
new file mode 100644 (file)
index 0000000..99736f0
--- /dev/null
@@ -0,0 +1,2 @@
+module LogsHelper
+end
diff --git a/services/api/app/helpers/nodes_helper.rb b/services/api/app/helpers/nodes_helper.rb
new file mode 100644 (file)
index 0000000..673b561
--- /dev/null
@@ -0,0 +1,2 @@
+module NodesHelper
+end
diff --git a/services/api/app/helpers/pipeline_instances_helper.rb b/services/api/app/helpers/pipeline_instances_helper.rb
new file mode 100644 (file)
index 0000000..8ad94c4
--- /dev/null
@@ -0,0 +1,2 @@
+module PipelineInstancesHelper
+end
diff --git a/services/api/app/helpers/pipeline_templates_helper.rb b/services/api/app/helpers/pipeline_templates_helper.rb
new file mode 100644 (file)
index 0000000..be82878
--- /dev/null
@@ -0,0 +1,2 @@
+module PipelineTemplatesHelper
+end
diff --git a/services/api/app/helpers/repositories_helper.rb b/services/api/app/helpers/repositories_helper.rb
new file mode 100644 (file)
index 0000000..2860b5a
--- /dev/null
@@ -0,0 +1,2 @@
+module RepositoriesHelper
+end
diff --git a/services/api/app/helpers/specimens_helper.rb b/services/api/app/helpers/specimens_helper.rb
new file mode 100644 (file)
index 0000000..8c30d97
--- /dev/null
@@ -0,0 +1,2 @@
+module SpecimensHelper
+end
diff --git a/services/api/app/helpers/traits_helper.rb b/services/api/app/helpers/traits_helper.rb
new file mode 100644 (file)
index 0000000..a4260eb
--- /dev/null
@@ -0,0 +1,2 @@
+module TraitsHelper
+end
diff --git a/services/api/app/helpers/virtual_machines_helper.rb b/services/api/app/helpers/virtual_machines_helper.rb
new file mode 100644 (file)
index 0000000..cbb398d
--- /dev/null
@@ -0,0 +1,2 @@
+module VirtualMachinesHelper
+end
diff --git a/services/api/app/mailers/.gitkeep b/services/api/app/mailers/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/api/app/mailers/admin_notifier.rb b/services/api/app/mailers/admin_notifier.rb
new file mode 100644 (file)
index 0000000..5dd36c0
--- /dev/null
@@ -0,0 +1,34 @@
+class AdminNotifier < ActionMailer::Base
+  include AbstractController::Callbacks
+
+  default from: Rails.configuration.admin_notifier_email_from
+
+  def new_user(user)
+    @user = user
+    if not Rails.configuration.new_user_notification_recipients.empty? then
+      @recipients = Rails.configuration.new_user_notification_recipients
+      logger.info "Sending mail to #{@recipients} about new user #{@user.uuid} (#{@user.full_name} <#{@user.email}>)"
+
+      add_to_subject = ''
+      if Rails.configuration.auto_setup_new_users
+        add_to_subject = @user.is_invited ? ' and setup' : ', but not setup'
+      end
+
+      mail(to: @recipients,
+           subject: "#{Rails.configuration.email_subject_prefix}New user created#{add_to_subject} notification"
+          )
+    end
+  end
+
+  def new_inactive_user(user)
+    @user = user
+    if not Rails.configuration.new_inactive_user_notification_recipients.empty? then
+      @recipients = Rails.configuration.new_inactive_user_notification_recipients
+      logger.info "Sending mail to #{@recipients} about new user #{@user.uuid} (#{@user.full_name} <#{@user.email}>)"
+      mail(to: @recipients,
+           subject: "#{Rails.configuration.email_subject_prefix}New inactive user notification"
+          )
+    end
+  end
+
+end
diff --git a/services/api/app/mailers/profile_notifier.rb b/services/api/app/mailers/profile_notifier.rb
new file mode 100644 (file)
index 0000000..13e3b34
--- /dev/null
@@ -0,0 +1,8 @@
+class ProfileNotifier < ActionMailer::Base
+  default from: Rails.configuration.admin_notifier_email_from
+
+  def profile_created(user, address)
+    @user = user
+    mail(to: address, subject: "Profile created by #{@user.email}")
+  end
+end
diff --git a/services/api/app/mailers/user_notifier.rb b/services/api/app/mailers/user_notifier.rb
new file mode 100644 (file)
index 0000000..055fe3a
--- /dev/null
@@ -0,0 +1,11 @@
+class UserNotifier < ActionMailer::Base
+  include AbstractController::Callbacks
+
+  default from: Rails.configuration.user_notifier_email_from
+
+  def account_is_setup(user)
+    @user = user
+    mail(to: user.email, subject: 'Welcome to Curoverse')
+  end
+
+end
diff --git a/services/api/app/middlewares/arvados_api_token.rb b/services/api/app/middlewares/arvados_api_token.rb
new file mode 100644 (file)
index 0000000..57d3ad0
--- /dev/null
@@ -0,0 +1,61 @@
+# Perform api_token checking very early in the request process.  We want to do
+# this in the Rack stack instead of in ApplicationController because
+# websockets needs access to authentication but doesn't use any of the rails
+# active dispatch infrastructure.
+class ArvadosApiToken
+
+  # Create a new ArvadosApiToken handler
+  # +app+  The next layer of the Rack stack.
+  def initialize(app = nil, options = nil)
+    @app = app if app.respond_to?(:call)
+  end
+
+  def call env
+    # First, clean up just in case we have a multithreaded server and thread
+    # local variables are still set from a prior request.  Also useful for
+    # tests that call this code to set up the environment.
+    Thread.current[:api_client_ip_address] = nil
+    Thread.current[:api_client_authorization] = nil
+    Thread.current[:api_client_uuid] = nil
+    Thread.current[:api_client] = nil
+    Thread.current[:user] = nil
+
+    request = Rack::Request.new(env)
+    params = request.params
+    remote_ip = env["action_dispatch.remote_ip"]
+
+    Thread.current[:request_starttime] = Time.now
+    user = nil
+    api_client = nil
+    api_client_auth = nil
+    supplied_token =
+      params["api_token"] ||
+      params["oauth_token"] ||
+      env["HTTP_AUTHORIZATION"].andand.match(/OAuth2 ([a-z0-9]+)/).andand[1]
+    if supplied_token
+      api_client_auth = ApiClientAuthorization.
+        includes(:api_client, :user).
+        where('api_token=? and (expires_at is null or expires_at > CURRENT_TIMESTAMP)', supplied_token).
+        first
+      if api_client_auth.andand.user
+        user = api_client_auth.user
+        api_client = api_client_auth.api_client
+      else
+        # Token seems valid, but points to a non-existent (deleted?) user.
+        api_client_auth = nil
+      end
+    end
+    Thread.current[:api_client_ip_address] = remote_ip
+    Thread.current[:api_client_authorization] = api_client_auth
+    Thread.current[:api_client_uuid] = api_client.andand.uuid
+    Thread.current[:api_client] = api_client
+    Thread.current[:user] = user
+    if api_client_auth
+      api_client_auth.last_used_at = Time.now
+      api_client_auth.last_used_by_ip_address = remote_ip.to_s
+      api_client_auth.save validate: false
+    end
+
+    @app.call env if @app
+  end
+end
diff --git a/services/api/app/middlewares/rack_socket.rb b/services/api/app/middlewares/rack_socket.rb
new file mode 100644 (file)
index 0000000..19350c4
--- /dev/null
@@ -0,0 +1,86 @@
+require 'rack'
+require 'faye/websocket'
+require 'eventmachine'
+
+# A Rack middleware to handle inbound websocket connection requests and hand
+# them over to the faye websocket library.
+class RackSocket
+
+  DEFAULT_ENDPOINT  = '/websocket'
+
+  # Stop EventMachine on signal, this should give it a chance to to unwind any
+  # open connections.
+  def die_gracefully_on_signal
+    Signal.trap("INT") { EM.stop }
+    Signal.trap("TERM") { EM.stop }
+  end
+
+  # Create a new RackSocket handler
+  # +app+  The next layer of the Rack stack.
+  #
+  # Accepts options:
+  # +:handler+ (Required) A class to handle new connections.  #initialize will
+  # call handler.new to create the actual handler instance object.  When a new
+  # websocket connection is established, #on_connect on the handler instance
+  # object will be called with the new connection.
+  #
+  # +:mount+ The HTTP request path that will be recognized for websocket
+  # connect requests, defaults to '/websocket'.
+  #
+  # +:websocket_only+  If true, the server will only handle websocket requests,
+  # and all other requests will result in an error.  If false, unhandled
+  # non-websocket requests will be passed along on to 'app' in the usual Rack
+  # way.
+  def initialize(app = nil, options = nil)
+    @app = app if app.respond_to?(:call)
+    @options = [app, options].grep(Hash).first || {}
+    @endpoint = @options[:mount] || DEFAULT_ENDPOINT
+    @websocket_only = @options[:websocket_only] || false
+
+    # from https://gist.github.com/eatenbyagrue/1338545#file-eventmachine-rb
+    if defined?(PhusionPassenger)
+      PhusionPassenger.on_event(:starting_worker_process) do |forked|
+        # for passenger, we need to avoid orphaned threads
+        if forked && EM.reactor_running?
+          EM.stop
+        end
+        Thread.new {
+          EM.run
+        }
+        die_gracefully_on_signal
+      end
+    else
+      # faciliates debugging
+      Thread.abort_on_exception = true
+      # just spawn a thread and start it up
+      Thread.new {
+        EM.run
+      }
+    end
+
+    # Create actual handler instance object from handler class.
+    @handler = @options[:handler].new
+  end
+
+  # Handle websocket connection request, or pass on to the next middleware
+  # supplied in +app+ initialize (unless +:websocket_only+ option is true, in
+  # which case return an error response.)
+  # +env+ the Rack environment with information about the request.
+  def call env
+    request = Rack::Request.new(env)
+    if request.path_info == @endpoint and Faye::WebSocket.websocket?(env)
+      ws = Faye::WebSocket.new(env, nil, :ping => 30)
+
+      # Notify handler about new connection
+      @handler.on_connect ws
+
+      # Return async Rack response
+      ws.rack_response
+    elsif not @websocket_only
+      @app.call env
+    else
+      [406, {"Content-Type" => "text/plain"}, ["Only websocket connections are permitted on this port."]]
+    end
+  end
+
+end
diff --git a/services/api/app/models/.gitkeep b/services/api/app/models/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/api/app/models/api_client.rb b/services/api/app/models/api_client.rb
new file mode 100644 (file)
index 0000000..75a800b
--- /dev/null
@@ -0,0 +1,12 @@
+class ApiClient < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  has_many :api_client_authorizations
+
+  api_accessible :user, extend: :common do |t|
+    t.add :name
+    t.add :url_prefix
+    t.add :is_trusted
+  end
+end
diff --git a/services/api/app/models/api_client_authorization.rb b/services/api/app/models/api_client_authorization.rb
new file mode 100644 (file)
index 0000000..5817ff6
--- /dev/null
@@ -0,0 +1,98 @@
+class ApiClientAuthorization < ArvadosModel
+  include KindAndEtag
+  include CommonApiTemplate
+
+  belongs_to :api_client
+  belongs_to :user
+  after_initialize :assign_random_api_token
+  serialize :scopes, Array
+
+  api_accessible :user, extend: :common do |t|
+    t.add :owner_uuid
+    t.add :user_id
+    t.add :api_client_id
+    t.add :api_token
+    t.add :created_by_ip_address
+    t.add :default_owner_uuid
+    t.add :expires_at
+    t.add :last_used_at
+    t.add :last_used_by_ip_address
+    t.add :scopes
+  end
+
+  UNLOGGED_CHANGES = ['last_used_at', 'last_used_by_ip_address', 'updated_at']
+
+  def assign_random_api_token
+    self.api_token ||= rand(2**256).to_s(36)
+  end
+
+  def owner_uuid
+    self.user.andand.uuid
+  end
+  def owner_uuid_was
+    self.user_id_changed? ? User.where(id: self.user_id_was).first.andand.uuid : self.user.andand.uuid
+  end
+  def owner_uuid_changed?
+    self.user_id_changed?
+  end
+
+  def uuid
+    self.api_token
+  end
+  def uuid=(x) end
+  def uuid_was
+    self.api_token_was
+  end
+  def uuid_changed?
+    self.api_token_changed?
+  end
+
+  def modified_by_client_uuid
+    nil
+  end
+  def modified_by_client_uuid=(x) end
+
+  def modified_by_user_uuid
+    nil
+  end
+  def modified_by_user_uuid=(x) end
+
+  def modified_at
+    nil
+  end
+  def modified_at=(x) end
+
+  def scopes_allow?(req_s)
+    scopes.each do |scope|
+      return true if (scope == 'all') or (scope == req_s) or
+        ((scope.end_with? '/') and (req_s.start_with? scope))
+    end
+    false
+  end
+
+  def scopes_allow_request?(request)
+    scopes_allow? [request.request_method, request.path].join(' ')
+  end
+
+  def logged_attributes
+    attrs = attributes.dup
+    attrs.delete('api_token')
+    attrs
+  end
+
+  protected
+
+  def permission_to_create
+    current_user.andand.is_admin or (current_user.andand.id == self.user_id)
+  end
+
+  def permission_to_update
+    (permission_to_create and
+     not self.user_id_changed? and
+     not self.owner_uuid_changed?)
+  end
+
+  def log_update
+    super unless (changed - UNLOGGED_CHANGES).empty?
+  end
+end
diff --git a/services/api/app/models/arvados_model.rb b/services/api/app/models/arvados_model.rb
new file mode 100644 (file)
index 0000000..a170fb9
--- /dev/null
@@ -0,0 +1,557 @@
+require 'has_uuid'
+
+class ArvadosModel < ActiveRecord::Base
+  self.abstract_class = true
+
+  include CurrentApiClient      # current_user, current_api_client, etc.
+
+  attr_protected :created_at
+  attr_protected :modified_by_user_uuid
+  attr_protected :modified_by_client_uuid
+  attr_protected :modified_at
+  after_initialize :log_start_state
+  before_save :ensure_permission_to_save
+  before_save :ensure_owner_uuid_is_permitted
+  before_save :ensure_ownership_path_leads_to_user
+  before_destroy :ensure_owner_uuid_is_permitted
+  before_destroy :ensure_permission_to_destroy
+  before_create :update_modified_by_fields
+  before_update :maybe_update_modified_by_fields
+  after_create :log_create
+  after_update :log_update
+  after_destroy :log_destroy
+  after_find :convert_serialized_symbols_to_strings
+  before_validation :normalize_collection_uuids
+  validate :ensure_serialized_attribute_type
+  validate :ensure_valid_uuids
+
+  # Note: This only returns permission links. It does not account for
+  # permissions obtained via user.is_admin or
+  # user.uuid==object.owner_uuid.
+  has_many :permissions, :foreign_key => :head_uuid, :class_name => 'Link', :primary_key => :uuid, :conditions => "link_class = 'permission'"
+
+  class PermissionDeniedError < StandardError
+    def http_status
+      403
+    end
+  end
+
+  class AlreadyLockedError < StandardError
+    def http_status
+      403
+    end
+  end
+
+  class UnauthorizedError < StandardError
+    def http_status
+      401
+    end
+  end
+
+  def self.kind_class(kind)
+    kind.match(/^arvados\#(.+)$/)[1].classify.safe_constantize rescue nil
+  end
+
+  def href
+    "#{current_api_base}/#{self.class.to_s.pluralize.underscore}/#{self.uuid}"
+  end
+
+  def self.searchable_columns operator
+    textonly_operator = !operator.match(/[<=>]/)
+    self.columns.select do |col|
+      case col.type
+      when :string, :text
+        true
+      when :datetime, :integer, :boolean
+        !textonly_operator
+      else
+        false
+      end
+    end.map(&:name)
+  end
+
+  def self.attribute_column attr
+    self.columns.select { |col| col.name == attr.to_s }.first
+  end
+
+  def self.attributes_required_columns
+    # This method returns a hash.  Each key is the name of an API attribute,
+    # and it's mapped to a list of database columns that must be fetched
+    # to generate that attribute.
+    # This implementation generates a simple map of attributes to
+    # matching column names.  Subclasses can override this method
+    # to specify that method-backed API attributes need to fetch
+    # specific columns from the database.
+    all_columns = columns.map(&:name)
+    api_column_map = Hash.new { |hash, key| hash[key] = [] }
+    methods.grep(/^api_accessible_\w+$/).each do |method_name|
+      next if method_name == :api_accessible_attributes
+      send(method_name).each_pair do |api_attr_name, col_name|
+        col_name = col_name.to_s
+        if all_columns.include?(col_name)
+          api_column_map[api_attr_name.to_s] |= [col_name]
+        end
+      end
+    end
+    api_column_map
+  end
+
+  # If current user can manage the object, return an array of uuids of
+  # users and groups that have permission to write the object. The
+  # first two elements are always [self.owner_uuid, current user's
+  # uuid].
+  #
+  # If current user can write but not manage the object, return
+  # [self.owner_uuid, current user's uuid].
+  #
+  # If current user cannot write this object, just return
+  # [self.owner_uuid].
+  def writable_by
+    unless (owner_uuid == current_user.uuid or
+            current_user.is_admin or
+            (current_user.groups_i_can(:manage) & [uuid, owner_uuid]).any?)
+      if ((current_user.groups_i_can(:write) + [current_user.uuid]) &
+          [uuid, owner_uuid]).any?
+        return [owner_uuid, current_user.uuid]
+      else
+        return [owner_uuid]
+      end
+    end
+    [owner_uuid, current_user.uuid] + permissions.collect do |p|
+      if ['can_write', 'can_manage'].index p.name
+        p.tail_uuid
+      end
+    end.compact.uniq
+  end
+
+  # Return a query with read permissions restricted to the union of of the
+  # permissions of the members of users_list, i.e. if something is readable by
+  # any user in users_list, it will be readable in the query returned by this
+  # function.
+  def self.readable_by(*users_list)
+    # Get rid of troublesome nils
+    users_list.compact!
+
+    # Load optional keyword arguments, if they exist.
+    if users_list.last.is_a? Hash
+      kwargs = users_list.pop
+    else
+      kwargs = {}
+    end
+
+    # Check if any of the users are admin.  If so, we're done.
+    if users_list.select { |u| u.is_admin }.any?
+      return self
+    end
+
+    # Collect the uuids for each user and any groups readable by each user.
+    user_uuids = users_list.map { |u| u.uuid }
+    uuid_list = user_uuids + users_list.flat_map { |u| u.groups_i_can(:read) }
+    sql_conds = []
+    sql_params = []
+    sql_table = kwargs.fetch(:table_name, table_name)
+    or_object_uuid = ''
+
+    # This row is owned by a member of users_list, or owned by a group
+    # readable by a member of users_list
+    # or
+    # This row uuid is the uuid of a member of users_list
+    # or
+    # A permission link exists ('write' and 'manage' implicitly include
+    # 'read') from a member of users_list, or a group readable by users_list,
+    # to this row, or to the owner of this row (see join() below).
+    sql_conds += ["#{sql_table}.uuid in (?)"]
+    sql_params += [user_uuids]
+
+    if uuid_list.any?
+      sql_conds += ["#{sql_table}.owner_uuid in (?)"]
+      sql_params += [uuid_list]
+
+      sanitized_uuid_list = uuid_list.
+        collect { |uuid| sanitize(uuid) }.join(', ')
+      permitted_uuids = "(SELECT head_uuid FROM links WHERE link_class='permission' AND tail_uuid IN (#{sanitized_uuid_list}))"
+      sql_conds += ["#{sql_table}.uuid IN #{permitted_uuids}"]
+    end
+
+    if sql_table == "links" and users_list.any?
+      # This row is a 'permission' or 'resources' link class
+      # The uuid for a member of users_list is referenced in either the head
+      # or tail of the link
+      sql_conds += ["(#{sql_table}.link_class in (#{sanitize 'permission'}, #{sanitize 'resources'}) AND (#{sql_table}.head_uuid IN (?) OR #{sql_table}.tail_uuid IN (?)))"]
+      sql_params += [user_uuids, user_uuids]
+    end
+
+    if sql_table == "logs" and users_list.any?
+      # Link head points to the object described by this row
+      sql_conds += ["#{sql_table}.object_uuid IN #{permitted_uuids}"]
+
+      # This object described by this row is owned by this user, or owned by a group readable by this user
+      sql_conds += ["#{sql_table}.object_owner_uuid in (?)"]
+      sql_params += [uuid_list]
+    end
+
+    # Link head points to this row, or to the owner of this row (the
+    # thing to be read)
+    #
+    # Link tail originates from this user, or a group that is readable
+    # by this user (the identity with authorization to read)
+    #
+    # Link class is 'permission' ('write' and 'manage' implicitly
+    # include 'read')
+    where(sql_conds.join(' OR '), *sql_params)
+  end
+
+  def logged_attributes
+    attributes
+  end
+
+  protected
+
+  # Validation: walk the owner_uuid chain and require that it terminates
+  # at a User, refusing ownership cycles along the way.  Only runs when
+  # the record is new or owner_uuid changed.  Returns false (and records
+  # an error) on a broken or cyclic path, true otherwise.
+  def ensure_ownership_path_leads_to_user
+    if new_record? or owner_uuid_changed?
+      # Remember every uuid visited so far, to detect cycles.
+      uuid_in_path = {owner_uuid => true, uuid => true}
+      x = owner_uuid
+      while (owner_class = ArvadosModel::resource_class_for_uuid(x)) != User
+        begin
+          if x == uuid
+            # Test for cycles with the new version, not the DB contents
+            x = owner_uuid
+          elsif !owner_class.respond_to? :find_by_uuid
+            raise ActiveRecord::RecordNotFound.new
+          else
+            x = owner_class.find_by_uuid(x).owner_uuid
+          end
+        rescue ActiveRecord::RecordNotFound => e
+          errors.add :owner_uuid, "is not owned by any user: #{e}"
+          return false
+        end
+        if uuid_in_path[x]
+          if x == owner_uuid
+            errors.add :owner_uuid, "would create an ownership cycle"
+          else
+            errors.add :owner_uuid, "has an ownership cycle"
+          end
+          return false
+        end
+        uuid_in_path[x] = true
+      end
+    end
+    true
+  end
+
+  # Callback: default a missing owner_uuid to the current user, require
+  # the owner to be a User or Group, and require write permission on both
+  # the old and the new owner (exceptions documented inline).  Raises
+  # PermissionDeniedError on failure.
+  def ensure_owner_uuid_is_permitted
+    raise PermissionDeniedError if !current_user
+
+    if new_record? and respond_to? :owner_uuid=
+      self.owner_uuid ||= current_user.uuid
+    end
+
+    if self.owner_uuid.nil?
+      errors.add :owner_uuid, "cannot be nil"
+      raise PermissionDeniedError
+    end
+
+    rsc_class = ArvadosModel::resource_class_for_uuid owner_uuid
+    unless rsc_class == User or rsc_class == Group
+      errors.add :owner_uuid, "must be set to User or Group"
+      raise PermissionDeniedError
+    end
+
+    # Verify "write" permission on old owner
+    # default fail unless one of:
+    # owner_uuid did not change
+    # previous owner_uuid is nil
+    # current user is the old owner
+    # current user is this object
+    # current user can_write old owner
+    unless !owner_uuid_changed? or
+        owner_uuid_was.nil? or
+        current_user.uuid == self.owner_uuid_was or
+        current_user.uuid == self.uuid or
+        current_user.can? write: self.owner_uuid_was
+      logger.warn "User #{current_user.uuid} tried to modify #{self.class.to_s} #{uuid} but does not have permission to write old owner_uuid #{owner_uuid_was}"
+      errors.add :owner_uuid, "cannot be changed without write permission on old owner"
+      raise PermissionDeniedError
+    end
+
+    # Verify "write" permission on new owner
+    # default fail unless one of:
+    # current_user is this object
+    # current user can_write new owner
+    unless current_user == self or current_user.can? write: owner_uuid
+      logger.warn "User #{current_user.uuid} tried to modify #{self.class.to_s} #{uuid} but does not have permission to write new owner_uuid #{owner_uuid}"
+      errors.add :owner_uuid, "cannot be changed without write permission on new owner"
+      raise PermissionDeniedError
+    end
+
+    true
+  end
+
+  def ensure_permission_to_save
+    unless (new_record? ? permission_to_create : permission_to_update)
+      raise PermissionDeniedError
+    end
+  end
+
+  def permission_to_create
+    current_user.andand.is_active
+  end
+
+  # Default update permission: the requesting user must be logged in and
+  # active, and only admins may change an object's uuid.  Every denial
+  # logs the reason.
+  def permission_to_update
+    if !current_user
+      logger.warn "Anonymous user tried to update #{self.class.to_s} #{self.uuid_was}"
+      return false
+    end
+    if !current_user.is_active
+      logger.warn "Inactive user #{current_user.uuid} tried to update #{self.class.to_s} #{self.uuid_was}"
+      return false
+    end
+    # Admins may do anything, including changing the uuid.
+    return true if current_user.is_admin
+    if self.uuid_changed?
+      logger.warn "User #{current_user.uuid} tried to change uuid of #{self.class.to_s} #{self.uuid_was} to #{self.uuid}"
+      return false
+    end
+    return true
+  end
+
+  def ensure_permission_to_destroy
+    raise PermissionDeniedError unless permission_to_destroy
+  end
+
+  def permission_to_destroy
+    permission_to_update
+  end
+
+  def maybe_update_modified_by_fields
+    update_modified_by_fields if self.changed? or self.new_record?
+    true
+  end
+
+  def update_modified_by_fields
+    self.updated_at = Time.now
+    self.owner_uuid ||= current_default_owner if self.respond_to? :owner_uuid=
+    self.modified_at = Time.now
+    self.modified_by_user_uuid = current_user ? current_user.uuid : nil
+    self.modified_by_client_uuid = current_api_client ? current_api_client.uuid : nil
+    true
+  end
+
+  def self.has_symbols? x
+    if x.is_a? Hash
+      x.each do |k,v|
+        return true if has_symbols?(k) or has_symbols?(v)
+      end
+      false
+    elsif x.is_a? Array
+      x.each do |k|
+        return true if has_symbols?(k)
+      end
+      false
+    else
+      (x.class == Symbol)
+    end
+  end
+
+  def self.recursive_stringify x
+    if x.is_a? Hash
+      Hash[x.collect do |k,v|
+             [recursive_stringify(k), recursive_stringify(v)]
+           end]
+    elsif x.is_a? Array
+      x.collect do |k|
+        recursive_stringify k
+      end
+    elsif x.is_a? Symbol
+      x.to_s
+    else
+      x
+    end
+  end
+
+  # Validation for serialized columns: reject values whose Ruby class
+  # differs from the declared serialization type, and values containing
+  # symbols (which would not round-trip through the database).
+  def ensure_serialized_attribute_type
+    # Specifying a type in the "serialize" declaration causes rails to
+    # raise an exception if a different data type is retrieved from
+    # the database during load().  The validation preventing such
+    # crash-inducing records from being inserted in the database in
+    # the first place seems to have been left as an exercise to the
+    # developer.
+    self.class.serialized_attributes.each do |colname, attr|
+      # attr.object_class is set when "serialize :col, Type" declared a type.
+      if attr.object_class
+        if self.attributes[colname].class != attr.object_class
+          self.errors.add colname.to_sym, "must be a #{attr.object_class.to_s}, not a #{self.attributes[colname].class.to_s}"
+        elsif self.class.has_symbols? attributes[colname]
+          self.errors.add colname.to_sym, "must not contain symbols: #{attributes[colname].inspect}"
+        end
+      end
+    end
+  end
+
+  # Repair step for serialized columns that already contain symbols.
+  def convert_serialized_symbols_to_strings
+    # ensure_serialized_attribute_type should prevent symbols from
+    # getting into the database in the first place. If someone managed
+    # to get them into the database (perhaps using an older version)
+    # we'll convert symbols to strings when loading from the
+    # database. (Otherwise, loading and saving an object with existing
+    # symbols in a serialized field will crash.)
+    #
+    # NOTE(review): the in-place assignment to attributes[colname]
+    # already stringifies the stored value, so the second
+    # recursive_stringify inside the writer call looks redundant --
+    # confirm before simplifying.
+    self.class.serialized_attributes.each do |colname, attr|
+      if self.class.has_symbols? attributes[colname]
+        attributes[colname] = self.class.recursive_stringify attributes[colname]
+        self.send(colname + '=',
+                  self.class.recursive_stringify(attributes[colname]))
+      end
+    end
+  end
+
+  def foreign_key_attributes
+    attributes.keys.select { |a| a.match /_uuid$/ }
+  end
+
+  def skip_uuid_read_permission_check
+    %w(modified_by_client_uuid)
+  end
+
+  def skip_uuid_existence_check
+    []
+  end
+
+  # Rewrite any *_uuid attribute that looks like a bare content-address
+  # locator (32+ hex chars plus optional +hints) into Collection's
+  # canonical form.  Values that cannot be normalized are deliberately
+  # left unchanged (best effort).
+  def normalize_collection_uuids
+    foreign_key_attributes.each do |attr|
+      attr_value = send attr
+      if attr_value.is_a? String and
+          attr_value.match /^[0-9a-f]{32,}(\+[@\w]+)*$/
+        begin
+          send "#{attr}=", Collection.normalize_uuid(attr_value)
+        rescue
+          # TODO: abort instead of silently accepting unnormalizable value?
+        end
+      end
+    end
+  end
+
+  # Lazily built map of uuid type prefix => model class, shared by all
+  # subclasses via a class variable.  Covers every non-abstract
+  # ActiveRecord descendant that defines uuid_prefix.
+  @@prefixes_hash = nil
+  def self.uuid_prefixes
+    unless @@prefixes_hash
+      @@prefixes_hash = {}
+      ActiveRecord::Base.descendants.reject(&:abstract_class?).each do |k|
+        if k.respond_to?(:uuid_prefix)
+          @@prefixes_hash[k.uuid_prefix] = k
+        end
+      end
+    end
+    @@prefixes_hash
+  end
+
+  def self.uuid_like_pattern
+    "_____-#{uuid_prefix}-_______________"
+  end
+
+  def self.uuid_regex
+    %r/[a-z0-9]{5}-#{uuid_prefix}-[a-z0-9]{15}/
+  end
+
+  # Validation: every new-or-changed *_uuid attribute must reference an
+  # object that exists and is readable by the current user, unless the
+  # attribute is on one of the skip lists or is the system user uuid.
+  def ensure_valid_uuids
+    specials = [system_user_uuid]
+
+    foreign_key_attributes.each do |attr|
+      if new_record? or send (attr + "_changed?")
+        next if skip_uuid_existence_check.include? attr
+        attr_value = send attr
+        next if specials.include? attr_value
+        if attr_value
+          if (r = ArvadosModel::resource_class_for_uuid attr_value)
+            unless skip_uuid_read_permission_check.include? attr
+              # Restrict the existence check to rows the current user can read.
+              r = r.readable_by(current_user)
+            end
+            if r.where(uuid: attr_value).count == 0
+              errors.add(attr, "'#{attr_value}' not found")
+            end
+          end
+        end
+      end
+    end
+  end
+
+  # Lightweight stand-in "model" for plain email addresses, returned by
+  # resource_class_for_uuid when a string looks like user@host.  It
+  # supplies just enough of the model-class interface (kind,
+  # readable_by, where) for code expecting a model class to use it.
+  class Email
+    def self.kind
+      "email"
+    end
+
+    def kind
+      self.class.kind
+    end
+
+    # Email addresses are readable by everyone; ignore the user list.
+    def self.readable_by (*u)
+      self
+    end
+
+    # Mimic an ActiveRecord relation: one pseudo-row echoing the uuid.
+    def self.where (u)
+      [{:uuid => u[:uuid]}]
+    end
+  end
+
+  def self.resource_class_for_uuid(uuid)
+    if uuid.is_a? ArvadosModel
+      return uuid.class
+    end
+    unless uuid.is_a? String
+      return nil
+    end
+    resource_class = nil
+
+    Rails.application.eager_load!
+    uuid.match HasUuid::UUID_REGEX do |re|
+      return uuid_prefixes[re[1]] if uuid_prefixes[re[1]]
+    end
+
+    if uuid.match /.+@.+/
+      return Email
+    end
+
+    nil
+  end
+
+  # ArvadosModel.find_by_uuid needs extra magic to allow it to return
+  # an object in any class.
+  def self.find_by_uuid uuid
+    if self == ArvadosModel
+      # If called directly as ArvadosModel.find_by_uuid rather than via subclass,
+      # delegate to the appropriate subclass based on the given uuid.
+      self.resource_class_for_uuid(uuid).find_by_uuid(uuid)
+    else
+      super
+    end
+  end
+
+  # Snapshot the current etag and attributes so a later log_update /
+  # log_destroy can record the "old" state.
+  def log_start_state
+    @old_etag = etag
+    @old_attributes = logged_attributes
+  end
+
+  def log_change(event_type)
+    log = Log.new(event_type: event_type).fill_object(self)
+    yield log
+    log.save!
+    log_start_state
+  end
+
+  # Audit-log a newly created object; there is no "old" state.
+  def log_create
+    log_change('create') do |log|
+      log.fill_properties('old', nil, nil)
+      log.update_to self
+    end
+  end
+
+  # Audit-log an update, using the snapshot taken at log_start_state.
+  def log_update
+    log_change('update') do |log|
+      log.fill_properties('old', @old_etag, @old_attributes)
+      log.update_to self
+    end
+  end
+
+  # Audit-log a destroy; the "new" state is nil.
+  def log_destroy
+    log_change('destroy') do |log|
+      log.fill_properties('old', @old_etag, @old_attributes)
+      log.update_to nil
+    end
+  end
+end
diff --git a/services/api/app/models/authorized_key.rb b/services/api/app/models/authorized_key.rb
new file mode 100644 (file)
index 0000000..b156a1d
--- /dev/null
@@ -0,0 +1,51 @@
+# An SSH public key registered for a user account.
+class AuthorizedKey < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  before_create :permission_to_set_authorized_user_uuid
+  before_update :permission_to_set_authorized_user_uuid
+
+  belongs_to :authorized_user, :foreign_key => :authorized_user_uuid, :class_name => 'User', :primary_key => :uuid
+
+  validate :public_key_must_be_unique
+
+  api_accessible :user, extend: :common do |t|
+    t.add :name
+    t.add :key_type
+    t.add :authorized_user_uuid
+    t.add :public_key
+    t.add :expires_at
+  end
+
+  # Callback: users may attach keys only to their own account; admins may
+  # attach keys to anyone.  Returning false aborts the create/update.
+  def permission_to_set_authorized_user_uuid
+    # Anonymous users cannot do anything here
+    return false if !current_user
+
+    # Administrators can attach a key to any user account
+    return true if current_user.is_admin
+
+    # All users can attach keys to their own accounts
+    return true if current_user.uuid == authorized_user_uuid
+
+    # Default = deny.
+    false
+  end
+
+  # Validation: public_key must parse as an SSH public key and must not
+  # already exist in another row.  Rails judges this validation by the
+  # errors added, not by the method's return value.
+  # NOTE(review): the LIKE query below does not exclude this record's own
+  # row, so re-saving an existing key may flag itself as a duplicate --
+  # confirm before relying on update behavior.
+  def public_key_must_be_unique
+    if self.public_key
+      #key = /^ssh-(rsa|dss) [A-Za-z0-9+\/=\+]+\b/.match(self.public_key)
+      valid_key = SSHKey.valid_ssh_public_key? self.public_key
+
+      if not valid_key
+        errors.add(:public_key, "does not appear to be a valid ssh-rsa or dsa public key")
+      else
+        # Valid if no other rows have this public key
+        if self.class.where('public_key like ?', "%#{self.public_key}%").any?
+          errors.add(:public_key, "already exists in the database, use a different key.")
+          return false
+        end
+      end
+    end
+    return true
+  end
+end
diff --git a/services/api/app/models/blob.rb b/services/api/app/models/blob.rb
new file mode 100644 (file)
index 0000000..7d16048
--- /dev/null
@@ -0,0 +1,113 @@
+# Utility class for signing and verifying Keep blob locators.  Not an
+# ActiveRecord model: it wraps a locator string and provides the HMAC
+# signature scheme used to authorize blob reads.
+class Blob
+
+  def initialize locator
+    @locator = locator
+  end
+
+  # True if the locator names the zero-length blob (the MD5 of zero
+  # bytes), with or without size/hints appended.
+  def empty?
+    !!@locator.match(/^d41d8cd98f00b204e9800998ecf8427e(\+.*)?$/)
+  end
+
+  # In order to get a Blob from Keep, you have to prove either
+  # [a] you have recently written it to Keep yourself, or
+  # [b] apiserver has recently decided that you should be able to read it
+  #
+  # To ensure that the requestor of a blob is authorized to read it,
+  # Keep requires clients to timestamp the blob locator with an expiry
+  # time, and to sign the timestamped locator with their API token.
+  #
+  # A signed blob locator has the form:
+  #     locator_hash +A blob_signature @ timestamp
+  # where the timestamp is a Unix time expressed as a hexadecimal value,
+  # and the blob_signature is the signed locator_hash + API token + timestamp.
+  # 
+  class InvalidSignatureError < StandardError
+  end
+
+  # Blob.sign_locator: return a signed and timestamped blob locator.
+  #
+  # The 'opts' argument should include:
+  #   [required] :key       - the Arvados server-side blobstore key
+  #   [required] :api_token - user's API token
+  #   [optional] :ttl       - number of seconds before signature should expire
+  #   [optional] :expire    - unix timestamp when signature should expire
+  #
+  # :ttl and :expire are mutually exclusive; with neither, the default
+  # TTL is 1209600 seconds (two weeks).
+  def self.sign_locator blob_locator, opts
+    # We only use the hash portion for signatures.
+    blob_hash = blob_locator.split('+').first
+
+    # Generate an expiry timestamp (seconds after epoch, base 16)
+    if opts[:expire]
+      if opts[:ttl]
+        raise "Cannot specify both :ttl and :expire options"
+      end
+      timestamp = opts[:expire]
+    else
+      timestamp = Time.now.to_i + (opts[:ttl] || 1209600)
+    end
+    timestamp_hex = timestamp.to_s(16)
+    # => "53163cb4"
+
+    # Generate a signature.
+    signature =
+      generate_signature opts[:key], blob_hash, opts[:api_token], timestamp_hex
+
+    blob_locator + '+A' + signature + '@' + timestamp_hex
+  end
+
+  # Blob.verify_signature
+  #   Safely verify the signature on a blob locator.
+  #   Return value: true if the locator has a valid signature, false otherwise
+  #   Arguments: signed_blob_locator, opts
+  #
+  def self.verify_signature *args
+    begin
+      self.verify_signature! *args
+      true
+    rescue Blob::InvalidSignatureError
+      false
+    end
+  end
+
+  # Blob.verify_signature!
+  #   Verify the signature on a blob locator.
+  #   Return value: true if the locator has a valid signature
+  #   Arguments: signed_blob_locator, opts
+  #   Exceptions:
+  #     Blob::InvalidSignatureError if the blob locator does not include a
+  #     valid signature
+  #
+  def self.verify_signature! signed_blob_locator, opts
+    blob_hash = signed_blob_locator.split('+').first
+    given_signature, timestamp = signed_blob_locator.
+      split('+A').last.
+      split('+').first.
+      split('@')
+
+    if !timestamp
+      raise Blob::InvalidSignatureError.new 'No signature provided.'
+    end
+    if !timestamp.match /^[\da-f]+$/
+      raise Blob::InvalidSignatureError.new 'Timestamp is not a base16 number.'
+    end
+    if timestamp.to_i(16) < Time.now.to_i
+      raise Blob::InvalidSignatureError.new 'Signature expiry time has passed.'
+    end
+
+    my_signature =
+      generate_signature opts[:key], blob_hash, opts[:api_token], timestamp
+
+    # NOTE(review): plain != is not a constant-time comparison; consider a
+    # timing-safe compare for signature checks.
+    if my_signature != given_signature
+      raise Blob::InvalidSignatureError.new 'Signature is invalid.'
+    end
+
+    true
+  end
+
+  # HMAC-SHA1 (hex) over "blob_hash@api_token@timestamp", keyed with the
+  # server-side blobstore signing key.
+  def self.generate_signature key, blob_hash, api_token, timestamp
+    OpenSSL::HMAC.hexdigest('sha1', key,
+                            [blob_hash,
+                             api_token,
+                             timestamp].join('@'))
+  end
+end
diff --git a/services/api/app/models/collection.rb b/services/api/app/models/collection.rb
new file mode 100644 (file)
index 0000000..66d7add
--- /dev/null
@@ -0,0 +1,287 @@
+require 'arvados/keep'
+
+# A Keep collection: a manifest of data blocks and file names plus
+# metadata.  The manifest is stored with permission signatures stripped
+# (see strip_manifest_text); signatures are re-applied per client in
+# signed_manifest_text.
+class Collection < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+
+  before_validation :check_encoding
+  before_validation :check_signatures
+  before_validation :strip_manifest_text
+  before_validation :set_portable_data_hash
+  validate :ensure_hash_matches_manifest_text
+
+  # Query only undeleted collections by default.
+  default_scope where("expires_at IS NULL or expires_at > CURRENT_TIMESTAMP")
+
+  api_accessible :user, extend: :common do |t|
+    t.add :name
+    t.add :description
+    t.add :properties
+    t.add :portable_data_hash
+    t.add :signed_manifest_text, as: :manifest_text
+  end
+
+  def self.attributes_required_columns
+    # If we don't list this explicitly, the params[:select] code gets
+    # confused by the way we expose signed_manifest_text as
+    # manifest_text in the API response, and never let clients select
+    # the manifest_text column.
+    super.merge('manifest_text' => ['manifest_text'])
+  end
+
+  # Validation: every signed block locator in a changed manifest must
+  # carry a verifiable permission signature (admins are exempt).
+  def check_signatures
+    return false if self.manifest_text.nil?
+
+    return true if current_user.andand.is_admin
+
+    # Provided the manifest_text hasn't changed materially since an
+    # earlier validation, it's safe to pass this validation on
+    # subsequent passes without checking any signatures. This is
+    # important because the signatures have probably been stripped off
+    # by the time we get to a second validation pass!
+    return true if @signatures_checked and @signatures_checked == compute_pdh
+
+    if self.manifest_text_changed?
+      # Check permissions on the collection manifest.
+      # If any signature cannot be verified, raise PermissionDeniedError
+      # which will return 403 Permission denied to the client.
+      api_token = current_api_client_authorization.andand.api_token
+      signing_opts = {
+        key: Rails.configuration.blob_signing_key,
+        api_token: api_token,
+        ttl: Rails.configuration.blob_signing_ttl,
+      }
+      self.manifest_text.lines.each do |entry|
+        entry.split[1..-1].each do |tok|
+          if /^[[:digit:]]+:[[:digit:]]+:/.match tok
+            # This is a filename token, not a blob locator. Note that we
+            # keep checking tokens after this, even though manifest
+            # format dictates that all subsequent tokens will also be
+            # filenames. Safety first!
+          elsif Blob.verify_signature tok, signing_opts
+            # OK.
+          elsif Keep::Locator.parse(tok).andand.signature
+            # Signature provided, but verify_signature did not like it.
+            logger.warn "Invalid signature on locator #{tok}"
+            raise ArvadosModel::PermissionDeniedError
+          elsif Rails.configuration.permit_create_collection_with_unsigned_manifest
+            # No signature provided, but we are running in insecure mode.
+            logger.debug "Missing signature on locator #{tok} ignored"
+          elsif Blob.new(tok).empty?
+            # No signature provided -- but no data to protect, either.
+          else
+            logger.warn "Missing signature on locator #{tok}"
+            raise ArvadosModel::PermissionDeniedError
+          end
+        end
+      end
+    end
+    # Remember the current content so later validation passes can skip
+    # the signature check (see above).
+    @signatures_checked = compute_pdh
+  end
+
+  def strip_manifest_text
+    if self.manifest_text_changed?
+      # Remove any permission signatures from the manifest.
+      self.class.munge_manifest_locators!(self[:manifest_text]) do |loc|
+        loc.without_signature.to_s
+      end
+    end
+    true
+  end
+
+  # Compute the portable data hash (md5 + size of the signature-stripped
+  # manifest) when it is missing or stale; if the client supplied one,
+  # canonicalize it and defer verification to
+  # ensure_hash_matches_manifest_text.
+  def set_portable_data_hash
+    if (portable_data_hash.nil? or
+        portable_data_hash == "" or
+        (manifest_text_changed? and !portable_data_hash_changed?))
+      @need_pdh_validation = false
+      self.portable_data_hash = compute_pdh
+    elsif portable_data_hash_changed?
+      @need_pdh_validation = true
+      begin
+        loc = Keep::Locator.parse!(self.portable_data_hash)
+        loc.strip_hints!
+        if loc.size
+          self.portable_data_hash = loc.to_s
+        else
+          self.portable_data_hash = "#{loc.hash}+#{portable_manifest_text.bytesize}"
+        end
+      rescue ArgumentError => e
+        errors.add(:portable_data_hash, "#{e}")
+        return false
+      end
+    end
+    true
+  end
+
+  def ensure_hash_matches_manifest_text
+    return true unless manifest_text_changed? or portable_data_hash_changed?
+    # No need verify it if :set_portable_data_hash just computed it!
+    return true if not @need_pdh_validation
+    expect_pdh = compute_pdh
+    if expect_pdh != portable_data_hash
+      errors.add(:portable_data_hash,
+                 "does not match computed hash #{expect_pdh}")
+      return false
+    end
+  end
+
+  # Validation: manifest_text must be valid UTF-8 (possibly after
+  # relabeling a mislabeled-but-valid byte sequence).
+  def check_encoding
+    if manifest_text.encoding.name == 'UTF-8' and manifest_text.valid_encoding?
+      true
+    else
+      begin
+        # If Ruby thinks the encoding is something else, like 7-bit
+        # ASCII, but its stored bytes are equal to the (valid) UTF-8
+        # encoding of the same string, we declare it to be a UTF-8
+        # string.
+        #
+        # NOTE(review): force_encoding mutates the attribute's string in
+        # place; the assignment below only creates a local variable, not
+        # an attribute write (self.manifest_text=).  It works today only
+        # because of the in-place mutation -- confirm before changing.
+        utf8 = manifest_text
+        utf8.force_encoding Encoding::UTF_8
+        if utf8.valid_encoding? and utf8 == manifest_text.encode(Encoding::UTF_8)
+          manifest_text = utf8
+          return true
+        end
+      rescue
+      end
+      errors.add :manifest_text, "must use UTF-8 encoding"
+      false
+    end
+  end
+
+  # Human-readable replication state: 'unconfirmed', 'degraded' (below
+  # the requested redundancy), 'OK' (confirmed within 7 days), or
+  # 'stale' (confirmation older than 7 days).
+  def redundancy_status
+    if redundancy_confirmed_as.nil?
+      'unconfirmed'
+    elsif redundancy_confirmed_as < redundancy
+      'degraded'
+    else
+      if redundancy_confirmed_at.nil?
+        'unconfirmed'
+      elsif Time.now - redundancy_confirmed_at < 7.days
+        'OK'
+      else
+        'stale'
+      end
+    end
+  end
+
+  # Manifest text with fresh permission signatures for the current
+  # client's API token (this is what the API exposes as manifest_text).
+  def signed_manifest_text
+    if has_attribute? :manifest_text
+      token = current_api_client_authorization.andand.api_token
+      @signed_manifest_text = self.class.sign_manifest manifest_text, token
+    end
+  end
+
+  # Return a copy of `manifest` with every locator signed for `token`,
+  # using the server's signing key and configured TTL.
+  def self.sign_manifest manifest, token
+    signing_opts = {
+      key: Rails.configuration.blob_signing_key,
+      api_token: token,
+      ttl: Rails.configuration.blob_signing_ttl,
+    }
+    m = manifest.dup
+    munge_manifest_locators!(m) do |loc|
+      Blob.sign_locator(loc.to_s, signing_opts)
+    end
+    return m
+  end
+
+  def self.munge_manifest_locators! manifest
+    # Given a manifest text and a block, yield each locator,
+    # and replace it with whatever the block returns.
+    # NOTE(review): the pattern requires at least one +component after
+    # the 32-hex hash, so a bare hash with no +size/+hint is not
+    # rewritten -- confirm that is intended.
+    manifest.andand.gsub!(/ [[:xdigit:]]{32}(\+[[:digit:]]+)?(\+\S+)/) do |word|
+      if loc = Keep::Locator.parse(word.strip)
+        " " + yield(loc)
+      else
+        " " + word
+      end
+    end
+  end
+
+  # Reduce a content-address locator to its canonical "hash+size" (or
+  # bare hash) form, raising on malformed input.
+  def self.normalize_uuid uuid
+    hash_part = nil
+    size_part = nil
+    uuid.split('+').each do |token|
+      if token.match /^[0-9a-f]{32,}$/
+        raise "uuid #{uuid} has multiple hash parts" if hash_part
+        hash_part = token
+      elsif token.match /^\d+$/
+        raise "uuid #{uuid} has multiple size parts" if size_part
+        size_part = token
+      end
+    end
+    raise "uuid #{uuid} has no hash part" if !hash_part
+    [hash_part, size_part].compact.join '+'
+  end
+
+  # Return array of Collection objects
+  def self.find_all_for_docker_image(search_term, search_tag=nil, readers=nil)
+    readers ||= [Thread.current[:user]]
+    base_search = Link.
+      readable_by(*readers).
+      readable_by(*readers, table_name: "collections").
+      joins("JOIN collections ON links.head_uuid = collections.uuid").
+      order("links.created_at DESC")
+
+    # If the search term is a Collection locator that contains one file
+    # that looks like a Docker image, return it.
+    if loc = Keep::Locator.parse(search_term)
+      loc.strip_hints!
+      coll_match = readable_by(*readers).where(portable_data_hash: loc.to_s).limit(1).first
+      if coll_match
+        # Check if the Collection contains exactly one file whose name
+        # looks like a saved Docker image.
+        manifest = Keep::Manifest.new(coll_match.manifest_text)
+        if manifest.exact_file_count?(1) and
+            (manifest.files[0][1] =~ /^[0-9A-Fa-f]{64}\.tar$/)
+          return [coll_match]
+        end
+      end
+    end
+
+    # Split a "repo:tag" search term unless a tag was given explicitly.
+    if search_tag.nil? and (n = search_term.index(":"))
+      search_tag = search_term[n+1..-1]
+      search_term = search_term[0..n-1]
+    end
+
+    # Find Collections with matching Docker image repository+tag pairs.
+    matches = base_search.
+      where(link_class: "docker_image_repo+tag",
+            name: "#{search_term}:#{search_tag || 'latest'}")
+
+    # If that didn't work, find Collections with matching Docker image hashes.
+    if matches.empty?
+      matches = base_search.
+        where("link_class = ? and links.name LIKE ?",
+              "docker_image_hash", "#{search_term}%")
+    end
+
+    # Generate an order key for each result.  We want to order the results
+    # so that anything with an image timestamp is considered more recent than
+    # anything without; then we use the link's created_at as a tiebreaker.
+    uuid_timestamps = {}
+    matches.all.map do |link|
+      uuid_timestamps[link.head_uuid] = [(-link.properties["image_timestamp"].to_datetime.to_i rescue 0),
+       -link.created_at.to_i]
+    end
+    Collection.where('uuid in (?)', uuid_timestamps.keys).sort_by { |c| uuid_timestamps[c.uuid] }
+  end
+
+  def self.for_latest_docker_image(search_term, search_tag=nil, readers=nil)
+    find_all_for_docker_image(search_term, search_tag, readers).first
+  end
+
+  protected
+
+  # Manifest text with each locator reduced to "hash+size" -- the
+  # canonical input to the portable data hash.
+  def portable_manifest_text
+    portable_manifest = self[:manifest_text].dup
+    self.class.munge_manifest_locators!(portable_manifest) do |loc|
+      loc.hash + '+' + loc.size.to_s
+    end
+    portable_manifest
+  end
+
+  # Portable data hash: md5 of the portable manifest plus its size.
+  def compute_pdh
+    portable_manifest = portable_manifest_text
+    (Digest::MD5.hexdigest(portable_manifest) +
+     '+' +
+     portable_manifest.bytesize.to_s)
+  end
+end
diff --git a/services/api/app/models/commit.rb b/services/api/app/models/commit.rb
new file mode 100644 (file)
index 0000000..0f62737
--- /dev/null
@@ -0,0 +1,156 @@
+# Cache of git commits in the repositories managed by this API server.
+# NOTE(review): the class methods below talk to git by setting
+# ENV['GIT_DIR'] process-wide, so they are not safe to run concurrently.
+class Commit < ActiveRecord::Base
+  require 'shellwords'
+
+  # True if `e` passes `git check-ref-format --allow-onelevel` (shelled
+  # out with the value escaped).  nil/empty values and strings starting
+  # with '-' or '$' are rejected without invoking git.
+  def self.git_check_ref_format(e)
+    if !e or e.empty? or e[0] == '-' or e[0] == '$'
+      # definitely not valid
+      false
+    else
+      `git check-ref-format --allow-onelevel #{e.shellescape}`
+      $?.success?
+    end
+  end
+
+  # Resolve a revision range to commit sha1s, searching every repository
+  # readable by current_user (optionally restricted to one repository
+  # name).  Returns an array of sha1 strings, or nil when the inputs are
+  # invalid or nothing matched.  Commits reachable from any rev in
+  # `exclude` are omitted.
+  def self.find_commit_range(current_user, repository, minimum, maximum, exclude)
+    if minimum and minimum.empty?
+      minimum = nil
+    end
+
+    if minimum and !git_check_ref_format(minimum)
+      logger.warn "find_commit_range called with invalid minimum revision: '#{minimum}'"
+      return nil
+    end
+
+    if maximum and !git_check_ref_format(maximum)
+      logger.warn "find_commit_range called with invalid maximum revision: '#{maximum}'"
+      return nil
+    end
+
+    if !maximum
+      maximum = "HEAD"
+    end
+
+    # Get list of actual repository directories under management
+    on_disk_repos = repositories
+
+    # Get list of repository objects readable by user
+    readable = Repository.readable_by(current_user)
+
+    # filter repository objects on requested repository name
+    if repository
+      readable = readable.where(name: repository)
+    end
+
+    commits = []
+    readable.each do |r|
+      if on_disk_repos[r.name]
+        ENV['GIT_DIR'] = on_disk_repos[r.name][:git_dir]
+
+        # We've filtered for invalid characters, so we can pass the contents of
+        # minimum and maximum safely on the command line
+
+        # Get the commit hash for the upper bound
+        max_hash = nil
+        IO.foreach("|git rev-list --max-count=1 #{maximum.shellescape} --") do |line|
+          max_hash = line.strip
+        end
+
+        # If not found or string is invalid, nothing else to do
+        next if !max_hash or !git_check_ref_format(max_hash)
+
+        resolved_exclude = nil
+        if exclude
+          resolved_exclude = []
+          exclude.each do |e|
+            if git_check_ref_format(e)
+              IO.foreach("|git rev-list --max-count=1 #{e.shellescape} --") do |line|
+                resolved_exclude.push(line.strip)
+              end
+            else
+              logger.warn "find_commit_range called with invalid exclude invalid characters: '#{exclude}'"
+              return nil
+            end
+          end
+        end
+
+        if minimum
+          # Get the commit hash for the lower bound
+          min_hash = nil
+          IO.foreach("|git rev-list --max-count=1 #{minimum.shellescape} --") do |line|
+            min_hash = line.strip
+          end
+
+          # If not found or string is invalid, nothing else to do
+          next if !min_hash or !git_check_ref_format(min_hash)
+
+          # Now find all commits between them
+          IO.foreach("|git rev-list #{min_hash.shellescape}..#{max_hash.shellescape} --") do |line|
+            hash = line.strip
+            commits.push(hash) if !resolved_exclude or !resolved_exclude.include? hash
+          end
+
+          commits.push(min_hash) if !resolved_exclude or !resolved_exclude.include? min_hash
+        else
+          commits.push(max_hash) if !resolved_exclude or !resolved_exclude.include? max_hash
+        end
+      else
+        logger.warn "Repository #{r.name} exists in table but not found on disk"
+      end
+    end
+
+    if !commits or commits.empty?
+      nil
+    else
+      commits
+    end
+  end
+
+  # Import all commits from configured git directory into the commits
+  # database.
+
+  def self.import_all
+    repositories.each do |repo_name, repo|
+      # Count imported vs. already-present commits for progress output.
+      stat = { true => 0, false => 0 }
+      ENV['GIT_DIR'] = repo[:git_dir]
+      IO.foreach("|git rev-list --format=oneline --all") do |line|
+        sha1, message = line.strip.split " ", 2
+        imported = false
+        Commit.find_or_create_by_repository_name_and_sha1_and_message(repo_name, sha1, message[0..254]) do
+          imported = true
+        end
+        stat[!!imported] += 1
+        if (stat[true] + stat[false]) % 100 == 0
+          if $stdout.tty? or ARGV[0] == '-v'
+            puts "#{$0} #{$$}: repo #{repo_name} add #{stat[true]} skip #{stat[false]}"
+          end
+        end
+      end
+      if $stdout.tty? or ARGV[0] == '-v'
+        puts "#{$0} #{$$}: repo #{repo_name} add #{stat[true]} skip #{stat[false]}"
+      end
+    end
+  end
+
+  # Drop the cached repository list so the next `repositories` call
+  # rescans the directory.
+  def self.refresh_repositories
+    @repositories = nil
+  end
+
+  protected
+
+  # Map of repository name => {git_dir: path}, scanned once from
+  # Rails.configuration.git_repositories_dir and memoized; the server's
+  # internal git directory is excluded.
+  def self.repositories
+    return @repositories if @repositories
+
+    @repositories = {}
+    @gitdirbase = Rails.configuration.git_repositories_dir
+    Dir.foreach @gitdirbase do |repo|
+      next if repo.match /^\./
+      git_dir = File.join(@gitdirbase,
+                          repo.match(/\.git$/) ? repo : File.join(repo, '.git'))
+      next if git_dir == Rails.configuration.git_internal_dir
+      repo_name = repo.sub(/\.git$/, '')
+      @repositories[repo_name] = {git_dir: git_dir}
+    end
+
+    @repositories
+  end
+end
diff --git a/services/api/app/models/commit_ancestor.rb b/services/api/app/models/commit_ancestor.rb
new file mode 100644 (file)
index 0000000..71ea57f
--- /dev/null
@@ -0,0 +1,40 @@
+# Usage:
+#
+# x = CommitAncestor.find_or_create_by_descendant_and_ancestor(a, b)
+# "b is an ancestor of a" if x.is
+#
+
+class CommitAncestor < ActiveRecord::Base
+  before_create :ask_git_whether_is
+
+  class CommitNotFoundError < ArgumentError
+  end
+
+  protected
+
+  def ask_git_whether_is
+    @gitdirbase = Rails.configuration.git_repositories_dir
+    self.is = nil
+    Dir.foreach @gitdirbase do |repo|
+      next if repo.match /^\./
+      git_dir = repo.match(/\.git$/) ? repo : File.join(repo, '.git')
+      repo_name = repo.sub(/\.git$/, '')
+      ENV['GIT_DIR'] = File.join(@gitdirbase, git_dir)
+      IO.foreach("|git rev-list --format=oneline '#{self.descendant.gsub /[^0-9a-f]/,""}'") do |line|
+        self.is = false
+        sha1, message = line.strip.split(" ", 2)
+        if sha1 == self.ancestor
+          self.is = true
+          break
+        end
+      end
+      if !self.is.nil?
+        self.repository_name = repo_name
+        break
+      end
+    end
+    if self.is.nil?
+      raise CommitNotFoundError.new "Specified commit was not found"
+    end
+  end
+end
diff --git a/services/api/app/models/database_seeds.rb b/services/api/app/models/database_seeds.rb
new file mode 100644 (file)
index 0000000..bc68283
--- /dev/null
@@ -0,0 +1,11 @@
+class DatabaseSeeds
+  extend CurrentApiClient
+  def self.install
+    system_user
+    system_group
+    all_users_group
+    anonymous_group
+    anonymous_user
+    empty_collection
+  end
+end
diff --git a/services/api/app/models/group.rb b/services/api/app/models/group.rb
new file mode 100644 (file)
index 0000000..0e857ad
--- /dev/null
@@ -0,0 +1,40 @@
+require 'can_be_an_owner'
+
+class Group < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  include CanBeAnOwner
+  after_create :invalidate_permissions_cache
+  after_update :maybe_invalidate_permissions_cache
+  before_create :assign_name
+
+  api_accessible :user, extend: :common do |t|
+    t.add :name
+    t.add :group_class
+    t.add :description
+    t.add :writable_by
+  end
+
+  def maybe_invalidate_permissions_cache
+    if uuid_changed? or owner_uuid_changed?
+      # This can change users' permissions on other groups as well as
+      # this one.
+      invalidate_permissions_cache
+    end
+  end
+
+  def invalidate_permissions_cache
+    # Ensure a new group can be accessed by the appropriate users
+    # immediately after being created.
+    User.invalidate_permissions_cache
+  end
+
+  def assign_name
+    if self.new_record? and (self.name.nil? or self.name.empty?)
+      self.name = self.uuid
+    end
+    true
+  end
+
+end
diff --git a/services/api/app/models/human.rb b/services/api/app/models/human.rb
new file mode 100644 (file)
index 0000000..32f2906
--- /dev/null
@@ -0,0 +1,10 @@
+class Human < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  serialize :properties, Hash
+
+  api_accessible :user, extend: :common do |t|
+    t.add :properties
+  end
+end
diff --git a/services/api/app/models/job.rb b/services/api/app/models/job.rb
new file mode 100644 (file)
index 0000000..0444528
--- /dev/null
@@ -0,0 +1,410 @@
+class Job < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  attr_protected :arvados_sdk_version, :docker_image_locator
+  serialize :script_parameters, Hash
+  serialize :runtime_constraints, Hash
+  serialize :tasks_summary, Hash
+  before_create :ensure_unique_submit_id
+  after_commit :trigger_crunch_dispatch_if_cancelled, :on => :update
+  before_validation :set_priority
+  before_validation :update_state_from_old_state_attrs
+  validate :ensure_script_version_is_commit
+  validate :find_arvados_sdk_version
+  validate :find_docker_image_locator
+  validate :validate_status
+  validate :validate_state_change
+  validate :ensure_no_collection_uuids_in_script_params
+  before_save :update_timestamps_when_state_changes
+
+  has_many :commit_ancestors, :foreign_key => :descendant, :primary_key => :script_version
+  has_many(:nodes, foreign_key: :job_uuid, primary_key: :uuid)
+
+  class SubmitIdReused < StandardError
+  end
+
+  api_accessible :user, extend: :common do |t|
+    t.add :submit_id
+    t.add :priority
+    t.add :script
+    t.add :script_parameters
+    t.add :script_version
+    t.add :cancelled_at
+    t.add :cancelled_by_client_uuid
+    t.add :cancelled_by_user_uuid
+    t.add :started_at
+    t.add :finished_at
+    t.add :output
+    t.add :success
+    t.add :running
+    t.add :state
+    t.add :is_locked_by_uuid
+    t.add :log
+    t.add :runtime_constraints
+    t.add :tasks_summary
+    t.add :dependencies
+    t.add :nondeterministic
+    t.add :repository
+    t.add :supplied_script_version
+    t.add :arvados_sdk_version
+    t.add :docker_image_locator
+    t.add :queue_position
+    t.add :node_uuids
+    t.add :description
+  end
+
+  # Supported states for a job
+  States = [
+            (Queued = 'Queued'),
+            (Running = 'Running'),
+            (Cancelled = 'Cancelled'),
+            (Failed = 'Failed'),
+            (Complete = 'Complete'),
+           ]
+
+  def assert_finished
+    update_attributes(finished_at: finished_at || Time.now,
+                      success: success.nil? ? false : success,
+                      running: false)
+  end
+
+  def node_uuids
+    nodes.map(&:uuid)
+  end
+
+  def self.queue
+    self.where('state = ?', Queued).order('priority desc, created_at')
+  end
+
+  def queue_position
+    Job::queue.each_with_index do |job, index|
+      if job[:uuid] == self.uuid
+        return index
+      end
+    end
+    nil
+  end
+
+  def self.running
+    self.where('running = ?', true).
+      order('priority desc, created_at')
+  end
+
+  def lock locked_by_uuid
+    transaction do
+      self.reload
+      unless self.state == Queued and self.is_locked_by_uuid.nil?
+        raise AlreadyLockedError
+      end
+      self.state = Running
+      self.is_locked_by_uuid = locked_by_uuid
+      self.save!
+    end
+  end
+
+  protected
+
+  def foreign_key_attributes
+    super + %w(output log)
+  end
+
+  def skip_uuid_read_permission_check
+    super + %w(cancelled_by_client_uuid)
+  end
+
+  def skip_uuid_existence_check
+    super + %w(output log)
+  end
+
+  def set_priority
+    if self.priority.nil?
+      self.priority = 0
+    end
+    true
+  end
+
+  def ensure_script_version_is_commit
+    if self.state == Running
+      # Apparently client has already decided to go for it. This is
+      # needed to run a local job using a local working directory
+      # instead of a commit-ish.
+      return true
+    end
+    if new_record? or script_version_changed?
+      sha1 = Commit.find_commit_range(current_user, self.repository, nil, self.script_version, nil)[0] rescue nil
+      if sha1
+        self.supplied_script_version = self.script_version if self.supplied_script_version.nil? or self.supplied_script_version.empty?
+        self.script_version = sha1
+      else
+        self.errors.add :script_version, "#{self.script_version} does not resolve to a commit"
+        return false
+      end
+    end
+  end
+
+  def ensure_unique_submit_id
+    if !submit_id.nil?
+      if Job.where('submit_id=?',self.submit_id).first
+        raise SubmitIdReused.new
+      end
+    end
+    true
+  end
+
+  def resolve_runtime_constraint(key, attr_sym)
+    if ((runtime_constraints.is_a? Hash) and
+        (search = runtime_constraints[key]))
+      ok, result = yield search
+    else
+      ok, result = true, nil
+    end
+    if ok
+      send("#{attr_sym}=".to_sym, result)
+    else
+      errors.add(attr_sym, result)
+    end
+    ok
+  end
+
+  def find_arvados_sdk_version
+    resolve_runtime_constraint("arvados_sdk_version",
+                               :arvados_sdk_version) do |git_search|
+      commits = Commit.find_commit_range(current_user, "arvados",
+                                         nil, git_search, nil)
+      if commits.nil? or commits.empty?
+        [false, "#{git_search} does not resolve to a commit"]
+      elsif not runtime_constraints["docker_image"]
+        [false, "cannot be specified without a Docker image constraint"]
+      else
+        [true, commits.first]
+      end
+    end
+  end
+
+  def find_docker_image_locator
+    resolve_runtime_constraint("docker_image",
+                               :docker_image_locator) do |image_search|
+      image_tag = runtime_constraints['docker_image_tag']
+      if coll = Collection.for_latest_docker_image(image_search, image_tag)
+        [true, coll.portable_data_hash]
+      else
+        [false, "not found for #{image_search}"]
+      end
+    end
+  end
+
+  def dependencies
+    deps = {}
+    queue = self.script_parameters.values
+    while not queue.empty?
+      queue = queue.flatten.compact.collect do |v|
+        if v.is_a? Hash
+          v.values
+        elsif v.is_a? String
+          v.match(/^(([0-9a-f]{32})\b(\+[^,]+)?,?)*$/) do |locator|
+            deps[locator.to_s] = true
+          end
+          nil
+        end
+      end
+    end
+    deps.keys
+  end
+
+  def permission_to_update
+    if is_locked_by_uuid_was and !(current_user and
+                                   (current_user.uuid == is_locked_by_uuid_was or
+                                    current_user.uuid == system_user.uuid))
+      if script_changed? or
+          script_parameters_changed? or
+          script_version_changed? or
+          (!cancelled_at_was.nil? and
+           (cancelled_by_client_uuid_changed? or
+            cancelled_by_user_uuid_changed? or
+            cancelled_at_changed?)) or
+          started_at_changed? or
+          finished_at_changed? or
+          running_changed? or
+          success_changed? or
+          output_changed? or
+          log_changed? or
+          tasks_summary_changed? or
+          state_changed?
+        logger.warn "User #{current_user.uuid if current_user} tried to change protected job attributes on locked #{self.class.to_s} #{uuid_was}"
+        return false
+      end
+    end
+    if !is_locked_by_uuid_changed?
+      super
+    else
+      if !current_user
+        logger.warn "Anonymous user tried to change lock on #{self.class.to_s} #{uuid_was}"
+        false
+      elsif is_locked_by_uuid_was and is_locked_by_uuid_was != current_user.uuid
+        logger.warn "User #{current_user.uuid} tried to steal lock on #{self.class.to_s} #{uuid_was} from #{is_locked_by_uuid_was}"
+        false
+      elsif !is_locked_by_uuid.nil? and is_locked_by_uuid != current_user.uuid
+        logger.warn "User #{current_user.uuid} tried to lock #{self.class.to_s} #{uuid_was} with uuid #{is_locked_by_uuid}"
+        false
+      else
+        super
+      end
+    end
+  end
+
+  def update_modified_by_fields
+    if self.cancelled_at_changed?
+      # Ensure cancelled_at cannot be set to arbitrary non-now times,
+      # or changed once it is set.
+      if self.cancelled_at and not self.cancelled_at_was
+        self.cancelled_at = Time.now
+        self.cancelled_by_user_uuid = current_user.uuid
+        self.cancelled_by_client_uuid = current_api_client.andand.uuid
+        @need_crunch_dispatch_trigger = true
+      else
+        self.cancelled_at = self.cancelled_at_was
+        self.cancelled_by_user_uuid = self.cancelled_by_user_uuid_was
+        self.cancelled_by_client_uuid = self.cancelled_by_client_uuid_was
+      end
+    end
+    super
+  end
+
+  def trigger_crunch_dispatch_if_cancelled
+    if @need_crunch_dispatch_trigger
+      File.open(Rails.configuration.crunch_refresh_trigger, 'wb') do
+        # That's all, just create/touch a file for crunch-job to see.
+      end
+    end
+  end
+
+  def update_timestamps_when_state_changes
+    return if not (state_changed? or new_record?)
+
+    case state
+    when Running
+      self.started_at ||= Time.now
+    when Failed, Complete
+      self.finished_at ||= Time.now
+    when Cancelled
+      self.cancelled_at ||= Time.now
+    end
+
+    # TODO: Remove the following case block when old "success" and
+    # "running" attrs go away. Until then, this ensures we still
+    # expose correct success/running flags to older clients, even if
+    # some new clients are writing only the new state attribute.
+    case state
+    when Queued
+      self.running = false
+      self.success = nil
+    when Running
+      self.running = true
+      self.success = nil
+    when Cancelled, Failed
+      self.running = false
+      self.success = false
+    when Complete
+      self.running = false
+      self.success = true
+    end
+    self.running ||= false # Default to false instead of nil.
+
+    @need_crunch_dispatch_trigger = true
+
+    true
+  end
+
+  def update_state_from_old_state_attrs
+    # If a client has touched the legacy state attrs, update the
+    # "state" attr to agree with the updated values of the legacy
+    # attrs.
+    #
+    # TODO: Remove this method when old "success" and "running" attrs
+    # go away.
+    if cancelled_at_changed? or
+        success_changed? or
+        running_changed? or
+        state.nil?
+      if cancelled_at
+        self.state = Cancelled
+      elsif success == false
+        self.state = Failed
+      elsif success == true
+        self.state = Complete
+      elsif running == true
+        self.state = Running
+      else
+        self.state = Queued
+      end
+    end
+    true
+  end
+
+  def validate_status
+    if self.state.in?(States)
+      true
+    else
+      errors.add :state, "#{state.inspect} must be one of: #{States.inspect}"
+      false
+    end
+  end
+
+  def validate_state_change
+    ok = true
+    if self.state_changed?
+      ok = case self.state_was
+           when nil
+             # state isn't set yet
+             true
+           when Queued
+             # Permit going from queued to any state
+             true
+           when Running
+             # From running, may only transition to a finished state
+             [Complete, Failed, Cancelled].include? self.state
+           when Complete, Failed, Cancelled
+             # Once in a finished state, don't permit any more state changes
+             false
+           else
+             # Any other state transition is also invalid
+             false
+           end
+      if not ok
+        errors.add :state, "invalid change from #{self.state_was} to #{self.state}"
+      end
+    end
+    ok
+  end
+
+  def ensure_no_collection_uuids_in_script_params
+    # recursive_hash_search searches recursively through hashes and
+    # arrays in 'thing' for string fields matching regular expression
+    # 'pattern'.  Returns true if pattern is found, false otherwise.
+    def recursive_hash_search thing, pattern
+      if thing.is_a? Hash
+        thing.each do |k, v|
+          return true if recursive_hash_search v, pattern
+        end
+      elsif thing.is_a? Array
+        thing.each do |k|
+          return true if recursive_hash_search k, pattern
+        end
+      elsif thing.is_a? String
+        return true if thing.match pattern
+      end
+      false
+    end
+
+    # Fail validation if any script_parameters field includes a string containing a
+    # collection uuid pattern.
+    if self.script_parameters_changed?
+      if recursive_hash_search(self.script_parameters, Collection.uuid_regex)
+        self.errors.add :script_parameters, "must use portable_data_hash instead of collection uuid"
+        return false
+      end
+    end
+    true
+  end
+end
diff --git a/services/api/app/models/job_task.rb b/services/api/app/models/job_task.rb
new file mode 100644 (file)
index 0000000..5f83fbe
--- /dev/null
@@ -0,0 +1,34 @@
+class JobTask < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  serialize :parameters, Hash
+  before_create :set_default_qsequence
+  after_update :delete_created_job_tasks_if_failed
+
+  api_accessible :user, extend: :common do |t|
+    t.add :job_uuid
+    t.add :created_by_job_task_uuid
+    t.add :sequence
+    t.add :qsequence
+    t.add :parameters
+    t.add :output
+    t.add :progress
+    t.add :success
+    t.add :started_at
+    t.add :finished_at
+  end
+
+  protected
+
+  def delete_created_job_tasks_if_failed
+    if self.success == false and self.success != self.success_was
+      JobTask.delete_all ['created_by_job_task_uuid = ?', self.uuid]
+    end
+  end
+
+  def set_default_qsequence
+    self.qsequence ||= self.class.connection.
+      select_value("SELECT nextval('job_tasks_qsequence_seq')")
+  end
+end
diff --git a/services/api/app/models/keep_disk.rb b/services/api/app/models/keep_disk.rb
new file mode 100644 (file)
index 0000000..da421eb
--- /dev/null
@@ -0,0 +1,75 @@
+class KeepDisk < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  before_validation :ensure_ping_secret
+
+  api_accessible :user, extend: :common do |t|
+    t.add :node_uuid
+    t.add :filesystem_uuid
+    t.add :bytes_total
+    t.add :bytes_free
+    t.add :is_readable
+    t.add :is_writable
+    t.add :last_read_at
+    t.add :last_write_at
+    t.add :last_ping_at
+    t.add :service_host
+    t.add :service_port
+    t.add :service_ssl_flag
+    t.add :keep_service_uuid
+  end
+  api_accessible :superuser, :extend => :user do |t|
+    t.add :ping_secret
+  end
+
+  def foreign_key_attributes
+    super.reject { |a| a == "filesystem_uuid" }
+  end
+
+  def ping(o)
+    raise "must have :service_host and :ping_secret" unless o[:service_host] and o[:ping_secret]
+
+    if o[:ping_secret] != self.ping_secret
+      logger.info "Ping: secret mismatch: received \"#{o[:ping_secret]}\" != \"#{self.ping_secret}\""
+      return nil
+    end
+
+    @bypass_arvados_authorization = true
+    self.update_attributes!(o.select { |k,v|
+                             [:bytes_total,
+                              :bytes_free,
+                              :is_readable,
+                              :is_writable,
+                              :last_read_at,
+                              :last_write_at
+                             ].collect(&:to_s).index k
+                           }.merge(last_ping_at: Time.now))
+  end
+
+  def service_host
+    KeepService.find_by_uuid(self.keep_service_uuid).andand.service_host
+  end
+
+  def service_port
+    KeepService.find_by_uuid(self.keep_service_uuid).andand.service_port
+  end
+
+  def service_ssl_flag
+    KeepService.find_by_uuid(self.keep_service_uuid).andand.service_ssl_flag
+  end
+
+  protected
+
+  def ensure_ping_secret
+    self.ping_secret ||= rand(2**256).to_s(36)
+  end
+
+  def permission_to_update
+    @bypass_arvados_authorization or super
+  end
+
+  def permission_to_create
+    current_user and current_user.is_admin
+  end
+end
diff --git a/services/api/app/models/keep_service.rb b/services/api/app/models/keep_service.rb
new file mode 100644 (file)
index 0000000..3baf098
--- /dev/null
@@ -0,0 +1,15 @@
+class KeepService < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+
+  api_accessible :user, extend: :common do |t|
+    t.add  :service_host
+    t.add  :service_port
+    t.add  :service_ssl_flag
+    t.add  :service_type
+  end
+  api_accessible :superuser, :extend => :user do |t|
+  end
+
+end
diff --git a/services/api/app/models/link.rb b/services/api/app/models/link.rb
new file mode 100644 (file)
index 0000000..d9b8f6f
--- /dev/null
@@ -0,0 +1,100 @@
+class Link < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  serialize :properties, Hash
+  before_create :permission_to_attach_to_objects
+  before_update :permission_to_attach_to_objects
+  after_update :maybe_invalidate_permissions_cache
+  after_create :maybe_invalidate_permissions_cache
+  after_destroy :maybe_invalidate_permissions_cache
+  attr_accessor :head_kind, :tail_kind
+  validate :name_links_are_obsolete
+
+  api_accessible :user, extend: :common do |t|
+    t.add :tail_uuid
+    t.add :link_class
+    t.add :name
+    t.add :head_uuid
+    t.add :head_kind
+    t.add :tail_kind
+    t.add :properties
+  end
+
+  def properties
+    @properties ||= Hash.new
+    super
+  end
+
+  def head_kind
+    if k = ArvadosModel::resource_class_for_uuid(head_uuid)
+      k.kind
+    end
+  end
+
+  def tail_kind
+    if k = ArvadosModel::resource_class_for_uuid(tail_uuid)
+      k.kind
+    end
+  end
+
+  protected
+
+  def permission_to_attach_to_objects
+    # Anonymous users cannot write links
+    return false if !current_user
+
+    # All users can write links that don't affect permissions
+    return true if self.link_class != 'permission'
+
+    # Administrators can grant permissions
+    return true if current_user.is_admin
+
+    # All users can grant permissions on objects they own or can manage
+    head_obj = ArvadosModel.find_by_uuid(head_uuid)
+    return true if current_user.can?(manage: head_obj)
+
+    # Default = deny.
+    false
+  end
+
+  def maybe_invalidate_permissions_cache
+    if self.link_class == 'permission'
+      # Clearing the entire permissions cache can generate many
+      # unnecessary queries if many active users are not affected by
+      # this change. In such cases it would be better to search cached
+      # permissions for head_uuid and tail_uuid, and invalidate the
+      # cache for only those users. (This would require a browseable
+      # cache.)
+      User.invalidate_permissions_cache
+    end
+  end
+
+  def name_links_are_obsolete
+    if link_class == 'name'
+      errors.add('name', 'Name links are obsolete')
+      false
+    else
+      true
+    end
+  end
+
+  # A user is permitted to create, update or modify a permission link
+  # if and only if they have "manage" permission on the object
+  # indicated by the permission link's head_uuid.
+  #
+  # All other links are treated as regular ArvadosModel objects.
+  #
+  def ensure_owner_uuid_is_permitted
+    if link_class == 'permission'
+      ob = ArvadosModel.find_by_uuid(head_uuid)
+      raise PermissionDeniedError unless current_user.can?(manage: ob)
+      # All permission links should be owned by the system user.
+      self.owner_uuid = system_user_uuid
+      return true
+    else
+      super
+    end
+  end
+
+end
diff --git a/services/api/app/models/log.rb b/services/api/app/models/log.rb
new file mode 100644 (file)
index 0000000..39f789e
--- /dev/null
@@ -0,0 +1,88 @@
+class Log < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  serialize :properties, Hash
+  before_validation :set_default_event_at
+  attr_accessor :object, :object_kind
+  after_save :send_notify
+
+  api_accessible :user, extend: :common do |t|
+    t.add :id
+    t.add :object_uuid
+    t.add :object_owner_uuid
+    t.add :object_kind
+    t.add :event_at
+    t.add :event_type
+    t.add :summary
+    t.add :properties
+  end
+
+  def object_kind
+    if k = ArvadosModel::resource_class_for_uuid(object_uuid)
+      k.kind
+    end
+  end
+
+  def fill_object(thing)
+    self.object_uuid ||= thing.uuid
+    if respond_to? :object_owner_uuid=
+      # Skip this if the object_owner_uuid migration hasn't happened
+      # yet, i.e., we're in the process of migrating an old database.
+      self.object_owner_uuid = thing.owner_uuid
+    end
+    self.summary ||= "#{self.event_type} of #{thing.uuid}"
+    self
+  end
+
+  def fill_properties(age, etag_prop, attrs_prop)
+    self.properties.merge!({"#{age}_etag" => etag_prop,
+                             "#{age}_attributes" => attrs_prop})
+  end
+
+  def update_to(thing)
+    fill_properties('new', thing.andand.etag, thing.andand.logged_attributes)
+    case event_type
+    when "create"
+      self.event_at = thing.created_at
+    when "update"
+      self.event_at = thing.modified_at
+    when "destroy"
+      self.event_at = Time.now
+    end
+    self
+  end
+
+  protected
+
+  def permission_to_create
+    true
+  end
+
+  def permission_to_update
+    current_user.andand.is_admin
+  end
+
+  alias_method :permission_to_delete, :permission_to_update
+
+  def set_default_event_at
+    self.event_at ||= Time.now
+  end
+
+  def log_start_state
+    # don't log start state on logs
+  end
+
+  def log_change(event_type)
+    # Don't log changes to logs.
+  end
+
+  def ensure_valid_uuids
+    # logs can have references to deleted objects
+  end
+
+  def send_notify
+    connection.execute "NOTIFY logs, '#{self.id}'"
+  end
+
+end
diff --git a/services/api/app/models/node.rb b/services/api/app/models/node.rb
new file mode 100644 (file)
index 0000000..c38f681
--- /dev/null
@@ -0,0 +1,214 @@
+class Node < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  serialize :info, Hash
+  serialize :properties, Hash
+  before_validation :ensure_ping_secret
+  after_update :dns_server_update
+
+  # Only a controller can figure out whether or not the current API tokens
+  # have access to the associated Job.  They're expected to set
+  # job_readable=true if the Job UUID can be included in the API response.
+  belongs_to(:job, foreign_key: :job_uuid, primary_key: :uuid)
+  attr_accessor :job_readable
+
+  MAX_SLOTS = 64
+
+  @@dns_server_conf_dir = Rails.configuration.dns_server_conf_dir
+  @@dns_server_conf_template = Rails.configuration.dns_server_conf_template
+  @@dns_server_reload_command = Rails.configuration.dns_server_reload_command
+  @@uuid_prefix = Rails.configuration.uuid_prefix
+  @@domain = Rails.configuration.compute_node_domain rescue `hostname --domain`.strip
+  @@nameservers = Rails.configuration.compute_node_nameservers
+
+  api_accessible :user, :extend => :common do |t|
+    t.add :hostname
+    t.add :domain
+    t.add :ip_address
+    t.add :last_ping_at
+    t.add :slot_number
+    t.add :status
+    t.add :api_job_uuid, as: :job_uuid
+    t.add :crunch_worker_state
+    t.add :properties
+  end
+  api_accessible :superuser, :extend => :user do |t|
+    t.add :first_ping_at
+    t.add :info
+    t.add lambda { |x| @@nameservers }, :as => :nameservers
+  end
+
+  def domain
+    super || @@domain
+  end
+
+  def api_job_uuid
+    job_readable ? job_uuid : nil
+  end
+
+  def crunch_worker_state
+    return 'down' if slot_number.nil?
+    case self.info.andand['slurm_state']
+    when 'alloc', 'comp'
+      'busy'
+    when 'idle'
+      'idle'
+    else
+      'down'
+    end
+  end
+
+  def status
+    if !self.last_ping_at
+      if Time.now - self.created_at > 5.minutes
+        'startup-fail'
+      else
+        'pending'
+      end
+    elsif Time.now - self.last_ping_at > 1.hours
+      'missing'
+    else
+      'running'
+    end
+  end
+
+  def ping(o)
+    raise "must have :ip and :ping_secret" unless o[:ip] and o[:ping_secret]
+
+    if o[:ping_secret] != self.info['ping_secret']
+      logger.info "Ping: secret mismatch: received \"#{o[:ping_secret]}\" != \"#{self.info['ping_secret']}\""
+      raise ArvadosModel::UnauthorizedError.new("Incorrect ping_secret")
+    end
+    self.last_ping_at = Time.now
+
+    @bypass_arvados_authorization = true
+
+    # Record IP address
+    if self.ip_address.nil?
+      logger.info "#{self.uuid} ip_address= #{o[:ip]}"
+      self.ip_address = o[:ip]
+      self.first_ping_at = Time.now
+    end
+
+    # Record instance ID if not already known
+    if o[:ec2_instance_id]
+      if !self.info['ec2_instance_id']
+        self.info['ec2_instance_id'] = o[:ec2_instance_id]
+      elsif self.info['ec2_instance_id'] != o[:ec2_instance_id]
+        logger.debug "Multiple nodes have credentials for #{self.uuid}"
+        raise "#{self.uuid} is already running at #{self.info['ec2_instance_id']} so rejecting ping from #{o[:ec2_instance_id]}"
+      end
+    end
+
+    # Assign hostname
+    if self.slot_number.nil?
+      try_slot = 0
+      begin
+        self.slot_number = try_slot
+        begin
+          self.save!
+          break
+        rescue ActiveRecord::RecordNotUnique
+          try_slot += 1
+        end
+        raise "No available node slots" if try_slot == MAX_SLOTS
+      end while true
+      self.hostname = self.class.hostname_for_slot(self.slot_number)
+    end
+
+    # Record other basic stats
+    ['total_cpu_cores', 'total_ram_mb', 'total_scratch_mb'].each do |key|
+      if value = (o[key] or o[key.to_sym])
+        self.properties[key] = value.to_i
+      else
+        self.properties.delete(key)
+      end
+    end
+
+    save!
+  end
+
+  protected
+
+  def ensure_ping_secret
+    self.info['ping_secret'] ||= rand(2**256).to_s(36)
+  end
+
+  def dns_server_update
+    if self.hostname_changed? or self.ip_address_changed?
+      if not self.ip_address.nil?
+        stale_conflicting_nodes = Node.where('id != ? and ip_address = ? and last_ping_at < ?',self.id,self.ip_address,10.minutes.ago)
+        if not stale_conflicting_nodes.empty?
+          # One or more stale compute node records have the same IP address as the new node.
+          # Clear the ip_address field on the stale nodes.
+          stale_conflicting_nodes.each do |stale_node|
+            stale_node.ip_address = nil
+            stale_node.save!
+          end
+        end
+      end
+      if self.hostname and self.ip_address
+        self.class.dns_server_update(self.hostname, self.ip_address)
+      end
+    end
+  end
+
+  def self.dns_server_update(hostname, ip_address)
+    return unless @@dns_server_conf_dir and @@dns_server_conf_template
+    ptr_domain = ip_address.
+      split('.').reverse.join('.').concat('.in-addr.arpa')
+    hostfile = File.join @@dns_server_conf_dir, "#{hostname}.conf"
+
+    begin
+      template = IO.read(@@dns_server_conf_template)
+    rescue => e
+      STDERR.puts "Unable to read dns_server_conf_template #{@@dns_server_conf_template}: #{e.message}"
+      return
+    end
+
+    populated = template % {hostname:hostname, uuid_prefix:@@uuid_prefix, ip_address:ip_address, ptr_domain:ptr_domain}
+
+    begin
+      File.open hostfile, 'w' do |f|
+        f.puts populated
+      end
+    rescue => e
+      STDERR.puts "Unable to write #{hostfile}: #{e.message}"
+      return
+    end
+    File.open(File.join(@@dns_server_conf_dir, 'restart.txt'), 'w') do |f|
+      # this will trigger a dns server restart
+      f.puts @@dns_server_reload_command
+    end
+  end
+
+  def self.hostname_for_slot(slot_number)
+    "compute#{slot_number}"
+  end
+
+  # At startup, make sure all DNS entries exist.  Otherwise, slurmctld
+  # will refuse to start.
+  if @@dns_server_conf_dir and @@dns_server_conf_template
+    (0..MAX_SLOTS-1).each do |slot_number|
+      hostname = hostname_for_slot(slot_number)
+      hostfile = File.join @@dns_server_conf_dir, "#{hostname}.conf"
+      if !File.exists? hostfile
+        n = Node.where(:slot_number => slot_number).first
+        if n.nil? or n.ip_address.nil?
+          dns_server_update(hostname, '127.40.4.0')
+        else
+          dns_server_update(hostname, n.ip_address)
+        end
+      end
+    end
+  end
+
+  def permission_to_update
+    @bypass_arvados_authorization or super
+  end
+
+  def permission_to_create
+    current_user and current_user.is_admin
+  end
+end
diff --git a/services/api/app/models/pipeline_instance.rb b/services/api/app/models/pipeline_instance.rb
new file mode 100644 (file)
index 0000000..28345d5
--- /dev/null
@@ -0,0 +1,169 @@
+class PipelineInstance < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  serialize :components, Hash
+  serialize :properties, Hash
+  serialize :components_summary, Hash
+  belongs_to :pipeline_template, :foreign_key => :pipeline_template_uuid, :primary_key => :uuid
+
+  before_validation :bootstrap_components
+  before_validation :update_state
+  before_validation :verify_status
+  before_create :set_state_before_save
+  before_save :set_state_before_save
+
+  api_accessible :user, extend: :common do |t|
+    t.add :pipeline_template_uuid
+    t.add :pipeline_template, :if => :pipeline_template
+    t.add :name
+    t.add :components
+    t.add :dependencies
+    t.add :properties
+    t.add :state
+    t.add :components_summary
+    t.add :description
+    t.add :started_at
+    t.add :finished_at
+  end
+
+  # Supported states for a pipeline instance
+  States =
+    [
+     (New = 'New'),
+     (Ready = 'Ready'),
+     (RunningOnServer = 'RunningOnServer'),
+     (RunningOnClient = 'RunningOnClient'),
+     (Paused = 'Paused'),
+     (Failed = 'Failed'),
+     (Complete = 'Complete'),
+    ]
+
+  def dependencies
+    dependency_search(self.components).keys
+  end
+
+  # if all components have input, the pipeline is Ready
+  def components_look_ready?
+    if !self.components || self.components.empty?
+      return false
+    end
+
+    all_components_have_input = true
+    self.components.each do |name, component|
+      component['script_parameters'].andand.each do |parametername, parameter|
+        parameter = { 'value' => parameter } unless parameter.is_a? Hash
+        if parameter['value'].nil? and parameter['required']
+          if parameter['output_of']
+            next
+          end
+          all_components_have_input = false
+          break
+        end
+      end
+    end
+    return all_components_have_input
+  end
+
+  def progress_table
+    begin
+      # v0 pipeline format
+      nrow = -1
+      components['steps'].collect do |step|
+        nrow += 1
+        row = [nrow, step['name']]
+        if step['complete'] and step['complete'] != 0
+          if step['output_data_locator']
+            row << 1.0
+          else
+            row << 0.0
+          end
+        else
+          row << 0.0
+          if step['failed']
+            self.state = Failed
+          end
+        end
+        row << (step['warehousejob']['id'] rescue nil)
+        row << (step['warehousejob']['revision'] rescue nil)
+        row << step['output_data_locator']
+        row << (Time.parse(step['warehousejob']['finishtime']) rescue nil)
+        row
+      end
+    rescue
+      []
+    end
+  end
+
+  def progress_ratio
+    t = progress_table
+    return 0 if t.size < 1
+    t.collect { |r| r[2] }.inject(0.0) { |sum,a| sum += a } / t.size
+  end
+
+  def self.queue
+    self.where("state = 'RunningOnServer'")
+  end
+
+  protected
+  def bootstrap_components
+    if pipeline_template and (!components or components.empty?)
+      self.components = pipeline_template.components.deep_dup
+    end
+  end
+
+  def update_state
+    if components and progress_ratio == 1.0
+      self.state = Complete
+    end
+  end
+
+  def dependency_search(haystack)
+    if haystack.is_a? String
+      if (re = haystack.match /^([0-9a-f]{32}(\+[^,]+)*)+/)
+        {re[1] => true}
+      else
+        {}
+      end
+    elsif haystack.is_a? Array
+      deps = {}
+      haystack.each do |value|
+        deps.merge! dependency_search(value)
+      end
+      deps
+    elsif haystack.respond_to? :keys
+      deps = {}
+      haystack.each do |key, value|
+        deps.merge! dependency_search(value)
+      end
+      deps
+    else
+      {}
+    end
+  end
+
+  def verify_status
+    changed_attributes = self.changed
+
+    if new_record? or 'components'.in? changed_attributes
+      self.state ||= New
+      if (self.state == New) and self.components_look_ready?
+        self.state = Ready
+      end
+    end
+
+    if self.state.in?(States)
+      true
+    else
+      errors.add :state, "'#{state.inspect} must be one of: [#{States.join ', '}]"
+      false
+    end
+  end
+
+  def set_state_before_save
+    if self.components_look_ready? && (!self.state || self.state == New)
+      self.state = Ready
+    end
+  end
+
+end
diff --git a/services/api/app/models/pipeline_template.rb b/services/api/app/models/pipeline_template.rb
new file mode 100644 (file)
index 0000000..b016ce1
--- /dev/null
@@ -0,0 +1,12 @@
+# A PipelineTemplate stores reusable component definitions; a
+# PipelineInstance copies them at creation time (see
+# PipelineInstance#bootstrap_components).
+class PipelineTemplate < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  serialize :components, Hash
+
+  api_accessible :user, extend: :common do |t|
+    t.add :name
+    t.add :components
+    t.add :description
+  end
+end
diff --git a/services/api/app/models/repository.rb b/services/api/app/models/repository.rb
new file mode 100644 (file)
index 0000000..f159b48
--- /dev/null
@@ -0,0 +1,28 @@
+class Repository < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+
+  api_accessible :user, extend: :common do |t|
+    t.add :name
+    t.add :fetch_url
+    t.add :push_url
+  end
+
+  def push_url
+    super || self.name && "git@git.#{Rails.configuration.uuid_prefix}.arvadosapi.com:#{self.name}.git"
+  end
+
+  def fetch_url
+    super || push_url
+  end
+
+  protected
+
+  def permission_to_create
+    current_user and current_user.is_admin
+  end
+  def permission_to_update
+    current_user and current_user.is_admin
+  end
+end
diff --git a/services/api/app/models/specimen.rb b/services/api/app/models/specimen.rb
new file mode 100644 (file)
index 0000000..d39c612
--- /dev/null
@@ -0,0 +1,16 @@
+class Specimen < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  # properties is a free-form hash serialized into a single column.
+  serialize :properties, Hash
+
+  api_accessible :user, extend: :common do |t|
+    t.add :material
+    t.add :properties
+  end
+
+  # NOTE(review): the memoized @properties is never returned -- super
+  # wins -- so this reader appears to have no effect on the value;
+  # confirm whether it was meant to default a nil column to {}.
+  def properties
+    @properties ||= Hash.new
+    super
+  end
+end
diff --git a/services/api/app/models/trait.rb b/services/api/app/models/trait.rb
new file mode 100644 (file)
index 0000000..a59c007
--- /dev/null
@@ -0,0 +1,11 @@
+# A Trait is a named record with a free-form serialized properties hash.
+class Trait < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  serialize :properties, Hash
+
+  api_accessible :user, extend: :common do |t|
+    t.add :name
+    t.add :properties
+  end
+end
diff --git a/services/api/app/models/user.rb b/services/api/app/models/user.rb
new file mode 100644 (file)
index 0000000..a32ce39
--- /dev/null
@@ -0,0 +1,485 @@
+require 'can_be_an_owner'
+
+class User < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+  include CanBeAnOwner
+
+  serialize :prefs, Hash
+  has_many :api_client_authorizations
+  before_update :prevent_privilege_escalation
+  before_update :prevent_inactive_admin
+  before_create :check_auto_admin
+  after_create :add_system_group_permission_link
+  after_create :auto_setup_new_user
+  after_create :send_admin_notifications
+  after_update :send_profile_created_notification
+
+
+  has_many :authorized_keys, :foreign_key => :authorized_user_uuid, :primary_key => :uuid
+
+  api_accessible :user, extend: :common do |t|
+    t.add :email
+    t.add :full_name
+    t.add :first_name
+    t.add :last_name
+    t.add :identity_url
+    t.add :is_active
+    t.add :is_admin
+    t.add :is_invited
+    t.add :prefs
+    t.add :writable_by
+  end
+
+  ALL_PERMISSIONS = {read: true, write: true, manage: true}
+
+  def full_name
+    "#{first_name} #{last_name}".strip
+  end
+
+  def is_invited
+    !!(self.is_active ||
+       Rails.configuration.new_users_are_active ||
+       self.groups_i_can(:read).select { |x| x.match /-f+$/ }.first)
+  end
+
+  def groups_i_can(verb)
+    my_groups = self.group_permissions.select { |uuid, mask| mask[verb] }.keys
+    if verb == :read
+      my_groups << anonymous_group_uuid
+    end
+    my_groups
+  end
+
+  def can?(actions)
+    return true if is_admin
+    actions.each do |action, target|
+      unless target.nil?
+        if target.respond_to? :uuid
+          target_uuid = target.uuid
+        else
+          target_uuid = target
+          target = ArvadosModel.find_by_uuid(target_uuid)
+        end
+      end
+      next if target_uuid == self.uuid
+      next if (group_permissions[target_uuid] and
+               group_permissions[target_uuid][action])
+      if target.respond_to? :owner_uuid
+        next if target.owner_uuid == self.uuid
+        next if (group_permissions[target.owner_uuid] and
+                 group_permissions[target.owner_uuid][action])
+      end
+      sufficient_perms = case action
+                         when :manage
+                           ['can_manage']
+                         when :write
+                           ['can_manage', 'can_write']
+                         when :read
+                           ['can_manage', 'can_write', 'can_read']
+                         else
+                           # (Skip this kind of permission opportunity
+                           # if action is an unknown permission type)
+                         end
+      if sufficient_perms
+        # Check permission links with head_uuid pointing directly at
+        # the target object. If target is a Group, this is redundant
+        # and will fail except [a] if permission caching is broken or
+        # [b] during a race condition, where a permission link has
+        # *just* been added.
+        if Link.where(link_class: 'permission',
+                      name: sufficient_perms,
+                      tail_uuid: groups_i_can(action) + [self.uuid],
+                      head_uuid: target_uuid).any?
+          next
+        end
+      end
+      return false
+    end
+    true
+  end
+
+  def self.invalidate_permissions_cache
+    Rails.cache.delete_matched(/^groups_for_user_/)
+  end
+
+  # Return a hash of {group_uuid: perm_hash} where perm_hash[:read]
+  # and perm_hash[:write] are true if this user can read and write
+  # objects owned by group_uuid.
+  #
+  # The permission graph is built by repeatedly enumerating all
+  # permission links reachable from self.uuid, and then calling
+  # search_permissions
+  def group_permissions
+    Rails.cache.fetch "groups_for_user_#{self.uuid}" do
+      permissions_from = {}
+      todo = {self.uuid => true}
+      done = {}
+      # Build the equivalence class of permissions starting with
+      # self.uuid. On each iteration of this loop, todo contains
+      # the next set of uuids in the permission equivalence class
+      # to evaluate.
+      while !todo.empty?
+        lookup_uuids = todo.keys
+        lookup_uuids.each do |uuid| done[uuid] = true end
+        todo = {}
+        newgroups = []
+        # include all groups owned by the current set of uuids.
+        Group.where('owner_uuid in (?)', lookup_uuids).each do |group|
+          newgroups << [group.owner_uuid, group.uuid, 'can_manage']
+        end
+        # add any permission links from the current lookup_uuids to a Group.
+        Link.where('link_class = ? and tail_uuid in (?) and ' \
+                   '(head_uuid like ? or (name = ? and head_uuid like ?))',
+                   'permission',
+                   lookup_uuids,
+                   Group.uuid_like_pattern,
+                   'can_manage',
+                   User.uuid_like_pattern).each do |link|
+          newgroups << [link.tail_uuid, link.head_uuid, link.name]
+        end
+        newgroups.each do |tail_uuid, head_uuid, perm_name|
+          unless done.has_key? head_uuid
+            todo[head_uuid] = true
+          end
+          link_permissions = {}
+          case perm_name
+          when 'can_read'
+            link_permissions = {read:true}
+          when 'can_write'
+            link_permissions = {read:true,write:true}
+          when 'can_manage'
+            link_permissions = ALL_PERMISSIONS
+          end
+          permissions_from[tail_uuid] ||= {}
+          permissions_from[tail_uuid][head_uuid] ||= {}
+          link_permissions.each do |k,v|
+            permissions_from[tail_uuid][head_uuid][k] ||= v
+          end
+        end
+      end
+      search_permissions(self.uuid, permissions_from)
+    end
+  end
+
+  def self.setup(user, openid_prefix, repo_name=nil, vm_uuid=nil)
+    return user.setup_repo_vm_links(repo_name, vm_uuid, openid_prefix)
+  end
+
+  # create links
+  def setup_repo_vm_links(repo_name, vm_uuid, openid_prefix)
+    oid_login_perm = create_oid_login_perm openid_prefix
+    repo_perm = create_user_repo_link repo_name
+    vm_login_perm = create_vm_login_permission_link vm_uuid, repo_name
+    group_perm = create_user_group_link
+
+    return [oid_login_perm, repo_perm, vm_login_perm, group_perm, self].compact
+  end
+
+  # delete user signatures, login, repo, and vm perms, and mark as inactive
+  def unsetup
+    # delete oid_login_perms for this user
+    Link.destroy_all(tail_uuid: self.email,
+                     link_class: 'permission',
+                     name: 'can_login')
+
+    # delete repo_perms for this user
+    Link.destroy_all(tail_uuid: self.uuid,
+                     link_class: 'permission',
+                     name: 'can_manage')
+
+    # delete vm_login_perms for this user
+    Link.destroy_all(tail_uuid: self.uuid,
+                     link_class: 'permission',
+                     name: 'can_login')
+
+    # delete "All users" group read permissions for this user
+    group = Group.where(name: 'All users').select do |g|
+      g[:uuid].match /-f+$/
+    end.first
+    Link.destroy_all(tail_uuid: self.uuid,
+                     head_uuid: group[:uuid],
+                     link_class: 'permission',
+                     name: 'can_read')
+
+    # delete any signatures by this user
+    Link.destroy_all(link_class: 'signature',
+                     tail_uuid: self.uuid)
+
+    # delete user preferences (including profile)
+    self.prefs = {}
+
+    # mark the user as inactive
+    self.is_active = false
+    self.save!
+  end
+
+  protected
+
+  def ensure_ownership_path_leads_to_user
+    true
+  end
+
+  def permission_to_update
+    # users must be able to update themselves (even if they are
+    # inactive) in order to create sessions
+    self == current_user or super
+  end
+
+  def permission_to_create
+    current_user.andand.is_admin or
+      (self == current_user and
+       self.is_active == Rails.configuration.new_users_are_active)
+  end
+
+  def check_auto_admin
+    return if self.uuid.end_with?('anonymouspublic')
+    if (User.where("email = ?",self.email).where(:is_admin => true).count == 0 and
+        Rails.configuration.auto_admin_user and self.email == Rails.configuration.auto_admin_user) or
+       (User.where("uuid not like '%-000000000000000'").where(:is_admin => true).count == 0 and 
+        Rails.configuration.auto_admin_first_user)
+      self.is_admin = true
+      self.is_active = true
+    end
+  end
+
+  def prevent_privilege_escalation
+    if current_user.andand.is_admin
+      return true
+    end
+    if self.is_active_changed?
+      if self.is_active != self.is_active_was
+        logger.warn "User #{current_user.uuid} tried to change is_active from #{self.is_admin_was} to #{self.is_admin} for #{self.uuid}"
+        self.is_active = self.is_active_was
+      end
+    end
+    if self.is_admin_changed?
+      if self.is_admin != self.is_admin_was
+        logger.warn "User #{current_user.uuid} tried to change is_admin from #{self.is_admin_was} to #{self.is_admin} for #{self.uuid}"
+        self.is_admin = self.is_admin_was
+      end
+    end
+    true
+  end
+
+  def prevent_inactive_admin
+    if self.is_admin and not self.is_active
+      # There is no known use case for the strange set of permissions
+      # that would result from this change. It's safest to assume it's
+      # a mistake and disallow it outright.
+      raise "Admin users cannot be inactive"
+    end
+    true
+  end
+
+  def search_permissions(start, graph, merged={}, upstream_mask=nil, upstream_path={})
+    nextpaths = graph[start]
+    return merged if !nextpaths
+    return merged if upstream_path.has_key? start
+    upstream_path[start] = true
+    upstream_mask ||= ALL_PERMISSIONS
+    nextpaths.each do |head, mask|
+      merged[head] ||= {}
+      mask.each do |k,v|
+        merged[head][k] ||= v if upstream_mask[k]
+      end
+      search_permissions(head, graph, merged, upstream_mask.select { |k,v| v && merged[head][k] }, upstream_path)
+    end
+    upstream_path.delete start
+    merged
+  end
+
+  def create_oid_login_perm (openid_prefix)
+    login_perm_props = { "identity_url_prefix" => openid_prefix}
+
+    # Check oid_login_perm
+    oid_login_perms = Link.where(tail_uuid: self.email,
+                                   link_class: 'permission',
+                                   name: 'can_login').where("head_uuid = ?", self.uuid)
+
+    if !oid_login_perms.any?
+      # create openid login permission
+      oid_login_perm = Link.create(link_class: 'permission',
+                                   name: 'can_login',
+                                   tail_uuid: self.email,
+                                   head_uuid: self.uuid,
+                                   properties: login_perm_props
+                                  )
+      logger.info { "openid login permission: " + oid_login_perm[:uuid] }
+    else
+      oid_login_perm = oid_login_perms.first
+    end
+
+    return oid_login_perm
+  end
+
+  def create_user_repo_link(repo_name)
+    # repo_name is optional
+    if not repo_name
+      logger.warn ("Repository name not given for #{self.uuid}.")
+      return
+    end
+
+    # Check for an existing repository with the same name we're about to use.
+    repo = Repository.where(name: repo_name).first
+
+    if repo
+      logger.warn "Repository exists for #{repo_name}: #{repo[:uuid]}."
+
+      # Look for existing repository access for this repo
+      repo_perms = Link.where(tail_uuid: self.uuid,
+                              head_uuid: repo[:uuid],
+                              link_class: 'permission',
+                              name: 'can_manage')
+      if repo_perms.any?
+        logger.warn "User already has repository access " +
+            repo_perms.collect { |p| p[:uuid] }.inspect
+        return repo_perms.first
+      end
+    end
+
+    # create repo, if does not already exist
+    repo ||= Repository.create(name: repo_name)
+    logger.info { "repo uuid: " + repo[:uuid] }
+
+    repo_perm = Link.create(tail_uuid: self.uuid,
+                            head_uuid: repo[:uuid],
+                            link_class: 'permission',
+                            name: 'can_manage')
+    logger.info { "repo permission: " + repo_perm[:uuid] }
+    return repo_perm
+  end
+
+  # create login permission for the given vm_uuid, if it does not already exist
+  def create_vm_login_permission_link(vm_uuid, repo_name)
+    begin
+
+      # vm uuid is optional
+      if vm_uuid
+        vm = VirtualMachine.where(uuid: vm_uuid).first
+
+        if not vm
+          logger.warn "Could not find virtual machine for #{vm_uuid.inspect}"
+          raise "No vm found for #{vm_uuid}"
+        end
+      else
+        return
+      end
+
+      logger.info { "vm uuid: " + vm[:uuid] }
+
+      login_perms = Link.where(tail_uuid: self.uuid,
+                              head_uuid: vm[:uuid],
+                              link_class: 'permission',
+                              name: 'can_login')
+
+      perm_exists = false
+      login_perms.each do |perm|
+        if perm.properties['username'] == repo_name
+          perm_exists = perm
+          break
+        end
+      end
+
+      if perm_exists
+        login_perm = perm_exists
+      else
+        login_perm = Link.create(tail_uuid: self.uuid,
+                                 head_uuid: vm[:uuid],
+                                 link_class: 'permission',
+                                 name: 'can_login',
+                                 properties: {'username' => repo_name})
+        logger.info { "login permission: " + login_perm[:uuid] }
+      end
+
+      return login_perm
+    end
+  end
+
+  # add the user to the 'All users' group
+  def create_user_group_link
+    return (Link.where(tail_uuid: self.uuid,
+                       head_uuid: all_users_group[:uuid],
+                       link_class: 'permission',
+                       name: 'can_read').first or
+            Link.create(tail_uuid: self.uuid,
+                        head_uuid: all_users_group[:uuid],
+                        link_class: 'permission',
+                        name: 'can_read'))
+  end
+
+  # Give the special "System group" permission to manage this user and
+  # all of this user's stuff.
+  #
+  def add_system_group_permission_link
+    act_as_system_user do
+      Link.create(link_class: 'permission',
+                  name: 'can_manage',
+                  tail_uuid: system_group_uuid,
+                  head_uuid: self.uuid)
+    end
+  end
+
+  # Send admin notifications
+  def send_admin_notifications
+    AdminNotifier.new_user(self).deliver
+    if not self.is_active then
+      AdminNotifier.new_inactive_user(self).deliver
+    end
+  end
+
+  # Automatically setup new user during creation
+  def auto_setup_new_user
+    return true if !Rails.configuration.auto_setup_new_users
+    return true if !self.email
+    return true if self.uuid == system_user_uuid
+    return true if self.uuid == anonymous_user_uuid
+
+    if Rails.configuration.auto_setup_new_users_with_vm_uuid ||
+       Rails.configuration.auto_setup_new_users_with_repository
+      username = self.email.partition('@')[0] if self.email
+      return true if !username
+
+      blacklisted_usernames = Rails.configuration.auto_setup_name_blacklist
+      if blacklisted_usernames.include?(username)
+        return true
+      elsif !(/^[a-zA-Z][-._a-zA-Z0-9]{0,30}[a-zA-Z0-9]$/.match(username))
+        return true
+      else
+        return true if !(username = derive_unique_username username)
+      end
+    end
+
+    # setup user
+    setup_repo_vm_links(username,
+                        Rails.configuration.auto_setup_new_users_with_vm_uuid,
+                        Rails.configuration.default_openid_prefix)
+  end
+
+  # Find a username that starts with the given string and does not collide
+  # with any existing repository name or VM login name
+  def derive_unique_username username
+    while true
+      if Repository.where(name: username).empty?
+        login_collisions = Link.where(link_class: 'permission',
+                                      name: 'can_login').select do |perm|
+          perm.properties['username'] == username
+        end
+        return username if login_collisions.empty?
+      end
+      username = username + SecureRandom.random_number(10).to_s
+    end
+  end
+
+  # Send notification if the user saved profile for the first time
+  def send_profile_created_notification
+    if self.prefs_changed?
+      if self.prefs_was.andand.empty? || !self.prefs_was.andand['profile']
+        profile_notification_address = Rails.configuration.user_profile_notification_address
+        ProfileNotifier.profile_created(self, profile_notification_address).deliver if profile_notification_address
+      end
+    end
+  end
+
+end
diff --git a/services/api/app/models/user_agreement.rb b/services/api/app/models/user_agreement.rb
new file mode 100644 (file)
index 0000000..1790dea
--- /dev/null
@@ -0,0 +1,4 @@
+# A UserAgreement is just a Collection exposed under its own route.
+class UserAgreement < Collection
+  # This class exists so that Arvados::V1::SchemaController includes
+  # UserAgreementsController's methods in the discovery document.
+end
diff --git a/services/api/app/models/virtual_machine.rb b/services/api/app/models/virtual_machine.rb
new file mode 100644 (file)
index 0000000..094591e
--- /dev/null
@@ -0,0 +1,20 @@
+class VirtualMachine < ArvadosModel
+  include HasUuid
+  include KindAndEtag
+  include CommonApiTemplate
+
+  # All 'can_login' permission links pointing at this VM.
+  # NOTE(review): the string :conditions option is Rails-3 style;
+  # revisit before any Rails upgrade.
+  has_many :login_permissions, :foreign_key => :head_uuid, :class_name => 'Link', :primary_key => :uuid, :conditions => "link_class = 'permission' and name = 'can_login'"
+
+  api_accessible :user, extend: :common do |t|
+    t.add :hostname
+  end
+
+  protected
+
+  # Only administrators may create or modify virtual machine records.
+  def permission_to_create
+    current_user and current_user.is_admin
+  end
+  def permission_to_update
+    current_user and current_user.is_admin
+  end
+end
diff --git a/services/api/app/views/admin_notifier/new_inactive_user.text.erb b/services/api/app/views/admin_notifier/new_inactive_user.text.erb
new file mode 100644 (file)
index 0000000..53a2b98
--- /dev/null
@@ -0,0 +1,13 @@
+
+A new user landed on the inactive user page:
+
+  <%= @user.full_name %> <<%= @user.email %>>
+
+<% if Rails.configuration.workbench_address -%>
+Please see workbench for more information:
+
+  <%= Rails.configuration.workbench_address %>
+
+<% end -%>
+Thanks,
+Your friendly Arvados robot.
diff --git a/services/api/app/views/admin_notifier/new_user.text.erb b/services/api/app/views/admin_notifier/new_user.text.erb
new file mode 100644 (file)
index 0000000..88ecbe3
--- /dev/null
@@ -0,0 +1,21 @@
+<%
+  add_to_message = ''
+  if Rails.configuration.auto_setup_new_users
+    add_to_message = @user.is_invited ? ' and setup' : ', but not setup'
+  end
+%>
+A new user has been created<%=add_to_message%>:
+
+  <%= @user.full_name %> <<%= @user.email %>>
+
+This user is <%= @user.is_active ? '' : 'NOT ' %>active.
+
+<% if Rails.configuration.workbench_address -%>
+Please see workbench for more information:
+
+  <%= Rails.configuration.workbench_address %>
+
+<% end -%>
+Thanks,
+Your friendly Arvados robot.
+
diff --git a/services/api/app/views/layouts/application.html.erb b/services/api/app/views/layouts/application.html.erb
new file mode 100644 (file)
index 0000000..e54fa0c
--- /dev/null
@@ -0,0 +1,45 @@
+<!DOCTYPE html>
+<html>
+<head>
+  <title>Server</title>
+  <%= stylesheet_link_tag    "application" %>
+  <%= javascript_include_tag "application" %>
+  <%= csrf_meta_tags %>
+</head>
+<body>
+<div id="header">
+  <div class="apptitle">ARVADOS <span class="beta"><span>BETA</span></span></div>
+  <div style="float:right">
+    <% if current_user %>
+    <%= current_user.full_name %>
+    <% if current_user.is_admin %>
+    &nbsp;&bull;&nbsp;
+    <a class="logout" href="/admin/users">Admin</a>
+    <% end %>
+    &nbsp;&bull;&nbsp;
+    <a class="logout" href="/logout">Log out</a>
+    <% else %>
+    <a class="logout" href="/auth/joshid">Log in</a>
+    <% end %>
+
+    <% if current_user and session[:real_uid] and session[:switch_back_to] and User.find(session[:real_uid].to_i).verify_userswitch_cookie(session[:switch_back_to]) %>
+    &nbsp;&bull;&nbsp;
+    <span class="sudo-warning">Logged in as <b><%= current_user.full_name %></b>. <%= link_to "Back to #{User.find(session[:real_uid]).full_name}", switch_to_user_path(session[:real_uid]), :method => :post, :class => 'sudo-logout' %></span>
+    <% end %>
+  </div>
+</div>
+
+
+<%= yield %>
+
+<div style="clear:both"></div>
+
+<% if current_user or session['invite_code'] %>
+<div id="footer">
+  <div style="float:right">Questions &rarr; <a href="mailto:arvados@curoverse.com">arvados@curoverse.com</a></div>
+  <div style="clear:both"></div>
+</div>
+<% end %>
+
+</body>
+</html>
diff --git a/services/api/app/views/profile_notifier/profile_created.text.erb b/services/api/app/views/profile_notifier/profile_created.text.erb
new file mode 100644 (file)
index 0000000..73adf28
--- /dev/null
@@ -0,0 +1,2 @@
+Profile created by user <%=@user.full_name%> <%=@user.email%>
+User's profile: <%=@user.prefs['profile']%>
diff --git a/services/api/app/views/static/intro.html.erb b/services/api/app/views/static/intro.html.erb
new file mode 100644 (file)
index 0000000..8ab41cb
--- /dev/null
@@ -0,0 +1,38 @@
+<% content_for :js do %>
+$(function(){
+  $('button.login').button().click(function(){window.location=$(this).attr('href')});
+});
+<% end %>
+<div id="intropage">
+  <img class="curoverse-logo" src="<%= asset_path('logo.png') %>" style="display:block; margin:2em auto"/>
+  <div style="width:30em; margin:2em auto 0 auto">
+    <h1>Welcome</h1>
+    <h4>Curoverse ARVADOS</h4>
+
+    <% if !current_user and session['invite_code'] %>
+
+    <p>Curoverse Arvados lets you manage and process human genomes and exomes.  You can start using the private beta
+    now with your Google account.</p>
+    <p style="float:right;margin-top:1em">
+      <button class="login" href="/auth/joshid">Log in and get started</button>
+    </p>
+
+    <% else %>
+
+    <p>Curoverse ARVADOS is transforming how researchers and
+    clinical geneticists use whole genome sequences. </p>
+    <p>If you&rsquo;re interested in learning more, we&rsquo;d love to hear
+    from you &mdash;
+    contact <a href="mailto:arvados@curoverse.com">arvados@curoverse.com</a>.</p>
+
+    <% if !current_user %>
+    <p style="float:right;margin-top:1em">
+      <a href="/auth/joshid">Log in here.</a>
+    </p>
+    <% end %>
+
+    <% end %>
+
+    <div style="clear:both;height:8em"></div>
+  </div>
+</div>
diff --git a/services/api/app/views/static/login_failure.html.erb b/services/api/app/views/static/login_failure.html.erb
new file mode 100644 (file)
index 0000000..830942c
--- /dev/null
@@ -0,0 +1,22 @@
+<% content_for :js do %>
+$(function(){
+  $('button.login').button().click(function(){window.location=$(this).attr('href')});
+});
+<% end %>
+
+
+<div id="intropage">
+  <img class="curoverse-logo" src="<%= asset_path('logo.png') %>" style="display:block; margin:2em auto"/>
+  <div style="width:30em; margin:2em auto 0 auto">
+
+    <h1>Error</h1>
+
+    <p>Sorry, something went wrong logging you in. Please try again.</p>
+
+    <p style="float:right;margin-top:1em">
+      <a href="/auth/joshid">Log in here.</a>
+    </p>
+
+    <div style="clear:both;height:8em"></div>
+  </div>
+</div>
diff --git a/services/api/app/views/user_notifier/account_is_setup.text.erb b/services/api/app/views/user_notifier/account_is_setup.text.erb
new file mode 100644 (file)
index 0000000..5d8c9e7
--- /dev/null
@@ -0,0 +1,13 @@
+<% if not @user.full_name.empty? -%>
+<%= @user.full_name %>,
+<% else -%>
+Hi there,
+<% end -%>
+
+Your Arvados account has been set up. You can log in with your Google account
+associated with the e-mail address <%= @user.email %><% if Rails.configuration.workbench_address %> at:
+
+  <%= Rails.configuration.workbench_address %><% else %>.<% end %>
+
+Thanks,
+The Arvados team.
diff --git a/services/api/app/views/user_sessions/failure.html.erb b/services/api/app/views/user_sessions/failure.html.erb
new file mode 100644 (file)
index 0000000..cdea96d
--- /dev/null
@@ -0,0 +1,6 @@
+<h1>Fail</h1>
+
+<%= notice %>
+
+<br/>
+<a href="/auth/joshid">Retry Login</a>
diff --git a/services/api/config.ru b/services/api/config.ru
new file mode 100644 (file)
index 0000000..db437d4
--- /dev/null
@@ -0,0 +1,4 @@
+# This file is used by Rack-based servers to start the application.
+
+require ::File.expand_path('../config/environment',  __FILE__)
+run Server::Application
diff --git a/services/api/config/application.default.yml b/services/api/config/application.default.yml
new file mode 100644 (file)
index 0000000..ed2c533
--- /dev/null
@@ -0,0 +1,248 @@
+# Do not use this file for site configuration. Create application.yml
+# instead (see application.yml.example).
+
+development:
+  force_ssl: false
+  cache_classes: false
+  whiny_nils: true
+  consider_all_requests_local: true
+  action_controller.perform_caching: false
+  action_mailer.raise_delivery_errors: false
+  action_mailer.perform_deliveries: false
+  active_support.deprecation: :log
+  action_dispatch.best_standards_support: :builtin
+  active_record.mass_assignment_sanitizer: :strict
+  active_record.auto_explain_threshold_in_seconds: 0.5
+  assets.compress: false
+  assets.debug: true
+  local_modified: "<%= '-modified' if `git status -s` != '' %>"
+
+production:
+  force_ssl: true
+  cache_classes: true
+  consider_all_requests_local: false
+  action_controller.perform_caching: true
+  serve_static_assets: false
+  assets.compress: true
+  assets.compile: false
+  assets.digest: true
+
+test:
+  force_ssl: false
+  cache_classes: true
+  serve_static_assets: true
+  static_cache_control: public, max-age=3600
+  whiny_nils: true
+  consider_all_requests_local: true
+  action_controller.perform_caching: false
+  action_dispatch.show_exceptions: false
+  action_controller.allow_forgery_protection: false
+  action_mailer.delivery_method: :test
+  active_support.deprecation: :stderr
+  active_record.mass_assignment_sanitizer: :strict
+  uuid_prefix: zzzzz
+  secret_token: <%= rand(2**512).to_s(36) %>
+  blob_signing_key: zfhgfenhffzltr9dixws36j1yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc
+  user_profile_notification_address: arvados@example.com
+  workbench_address: https://localhost:3001/
+  websocket_address: ws://127.0.0.1:3333/websocket
+
+common:
+  uuid_prefix: <%= Digest::MD5.hexdigest(`hostname`).to_i(16).to_s(36)[0..4] %>
+
+  # If not false, this is the hostname that will be used for root_url and
+  # advertised in the discovery document.  By default, use the default Rails
+  # logic for deciding on a hostname.
+  host: false
+
+  # If this is not false, HTML requests at the API server's root URL
+  # are redirected to this location, and it is provided in the text of
+  # user activation notification email messages to remind them where
+  # to log in.
+  workbench_address: false
+
+  # Git repositories must be readable by api server, or you won't be
+  # able to submit crunch jobs. To pass the test suites, put a clone
+  # of the arvados tree in {git_repositories_dir}/arvados.git or
+  # {git_repositories_dir}/arvados/.git
+  git_repositories_dir: /var/lib/arvados/git
+
+  # This is a (bare) repository that stores commits used in jobs.  When a job
+  # runs, the source commits are first fetched into this repository, then this
+  # repository is used to deploy to compute nodes.  This should NOT be a
+  # subdirectory of {git_repositiories_dir}.
+  git_internal_dir: /var/lib/arvados/internal.git
+
+  # :none or :slurm_immediate
+  crunch_job_wrapper: :none
+
+  # username, or false = do not set uid when running jobs.
+  crunch_job_user: crunch
+
+  # The web service must be able to create/write this file, and
+  # crunch-job must be able to stat() it.
+  crunch_refresh_trigger: /tmp/crunch_refresh_trigger
+
+  # These two settings control how frequently log events are flushed to the
+  # database.  Log lines are buffered until either crunch_log_bytes_per_event
+  # has been reached or crunch_log_seconds_between_events has elapsed since
+  # the last flush.
+  crunch_log_bytes_per_event: 4096
+  crunch_log_seconds_between_events: 1
+
+  # The sample period for throttling logs, in seconds.
+  crunch_log_throttle_period: 60
+
+  # Maximum number of bytes that job can log over crunch_log_throttle_period
+  # before being silenced until the end of the period.
+  crunch_log_throttle_bytes: 65536
+
+  # Maximum number of lines that job can log over crunch_log_throttle_period
+  # before being silenced until the end of the period.
+  crunch_log_throttle_lines: 1024
+
+  # Maximum bytes that may be logged by a single job.  Log bytes that are
+  # silenced by throttling are not counted against this total.
+  crunch_limit_log_bytes_per_job: 67108864
+
+  # Path to dns server configuration directory (e.g. /etc/unbound.d/conf.d),
+  # or false = do not update dns server data.
+  dns_server_conf_dir: false
+
+  # Template for the dns server host snippets. See unbound.template in this directory for
+  # an example. Set to false to disable.
+  dns_server_conf_template: false
+
+  # Dns server reload command, or false = do not reload dns server after data change
+  dns_server_reload_command: false
+
+  # Example for unbound
+  #dns_server_conf_dir: /etc/unbound/conf.d
+  #dns_server_conf_template: /path/to/your/api/server/config/unbound.template
+  #dns_server_reload_command: /etc/init.d/unbound reload
+
+  compute_node_domain: false
+  compute_node_nameservers:
+    - 192.168.1.1
+
+  # The version below is suitable for AWS.
+  # To use it, copy it to your application.yml, uncomment, and change <%# to <%=
+  # compute_node_nameservers: <%#
+  #   require 'net/http'
+  #   ['local', 'public'].collect do |iface|
+  #     Net::HTTP.get(URI("http://169.254.169.254/latest/meta-data/#{iface}-ipv4")).match(/^[\d\.]+$/)[0]
+  #   end << '172.16.0.23'
+  # %>
+
+  accept_api_token: {}
+
+  # When new_users_are_active is set to true, the user agreement check is skipped.
+  new_users_are_active: false
+
+  admin_notifier_email_from: arvados@example.com
+  email_subject_prefix: "[ARVADOS] "
+  user_notifier_email_from: arvados@example.com
+  new_user_notification_recipients: [ ]
+  new_inactive_user_notification_recipients: [ ]
+
+  # The e-mail address of the user you would like to become marked as an admin
+  # user on their first login.
+  # In the default configuration, authentication happens through the Arvados SSO
+  # server, which uses openid against Google's servers, so in that case this
+  # should be an address associated with a Google account.
+  auto_admin_user: false
+
+  # If auto_admin_first_user is set to true, the first user to log in when no
+  # other admin users exist will automatically become an admin user.
+  auto_admin_first_user: false
+
+  ## Set Time.zone default to the specified zone and make Active
+  ## Record auto-convert to this zone.  Run "rake -D time" for a list
+  ## of tasks for finding time zone names. Default is UTC.
+  #time_zone: Central Time (US & Canada)
+
+  ## Default encoding used in templates for Ruby 1.9.
+  encoding: utf-8
+
+  # Enable the asset pipeline
+  assets.enabled: true
+
+  # Version of your assets, change this if you want to expire all your assets
+  assets.version: "1.0"
+
+  arvados_theme: default
+
+  # The ARVADOS_WEBSOCKETS environment variable determines whether to
+  # serve http, websockets, or both.
+  #
+  # If ARVADOS_WEBSOCKETS="true", http and websockets are both served
+  # from the same process.
+  #
+  # If ARVADOS_WEBSOCKETS="ws-only", only websockets is served.
+  #
+  # If ARVADOS_WEBSOCKETS="false" or not set at all, only http is
+  # served. In this case, you should have a separate process serving
+  # websockets, and the address of that service should be given here
+  # as websocket_address.
+  #
+  # If websocket_address is false (which is the default), the
+  # discovery document will tell clients to use the current server as
+  # the websocket service, or (if the current server does not have
+  # websockets enabled) not to use websockets at all.
+  #
+  # Example: Clients will connect to the specified endpoint.
+  #websocket_address: wss://127.0.0.1:3333/websocket
+  # Default: Clients will connect to this server if it's running
+  # websockets, otherwise none at all.
+  websocket_address: false
+
+  # blob_signing_key is a string of alphanumeric characters used to
+  # generate permission signatures for Keep locators. It must be
+  # identical to the permission key given to Keep. IMPORTANT: This is
+  # a site secret. It should be at least 50 characters.
+  blob_signing_key: ~
+
+  # Amount of time (in seconds) for which a blob permission signature
+  # remains valid.  Default: 2 weeks (1209600 seconds)
+  blob_signing_ttl: 1209600
+
+  # Allow clients to create collections by providing a manifest with
+  # unsigned data blob locators. IMPORTANT: This effectively disables
+  # access controls for data stored in Keep: a client who knows a hash
+  # can write a manifest that references the hash, pass it to
+  # collections.create (which will create a permission link), use
+  # collections.get to obtain a signature for that data locator, and
+  # use that signed locator to retrieve the data from Keep. Therefore,
+  # do not turn this on if your users expect to keep data private from
+  # one another!
+  permit_create_collection_with_unsigned_manifest: false
+
+  # secret_token is a string of alphanumeric characters used by Rails
+  # to sign session tokens. IMPORTANT: This is a site secret. It
+  # should be at least 50 characters.
+  secret_token: ~
+
+  # email address to which mail should be sent when the user creates profile for the first time
+  user_profile_notification_address: false
+
+  default_openid_prefix: https://www.google.com/accounts/o8/id
+
+  # Config parameters to automatically setup new users.
+  # The params auto_setup_new_users_with_* are meaningful only when auto_setup_new_users is turned on.
+  # auto_setup_name_blacklist is a list of usernames to be blacklisted for auto setup.
+  auto_setup_new_users: false
+  auto_setup_new_users_with_vm_uuid: false
+  auto_setup_new_users_with_repository: false
+  auto_setup_name_blacklist: [arvados, git, gitolite, gitolite-admin, root, syslog]
+
+  # source_version
+  source_version: "<%= `git log -n 1 --format=%h`.strip %>"
+  local_modified: false
+
+  # Default lifetime for ephemeral collections: 2 weeks.
+  default_trash_lifetime: 1209600
+
+  # Permit insecure (OpenSSL::SSL::VERIFY_NONE) connections to the Single Sign
+  # On (sso) server.  Should only be enabled during development when the SSO
+  # server is using a self-signed cert.
+  sso_insecure: false
\ No newline at end of file
diff --git a/services/api/config/application.rb b/services/api/config/application.rb
new file mode 100644 (file)
index 0000000..4211df2
--- /dev/null
@@ -0,0 +1,39 @@
+require File.expand_path('../boot', __FILE__)
+
+require 'rails/all'
+require 'digest'
+
+if defined?(Bundler)
+  # If you precompile assets before deploying to production, use this line
+  Bundler.require(*Rails.groups(:assets => %w(development test)))
+  # If you want your assets lazily compiled in production, use this line
+  # Bundler.require(:default, :assets, Rails.env)
+end
+
+module Server
+  class Application < Rails::Application
+    # Settings in config/environments/* take precedence over those specified here.
+    # Application configuration should go into files in config/initializers
+    # -- all .rb files in that directory are automatically loaded.
+
+    # Custom directories with classes and modules you want to be autoloadable.
+    # config.autoload_paths += %W(#{config.root}/extras)
+
+    # Only load the plugins named here, in the order given (default is alphabetical).
+    # :all can be used as a placeholder for all plugins not explicitly named.
+    # config.plugins = [ :exception_notification, :ssl_requirement, :all ]
+
+    # Activate observers that should always be running.
+    # config.active_record.observers = :cacher, :garbage_collector, :forum_observer
+    config.active_record.schema_format = :sql
+
+    # The default locale is :en and all translations from config/locales/*.rb,yml are auto loaded.
+    # config.i18n.load_path += Dir[Rails.root.join('my', 'locales', '*.{rb,yml}').to_s]
+    # config.i18n.default_locale = :de
+
+    # Configure sensitive parameters which will be filtered from the log file.
+    config.filter_parameters += [:password]
+
+    I18n.enforce_available_locales = false
+  end
+end
diff --git a/services/api/config/application.yml.example b/services/api/config/application.yml.example
new file mode 100644 (file)
index 0000000..c3e599f
--- /dev/null
@@ -0,0 +1,34 @@
+# Copy this file to application.yml and edit to suit.
+#
+# Consult application.default.yml for the full list of configuration
+# settings.
+#
+# The order of precedence is:
+# 1. config/environments/{RAILS_ENV}.rb (deprecated)
+# 2. Section in application.yml corresponding to RAILS_ENV (e.g., development)
+# 3. Section in application.yml called "common"
+# 4. Section in application.default.yml corresponding to RAILS_ENV
+# 5. Section in application.default.yml called "common"
+
+development:
+  # Mandatory site secrets. See application.default.yml for more info.
+  secret_token: ~
+  blob_signing_key: ~
+  uuid_prefix: bogus
+  workbench_address: https://localhost:3031
+
+production:
+  # Mandatory site secrets. See application.default.yml for more info.
+  secret_token: ~
+  blob_signing_key: ~
+  uuid_prefix: bogus
+  workbench_address: https://workbench.bogus.arvadosapi.com
+
+test:
+  # Tests should be able to run without further configuration, but if you do
+  # want to change your local test configuration, this is where to do it.
+
+common:
+  # Settings in this section will be used in all environments
+  # (development, production, test) except when overridden in the
+  # environment-specific sections above.
diff --git a/services/api/config/boot.rb b/services/api/config/boot.rb
new file mode 100644 (file)
index 0000000..4489e58
--- /dev/null
@@ -0,0 +1,6 @@
+require 'rubygems'
+
+# Set up gems listed in the Gemfile.
+ENV['BUNDLE_GEMFILE'] ||= File.expand_path('../../Gemfile', __FILE__)
+
+require 'bundler/setup' if File.exists?(ENV['BUNDLE_GEMFILE'])
diff --git a/services/api/config/database.yml.sample b/services/api/config/database.yml.sample
new file mode 100644 (file)
index 0000000..6a27016
--- /dev/null
@@ -0,0 +1,24 @@
+development:
+  adapter: postgresql
+  encoding: utf8
+  database: arvados_development
+  username: arvados
+  password: xxxxxxxx
+  host: localhost
+
+test:
+  adapter: postgresql
+  encoding: utf8
+  database: arvados_test
+  username: arvados
+  password: xxxxxxxx
+  host: localhost
+
+production:
+  adapter: postgresql
+  encoding: utf8
+  database: arvados_production
+  username: arvados
+  password: xxxxxxxx
+  host: localhost
+
diff --git a/services/api/config/environment.rb b/services/api/config/environment.rb
new file mode 100644 (file)
index 0000000..4ccec85
--- /dev/null
@@ -0,0 +1,12 @@
+# Load the rails application
+require File.expand_path('../application', __FILE__)
+require 'josh_id'
+
+# Initialize the rails application
+Server::Application.initialize!
+begin
+  Rails.cache.clear
+rescue Errno::ENOENT => e
+  # Cache directory does not exist? Then cache is clear, proceed.
+  Rails.logger.warn "In Rails.cache.clear, ignoring #{e.inspect}"
+end
diff --git a/services/api/config/environments/development.rb.example b/services/api/config/environments/development.rb.example
new file mode 100644 (file)
index 0000000..b6c4c92
--- /dev/null
@@ -0,0 +1,41 @@
+Server::Application.configure do
+  # Settings specified here will take precedence over those in config/application.rb
+
+  # In the development environment your application's code is reloaded on
+  # every request.  This slows down response time but is perfect for development
+  # since you don't have to restart the web server when you make code changes.
+  config.cache_classes = false
+
+  # Log error messages when you accidentally call methods on nil.
+  config.whiny_nils = true
+
+  # Show full error reports and disable caching
+  config.consider_all_requests_local       = true
+  config.action_controller.perform_caching = false
+
+  # Don't care if the mailer can't send
+  config.action_mailer.raise_delivery_errors = false
+  config.action_mailer.perform_deliveries = false
+
+  # Print deprecation notices to the Rails logger
+  config.active_support.deprecation = :log
+
+  # Only use best-standards-support built into browsers
+  config.action_dispatch.best_standards_support = :builtin
+
+  # Raise exception on mass assignment protection for Active Record models
+  config.active_record.mass_assignment_sanitizer = :strict
+
+  # Log the query plan for queries taking more than this (works
+  # with SQLite, MySQL, and PostgreSQL)
+  config.active_record.auto_explain_threshold_in_seconds = 0.5
+
+  # Do not compress assets
+  config.assets.compress = false
+
+  # Expands the lines which load the assets
+  config.assets.debug = true
+
+  config.force_ssl = false
+
+end
diff --git a/services/api/config/environments/production.rb.example b/services/api/config/environments/production.rb.example
new file mode 100644 (file)
index 0000000..c1092d3
--- /dev/null
@@ -0,0 +1,62 @@
+Server::Application.configure do
+  # Settings specified here will take precedence over those in config/application.rb
+
+  # Code is not reloaded between requests
+  config.cache_classes = true
+
+  # Full error reports are disabled and caching is turned on
+  config.consider_all_requests_local       = false
+  config.action_controller.perform_caching = true
+
+  # Disable Rails's static asset server (Apache or nginx will already do this)
+  config.serve_static_assets = false
+
+  # Compress JavaScripts and CSS
+  config.assets.compress = true
+
+  # Don't fallback to assets pipeline if a precompiled asset is missed
+  config.assets.compile = false
+
+  # Generate digests for assets URLs
+  config.assets.digest = true
+
+  # Defaults to Rails.root.join("public/assets")
+  # config.assets.manifest = YOUR_PATH
+
+  # Specifies the header that your server uses for sending files
+  # config.action_dispatch.x_sendfile_header = "X-Sendfile" # for apache
+  # config.action_dispatch.x_sendfile_header = 'X-Accel-Redirect' # for nginx
+
+  # Force all access to the app over SSL, use Strict-Transport-Security, and use secure cookies.
+  # config.force_ssl = true
+
+  # See everything in the log (default is :info)
+  # config.log_level = :debug
+
+  # Use a different logger for distributed setups
+  # config.logger = SyslogLogger.new
+
+  # Use a different cache store in production
+  # config.cache_store = :mem_cache_store
+
+  # Enable serving of images, stylesheets, and JavaScripts from an asset server
+  # config.action_controller.asset_host = "http://assets.example.com"
+
+  # Precompile additional assets (application.js, application.css, and all non-JS/CSS are already added)
+  # config.assets.precompile += %w( search.js )
+
+  # Disable delivery errors, bad email addresses will be ignored
+  # config.action_mailer.raise_delivery_errors = false
+  # config.action_mailer.perform_deliveries = true
+
+  # Enable threaded mode
+  # config.threadsafe!
+
+  # Enable locale fallbacks for I18n (makes lookups for any locale fall back to
+  # the I18n.default_locale when a translation can not be found)
+  config.i18n.fallbacks = true
+
+  # Send deprecation notices to registered listeners
+  config.active_support.deprecation = :notify
+
+end
diff --git a/services/api/config/environments/test.rb.example b/services/api/config/environments/test.rb.example
new file mode 100644 (file)
index 0000000..5baf09d
--- /dev/null
@@ -0,0 +1,49 @@
+Server::Application.configure do
+  # Settings specified here will take precedence over those in config/application.rb
+
+  # The test environment is used exclusively to run your application's
+  # test suite.  You never need to work with it otherwise.  Remember that
+  # your test database is "scratch space" for the test suite and is wiped
+  # and recreated between test runs.  Don't rely on the data there!
+  config.cache_classes = true
+
+  # Configure static asset server for tests with Cache-Control for performance
+  config.serve_static_assets = true
+  config.static_cache_control = "public, max-age=3600"
+
+  # Log error messages when you accidentally call methods on nil
+  config.whiny_nils = true
+
+  # Show full error reports and disable caching
+  config.consider_all_requests_local       = true
+  config.action_controller.perform_caching = false
+
+  # Raise exceptions instead of rendering exception templates
+  config.action_dispatch.show_exceptions = false
+
+  # Disable request forgery protection in test environment
+  config.action_controller.allow_forgery_protection    = false
+
+  # Tell Action Mailer not to deliver emails to the real world.
+  # The :test delivery method accumulates sent emails in the
+  # ActionMailer::Base.deliveries array.
+  config.action_mailer.delivery_method = :test
+
+  # Use SQL instead of Active Record's schema dumper when creating the test database.
+  # This is necessary if your schema can't be completely dumped by the schema dumper,
+  # like if you have constraints or database-specific column types
+  # config.active_record.schema_format = :sql
+
+  # Print deprecation notices to the stderr
+  config.active_support.deprecation = :stderr
+
+  # Raise exception on mass assignment protection for Active Record models
+  config.active_record.mass_assignment_sanitizer = :strict
+
+  # No need for SSL while testing
+  config.force_ssl = false
+
+  # I18n likes to warn when this variable is not set
+  I18n.enforce_available_locales = true
+
+end
diff --git a/services/api/config/initializers/andand.rb b/services/api/config/initializers/andand.rb
new file mode 100644 (file)
index 0000000..c3930d9
--- /dev/null
@@ -0,0 +1 @@
+require 'andand'
diff --git a/services/api/config/initializers/authorization.rb b/services/api/config/initializers/authorization.rb
new file mode 100644 (file)
index 0000000..08189fa
--- /dev/null
@@ -0,0 +1,5 @@
+Server::Application.configure do
+  config.middleware.delete ActionDispatch::RemoteIp
+  config.middleware.insert 0, ActionDispatch::RemoteIp
+  config.middleware.insert 1, ArvadosApiToken
+end
diff --git a/services/api/config/initializers/backtrace_silencers.rb b/services/api/config/initializers/backtrace_silencers.rb
new file mode 100644 (file)
index 0000000..59385cd
--- /dev/null
@@ -0,0 +1,7 @@
+# Be sure to restart your server when you modify this file.
+
+# You can add backtrace silencers for libraries that you're using but don't wish to see in your backtraces.
+# Rails.backtrace_cleaner.add_silencer { |line| line =~ /my_noisy_library/ }
+
+# You can also remove all the silencers if you're trying to debug a problem that might stem from framework code.
+# Rails.backtrace_cleaner.remove_silencers!
diff --git a/services/api/config/initializers/common_api_template.rb b/services/api/config/initializers/common_api_template.rb
new file mode 100644 (file)
index 0000000..b30bda7
--- /dev/null
@@ -0,0 +1 @@
+require 'common_api_template'
diff --git a/services/api/config/initializers/current_api_client.rb b/services/api/config/initializers/current_api_client.rb
new file mode 100644 (file)
index 0000000..6680266
--- /dev/null
@@ -0,0 +1 @@
+require 'current_api_client'
diff --git a/services/api/config/initializers/eventbus.rb b/services/api/config/initializers/eventbus.rb
new file mode 100644 (file)
index 0000000..ea1c210
--- /dev/null
@@ -0,0 +1,19 @@
+require 'eventbus'
+
+# See application.yml for details about configuring the websocket service.
+
+Server::Application.configure do
+  # Enables websockets if ARVADOS_WEBSOCKETS is defined with any value.  If
+  # ARVADOS_WEBSOCKETS=ws-only, server will only accept websocket connections
+  # and return an error response for all other requests.
+  if ENV['ARVADOS_WEBSOCKETS']
+    config.middleware.insert_after ArvadosApiToken, RackSocket, {
+      :handler => EventBus,
+      :mount => "/websocket",
+      :websocket_only => (ENV['ARVADOS_WEBSOCKETS'] == "ws-only")
+    }
+    Rails.logger.info "Websockets #{ENV['ARVADOS_WEBSOCKETS']}, running at /websocket"
+  else
+    Rails.logger.info "Websockets disabled"
+  end
+end
diff --git a/services/api/config/initializers/hardcoded_api_tokens.rb.example b/services/api/config/initializers/hardcoded_api_tokens.rb.example
new file mode 100644 (file)
index 0000000..6339bf6
--- /dev/null
@@ -0,0 +1,3 @@
+Server::Application.configure do
+  config.accept_api_token = { 'foobar' => true }
+end
diff --git a/services/api/config/initializers/inflections.rb b/services/api/config/initializers/inflections.rb
new file mode 100644 (file)
index 0000000..79bca3a
--- /dev/null
@@ -0,0 +1,17 @@
+# Be sure to restart your server when you modify this file.
+
+# Add new inflection rules using the following format
+# (all these examples are active by default):
+# ActiveSupport::Inflector.inflections do |inflect|
+#   inflect.plural /^(ox)$/i, '\1en'
+#   inflect.singular /^(ox)en/i, '\1'
+#   inflect.irregular 'person', 'people'
+#   inflect.uncountable %w( fish sheep )
+# end
+
+ActiveSupport::Inflector.inflections do |inflect|
+  inflect.plural /^([Ss]pecimen)$/i, '\1s'
+  inflect.singular /^([Ss]pecimen)s?/i, '\1'
+  inflect.plural /^([Hh]uman)$/i, '\1s'
+  inflect.singular /^([Hh]uman)s?/i, '\1'
+end
diff --git a/services/api/config/initializers/kind_and_etag.rb b/services/api/config/initializers/kind_and_etag.rb
new file mode 100644 (file)
index 0000000..ea214fd
--- /dev/null
@@ -0,0 +1 @@
+require 'kind_and_etag'
diff --git a/services/api/config/initializers/mime_types.rb b/services/api/config/initializers/mime_types.rb
new file mode 100644 (file)
index 0000000..72aca7e
--- /dev/null
@@ -0,0 +1,5 @@
+# Be sure to restart your server when you modify this file.
+
+# Add new mime types for use in respond_to blocks:
+# Mime::Type.register "text/richtext", :rtf
+# Mime::Type.register_alias "text/html", :iphone
diff --git a/services/api/config/initializers/net_http.rb b/services/api/config/initializers/net_http.rb
new file mode 100644 (file)
index 0000000..5c4dc01
--- /dev/null
@@ -0,0 +1 @@
+require 'net/http'
diff --git a/services/api/config/initializers/omniauth.rb.example b/services/api/config/initializers/omniauth.rb.example
new file mode 100644 (file)
index 0000000..aefcf56
--- /dev/null
@@ -0,0 +1,13 @@
+# Change this omniauth configuration to point to your registered provider
+# Since this is a registered application, add the app id and secret here
+APP_ID = 'arvados-server'
+APP_SECRET = rand(2**512).to_s(36) # CHANGE ME!
+
+# Update your custom Omniauth provider URL here
+CUSTOM_PROVIDER_URL = 'http://localhost:3002'
+
+Rails.application.config.middleware.use OmniAuth::Builder do
+  provider :josh_id, APP_ID, APP_SECRET, CUSTOM_PROVIDER_URL
+end
+
+OmniAuth.config.on_failure = StaticController.action(:login_failure)
diff --git a/services/api/config/initializers/schema_discovery_cache.rb b/services/api/config/initializers/schema_discovery_cache.rb
new file mode 100644 (file)
index 0000000..1f8a7b1
--- /dev/null
@@ -0,0 +1,5 @@
+# Delete the cached discovery document during startup. Otherwise we
+# might still serve an old discovery document after updating the
+# schema and restarting the server.
+
+Rails.cache.delete 'arvados_v1_rest_discovery'
diff --git a/services/api/config/initializers/session_store.rb b/services/api/config/initializers/session_store.rb
new file mode 100644 (file)
index 0000000..44293cc
--- /dev/null
@@ -0,0 +1,8 @@
+# Be sure to restart your server when you modify this file.
+
+Server::Application.config.session_store :cookie_store, :key => '_server_session'
+
+# Use the database for sessions instead of the cookie-based default,
+# which shouldn't be used to store highly confidential information
+# (create the session table with "rails generate session_migration")
+# Server::Application.config.session_store :active_record_store
diff --git a/services/api/config/initializers/wrap_parameters.rb b/services/api/config/initializers/wrap_parameters.rb
new file mode 100644 (file)
index 0000000..da4fb07
--- /dev/null
@@ -0,0 +1,14 @@
+# Be sure to restart your server when you modify this file.
+#
+# This file contains settings for ActionController::ParamsWrapper which
+# is enabled by default.
+
+# Enable parameter wrapping for JSON. You can disable this by setting :format to an empty array.
+ActiveSupport.on_load(:action_controller) do
+  wrap_parameters :format => [:json]
+end
+
+# Disable root element in JSON by default.
+ActiveSupport.on_load(:active_record) do
+  self.include_root_in_json = false
+end
diff --git a/services/api/config/initializers/zz_load_config.rb b/services/api/config/initializers/zz_load_config.rb
new file mode 100644 (file)
index 0000000..3399fd9
--- /dev/null
@@ -0,0 +1,46 @@
+$application_config = {}
+
+%w(application.default application).each do |cfgfile|
+  path = "#{::Rails.root.to_s}/config/#{cfgfile}.yml"
+  if File.exists? path
+    yaml = ERB.new(IO.read path).result(binding)
+    confs = YAML.load(yaml)
+    $application_config.merge!(confs['common'] || {})
+    $application_config.merge!(confs[::Rails.env.to_s] || {})
+  end
+end
+
+Server::Application.configure do
+  nils = []
+  $application_config.each do |k, v|
+    # "foo.bar: baz" --> { config.foo.bar = baz }
+    cfg = config
+    ks = k.split '.'
+    k = ks.pop
+    ks.each do |kk|
+      cfg = cfg.send(kk)
+    end
+    if cfg.respond_to?(k.to_sym) and !cfg.send(k).nil?
+      # Config must have been set already in environments/*.rb.
+      #
+      # After config files have been migrated, this mechanism should
+      # be deprecated, then removed.
+    elsif v.nil?
+      # Config variables are not allowed to be nil. Make a "naughty"
+      # list, and present it below.
+      nils << k
+    else
+      cfg.send "#{k}=", v
+    end
+  end
+  if !nils.empty?
+    raise <<EOS
+Refusing to start in #{::Rails.env.to_s} mode with missing configuration.
+
+The following configuration settings must be specified in
+config/application.yml:
+* #{nils.join "\n* "}
+
+EOS
+  end
+end
diff --git a/services/api/config/initializers/zz_preload_all_models.rb b/services/api/config/initializers/zz_preload_all_models.rb
new file mode 100644 (file)
index 0000000..1a76b72
--- /dev/null
@@ -0,0 +1,7 @@
+# See http://aaronvb.com/articles/37-rails-caching-and-undefined-class-module
+
+if Rails.env == 'development'
+  Dir.foreach("#{Rails.root}/app/models") do |model_file|
+    require_dependency model_file if model_file.match /\.rb$/
+  end 
+end
diff --git a/services/api/config/locales/en.yml b/services/api/config/locales/en.yml
new file mode 100644 (file)
index 0000000..179c14c
--- /dev/null
@@ -0,0 +1,5 @@
+# Sample localization file for English. Add more files in this directory for other locales.
+# See https://github.com/svenfuchs/rails-i18n/tree/master/rails%2Flocale for starting points.
+
+en:
+  hello: "Hello world"
diff --git a/services/api/config/routes.rb b/services/api/config/routes.rb
new file mode 100644 (file)
index 0000000..27fd67c
--- /dev/null
@@ -0,0 +1,97 @@
+Server::Application.routes.draw do
+  themes_for_rails
+
+  # See http://guides.rubyonrails.org/routing.html
+
+  # OPTIONS requests are not allowed at routes that use cookies.
+  ['/auth/*a', '/login', '/logout'].each do |nono|
+    match nono, :to => 'user_sessions#cross_origin_forbidden', :via => 'OPTIONS'
+  end
+  # OPTIONS at discovery and API paths get an empty response with CORS headers.
+  match '/discovery/v1/*a', :to => 'static#empty', :via => 'OPTIONS'
+  match '/arvados/v1/*a', :to => 'static#empty', :via => 'OPTIONS'
+
+  namespace :arvados do
+    namespace :v1 do
+      resources :api_client_authorizations do
+        post 'create_system_auth', on: :collection
+      end
+      resources :api_clients
+      resources :authorized_keys
+      resources :collections do
+        get 'provenance', on: :member
+        get 'used_by', on: :member
+      end
+      resources :groups do
+        get 'contents', on: :collection
+        get 'contents', on: :member
+      end
+      resources :humans
+      resources :job_tasks
+      resources :jobs do
+        get 'queue', on: :collection
+        get 'queue_size', on: :collection
+        post 'cancel', on: :member
+        post 'lock', on: :member
+      end
+      resources :keep_disks do
+        post 'ping', on: :collection
+      end
+      resources :keep_services do
+        get 'accessible', on: :collection
+      end
+      resources :links
+      resources :logs
+      resources :nodes do
+        post 'ping', on: :member
+      end
+      resources :pipeline_instances
+      resources :pipeline_templates
+      resources :repositories do
+        get 'get_all_permissions', on: :collection
+      end
+      resources :specimens
+      resources :traits
+      resources :user_agreements do
+        get 'signatures', on: :collection
+        post 'sign', on: :collection
+      end
+      resources :users do
+        get 'current', on: :collection
+        get 'system', on: :collection
+        post 'activate', on: :member
+        post 'setup', on: :collection
+        post 'unsetup', on: :member
+      end
+      resources :virtual_machines do
+        get 'logins', on: :member
+        get 'get_all_logins', on: :collection
+      end
+      get '/permissions/:uuid', :to => 'links#get_permissions'
+    end
+  end
+
+  if Rails.env == 'test'
+    post '/database/reset', to: 'database#reset'
+  end
+
+  # omniauth
+  match '/auth/:provider/callback', :to => 'user_sessions#create'
+  match '/auth/failure', :to => 'user_sessions#failure'
+  # not handled by omniauth provider -> 403 with no CORS headers.
+  get '/auth/*a', :to => 'user_sessions#cross_origin_forbidden'
+
+  # Custom logout
+  match '/login', :to => 'user_sessions#login'
+  match '/logout', :to => 'user_sessions#logout'
+
+  match '/discovery/v1/apis/arvados/v1/rest', :to => 'arvados/v1/schema#index'
+
+  match '/static/login_failure', :to => 'static#login_failure', :as => :login_failure
+
+  # Send unroutable requests to an arbitrary controller
+  # (ends up at ApplicationController#render_not_found)
+  match '*a', :to => 'static#render_not_found'
+
+  root :to => 'static#home'
+end
diff --git a/services/api/config/unbound.template b/services/api/config/unbound.template
new file mode 100644 (file)
index 0000000..0c67700
--- /dev/null
@@ -0,0 +1,4 @@
+  local-data: "%{hostname} IN A %{ip_address}"
+  local-data: "%{hostname}.%{uuid_prefix} IN A %{ip_address}"
+  local-data: "%{hostname}.%{uuid_prefix}.arvadosapi.com. IN A %{ip_address}"
+  local-data: "%{ptr_domain}. IN PTR %{hostname}.%{uuid_prefix}.arvadosapi.com"
diff --git a/services/api/db/migrate/20121016005009_create_collections.rb b/services/api/db/migrate/20121016005009_create_collections.rb
new file mode 100644 (file)
index 0000000..fec4690
--- /dev/null
@@ -0,0 +1,21 @@
+class CreateCollections < ActiveRecord::Migration
+  def change
+    create_table :collections do |t|
+      t.string :locator
+      t.string :create_by_client
+      t.string :created_by_user
+      t.datetime :created_at
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :portable_data_hash
+      t.string :name
+      t.integer :redundancy
+      t.string :redundancy_confirmed_by_client
+      t.datetime :redundancy_confirmed_at
+      t.integer :redundancy_confirmed_as
+
+      t.timestamps
+    end
+  end
+end
diff --git a/services/api/db/migrate/20130105203021_create_metadata.rb b/services/api/db/migrate/20130105203021_create_metadata.rb
new file mode 100644 (file)
index 0000000..8e0f01b
--- /dev/null
@@ -0,0 +1,22 @@
+class CreateMetadata < ActiveRecord::Migration
+  def change
+    create_table :metadata do |t|
+      t.string :uuid
+      t.string :created_by_client
+      t.string :created_by_user
+      t.datetime :created_at
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :target_uuid
+      t.string :target_kind
+      t.references :native_target, :polymorphic => true
+      t.string :metadatum_class
+      t.string :key
+      t.string :value
+      t.text :info # "unlimited length" in postgresql
+
+      t.timestamps
+    end
+  end
+end
diff --git a/services/api/db/migrate/20130105224358_rename_metadata_class.rb b/services/api/db/migrate/20130105224358_rename_metadata_class.rb
new file mode 100644 (file)
index 0000000..4388ae4
--- /dev/null
@@ -0,0 +1,9 @@
+class RenameMetadataClass < ActiveRecord::Migration
+  def up
+    rename_column :metadata, :metadatum_class, :metadata_class
+  end
+
+  def down
+    rename_column :metadata, :metadata_class, :metadatum_class
+  end
+end
diff --git a/services/api/db/migrate/20130105224618_rename_collection_created_by_client.rb b/services/api/db/migrate/20130105224618_rename_collection_created_by_client.rb
new file mode 100644 (file)
index 0000000..9b5b1c1
--- /dev/null
@@ -0,0 +1,9 @@
+class RenameCollectionCreatedByClient < ActiveRecord::Migration
+  def up
+    rename_column :collections, :create_by_client, :created_by_client
+  end
+
+  def down
+    rename_column :collections, :created_by_client, :create_by_client
+  end
+end
diff --git a/services/api/db/migrate/20130107181109_add_uuid_to_collections.rb b/services/api/db/migrate/20130107181109_add_uuid_to_collections.rb
new file mode 100644 (file)
index 0000000..af95260
--- /dev/null
@@ -0,0 +1,5 @@
+class AddUuidToCollections < ActiveRecord::Migration
+  def change
+    add_column :collections, :uuid, :string
+  end
+end
diff --git a/services/api/db/migrate/20130107212832_create_nodes.rb b/services/api/db/migrate/20130107212832_create_nodes.rb
new file mode 100644 (file)
index 0000000..6ca977a
--- /dev/null
@@ -0,0 +1,28 @@
+class CreateNodes < ActiveRecord::Migration
+  def up
+    create_table :nodes do |t|
+      t.string :uuid
+      t.string :created_by_client
+      t.string :created_by_user
+      t.datetime :created_at
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.integer :slot_number
+      t.string :hostname
+      t.string :domain
+      t.string :ip_address
+      t.datetime :first_ping_at
+      t.datetime :last_ping_at
+      t.text :info
+
+      t.timestamps
+    end
+    add_index :nodes, :uuid, :unique => true
+    add_index :nodes, :slot_number, :unique => true
+    add_index :nodes, :hostname, :unique => true
+  end
+  def down
+    drop_table :nodes
+  end
+end
diff --git a/services/api/db/migrate/20130109175700_create_pipelines.rb b/services/api/db/migrate/20130109175700_create_pipelines.rb
new file mode 100644 (file)
index 0000000..fe05886
--- /dev/null
@@ -0,0 +1,21 @@
+class CreatePipelines < ActiveRecord::Migration
+  def up
+    create_table :pipelines do |t|
+      t.string :uuid
+      t.string :created_by_client
+      t.string :created_by_user
+      t.datetime :created_at
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :name
+      t.text :components
+
+      t.timestamps
+    end
+    add_index :pipelines, :uuid, :unique => true
+  end
+  def down
+    drop_table :pipelines
+  end
+end
diff --git a/services/api/db/migrate/20130109220548_create_pipeline_invocations.rb b/services/api/db/migrate/20130109220548_create_pipeline_invocations.rb
new file mode 100644 (file)
index 0000000..147fdad
--- /dev/null
@@ -0,0 +1,24 @@
+class CreatePipelineInvocations < ActiveRecord::Migration
+  def up
+    create_table :pipeline_invocations do |t|
+      t.string :uuid
+      t.string :created_by_client
+      t.string :created_by_user
+      t.datetime :created_at
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :pipeline_uuid
+      t.string :name
+      t.text :components
+      t.boolean :success, :null => true
+      t.boolean :active, :default => false
+
+      t.timestamps
+    end
+    add_index :pipeline_invocations, :uuid, :unique => true
+  end
+  def down
+    drop_table :pipeline_invocations
+  end
+end
diff --git a/services/api/db/migrate/20130113214204_add_index_to_collections_and_metadata.rb b/services/api/db/migrate/20130113214204_add_index_to_collections_and_metadata.rb
new file mode 100644 (file)
index 0000000..78499e4
--- /dev/null
@@ -0,0 +1,10 @@
+class AddIndexToCollectionsAndMetadata < ActiveRecord::Migration
+  def up
+    add_index :collections, :uuid, :unique => true
+    add_index :metadata, :uuid, :unique => true
+  end
+  def down
+    remove_index :metadata, :uuid
+    remove_index :collections, :uuid
+  end
+end
diff --git a/services/api/db/migrate/20130116024233_create_specimens.rb b/services/api/db/migrate/20130116024233_create_specimens.rb
new file mode 100644 (file)
index 0000000..443369e
--- /dev/null
@@ -0,0 +1,20 @@
+class CreateSpecimens < ActiveRecord::Migration
+  def up
+    create_table :specimens do |t|
+      t.string :uuid
+      t.string :created_by_client
+      t.string :created_by_user
+      t.datetime :created_at
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :material
+
+      t.timestamps
+    end
+    add_index :specimens, :uuid, :unique => true
+  end
+  def down
+    drop_table :specimens
+  end
+end
diff --git a/services/api/db/migrate/20130116215213_create_projects.rb b/services/api/db/migrate/20130116215213_create_projects.rb
new file mode 100644 (file)
index 0000000..5ae59e1
--- /dev/null
@@ -0,0 +1,21 @@
+class CreateProjects < ActiveRecord::Migration
+  def up
+    create_table :projects do |t|
+      t.string :uuid
+      t.string :created_by_client
+      t.string :created_by_user
+      t.datetime :created_at
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :name
+      t.text :description
+
+      t.timestamps
+    end
+    add_index :projects, :uuid, :unique => true
+  end
+  def down
+    drop_table :projects
+  end
+end
diff --git a/services/api/db/migrate/20130118002239_rename_metadata_attributes.rb b/services/api/db/migrate/20130118002239_rename_metadata_attributes.rb
new file mode 100644 (file)
index 0000000..deaa35c
--- /dev/null
@@ -0,0 +1,41 @@
+class RenameMetadataAttributes < ActiveRecord::Migration
+  def up
+    rename_column :metadata, :target_kind, :tail_kind
+    rename_column :metadata, :target_uuid, :tail
+    rename_column :metadata, :value, :head
+    rename_column :metadata, :key, :name
+    add_column :metadata, :head_kind, :string
+    add_index :metadata, :head
+    add_index :metadata, :head_kind
+    add_index :metadata, :tail
+    add_index :metadata, :tail_kind
+    begin
+      Metadatum.where('head like ?', 'orvos#%').each do |m|
+        kind_uuid = m.head.match /^(orvos\#.*)\#([-0-9a-z]+)$/
+        if kind_uuid
+          m.update_attributes(head_kind: kind_uuid[1],
+                              head: kind_uuid[2])
+        end
+      end
+    rescue
+    end
+  end
+
+  def down
+    begin
+      Metadatum.where('head_kind is not null and head_kind <> ? and head is not null', '').each do |m|
+        m.update_attributes(head: m.head_kind + '#' + m.head)
+      end
+    rescue
+    end
+    remove_index :metadata, :tail_kind
+    remove_index :metadata, :tail
+    remove_index :metadata, :head_kind
+    remove_index :metadata, :head
+    rename_column :metadata, :name, :key
+    remove_column :metadata, :head_kind
+    rename_column :metadata, :head, :value
+    rename_column :metadata, :tail, :target_uuid
+    rename_column :metadata, :tail_kind, :target_kind
+  end
+end
diff --git a/services/api/db/migrate/20130122020042_create_users.rb b/services/api/db/migrate/20130122020042_create_users.rb
new file mode 100644 (file)
index 0000000..61a60c5
--- /dev/null
@@ -0,0 +1,21 @@
+class CreateUsers < ActiveRecord::Migration
+  def change
+    create_table :users do |t|
+      t.string :uuid
+      t.string :created_by_client
+      t.string :created_by_user
+      t.datetime :created_at
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :email
+      t.string :first_name
+      t.string :last_name
+      t.string :identity_url
+      t.boolean :is_admin
+      t.text :prefs
+
+      t.timestamps
+    end
+  end
+end
diff --git a/services/api/db/migrate/20130122201442_create_logs.rb b/services/api/db/migrate/20130122201442_create_logs.rb
new file mode 100644 (file)
index 0000000..b3122bd
--- /dev/null
@@ -0,0 +1,28 @@
+class CreateLogs < ActiveRecord::Migration
+  def up
+    create_table :logs do |t|
+      t.string :uuid
+      t.string :created_by_client
+      t.string :created_by_user
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.string :object_kind
+      t.string :object_uuid
+      t.datetime :event_at
+      t.string :event_type
+      t.text :summary
+      t.text :info
+
+      t.timestamps
+    end
+    add_index :logs, :uuid, :unique => true
+    add_index :logs, :object_kind
+    add_index :logs, :object_uuid
+    add_index :logs, :event_type
+    add_index :logs, :event_at
+    add_index :logs, :summary
+  end
+
+  def down
+    drop_table :logs  end
+end
diff --git a/services/api/db/migrate/20130122221616_add_modified_at_to_logs.rb b/services/api/db/migrate/20130122221616_add_modified_at_to_logs.rb
new file mode 100644 (file)
index 0000000..47828d0
--- /dev/null
@@ -0,0 +1,5 @@
+class AddModifiedAtToLogs < ActiveRecord::Migration
+  def change
+    add_column :logs, :modified_at, :datetime
+  end
+end
diff --git a/services/api/db/migrate/20130123174514_add_uuid_index_to_users.rb b/services/api/db/migrate/20130123174514_add_uuid_index_to_users.rb
new file mode 100644 (file)
index 0000000..f21d5e4
--- /dev/null
@@ -0,0 +1,5 @@
+class AddUuidIndexToUsers < ActiveRecord::Migration
+  def change
+    add_index :users, :uuid, :unique => true
+  end
+end
diff --git a/services/api/db/migrate/20130123180224_create_api_clients.rb b/services/api/db/migrate/20130123180224_create_api_clients.rb
new file mode 100644 (file)
index 0000000..2bd8234
--- /dev/null
@@ -0,0 +1,17 @@
+class CreateApiClients < ActiveRecord::Migration
+  def change
+    create_table :api_clients do |t|
+      t.string :uuid
+      t.string :created_by_client
+      t.string :created_by_user
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :name
+      t.string :url_prefix
+
+      t.timestamps
+    end
+    add_index :api_clients, :uuid, :unique => true
+  end
+end
diff --git a/services/api/db/migrate/20130123180228_create_api_client_authorizations.rb b/services/api/db/migrate/20130123180228_create_api_client_authorizations.rb
new file mode 100644 (file)
index 0000000..c3d99e6
--- /dev/null
@@ -0,0 +1,19 @@
+class CreateApiClientAuthorizations < ActiveRecord::Migration
+  def change
+    create_table :api_client_authorizations do |t|
+      t.string :api_token, :null => false
+      t.references :api_client, :null => false
+      t.references :user, :null => false
+      t.string :created_by_ip_address
+      t.string :last_used_by_ip_address
+      t.datetime :last_used_at
+      t.datetime :expires_at
+
+      t.timestamps
+    end
+    add_index :api_client_authorizations, :api_token, :unique => true
+    add_index :api_client_authorizations, :api_client_id
+    add_index :api_client_authorizations, :user_id
+    add_index :api_client_authorizations, :expires_at
+  end
+end
diff --git a/services/api/db/migrate/20130125220425_rename_created_by_to_owner.rb b/services/api/db/migrate/20130125220425_rename_created_by_to_owner.rb
new file mode 100644 (file)
index 0000000..f7dae6e
--- /dev/null
@@ -0,0 +1,19 @@
+class RenameCreatedByToOwner < ActiveRecord::Migration
+  def tables
+    %w{api_clients collections logs metadata nodes pipelines pipeline_invocations projects specimens users}
+  end
+
+  def up
+    tables.each do |t|
+      remove_column t.to_sym, :created_by_client
+      rename_column t.to_sym, :created_by_user, :owner
+    end
+  end
+
+  def down
+    tables.reverse.each do |t|
+      rename_column t.to_sym, :owner, :created_by_user
+      add_column t.to_sym, :created_by_client, :string
+    end
+  end
+end
diff --git a/services/api/db/migrate/20130128202518_rename_metadata_to_links.rb b/services/api/db/migrate/20130128202518_rename_metadata_to_links.rb
new file mode 100644 (file)
index 0000000..067c69c
--- /dev/null
@@ -0,0 +1,27 @@
+class RenameMetadataToLinks < ActiveRecord::Migration
+  def up
+    rename_table :metadata, :links
+    rename_column :links, :tail, :tail_uuid
+    rename_column :links, :head, :head_uuid
+    rename_column :links, :info, :properties
+    rename_column :links, :metadata_class, :link_class
+    rename_index :links, :index_metadata_on_head_kind, :index_links_on_head_kind
+    rename_index :links, :index_metadata_on_head, :index_links_on_head_uuid
+    rename_index :links, :index_metadata_on_tail_kind, :index_links_on_tail_kind
+    rename_index :links, :index_metadata_on_tail, :index_links_on_tail_uuid
+    rename_index :links, :index_metadata_on_uuid, :index_links_on_uuid
+  end
+
+  def down
+    rename_index :links, :index_links_on_uuid, :index_metadata_on_uuid
+    rename_index :links, :index_links_on_head_kind, :index_metadata_on_head_kind
+    rename_index :links, :index_links_on_head_uuid, :index_metadata_on_head
+    rename_index :links, :index_links_on_tail_kind, :index_metadata_on_tail_kind
+    rename_index :links, :index_links_on_tail_uuid, :index_metadata_on_tail
+    rename_column :links, :link_class, :metadata_class
+    rename_column :links, :properties, :info
+    rename_column :links, :head_uuid, :head
+    rename_column :links, :tail_uuid, :tail
+    rename_table :links, :metadata
+  end
+end
diff --git a/services/api/db/migrate/20130128231343_add_properties_to_specimen.rb b/services/api/db/migrate/20130128231343_add_properties_to_specimen.rb
new file mode 100644 (file)
index 0000000..e00b7a1
--- /dev/null
@@ -0,0 +1,5 @@
+class AddPropertiesToSpecimen < ActiveRecord::Migration
+  def change
+    add_column :specimens, :properties, :text
+  end
+end
diff --git a/services/api/db/migrate/20130130205749_add_manifest_text_to_collection.rb b/services/api/db/migrate/20130130205749_add_manifest_text_to_collection.rb
new file mode 100644 (file)
index 0000000..7bcb47a
--- /dev/null
@@ -0,0 +1,5 @@
+class AddManifestTextToCollection < ActiveRecord::Migration
+  def change
+    add_column :collections, :manifest_text, :text
+  end
+end
diff --git a/services/api/db/migrate/20130203104818_create_jobs.rb b/services/api/db/migrate/20130203104818_create_jobs.rb
new file mode 100644 (file)
index 0000000..f99bf88
--- /dev/null
@@ -0,0 +1,31 @@
+class CreateJobs < ActiveRecord::Migration
+  def change
+    create_table :jobs do |t|
+      t.string :uuid
+      t.string :owner
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :submit_id
+      t.string :command
+      t.string :command_version
+      t.text :command_parameters
+      t.string :cancelled_by_client
+      t.string :cancelled_by_user
+      t.datetime :cancelled_at
+      t.datetime :started_at
+      t.datetime :finished_at
+      t.boolean :running
+      t.boolean :success
+      t.string :output
+
+      t.timestamps
+    end
+    add_index :jobs, :uuid, :unique => true
+    add_index :jobs, :submit_id, :unique => true
+    add_index :jobs, :command
+    add_index :jobs, :finished_at
+    add_index :jobs, :started_at
+    add_index :jobs, :output
+  end
+end
diff --git a/services/api/db/migrate/20130203104824_create_job_steps.rb b/services/api/db/migrate/20130203104824_create_job_steps.rb
new file mode 100644 (file)
index 0000000..0636881
--- /dev/null
@@ -0,0 +1,23 @@
+class CreateJobSteps < ActiveRecord::Migration
+  def change
+    create_table :job_steps do |t|
+      t.string :uuid
+      t.string :owner
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :job_uuid
+      t.integer :sequence
+      t.text :parameters
+      t.text :output
+      t.float :progress
+      t.boolean :success
+
+      t.timestamps
+    end
+    add_index :job_steps, :uuid, :unique => true
+    add_index :job_steps, :job_uuid
+    add_index :job_steps, :sequence
+    add_index :job_steps, :success
+  end
+end
diff --git a/services/api/db/migrate/20130203115329_add_priority_to_jobs.rb b/services/api/db/migrate/20130203115329_add_priority_to_jobs.rb
new file mode 100644 (file)
index 0000000..ad9ce87
--- /dev/null
@@ -0,0 +1,5 @@
+class AddPriorityToJobs < ActiveRecord::Migration
+  def change
+    add_column :jobs, :priority, :string
+  end
+end
diff --git a/services/api/db/migrate/20130207195855_add_index_on_timestamps.rb b/services/api/db/migrate/20130207195855_add_index_on_timestamps.rb
new file mode 100644 (file)
index 0000000..dff6745
--- /dev/null
@@ -0,0 +1,12 @@
+class AddIndexOnTimestamps < ActiveRecord::Migration
+  def tables
+    %w{api_clients collections jobs job_steps links logs nodes pipeline_invocations pipelines projects specimens users}
+  end
+
+  def change
+    tables.each do |t|
+      add_index t.to_sym, :created_at
+      add_index t.to_sym, :modified_at
+    end
+  end
+end
diff --git a/services/api/db/migrate/20130218181504_add_properties_to_pipeline_invocations.rb b/services/api/db/migrate/20130218181504_add_properties_to_pipeline_invocations.rb
new file mode 100644 (file)
index 0000000..62bb592
--- /dev/null
@@ -0,0 +1,5 @@
+class AddPropertiesToPipelineInvocations < ActiveRecord::Migration
+  def change
+    add_column :pipeline_invocations, :properties, :text
+  end
+end
diff --git a/services/api/db/migrate/20130226170000_remove_native_target_from_links.rb b/services/api/db/migrate/20130226170000_remove_native_target_from_links.rb
new file mode 100644 (file)
index 0000000..a54d2bc
--- /dev/null
@@ -0,0 +1,10 @@
+class RemoveNativeTargetFromLinks < ActiveRecord::Migration
+  def up
+    remove_column :links, :native_target_id
+    remove_column :links, :native_target_type
+  end
+  def down
+    add_column :links, :native_target_id, :integer
+    add_column :links, :native_target_type, :string
+  end
+end
diff --git a/services/api/db/migrate/20130313175417_rename_projects_to_groups.rb b/services/api/db/migrate/20130313175417_rename_projects_to_groups.rb
new file mode 100644 (file)
index 0000000..868dfe7
--- /dev/null
@@ -0,0 +1,21 @@
+class RenameProjectsToGroups < ActiveRecord::Migration
+  def up
+    rename_table :projects, :groups
+    rename_index :groups, :index_projects_on_created_at, :index_groups_on_created_at
+    rename_index :groups, :index_projects_on_modified_at, :index_groups_on_modified_at
+    rename_index :groups, :index_projects_on_uuid, :index_groups_on_uuid
+    Link.update_all({head_kind:'orvos#group'}, ['head_kind=?','orvos#project'])
+    Link.update_all({tail_kind:'orvos#group'}, ['tail_kind=?','orvos#project'])
+    Log.update_all({object_kind:'orvos#group'}, ['object_kind=?','orvos#project'])
+  end
+
+  def down
+    Log.update_all({object_kind:'orvos#project'}, ['object_kind=?','orvos#group'])
+    Link.update_all({tail_kind:'orvos#project'}, ['tail_kind=?','orvos#group'])
+    Link.update_all({head_kind:'orvos#project'}, ['head_kind=?','orvos#group'])
+    rename_index :groups, :index_groups_on_created_at, :index_projects_on_created_at
+    rename_index :groups, :index_groups_on_modified_at, :index_projects_on_modified_at
+    rename_index :groups, :index_groups_on_uuid, :index_projects_on_uuid
+    rename_table :groups, :projects
+  end
+end
diff --git a/services/api/db/migrate/20130315155820_add_is_locked_by_to_jobs.rb b/services/api/db/migrate/20130315155820_add_is_locked_by_to_jobs.rb
new file mode 100644 (file)
index 0000000..02e6f34
--- /dev/null
@@ -0,0 +1,5 @@
+class AddIsLockedByToJobs < ActiveRecord::Migration
+  def change
+    add_column :jobs, :is_locked_by, :string
+  end
+end
diff --git a/services/api/db/migrate/20130315183626_add_log_to_jobs.rb b/services/api/db/migrate/20130315183626_add_log_to_jobs.rb
new file mode 100644 (file)
index 0000000..d9dbeff
--- /dev/null
@@ -0,0 +1,5 @@
+class AddLogToJobs < ActiveRecord::Migration
+  def change
+    add_column :jobs, :log, :string
+  end
+end
diff --git a/services/api/db/migrate/20130315213205_add_tasks_summary_to_jobs.rb b/services/api/db/migrate/20130315213205_add_tasks_summary_to_jobs.rb
new file mode 100644 (file)
index 0000000..ea30854
--- /dev/null
@@ -0,0 +1,5 @@
+class AddTasksSummaryToJobs < ActiveRecord::Migration
+  def change
+    add_column :jobs, :tasks_summary, :text
+  end
+end
diff --git a/services/api/db/migrate/20130318002138_add_resource_limits_to_jobs.rb b/services/api/db/migrate/20130318002138_add_resource_limits_to_jobs.rb
new file mode 100644 (file)
index 0000000..f7bd04b
--- /dev/null
@@ -0,0 +1,5 @@
+class AddResourceLimitsToJobs < ActiveRecord::Migration
+  def change
+    add_column :jobs, :resource_limits, :text
+  end
+end
diff --git a/services/api/db/migrate/20130319165853_rename_job_command_to_script.rb b/services/api/db/migrate/20130319165853_rename_job_command_to_script.rb
new file mode 100644 (file)
index 0000000..feea2e1
--- /dev/null
@@ -0,0 +1,15 @@
+class RenameJobCommandToScript < ActiveRecord::Migration
+  def up
+    rename_column :jobs, :command, :script
+    rename_column :jobs, :command_parameters, :script_parameters
+    rename_column :jobs, :command_version, :script_version
+    rename_index :jobs, :index_jobs_on_command, :index_jobs_on_script
+  end
+
+  def down
+    rename_index :jobs, :index_jobs_on_script, :index_jobs_on_command
+    rename_column :jobs, :script_version, :command_version
+    rename_column :jobs, :script_parameters, :command_parameters
+    rename_column :jobs, :script, :command
+  end
+end
diff --git a/services/api/db/migrate/20130319180730_rename_pipeline_invocation_to_pipeline_instance.rb b/services/api/db/migrate/20130319180730_rename_pipeline_invocation_to_pipeline_instance.rb
new file mode 100644 (file)
index 0000000..8299030
--- /dev/null
@@ -0,0 +1,21 @@
+class RenamePipelineInvocationToPipelineInstance < ActiveRecord::Migration
+  def up
+    rename_table :pipeline_invocations, :pipeline_instances
+    rename_index :pipeline_instances, :index_pipeline_invocations_on_created_at, :index_pipeline_instances_on_created_at
+    rename_index :pipeline_instances, :index_pipeline_invocations_on_modified_at, :index_pipeline_instances_on_modified_at
+    rename_index :pipeline_instances, :index_pipeline_invocations_on_uuid, :index_pipeline_instances_on_uuid
+    Link.update_all({head_kind:'orvos#pipeline_instance'}, ['head_kind=?','orvos#pipeline_invocation'])
+    Link.update_all({tail_kind:'orvos#pipeline_instance'}, ['tail_kind=?','orvos#pipeline_invocation'])
+    Log.update_all({object_kind:'orvos#pipeline_instance'}, ['object_kind=?','orvos#pipeline_invocation'])
+  end
+
+  def down
+    Link.update_all({head_kind:'orvos#pipeline_invocation'}, ['head_kind=?','orvos#pipeline_instance'])
+    Link.update_all({tail_kind:'orvos#pipeline_invocation'}, ['tail_kind=?','orvos#pipeline_instance'])
+    Log.update_all({object_kind:'orvos#pipeline_invocation'}, ['object_kind=?','orvos#pipeline_instance'])
+    rename_index :pipeline_instances, :index_pipeline_instances_on_created_at, :index_pipeline_invocations_on_created_at
+    rename_index :pipeline_instances, :index_pipeline_instances_on_modified_at, :index_pipeline_invocations_on_modified_at
+    rename_index :pipeline_instances, :index_pipeline_instances_on_uuid, :index_pipeline_invocations_on_uuid
+    rename_table :pipeline_instances, :pipeline_invocations
+  end
+end
diff --git a/services/api/db/migrate/20130319194637_rename_pipelines_to_pipeline_templates.rb b/services/api/db/migrate/20130319194637_rename_pipelines_to_pipeline_templates.rb
new file mode 100644 (file)
index 0000000..006b753
--- /dev/null
@@ -0,0 +1,23 @@
+class RenamePipelinesToPipelineTemplates < ActiveRecord::Migration
+  def up
+    rename_column :pipeline_instances, :pipeline_uuid, :pipeline_template_uuid
+    rename_table :pipelines, :pipeline_templates
+    rename_index :pipeline_templates, :index_pipelines_on_created_at, :index_pipeline_templates_on_created_at
+    rename_index :pipeline_templates, :index_pipelines_on_modified_at, :index_pipeline_templates_on_modified_at
+    rename_index :pipeline_templates, :index_pipelines_on_uuid, :index_pipeline_templates_on_uuid
+    Link.update_all({head_kind:'orvos#pipeline'}, ['head_kind=?','orvos#pipeline_template'])
+    Link.update_all({tail_kind:'orvos#pipeline'}, ['tail_kind=?','orvos#pipeline_template'])
+    Log.update_all({object_kind:'orvos#pipeline'}, ['object_kind=?','orvos#pipeline_template'])
+  end
+
+  def down
+    Link.update_all({head_kind:'orvos#pipeline_template'}, ['head_kind=?','orvos#pipeline'])
+    Link.update_all({tail_kind:'orvos#pipeline_template'}, ['tail_kind=?','orvos#pipeline'])
+    Log.update_all({object_kind:'orvos#pipeline_template'}, ['object_kind=?','orvos#pipeline'])
+    rename_index :pipeline_templates, :index_pipeline_templates_on_created_at, :index_pipelines_on_created_at
+    rename_index :pipeline_templates, :index_pipeline_templates_on_modified_at, :index_pipelines_on_modified_at
+    rename_index :pipeline_templates, :index_pipeline_templates_on_uuid, :index_pipelines_on_uuid
+    rename_table :pipeline_templates, :pipelines
+    rename_column :pipeline_instances, :pipeline_template_uuid, :pipeline_uuid
+  end
+end
diff --git a/services/api/db/migrate/20130319201431_rename_job_steps_to_job_tasks.rb b/services/api/db/migrate/20130319201431_rename_job_steps_to_job_tasks.rb
new file mode 100644 (file)
index 0000000..f251d18
--- /dev/null
@@ -0,0 +1,21 @@
+class RenameJobStepsToJobTasks < ActiveRecord::Migration
+  def up
+    rename_table :job_steps, :job_tasks
+    rename_index :job_tasks, :index_job_steps_on_created_at, :index_job_tasks_on_created_at
+    rename_index :job_tasks, :index_job_steps_on_job_uuid, :index_job_tasks_on_job_uuid
+    rename_index :job_tasks, :index_job_steps_on_modified_at, :index_job_tasks_on_modified_at
+    rename_index :job_tasks, :index_job_steps_on_sequence, :index_job_tasks_on_sequence
+    rename_index :job_tasks, :index_job_steps_on_success, :index_job_tasks_on_success
+    rename_index :job_tasks, :index_job_steps_on_uuid, :index_job_tasks_on_uuid
+  end
+
+  def down
+    rename_index :job_steps, :index_job_tasks_on_created_at, :index_job_steps_on_created_at
+    rename_index :job_steps, :index_job_tasks_on_job_uuid, :index_job_steps_on_job_uuid
+    rename_index :job_steps, :index_job_tasks_on_modified_at, :index_job_steps_on_modified_at
+    rename_index :job_steps, :index_job_tasks_on_sequence, :index_job_steps_on_sequence
+    rename_index :job_steps, :index_job_tasks_on_success, :index_job_steps_on_success
+    rename_index :job_steps, :index_job_tasks_on_uuid, :index_job_steps_on_uuid
+    rename_table :job_tasks, :job_steps
+  end
+end
diff --git a/services/api/db/migrate/20130319235957_add_default_owner_to_users.rb b/services/api/db/migrate/20130319235957_add_default_owner_to_users.rb
new file mode 100644 (file)
index 0000000..712267a
--- /dev/null
@@ -0,0 +1,5 @@
+class AddDefaultOwnerToUsers < ActiveRecord::Migration
+  def change
+    add_column :users, :default_owner, :string
+  end
+end
diff --git a/services/api/db/migrate/20130320000107_add_default_owner_to_api_client_authorizations.rb b/services/api/db/migrate/20130320000107_add_default_owner_to_api_client_authorizations.rb
new file mode 100644 (file)
index 0000000..43577bc
--- /dev/null
@@ -0,0 +1,5 @@
+class AddDefaultOwnerToApiClientAuthorizations < ActiveRecord::Migration
+  def change
+    add_column :api_client_authorizations, :default_owner, :string
+  end
+end
diff --git a/services/api/db/migrate/20130326173804_create_commits.rb b/services/api/db/migrate/20130326173804_create_commits.rb
new file mode 100644 (file)
index 0000000..f991070
--- /dev/null
@@ -0,0 +1,12 @@
+class CreateCommits < ActiveRecord::Migration
+  def change
+    create_table :commits do |t|
+      t.string :repository_name
+      t.string :sha1
+      t.string :message
+
+      t.timestamps
+    end
+    add_index :commits, [:repository_name, :sha1], :unique => true
+  end
+end
diff --git a/services/api/db/migrate/20130326182917_create_commit_ancestors.rb b/services/api/db/migrate/20130326182917_create_commit_ancestors.rb
new file mode 100644 (file)
index 0000000..46a8dc6
--- /dev/null
@@ -0,0 +1,13 @@
+class CreateCommitAncestors < ActiveRecord::Migration
+  def change
+    create_table :commit_ancestors do |t|
+      t.string :repository_name
+      t.string :descendant, :null => false
+      t.string :ancestor, :null => false
+      t.boolean :is, :default => false, :null => false
+
+      t.timestamps
+    end
+    add_index :commit_ancestors, [:descendant, :ancestor], :unique => true
+  end
+end
diff --git a/services/api/db/migrate/20130415020241_rename_orvos_to_arvados.rb b/services/api/db/migrate/20130415020241_rename_orvos_to_arvados.rb
new file mode 100644 (file)
index 0000000..d11bba8
--- /dev/null
@@ -0,0 +1,13 @@
+class RenameOrvosToArvados < ActiveRecord::Migration
+  def up
+    Link.update_all("head_kind=replace(head_kind,'orvos','arvados')")
+    Link.update_all("tail_kind=replace(tail_kind,'orvos','arvados')")
+    Log.update_all("object_kind=replace(object_kind,'orvos','arvados')")
+  end
+
+  def down
+    Link.update_all("head_kind=replace(head_kind,'arvados','orvos')")
+    Link.update_all("tail_kind=replace(tail_kind,'arvados','orvos')")
+    Log.update_all("object_kind=replace(object_kind,'arvados','orvos')")
+  end
+end
diff --git a/services/api/db/migrate/20130425024459_create_keep_disks.rb b/services/api/db/migrate/20130425024459_create_keep_disks.rb
new file mode 100644 (file)
index 0000000..7ad92c7
--- /dev/null
@@ -0,0 +1,27 @@
+class CreateKeepDisks < ActiveRecord::Migration
+  def change
+    create_table :keep_disks do |t|
+      t.string :uuid, :null => false
+      t.string :owner, :null => false
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :ping_secret, :null => false
+      t.string :node_uuid
+      t.string :filesystem_uuid
+      t.integer :bytes_total
+      t.integer :bytes_free
+      t.boolean :is_readable, :null => false, :default => true
+      t.boolean :is_writable, :null => false, :default => true
+      t.datetime :last_read_at
+      t.datetime :last_write_at
+      t.datetime :last_ping_at
+
+      t.timestamps
+    end
+    add_index :keep_disks, :uuid, :unique => true
+    add_index :keep_disks, :filesystem_uuid
+    add_index :keep_disks, :node_uuid
+    add_index :keep_disks, :last_ping_at
+  end
+end
diff --git a/services/api/db/migrate/20130425214427_add_service_host_and_service_port_and_service_ssl_flag_to_keep_disks.rb b/services/api/db/migrate/20130425214427_add_service_host_and_service_port_and_service_ssl_flag_to_keep_disks.rb
new file mode 100644 (file)
index 0000000..8e17785
--- /dev/null
@@ -0,0 +1,9 @@
+class AddServiceHostAndServicePortAndServiceSslFlagToKeepDisks < ActiveRecord::Migration
+  def change
+    add_column :keep_disks, :service_host, :string
+    add_column :keep_disks, :service_port, :integer
+    add_column :keep_disks, :service_ssl_flag, :boolean
+    add_index :keep_disks, [:service_host, :service_port, :last_ping_at],
+      name: 'keep_disks_service_host_port_ping_at_index'
+  end
+end
diff --git a/services/api/db/migrate/20130523060112_add_created_by_job_task_to_job_tasks.rb b/services/api/db/migrate/20130523060112_add_created_by_job_task_to_job_tasks.rb
new file mode 100644 (file)
index 0000000..c3ce3d1
--- /dev/null
@@ -0,0 +1,5 @@
+class AddCreatedByJobTaskToJobTasks < ActiveRecord::Migration
+  def change
+    add_column :job_tasks, :created_by_job_task, :string
+  end
+end
diff --git a/services/api/db/migrate/20130523060213_add_qsequence_to_job_tasks.rb b/services/api/db/migrate/20130523060213_add_qsequence_to_job_tasks.rb
new file mode 100644 (file)
index 0000000..a75a600
--- /dev/null
@@ -0,0 +1,5 @@
+class AddQsequenceToJobTasks < ActiveRecord::Migration
+  def change
+    add_column :job_tasks, :qsequence, :integer
+  end
+end
diff --git a/services/api/db/migrate/20130524042319_fix_job_task_qsequence_type.rb b/services/api/db/migrate/20130524042319_fix_job_task_qsequence_type.rb
new file mode 100644 (file)
index 0000000..f6a8ef8
--- /dev/null
@@ -0,0 +1,9 @@
+class FixJobTaskQsequenceType < ActiveRecord::Migration
+  def up
+    change_column :job_tasks, :qsequence, :integer, :limit => 8
+  end
+
+  def down
+    change_column :job_tasks, :qsequence, :integer
+  end
+end
diff --git a/services/api/db/migrate/20130528134100_update_nodes_index.rb b/services/api/db/migrate/20130528134100_update_nodes_index.rb
new file mode 100644 (file)
index 0000000..cbfe7f6
--- /dev/null
@@ -0,0 +1,10 @@
+class UpdateNodesIndex < ActiveRecord::Migration
+  def up
+    remove_index :nodes, :hostname
+    add_index :nodes, :hostname
+  end
+  def down
+    remove_index :nodes, :hostname
+    add_index :nodes, :hostname, :unique => true
+  end
+end
diff --git a/services/api/db/migrate/20130606183519_create_authorized_keys.rb b/services/api/db/migrate/20130606183519_create_authorized_keys.rb
new file mode 100644 (file)
index 0000000..52529fe
--- /dev/null
@@ -0,0 +1,20 @@
+class CreateAuthorizedKeys < ActiveRecord::Migration
+  def change
+    create_table :authorized_keys do |t|
+      t.string :uuid, :null => false
+      t.string :owner, :null => false
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :name
+      t.string :key_type
+      t.string :authorized_user
+      t.text :public_key
+      t.datetime :expires_at
+
+      t.timestamps
+    end
+    add_index :authorized_keys, :uuid, :unique => true
+    add_index :authorized_keys, [:authorized_user, :expires_at]
+  end
+end
diff --git a/services/api/db/migrate/20130608053730_create_virtual_machines.rb b/services/api/db/migrate/20130608053730_create_virtual_machines.rb
new file mode 100644 (file)
index 0000000..8578f23
--- /dev/null
@@ -0,0 +1,16 @@
+class CreateVirtualMachines < ActiveRecord::Migration
+  def change
+    create_table :virtual_machines do |t|
+      t.string :uuid, :null => false
+      t.string :owner, :null => false
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :hostname
+
+      t.timestamps
+    end
+    add_index :virtual_machines, :uuid, :unique => true
+    add_index :virtual_machines, :hostname
+  end
+end
diff --git a/services/api/db/migrate/20130610202538_create_repositories.rb b/services/api/db/migrate/20130610202538_create_repositories.rb
new file mode 100644 (file)
index 0000000..27a962d
--- /dev/null
@@ -0,0 +1,18 @@
+class CreateRepositories < ActiveRecord::Migration
+  def change
+    create_table :repositories do |t|
+      t.string :uuid, :null => false
+      t.string :owner, :null => false
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :name
+      t.string :fetch_url
+      t.string :push_url
+
+      t.timestamps
+    end
+    add_index :repositories, :uuid, :unique => true
+    add_index :repositories, :name
+  end
+end
diff --git a/services/api/db/migrate/20130611163736_rename_authorized_key_authorized_user_to_authorized_user_uuid.rb b/services/api/db/migrate/20130611163736_rename_authorized_key_authorized_user_to_authorized_user_uuid.rb
new file mode 100644 (file)
index 0000000..f1203e9
--- /dev/null
@@ -0,0 +1,13 @@
+class RenameAuthorizedKeyAuthorizedUserToAuthorizedUserUuid < ActiveRecord::Migration
+  def up
+    remove_index :authorized_keys, [:authorized_user, :expires_at]
+    rename_column :authorized_keys, :authorized_user, :authorized_user_uuid
+    add_index :authorized_keys, [:authorized_user_uuid, :expires_at]
+  end
+
+  def down
+    remove_index :authorized_keys, [:authorized_user_uuid, :expires_at]
+    rename_column :authorized_keys, :authorized_user_uuid, :authorized_user
+    add_index :authorized_keys, [:authorized_user, :expires_at]
+  end
+end
diff --git a/services/api/db/migrate/20130612042554_add_name_unique_index_to_repositories.rb b/services/api/db/migrate/20130612042554_add_name_unique_index_to_repositories.rb
new file mode 100644 (file)
index 0000000..c8958fb
--- /dev/null
@@ -0,0 +1,11 @@
+class AddNameUniqueIndexToRepositories < ActiveRecord::Migration
+  def up
+    remove_index :repositories, :name
+    add_index :repositories, :name, :unique => true
+  end
+
+  def down
+    remove_index :repositories, :name
+    add_index :repositories, :name
+  end
+end
diff --git a/services/api/db/migrate/20130617150007_add_is_trusted_to_api_clients.rb b/services/api/db/migrate/20130617150007_add_is_trusted_to_api_clients.rb
new file mode 100644 (file)
index 0000000..0d9b992
--- /dev/null
@@ -0,0 +1,5 @@
+class AddIsTrustedToApiClients < ActiveRecord::Migration
+  def change
+    add_column :api_clients, :is_trusted, :boolean, :default => false
+  end
+end
diff --git a/services/api/db/migrate/20130626002829_add_is_active_to_users.rb b/services/api/db/migrate/20130626002829_add_is_active_to_users.rb
new file mode 100644 (file)
index 0000000..602c9ec
--- /dev/null
@@ -0,0 +1,5 @@
+class AddIsActiveToUsers < ActiveRecord::Migration
+  def change
+    add_column :users, :is_active, :boolean, :default => false
+  end
+end
diff --git a/services/api/db/migrate/20130626022810_activate_all_admins.rb b/services/api/db/migrate/20130626022810_activate_all_admins.rb
new file mode 100644 (file)
index 0000000..c86530d
--- /dev/null
@@ -0,0 +1,8 @@
+class ActivateAllAdmins < ActiveRecord::Migration
+  def up
+    User.update_all({is_active: true}, ['is_admin=?', true])
+  end
+
+  def down
+  end
+end
diff --git a/services/api/db/migrate/20130627154537_create_traits.rb b/services/api/db/migrate/20130627154537_create_traits.rb
new file mode 100644 (file)
index 0000000..541a313
--- /dev/null
@@ -0,0 +1,17 @@
+class CreateTraits < ActiveRecord::Migration
+  def change
+    create_table :traits do |t|
+      t.string :uuid, :null => false
+      t.string :owner, :null => false
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.string :name
+      t.text :properties
+
+      t.timestamps
+    end
+    add_index :traits, :uuid, :unique => true
+    add_index :traits, :name
+  end
+end
diff --git a/services/api/db/migrate/20130627184333_create_humans.rb b/services/api/db/migrate/20130627184333_create_humans.rb
new file mode 100644 (file)
index 0000000..5051658
--- /dev/null
@@ -0,0 +1,15 @@
+class CreateHumans < ActiveRecord::Migration
+  def change
+    create_table :humans do |t|
+      t.string :uuid, :null => false
+      t.string :owner, :null => false
+      t.string :modified_by_client
+      t.string :modified_by_user
+      t.datetime :modified_at
+      t.text :properties
+
+      t.timestamps
+    end
+    add_index :humans, :uuid, :unique => true
+  end
+end
diff --git a/services/api/db/migrate/20130708163414_rename_foreign_uuid_attributes.rb b/services/api/db/migrate/20130708163414_rename_foreign_uuid_attributes.rb
new file mode 100644 (file)
index 0000000..122571b
--- /dev/null
@@ -0,0 +1,16 @@
+class RenameForeignUuidAttributes < ActiveRecord::Migration
+  def change
+    rename_column :api_client_authorizations, :default_owner, :default_owner_uuid
+    [:api_clients, :authorized_keys, :collections,
+     :groups, :humans, :job_tasks, :jobs, :keep_disks,
+     :links, :logs, :nodes, :pipeline_instances, :pipeline_templates,
+     :repositories, :specimens, :traits, :users, :virtual_machines].each do |t|
+      rename_column t, :owner, :owner_uuid
+      rename_column t, :modified_by_client, :modified_by_client_uuid
+      rename_column t, :modified_by_user, :modified_by_user_uuid
+    end
+    rename_column :collections, :redundancy_confirmed_by_client, :redundancy_confirmed_by_client_uuid
+    rename_column :jobs, :is_locked_by, :is_locked_by_uuid
+    rename_column :job_tasks, :created_by_job_task, :created_by_job_task_uuid
+  end
+end
diff --git a/services/api/db/migrate/20130708182912_rename_job_foreign_uuid_attributes.rb b/services/api/db/migrate/20130708182912_rename_job_foreign_uuid_attributes.rb
new file mode 100644 (file)
index 0000000..b5c3396
--- /dev/null
@@ -0,0 +1,6 @@
+class RenameJobForeignUuidAttributes < ActiveRecord::Migration
+  def change
+    rename_column :jobs, :cancelled_by_client, :cancelled_by_client_uuid
+    rename_column :jobs, :cancelled_by_user, :cancelled_by_user_uuid
+  end
+end
diff --git a/services/api/db/migrate/20130708185153_rename_user_default_owner.rb b/services/api/db/migrate/20130708185153_rename_user_default_owner.rb
new file mode 100644 (file)
index 0000000..4e28740
--- /dev/null
@@ -0,0 +1,5 @@
+class RenameUserDefaultOwner < ActiveRecord::Migration
+  def change
+    rename_column :users, :default_owner, :default_owner_uuid
+  end
+end
diff --git a/services/api/db/migrate/20130724153034_add_scopes_to_api_client_authorizations.rb b/services/api/db/migrate/20130724153034_add_scopes_to_api_client_authorizations.rb
new file mode 100644 (file)
index 0000000..6693a68
--- /dev/null
@@ -0,0 +1,5 @@
+class AddScopesToApiClientAuthorizations < ActiveRecord::Migration
+  def change
+    add_column :api_client_authorizations, :scopes, :text, :null => false, :default => ['all'].to_yaml
+  end
+end
diff --git a/services/api/db/migrate/20131007180607_rename_resource_limits_to_runtime_constraints.rb b/services/api/db/migrate/20131007180607_rename_resource_limits_to_runtime_constraints.rb
new file mode 100644 (file)
index 0000000..8a6a08a
--- /dev/null
@@ -0,0 +1,5 @@
+class RenameResourceLimitsToRuntimeConstraints < ActiveRecord::Migration
+  def change
+    rename_column :jobs, :resource_limits, :runtime_constraints
+  end
+end
diff --git a/services/api/db/migrate/20140117231056_normalize_collection_uuid.rb b/services/api/db/migrate/20140117231056_normalize_collection_uuid.rb
new file mode 100644 (file)
index 0000000..b26c0a8
--- /dev/null
@@ -0,0 +1,91 @@
+class NormalizeCollectionUuid < ActiveRecord::Migration
+  def count_orphans
+    %w(head tail).each do |ht|
+      results = ActiveRecord::Base.connection.execute(<<-EOS)
+SELECT COUNT(links.*)
+ FROM links
+ LEFT JOIN collections c
+   ON links.#{ht}_uuid = c.uuid
+ WHERE (#{ht}_kind='arvados#collection' or #{ht}_uuid ~ '^[0-9a-f]{32,}')
+   AND #{ht}_uuid IS NOT NULL
+   AND #{ht}_uuid NOT IN (SELECT uuid FROM collections)
+EOS
+      puts "#{results.first['count'].to_i} links with #{ht}_uuid pointing nowhere."
+    end
+  end
+
+  def up
+    # Normalize uuids in the collections table to
+    # {hash}+{size}. Existing uuids might be {hash},
+    # {hash}+{size}+K@{instance-name}, {hash}+K@{instance-name}, etc.
+
+    count_orphans
+    puts "Normalizing collection UUIDs."
+
+    update_sql <<-EOS
+UPDATE collections
+ SET uuid = regexp_replace(uuid,'\\+.*','') || '+' || length(manifest_text)
+ WHERE uuid !~ '^[0-9a-f]{32,}\\+[0-9]+$'
+   AND (regexp_replace(uuid,'\\+.*','') || '+' || length(manifest_text))
+     NOT IN (SELECT uuid FROM collections)
+EOS
+
+    count_orphans
+    puts "Updating links by stripping +K@.* from *_uuid attributes."
+
+    update_sql <<-EOS
+UPDATE links
+ SET head_uuid = regexp_replace(head_uuid,'\\+K@.*','')
+ WHERE head_uuid like '%+K@%'
+EOS
+    update_sql <<-EOS
+UPDATE links
+ SET tail_uuid = regexp_replace(tail_uuid,'\\+K@.*','')
+ WHERE tail_uuid like '%+K@%'
+EOS
+
+    count_orphans
+    puts "Updating links by searching bare collection hashes using regexp."
+
+    # Next, update {hash} (and any other non-normalized forms) to
+    # {hash}+{size}. This can only work where the corresponding
+    # collection is found in the collections table (otherwise we can't
+    # know the size).
+    %w(head tail).each do |ht|
+      update_sql <<-EOS
+UPDATE links
+ SET #{ht}_uuid = c.uuid
+ FROM collections c
+ WHERE #{ht}_uuid IS NOT NULL
+   AND (#{ht}_kind='arvados#collection' or #{ht}_uuid ~ '^[0-9a-f]{32,}')
+   AND #{ht}_uuid NOT IN (SELECT uuid FROM collections)
+   AND regexp_replace(#{ht}_uuid,'\\+.*','') = regexp_replace(c.uuid,'\\+.*','')
+   AND c.uuid ~ '^[0-9a-f]{32,}\\+[0-9]+$'
+EOS
+    end
+
+    count_orphans
+    puts "Stripping \"+K@.*\" from jobs.output, jobs.log, job_tasks.output."
+
+    update_sql <<-EOS
+UPDATE jobs
+ SET output = regexp_replace(output,'\\+K@.*','')
+ WHERE output ~ '^[0-9a-f]{32,}\\+[0-9]+\\+K@\\w+$'
+EOS
+    update_sql <<-EOS
+UPDATE jobs
+ SET log = regexp_replace(log,'\\+K@.*','')
+ WHERE log ~ '^[0-9a-f]{32,}\\+[0-9]+\\+K@\\w+$'
+EOS
+    update_sql <<-EOS
+UPDATE job_tasks
+ SET output = regexp_replace(output,'\\+K@.*','')
+ WHERE output ~ '^[0-9a-f]{32,}\\+[0-9]+\\+K@\\w+$'
+EOS
+
+    puts "Done."
+  end
+
+  def down
+  end
+end
diff --git a/services/api/db/migrate/20140124222114_fix_link_kind_underscores.rb b/services/api/db/migrate/20140124222114_fix_link_kind_underscores.rb
new file mode 100644 (file)
index 0000000..3d13c00
--- /dev/null
@@ -0,0 +1,17 @@
+class FixLinkKindUnderscores < ActiveRecord::Migration
+  def up
+    update_sql <<-EOS
+UPDATE links
+ SET head_kind = 'arvados#virtualMachine'
+ WHERE head_kind = 'arvados#virtual_machine'
+EOS
+  end
+
+  def down
+    update_sql <<-EOS
+UPDATE links
+ SET head_kind = 'arvados#virtual_machine'
+ WHERE head_kind = 'arvados#virtualMachine'
+EOS
+  end
+end
diff --git a/services/api/db/migrate/20140129184311_normalize_collection_uuids_in_script_parameters.rb b/services/api/db/migrate/20140129184311_normalize_collection_uuids_in_script_parameters.rb
new file mode 100644 (file)
index 0000000..b36241b
--- /dev/null
@@ -0,0 +1,45 @@
+class NormalizeCollectionUuidsInScriptParameters < ActiveRecord::Migration
+  include CurrentApiClient
+  def up
+    act_as_system_user do
+      PipelineInstance.all.each do |pi|
+        pi.save! if fix_values_recursively(pi.components)
+      end
+      Job.all.each do |j|
+        changed = false
+        j.script_parameters.each do |p, v|
+          if v.is_a? String and v.match /\+K/
+            v.gsub! /\+K\@\w+/, ''
+            changed = true
+          end
+        end
+        j.save! if changed
+      end
+    end
+  end
+
+  def down
+  end
+
+  protected
+  def fix_values_recursively fixme
+    changed = false
+    if fixme.is_a? String
+      if fixme.match /\+K/
+        fixme.gsub! /\+K\@\w+/, ''
+        return true
+      else
+        return false
+      end
+    elsif fixme.is_a? Array
+      fixme.each do |v|
+        changed = fix_values_recursively(v) || changed
+      end
+    elsif fixme.is_a? Hash
+      fixme.each do |p, v|
+        changed = fix_values_recursively(v) || changed
+      end
+    end
+    changed
+  end
+end
diff --git a/services/api/db/migrate/20140317135600_add_nondeterministic_column_to_job.rb b/services/api/db/migrate/20140317135600_add_nondeterministic_column_to_job.rb
new file mode 100644 (file)
index 0000000..574001b
--- /dev/null
@@ -0,0 +1,9 @@
+class AddNondeterministicColumnToJob < ActiveRecord::Migration
+  def up
+    add_column :jobs, :nondeterministic, :boolean
+  end
+
+  def down
+    remove_column :jobs, :nondeterministic
+  end
+end
diff --git a/services/api/db/migrate/20140319160547_separate_repository_from_script_version.rb b/services/api/db/migrate/20140319160547_separate_repository_from_script_version.rb
new file mode 100644 (file)
index 0000000..d87b037
--- /dev/null
@@ -0,0 +1,31 @@
+class SeparateRepositoryFromScriptVersion < ActiveRecord::Migration
+  include CurrentApiClient
+
+  def fixup pt
+    c = pt.components
+    c.each do |k, v|
+      commit_ish = v["script_version"]
+      if commit_ish.andand.index(':')
+        want_repo, commit_ish = commit_ish.split(':',2)
+        v[:repository] = want_repo
+        v[:script_version] = commit_ish
+      end
+    end
+    pt.save!
+  end
+
+  def up
+    act_as_system_user do
+      PipelineTemplate.all.each do |pt|
+        fixup pt
+      end
+      PipelineInstance.all.each do |pt|
+        fixup pt
+      end
+    end
+  end
+
+  def down
+    raise ActiveRecord::IrreversibleMigration
+  end
+end
diff --git a/services/api/db/migrate/20140321191343_add_repository_column_to_job.rb b/services/api/db/migrate/20140321191343_add_repository_column_to_job.rb
new file mode 100644 (file)
index 0000000..e1ebb2e
--- /dev/null
@@ -0,0 +1,9 @@
+class AddRepositoryColumnToJob < ActiveRecord::Migration
+  def up
+    add_column :jobs, :repository, :string
+  end
+
+  def down
+    remove_column :jobs, :repository
+  end
+end
diff --git a/services/api/db/migrate/20140324024606_add_output_is_persistent_to_job.rb b/services/api/db/migrate/20140324024606_add_output_is_persistent_to_job.rb
new file mode 100644 (file)
index 0000000..04a03c0
--- /dev/null
@@ -0,0 +1,5 @@
+class AddOutputIsPersistentToJob < ActiveRecord::Migration
+  def change
+    add_column :jobs, :output_is_persistent, :boolean, null: false, default: false
+  end
+end
diff --git a/services/api/db/migrate/20140325175653_remove_kind_columns.rb b/services/api/db/migrate/20140325175653_remove_kind_columns.rb
new file mode 100644 (file)
index 0000000..eae2a2c
--- /dev/null
@@ -0,0 +1,27 @@
+class RemoveKindColumns < ActiveRecord::Migration
+  include CurrentApiClient
+
+  def up
+    remove_column :links, :head_kind
+    remove_column :links, :tail_kind
+    remove_column :logs, :object_kind
+  end
+
+  def down
+    add_column :links, :head_kind, :string
+    add_column :links, :tail_kind, :string
+    add_column :logs, :object_kind, :string
+
+    act_as_system_user do
+      Link.all.each do |l|
+        l.head_kind = ArvadosModel::resource_class_for_uuid(l.head_uuid).kind if l.head_uuid
+        l.tail_kind = ArvadosModel::resource_class_for_uuid(l.tail_uuid).kind if l.tail_uuid
+        l.save
+      end
+      Log.all.each do |l|
+        l.object_kind = ArvadosModel::resource_class_for_uuid(l.object_uuid).kind if l.object_uuid
+        l.save
+      end
+    end
+  end
+end
diff --git a/services/api/db/migrate/20140402001908_add_system_group.rb b/services/api/db/migrate/20140402001908_add_system_group.rb
new file mode 100644 (file)
index 0000000..3bae7ea
--- /dev/null
@@ -0,0 +1,18 @@
+class AddSystemGroup < ActiveRecord::Migration
+  include CurrentApiClient
+
+  def up
+    # Make sure the system group exists.
+    system_group
+  end
+
+  def down
+    act_as_system_user do
+      system_group.destroy
+
+      # Destroy the automatically generated links giving system_group
+      # permission on all users.
+      Link.destroy_all(tail_uuid: system_group_uuid, head_kind: 'arvados#user')
+    end
+  end
+end
diff --git a/services/api/db/migrate/20140407184311_rename_log_info_to_properties.rb b/services/api/db/migrate/20140407184311_rename_log_info_to_properties.rb
new file mode 100644 (file)
index 0000000..06561c5
--- /dev/null
@@ -0,0 +1,5 @@
+class RenameLogInfoToProperties < ActiveRecord::Migration
+  def change
+    rename_column :logs, :info, :properties
+  end
+end
diff --git a/services/api/db/migrate/20140421140924_add_group_class_to_groups.rb b/services/api/db/migrate/20140421140924_add_group_class_to_groups.rb
new file mode 100644 (file)
index 0000000..de52983
--- /dev/null
@@ -0,0 +1,6 @@
+class AddGroupClassToGroups < ActiveRecord::Migration
+  def change
+    add_column :groups, :group_class, :string
+    add_index :groups, :group_class
+  end
+end
diff --git a/services/api/db/migrate/20140421151939_rename_auth_keys_user_index.rb b/services/api/db/migrate/20140421151939_rename_auth_keys_user_index.rb
new file mode 100644 (file)
index 0000000..2b057f0
--- /dev/null
@@ -0,0 +1,11 @@
+class RenameAuthKeysUserIndex < ActiveRecord::Migration
+  # Rails' default name for this index is so long, Rails can't modify
+  # the index later, because the autogenerated temporary name exceeds
+  # PostgreSQL's 64-character limit.  This migration gives the index
+  # an explicit name to work around that issue.
+  def change
+    rename_index("authorized_keys",
+                 "index_authorized_keys_on_authorized_user_uuid_and_expires_at",
+                 "index_authkeys_on_user_and_expires_at")
+  end
+end
diff --git a/services/api/db/migrate/20140421151940_timestamps_not_null.rb b/services/api/db/migrate/20140421151940_timestamps_not_null.rb
new file mode 100644 (file)
index 0000000..e4ca19e
--- /dev/null
@@ -0,0 +1,13 @@
+class TimestampsNotNull < ActiveRecord::Migration
+  def up
+    ActiveRecord::Base.connection.tables.each do |t|
+      next if t == 'schema_migrations'
+      change_column t.to_sym, :created_at, :datetime, :null => false
+      change_column t.to_sym, :updated_at, :datetime, :null => false
+    end
+  end
+  def down
+    # There might have been a NULL constraint before this, depending
+    # on the version of Rails used to build the database.
+  end
+end
diff --git a/services/api/db/migrate/20140422011506_pipeline_instance_state.rb b/services/api/db/migrate/20140422011506_pipeline_instance_state.rb
new file mode 100644 (file)
index 0000000..7643b18
--- /dev/null
@@ -0,0 +1,84 @@
+class PipelineInstanceState < ActiveRecord::Migration
+  include CurrentApiClient
+
+  def up
+    add_column :pipeline_instances, :state, :string
+    add_column :pipeline_instances, :components_summary, :text
+
+    PipelineInstance.reset_column_information
+
+    act_as_system_user do
+      PipelineInstance.all.each do |pi|
+        pi.state = PipelineInstance::New
+
+        if !pi.attribute_present? :success   # success is nil
+          if pi[:active] == true
+            pi.state = PipelineInstance::RunningOnServer
+          else
+            if pi.components_look_ready?
+              pi.state = PipelineInstance::Ready
+            else
+              pi.state = PipelineInstance::New
+            end
+          end
+        elsif pi[:success] == true
+          pi.state = PipelineInstance::Complete
+        else
+          pi.state = PipelineInstance::Failed
+        end
+
+        pi.save!
+      end
+    end
+
+# We want to perform addition of state, and removal of active and success in two phases. Hence comment these statements out.
+=begin
+    if column_exists?(:pipeline_instances, :active)
+      remove_column :pipeline_instances, :active
+    end
+
+    if column_exists?(:pipeline_instances, :success)
+      remove_column :pipeline_instances, :success
+    end
+=end
+  end
+
+  def down
+# We want to perform addition of state, and removal of active and success in two phases. Hence comment these statements out.
+=begin
+    add_column :pipeline_instances, :success, :boolean, :null => true
+    add_column :pipeline_instances, :active, :boolean, :default => false
+
+    act_as_system_user do
+      PipelineInstance.all.each do |pi|
+        case pi.state
+        when PipelineInstance::New, PipelineInstance::Ready
+          pi.active = false
+          pi.success = nil
+        when PipelineInstance::RunningOnServer
+          pi.active = true
+          pi.success = nil
+        when PipelineInstance::RunningOnClient
+          pi.active = false
+          pi.success = nil
+        when PipelineInstance::Failed
+          pi.active = false
+          pi.success = false
+        when PipelineInstance::Complete
+          pi.active = false
+          pi.success = true
+        end
+        pi.save!
+      end
+    end
+=end
+
+    if column_exists?(:pipeline_instances, :components_summary)
+      remove_column :pipeline_instances, :components_summary
+    end
+
+    if column_exists?(:pipeline_instances, :state)
+      remove_column :pipeline_instances, :state
+    end
+  end
+end
diff --git a/services/api/db/migrate/20140423132913_add_object_owner_to_logs.rb b/services/api/db/migrate/20140423132913_add_object_owner_to_logs.rb
new file mode 100644 (file)
index 0000000..61724f2
--- /dev/null
@@ -0,0 +1,27 @@
+class AddObjectOwnerToLogs < ActiveRecord::Migration
+  include CurrentApiClient
+
+  def up
+    add_column :logs, :object_owner_uuid, :string
+    act_as_system_user do
+      Log.find_in_batches(:batch_size => 500) do |batch|
+        upd = {}
+        ActiveRecord::Base.transaction do
+          batch.each do |log|
+            if log.properties["new_attributes"]
+              log.object_owner_uuid = log.properties['new_attributes']['owner_uuid']
+              log.save
+            elsif log.properties["old_attributes"]
+              log.object_owner_uuid = log.properties['old_attributes']['owner_uuid']
+              log.save
+            end
+          end
+        end
+      end
+    end
+  end
+
+  def down
+    remove_column :logs, :object_owner_uuid
+  end
+end
diff --git a/services/api/db/migrate/20140423133559_new_scope_format.rb b/services/api/db/migrate/20140423133559_new_scope_format.rb
new file mode 100644 (file)
index 0000000..5b69e95
--- /dev/null
@@ -0,0 +1,48 @@
+# At the time we introduced scopes everywhere, VirtualMachinesController
+# recognized scopes that gave the URL for a VM to grant access to that VM's
+# login list.  This migration converts those VM-specific scopes to the new
+# general format, and back.
+
+class NewScopeFormat < ActiveRecord::Migration
+  include CurrentApiClient
+
+  VM_PATH_REGEX =
+    %r{(/arvados/v1/virtual_machines/[0-9a-z]{5}-[0-9a-z]{5}-[0-9a-z]{15})}
+  OLD_SCOPE_REGEX = %r{^https?://[^/]+#{VM_PATH_REGEX.source}$}
+  NEW_SCOPE_REGEX = %r{^GET #{VM_PATH_REGEX.source}/logins$}
+
+  def fix_scopes_matching(regex)
+    act_as_system_user
+    ApiClientAuthorization.find_each do |auth|
+      auth.scopes = auth.scopes.map do |scope|
+        if match = regex.match(scope)
+          yield match
+        else
+          scope
+        end
+      end
+      auth.save!
+    end
+  end
+
+  def up
+    fix_scopes_matching(OLD_SCOPE_REGEX) do |match|
+      "GET #{match[1]}/logins"
+    end
+  end
+
+  def down
+    case Rails.env
+    when 'test'
+      hostname = 'www.example.com'
+    else
+      require 'socket'
+      hostname = Socket.gethostname
+    end
+    fix_scopes_matching(NEW_SCOPE_REGEX) do |match|
+      Rails.application.routes.url_for(controller: 'virtual_machines',
+                                       uuid: match[1].split('/').last,
+                                       host: hostname, protocol: 'https')
+    end
+  end
+end
diff --git a/services/api/db/migrate/20140501165548_add_unique_name_index_to_links.rb b/services/api/db/migrate/20140501165548_add_unique_name_index_to_links.rb
new file mode 100644 (file)
index 0000000..444265a
--- /dev/null
@@ -0,0 +1,13 @@
+class AddUniqueNameIndexToLinks < ActiveRecord::Migration
+  def change
+    # Make sure PgPower is here. Otherwise the "where" will be ignored
+    # and we'll end up with a far too restrictive unique
+    # constraint. (Rails4 should work without PgPower, but that isn't
+    # tested.)
+    if not PgPower then raise "No partial column support" end
+
+    add_index(:links, [:tail_uuid, :name], unique: true,
+              where: "link_class='name'",
+              name: 'links_tail_name_unique_if_link_class_name')
+  end
+end
diff --git a/services/api/db/migrate/20140519205916_create_keep_services.rb b/services/api/db/migrate/20140519205916_create_keep_services.rb
new file mode 100644 (file)
index 0000000..24e3921
--- /dev/null
@@ -0,0 +1,51 @@
+class CreateKeepServices < ActiveRecord::Migration
+  include CurrentApiClient
+
+  def change
+    act_as_system_user do
+      create_table :keep_services do |t|
+        t.string :uuid, :null => false
+        t.string :owner_uuid, :null => false
+        t.string :modified_by_client_uuid
+        t.string :modified_by_user_uuid
+        t.datetime :modified_at
+        t.string   :service_host
+        t.integer  :service_port
+        t.boolean  :service_ssl_flag
+        t.string   :service_type
+
+        t.timestamps
+      end
+      add_index :keep_services, :uuid, :unique => true
+
+      add_column :keep_disks, :keep_service_uuid, :string
+
+      KeepDisk.reset_column_information
+
+      services = {}
+
+      KeepDisk.find_each do |k|
+        services["#{k[:service_host]}_#{k[:service_port]}_#{k[:service_ssl_flag]}"] = {
+          service_host: k[:service_host],
+          service_port: k[:service_port],
+          service_ssl_flag: k[:service_ssl_flag],
+          service_type: 'disk',
+          owner_uuid: k[:owner_uuid]
+        }
+      end
+
+      services.each do |k, v|
+        v['uuid'] = KeepService.create(v).uuid
+      end
+
+      KeepDisk.find_each do |k|
+        k.keep_service_uuid = services["#{k[:service_host]}_#{k[:service_port]}_#{k[:service_ssl_flag]}"]['uuid']
+        k.save
+      end
+
+      remove_column :keep_disks, :service_host
+      remove_column :keep_disks, :service_port
+      remove_column :keep_disks, :service_ssl_flag
+    end
+  end
+end
diff --git a/services/api/db/migrate/20140527152921_add_description_to_pipeline_templates.rb b/services/api/db/migrate/20140527152921_add_description_to_pipeline_templates.rb
new file mode 100644 (file)
index 0000000..6cf3133
--- /dev/null
@@ -0,0 +1,5 @@
+class AddDescriptionToPipelineTemplates < ActiveRecord::Migration
+  def change
+    add_column :pipeline_templates, :description, :text
+  end
+end
diff --git a/services/api/db/migrate/20140530200539_add_supplied_script_version.rb b/services/api/db/migrate/20140530200539_add_supplied_script_version.rb
new file mode 100644 (file)
index 0000000..c054235
--- /dev/null
@@ -0,0 +1,9 @@
+class AddSuppliedScriptVersion < ActiveRecord::Migration
+  def up
+    add_column :jobs, :supplied_script_version, :string
+  end
+
+  def down
+    remove_column :jobs, :supplied_script_version, :string
+  end
+end
diff --git a/services/api/db/migrate/20140601022548_remove_name_from_collections.rb b/services/api/db/migrate/20140601022548_remove_name_from_collections.rb
new file mode 100644 (file)
index 0000000..4910de1
--- /dev/null
@@ -0,0 +1,9 @@
+class RemoveNameFromCollections < ActiveRecord::Migration
+  def up
+    remove_column :collections, :name
+  end
+
+  def down
+    add_column :collections, :name, :string
+  end
+end
diff --git a/services/api/db/migrate/20140602143352_remove_active_and_success_from_pipeline_instances.rb b/services/api/db/migrate/20140602143352_remove_active_and_success_from_pipeline_instances.rb
new file mode 100644 (file)
index 0000000..6d4014c
--- /dev/null
@@ -0,0 +1,42 @@
+class RemoveActiveAndSuccessFromPipelineInstances < ActiveRecord::Migration
+  include CurrentApiClient
+
+  def up
+    if column_exists?(:pipeline_instances, :active)
+      remove_column :pipeline_instances, :active
+    end
+
+    if column_exists?(:pipeline_instances, :success)
+      remove_column :pipeline_instances, :success
+    end
+  end
+
+  def down
+    if !column_exists?(:pipeline_instances, :success)
+      add_column :pipeline_instances, :success, :boolean, :null => true
+    end
+    if !column_exists?(:pipeline_instances, :active)
+      add_column :pipeline_instances, :active, :boolean, :default => false
+    end
+
+    act_as_system_user do
+      PipelineInstance.all.each do |pi|
+        case pi.state
+        when PipelineInstance::New, PipelineInstance::Ready, PipelineInstance::Paused, PipelineInstance::RunningOnClient
+          pi.active = nil
+          pi.success = nil
+        when PipelineInstance::RunningOnServer
+          pi.active = true
+          pi.success = nil
+        when PipelineInstance::Failed
+          pi.active = false
+          pi.success = false
+        when PipelineInstance::Complete
+          pi.active = false
+          pi.success = true
+        end
+        pi.save!
+      end
+    end
+  end
+end
diff --git a/services/api/db/migrate/20140607150616_rename_folder_to_project.rb b/services/api/db/migrate/20140607150616_rename_folder_to_project.rb
new file mode 100644 (file)
index 0000000..2a3c1d3
--- /dev/null
@@ -0,0 +1,9 @@
+class RenameFolderToProject < ActiveRecord::Migration
+  def up
+    Group.update_all("group_class = 'project'", "group_class = 'folder'")
+  end
+
+  def down
+    Group.update_all("group_class = 'folder'", "group_class = 'project'")
+  end
+end
diff --git a/services/api/db/migrate/20140611173003_add_docker_locator_to_jobs.rb b/services/api/db/migrate/20140611173003_add_docker_locator_to_jobs.rb
new file mode 100644 (file)
index 0000000..3186e56
--- /dev/null
@@ -0,0 +1,5 @@
+class AddDockerLocatorToJobs < ActiveRecord::Migration
+  def change
+    add_column :jobs, :docker_image_locator, :string
+  end
+end
diff --git a/services/api/db/migrate/20140627210837_anonymous_group.rb b/services/api/db/migrate/20140627210837_anonymous_group.rb
new file mode 100644 (file)
index 0000000..0bb7608
--- /dev/null
@@ -0,0 +1,17 @@
+class AnonymousGroup < ActiveRecord::Migration
+  include CurrentApiClient
+
+  def up
+    # create the anonymous group and user
+    anonymous_group
+    anonymous_user
+  end
+
+  def down
+    act_as_system_user do
+      anonymous_user.destroy
+      anonymous_group.destroy
+    end
+  end
+
+end
diff --git a/services/api/db/migrate/20140709172343_job_task_serial_qsequence.rb b/services/api/db/migrate/20140709172343_job_task_serial_qsequence.rb
new file mode 100644 (file)
index 0000000..db99e7c
--- /dev/null
@@ -0,0 +1,11 @@
+class JobTaskSerialQsequence < ActiveRecord::Migration
+  SEQ_NAME = "job_tasks_qsequence_seq"
+
+  def up
+    execute "CREATE SEQUENCE #{SEQ_NAME} OWNED BY job_tasks.qsequence;"
+  end
+
+  def down
+    execute "DROP SEQUENCE #{SEQ_NAME};"
+  end
+end
diff --git a/services/api/db/migrate/20140714184006_empty_collection.rb b/services/api/db/migrate/20140714184006_empty_collection.rb
new file mode 100644 (file)
index 0000000..15fd887
--- /dev/null
@@ -0,0 +1,12 @@
+class EmptyCollection < ActiveRecord::Migration
+  include CurrentApiClient
+
+  def up
+    empty_collection
+  end
+
+  def down
+    # do nothing when migrating down (having the empty collection
+    # and a permission link for it is harmless)
+  end
+end
diff --git a/services/api/db/migrate/20140811184643_collection_use_regular_uuids.rb b/services/api/db/migrate/20140811184643_collection_use_regular_uuids.rb
new file mode 100644 (file)
index 0000000..4f83eca
--- /dev/null
@@ -0,0 +1,180 @@
+class CollectionUseRegularUuids < ActiveRecord::Migration
+  def up
+    add_column :collections, :name, :string
+    add_column :collections, :description, :string
+    add_column :collections, :properties, :text
+    add_column :collections, :expires_at, :date
+    remove_column :collections, :locator
+
+    say_with_time "Step 1. Move manifest hashes into portable_data_hash field" do
+      ActiveRecord::Base.connection.execute("update collections set portable_data_hash=uuid, uuid=null")
+    end
+
+    say_with_time "Step 2. Create new collection objects from the name links in the table." do
+      from_clause = %{
+from links inner join collections on head_uuid=collections.portable_data_hash
+where link_class='name' and collections.uuid is null
+}
+      links = ActiveRecord::Base.connection.select_all %{
+select links.uuid, head_uuid, tail_uuid, links.name,
+manifest_text, links.created_at, links.modified_at, links.modified_by_client_uuid, links.modified_by_user_uuid
+#{from_clause}
+}
+      links.each do |d|
+        ActiveRecord::Base.connection.execute %{
+insert into collections (uuid, portable_data_hash, owner_uuid, name, manifest_text, created_at, modified_at, modified_by_client_uuid, modified_by_user_uuid, updated_at)
+values (#{ActiveRecord::Base.connection.quote Collection.generate_uuid},
+#{ActiveRecord::Base.connection.quote d['head_uuid']},
+#{ActiveRecord::Base.connection.quote d['tail_uuid']},
+#{ActiveRecord::Base.connection.quote d['name']},
+#{ActiveRecord::Base.connection.quote d['manifest_text']},
+#{ActiveRecord::Base.connection.quote d['created_at']},
+#{ActiveRecord::Base.connection.quote d['modified_at']},
+#{ActiveRecord::Base.connection.quote d['modified_by_client_uuid']},
+#{ActiveRecord::Base.connection.quote d['modified_by_user_uuid']},
+#{ActiveRecord::Base.connection.quote d['modified_at']})
+}
+      end
+      ActiveRecord::Base.connection.execute "delete from links where links.uuid in (select links.uuid #{from_clause})"
+    end
+
+    say_with_time "Step 3. Create new collection objects from the can_read links in the table." do
+      from_clause = %{
+from links inner join collections on head_uuid=collections.portable_data_hash
+where link_class='permission' and links.name='can_read' and collections.uuid is null
+}
+      links = ActiveRecord::Base.connection.select_all %{
+select links.uuid, head_uuid, tail_uuid, manifest_text, links.created_at, links.modified_at
+#{from_clause}
+}
+      links.each do |d|
+        ActiveRecord::Base.connection.execute %{
+insert into collections (uuid, portable_data_hash, owner_uuid, manifest_text, created_at, modified_at, modified_by_client_uuid, modified_by_user_uuid, updated_at)
+values (#{ActiveRecord::Base.connection.quote Collection.generate_uuid},
+#{ActiveRecord::Base.connection.quote d['head_uuid']},
+#{ActiveRecord::Base.connection.quote d['tail_uuid']},
+#{ActiveRecord::Base.connection.quote d['manifest_text']},
+#{ActiveRecord::Base.connection.quote d['created_at']},
+#{ActiveRecord::Base.connection.quote d['modified_at']},
+#{ActiveRecord::Base.connection.quote d['modified_by_client_uuid']},
+#{ActiveRecord::Base.connection.quote d['modified_by_user_uuid']},
+#{ActiveRecord::Base.connection.quote d['modified_at']})
+}
+      end
+      ActiveRecord::Base.connection.execute "delete from links where links.uuid in (select links.uuid #{from_clause})"
+    end
+
+    say_with_time "Step 4. Migrate remaining orphan collection objects" do
+      links = ActiveRecord::Base.connection.select_all %{
+select portable_data_hash, owner_uuid, manifest_text, created_at, modified_at
+from collections
+where uuid is null and portable_data_hash not in (select portable_data_hash from collections where uuid is not null)
+}
+      links.each do |d|
+        ActiveRecord::Base.connection.execute %{
+insert into collections (uuid, portable_data_hash, owner_uuid, manifest_text, created_at, modified_at, modified_by_client_uuid, modified_by_user_uuid, updated_at)
+values (#{ActiveRecord::Base.connection.quote Collection.generate_uuid},
+#{ActiveRecord::Base.connection.quote d['portable_data_hash']},
+#{ActiveRecord::Base.connection.quote d['owner_uuid']},
+#{ActiveRecord::Base.connection.quote d['manifest_text']},
+#{ActiveRecord::Base.connection.quote d['created_at']},
+#{ActiveRecord::Base.connection.quote d['modified_at']},
+#{ActiveRecord::Base.connection.quote d['modified_by_client_uuid']},
+#{ActiveRecord::Base.connection.quote d['modified_by_user_uuid']},
+#{ActiveRecord::Base.connection.quote d['modified_at']})
+}
+      end
+    end
+
+    say_with_time "Step 5. Delete old collection objects." do
+      ActiveRecord::Base.connection.execute("delete from collections where uuid is null")
+    end
+
+    say_with_time "Step 6. Delete permission links where tail_uuid is a collection (invalid records)" do
+      ActiveRecord::Base.connection.execute %{
+delete from links where links.uuid in (select links.uuid
+from links
+where tail_uuid like '________________________________+%' and link_class='permission' )
+}
+    end
+
+    say_with_time "Step 7. Migrate collection -> collection provenance links to jobs" do
+      from_clause = %{
+from links
+where head_uuid like '________________________________+%' and tail_uuid like '________________________________+%' and links.link_class = 'provenance'
+}
+      links = ActiveRecord::Base.connection.select_all %{
+select links.uuid, head_uuid, tail_uuid, links.created_at, links.modified_at, links.modified_by_client_uuid, links.modified_by_user_uuid, links.owner_uuid
+#{from_clause}
+}
+      links.each do |d|
+        newuuid = Job.generate_uuid
+        ActiveRecord::Base.connection.execute %{
+insert into jobs (uuid, script_parameters, output, running, success, created_at, modified_at, modified_by_client_uuid, modified_by_user_uuid, owner_uuid, updated_at)
+values (#{ActiveRecord::Base.connection.quote newuuid},
+#{ActiveRecord::Base.connection.quote "---\ninput: "+d['tail_uuid']},
+#{ActiveRecord::Base.connection.quote d['head_uuid']},
+#{ActiveRecord::Base.connection.quote false},
+#{ActiveRecord::Base.connection.quote true},
+#{ActiveRecord::Base.connection.quote d['created_at']},
+#{ActiveRecord::Base.connection.quote d['modified_at']},
+#{ActiveRecord::Base.connection.quote d['modified_by_client_uuid']},
+#{ActiveRecord::Base.connection.quote d['modified_by_user_uuid']},
+#{ActiveRecord::Base.connection.quote d['owner_uuid']},
+#{ActiveRecord::Base.connection.quote d['modified_at']})
+}
+      end
+      ActiveRecord::Base.connection.execute "delete from links where links.uuid in (select links.uuid #{from_clause})"
+    end
+
+    say_with_time "Step 8. Migrate remaining links with head_uuid pointing to collections" do
+      from_clause = %{
+from links inner join collections on links.head_uuid=portable_data_hash
+where collections.uuid is not null
+}
+      links = ActiveRecord::Base.connection.select_all %{
+select links.uuid, collections.uuid as collectionuuid, tail_uuid, link_class, links.properties,
+links.name, links.created_at, links.modified_at, links.modified_by_client_uuid, links.modified_by_user_uuid, links.owner_uuid
+#{from_clause}
+}
+      links.each do |d|
+        ActiveRecord::Base.connection.execute %{
+insert into links (uuid, head_uuid, tail_uuid, link_class, name, properties, created_at, modified_at, modified_by_client_uuid, modified_by_user_uuid, owner_uuid, updated_at)
+values (#{ActiveRecord::Base.connection.quote Link.generate_uuid},
+#{ActiveRecord::Base.connection.quote d['collectionuuid']},
+#{ActiveRecord::Base.connection.quote d['tail_uuid']},
+#{ActiveRecord::Base.connection.quote d['link_class']},
+#{ActiveRecord::Base.connection.quote d['name']},
+#{ActiveRecord::Base.connection.quote d['properties']},
+#{ActiveRecord::Base.connection.quote d['created_at']},
+#{ActiveRecord::Base.connection.quote d['modified_at']},
+#{ActiveRecord::Base.connection.quote d['modified_by_client_uuid']},
+#{ActiveRecord::Base.connection.quote d['modified_by_user_uuid']},
+#{ActiveRecord::Base.connection.quote d['owner_uuid']},
+#{ActiveRecord::Base.connection.quote d['modified_at']})
+}
+      end
+      ActiveRecord::Base.connection.execute "delete from links where links.uuid in (select links.uuid #{from_clause})"
+    end
+
+    say_with_time "Step 9. Delete any remaining name links" do
+      ActiveRecord::Base.connection.execute("delete from links where link_class='name'")
+    end
+
+    say_with_time "Step 10. Validate links table" do
+      links = ActiveRecord::Base.connection.select_all %{
+select links.uuid, head_uuid, tail_uuid, link_class, name
+from links
+where head_uuid like '________________________________+%' or tail_uuid like '________________________________+%'
+}
+      links.each do |d|
+        raise "Bad row #{d}"
+      end
+    end
+
+  end
+
+  def down
+    raise ActiveRecord::IrreversibleMigration, "Can't downmigrate changes to collections and links without potentially losing data."
+  end
+end
diff --git a/services/api/db/migrate/20140817035914_add_unique_name_constraints.rb b/services/api/db/migrate/20140817035914_add_unique_name_constraints.rb
new file mode 100644 (file)
index 0000000..3c57e6f
--- /dev/null
@@ -0,0 +1,30 @@
+class AddUniqueNameConstraints < ActiveRecord::Migration
+  def change
+    # Ensure uniqueness before adding constraints.
+    ["collections", "pipeline_templates", "groups"].each do |table|
+      rows = ActiveRecord::Base.connection.select_all %{
+select uuid, owner_uuid, name from #{table} order by owner_uuid, name
+}
+      prev = {}
+      n = 1
+      rows.each do |r|
+        if r["owner_uuid"] == prev["owner_uuid"] and !r["name"].nil? and r["name"] == prev["name"]
+          n += 1
+          ActiveRecord::Base.connection.execute %{
+update #{table} set name='#{r["name"]} #{n}' where uuid='#{r["uuid"]}'
+}
+        else
+          n = 1
+        end
+        prev = r
+      end
+    end
+
+    add_index(:collections, [:owner_uuid, :name], unique: true,
+              name: 'collection_owner_uuid_name_unique')
+    add_index(:pipeline_templates, [:owner_uuid, :name], unique: true,
+              name: 'pipeline_template_owner_uuid_name_unique')
+    add_index(:groups, [:owner_uuid, :name], unique: true,
+              name: 'groups_owner_uuid_name_unique')
+  end
+end
diff --git a/services/api/db/migrate/20140818125735_add_not_null_constraint_to_group_name.rb b/services/api/db/migrate/20140818125735_add_not_null_constraint_to_group_name.rb
new file mode 100644 (file)
index 0000000..1b07470
--- /dev/null
@@ -0,0 +1,6 @@
+class AddNotNullConstraintToGroupName < ActiveRecord::Migration
+  def change
+    ActiveRecord::Base.connection.execute("update groups set name=uuid where name is null or name=''")
+    change_column_null :groups, :name, false
+  end
+end
diff --git a/services/api/db/migrate/20140826180337_remove_output_is_persistent_column.rb b/services/api/db/migrate/20140826180337_remove_output_is_persistent_column.rb
new file mode 100644 (file)
index 0000000..f2744ad
--- /dev/null
@@ -0,0 +1,9 @@
+class RemoveOutputIsPersistentColumn < ActiveRecord::Migration
+  def up
+    remove_column :jobs, :output_is_persistent
+  end
+
+  def down
+    add_column :jobs, :output_is_persistent, :boolean, null: false, default: false
+  end
+end
diff --git a/services/api/db/migrate/20140828141043_job_priority_fixup.rb b/services/api/db/migrate/20140828141043_job_priority_fixup.rb
new file mode 100644 (file)
index 0000000..f643a3f
--- /dev/null
@@ -0,0 +1,11 @@
+class JobPriorityFixup < ActiveRecord::Migration
+  def up
+    remove_column :jobs, :priority
+    add_column :jobs, :priority, :integer, null: false, default: 0
+  end
+
+  def down
+    remove_column :jobs, :priority
+    add_column :jobs, :priority, :string, null: true, default: nil
+  end
+end
diff --git a/services/api/db/migrate/20140909183946_add_start_finish_time_to_tasks_and_pipelines.rb b/services/api/db/migrate/20140909183946_add_start_finish_time_to_tasks_and_pipelines.rb
new file mode 100644 (file)
index 0000000..139cb8d
--- /dev/null
@@ -0,0 +1,15 @@
+class AddStartFinishTimeToTasksAndPipelines < ActiveRecord::Migration
+  def up
+    add_column :job_tasks, :started_at, :datetime
+    add_column :job_tasks, :finished_at, :datetime
+    add_column :pipeline_instances, :started_at, :datetime
+    add_column :pipeline_instances, :finished_at, :datetime
+  end
+
+  def down
+    remove_column :job_tasks, :started_at
+    remove_column :job_tasks, :finished_at
+    remove_column :pipeline_instances, :started_at
+    remove_column :pipeline_instances, :finished_at
+  end
+end
diff --git a/services/api/db/migrate/20140911221252_add_description_to_pipeline_instances_and_jobs.rb b/services/api/db/migrate/20140911221252_add_description_to_pipeline_instances_and_jobs.rb
new file mode 100644 (file)
index 0000000..53d3a13
--- /dev/null
@@ -0,0 +1,11 @@
+class AddDescriptionToPipelineInstancesAndJobs < ActiveRecord::Migration
+  def up
+    add_column :pipeline_instances, :description, :text, null: true
+    add_column :jobs, :description, :text, null: true
+  end
+
+  def down
+    remove_column :jobs, :description
+    remove_column :pipeline_instances, :description
+  end
+end
diff --git a/services/api/db/migrate/20140918141529_change_user_owner_uuid_not_null.rb b/services/api/db/migrate/20140918141529_change_user_owner_uuid_not_null.rb
new file mode 100644 (file)
index 0000000..683e5c7
--- /dev/null
@@ -0,0 +1,12 @@
+class ChangeUserOwnerUuidNotNull < ActiveRecord::Migration
+  include CurrentApiClient
+
+  def up
+    User.update_all({owner_uuid: system_user_uuid}, 'owner_uuid is null')
+    change_column :users, :owner_uuid, :string, :null => false
+  end
+
+  def down
+    change_column :users, :owner_uuid, :string, :null => true
+  end
+end
diff --git a/services/api/db/migrate/20140918153541_add_properties_to_node.rb b/services/api/db/migrate/20140918153541_add_properties_to_node.rb
new file mode 100644 (file)
index 0000000..85f41ee
--- /dev/null
@@ -0,0 +1,9 @@
+class AddPropertiesToNode < ActiveRecord::Migration
+  def up
+    add_column :nodes, :properties, :text
+  end
+
+  def down
+    remove_column :nodes, :properties
+  end
+end
diff --git a/services/api/db/migrate/20140918153705_add_state_to_job.rb b/services/api/db/migrate/20140918153705_add_state_to_job.rb
new file mode 100644 (file)
index 0000000..20625c9
--- /dev/null
@@ -0,0 +1,21 @@
+class AddStateToJob < ActiveRecord::Migration
+  include CurrentApiClient
+
+  def up
+    ActiveRecord::Base.transaction do
+      add_column :jobs, :state, :string
+      Job.reset_column_information
+      Job.update_all({state: 'Cancelled'}, ['state is null and cancelled_at is not null'])
+      Job.update_all({state: 'Failed'}, ['state is null and success = ?', false])
+      Job.update_all({state: 'Complete'}, ['state is null and success = ?', true])
+      Job.update_all({state: 'Running'}, ['state is null and running = ?', true])
+      # Locked/started, but not Running/Failed/Complete? Let's assume it failed.
+      Job.update_all({state: 'Failed'}, ['state is null and (is_locked_by_uuid is not null or started_at is not null)'])
+      Job.update_all({state: 'Queued'}, ['state is null'])
+    end
+  end
+
+  def down
+    remove_column :jobs, :state
+  end
+end
diff --git a/services/api/db/migrate/20140924091559_add_job_uuid_to_nodes.rb b/services/api/db/migrate/20140924091559_add_job_uuid_to_nodes.rb
new file mode 100644 (file)
index 0000000..d8ec20f
--- /dev/null
@@ -0,0 +1,13 @@
+class AddJobUuidToNodes < ActiveRecord::Migration
+  def up
+    change_table :nodes do |t|
+      t.column :job_uuid, :string
+    end
+  end
+
+  def down
+    change_table :nodes do |t|
+      t.remove :job_uuid
+    end
+  end
+end
diff --git a/services/api/db/migrate/20141111133038_add_arvados_sdk_version_to_jobs.rb b/services/api/db/migrate/20141111133038_add_arvados_sdk_version_to_jobs.rb
new file mode 100644 (file)
index 0000000..214919d
--- /dev/null
@@ -0,0 +1,13 @@
+class AddArvadosSdkVersionToJobs < ActiveRecord::Migration
+  def up
+    change_table :jobs do |t|
+      t.column :arvados_sdk_version, :string
+    end
+  end
+
+  def down
+    change_table :jobs do |t|
+      t.remove :arvados_sdk_version
+    end
+  end
+end
diff --git a/services/api/db/migrate/20141208164553_owner_uuid_index.rb b/services/api/db/migrate/20141208164553_owner_uuid_index.rb
new file mode 100644 (file)
index 0000000..0859d46
--- /dev/null
@@ -0,0 +1,20 @@
+class OwnerUuidIndex < ActiveRecord::Migration
+  def tables_with_owner_uuid
+    ActiveRecord::Base.connection.tables.select do |table|
+      columns = ActiveRecord::Base.connection.columns(table)
+      columns.collect(&:name).include? 'owner_uuid'
+    end
+  end
+
+  def up
+    tables_with_owner_uuid.each do |table|
+      add_index table.to_sym, :owner_uuid
+    end
+  end
+
+  def down
+    tables_with_owner_uuid.each do |table|
+      remove_index table.to_sym, :owner_uuid
+    end
+  end
+end
diff --git a/services/api/db/seeds.rb b/services/api/db/seeds.rb
new file mode 100644 (file)
index 0000000..384d2e2
--- /dev/null
@@ -0,0 +1,5 @@
+# This file seeds the database with initial/default values.
+#
+# It is invoked by `rake db:seed` and `rake db:setup`.
+
+DatabaseSeeds.install
diff --git a/services/api/db/structure.sql b/services/api/db/structure.sql
new file mode 100644 (file)
index 0000000..038973f
--- /dev/null
@@ -0,0 +1,2170 @@
+--
+-- PostgreSQL database dump
+--
+
+SET statement_timeout = 0;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = on;
+SET check_function_bodies = false;
+SET client_min_messages = warning;
+
+--
+-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: -
+--
+
+CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog;
+
+
+--
+-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: -
+--
+
+COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language';
+
+
+SET search_path = public, pg_catalog;
+
+SET default_tablespace = '';
+
+SET default_with_oids = false;
+
+--
+-- Name: api_client_authorizations; Type: TABLE; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE TABLE api_client_authorizations (
+    id integer NOT NULL,
+    api_token character varying(255) NOT NULL,
+    api_client_id integer NOT NULL,
+    user_id integer NOT NULL,
+    created_by_ip_address character varying(255),
+    last_used_by_ip_address character varying(255),
+    last_used_at timestamp without time zone,
+    expires_at timestamp without time zone,
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL,
+    default_owner_uuid character varying(255),
+    scopes text DEFAULT '---
+- all
+'::text NOT NULL
+);
+
+
+--
+-- Name: api_client_authorizations_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE api_client_authorizations_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: api_client_authorizations_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE api_client_authorizations_id_seq OWNED BY api_client_authorizations.id;
+
+
+--
+-- Name: api_clients; Type: TABLE; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE TABLE api_clients (
+    id integer NOT NULL,
+    uuid character varying(255),
+    owner_uuid character varying(255),
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    name character varying(255),
+    url_prefix character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL,
+    is_trusted boolean DEFAULT false
+);
+
+
+--
+-- Name: api_clients_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE api_clients_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: api_clients_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE api_clients_id_seq OWNED BY api_clients.id;
+
+
+--
+-- Name: authorized_keys; Type: TABLE; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE TABLE authorized_keys (
+    id integer NOT NULL,
+    uuid character varying(255) NOT NULL,
+    owner_uuid character varying(255) NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    name character varying(255),
+    key_type character varying(255),
+    authorized_user_uuid character varying(255),
+    public_key text,
+    expires_at timestamp without time zone,
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL
+);
+
+
+--
+-- Name: authorized_keys_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE authorized_keys_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: authorized_keys_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE authorized_keys_id_seq OWNED BY authorized_keys.id;
+
+
+--
+-- Name: collections; Type: TABLE; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE TABLE collections (
+    id integer NOT NULL,
+    owner_uuid character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    portable_data_hash character varying(255),
+    redundancy integer,
+    redundancy_confirmed_by_client_uuid character varying(255),
+    redundancy_confirmed_at timestamp without time zone,
+    redundancy_confirmed_as integer,
+    updated_at timestamp without time zone NOT NULL,
+    uuid character varying(255),
+    manifest_text text,
+    name character varying(255),
+    description character varying(255),
+    properties text,
+    expires_at date
+);
+
+
+--
+-- Name: collections_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE collections_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: collections_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE collections_id_seq OWNED BY collections.id;
+
+
+--
+-- Name: commit_ancestors; Type: TABLE; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE TABLE commit_ancestors (
+    id integer NOT NULL,
+    repository_name character varying(255),
+    descendant character varying(255) NOT NULL,
+    ancestor character varying(255) NOT NULL,
+    "is" boolean DEFAULT false NOT NULL,
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL
+);
+
+
+--
+-- Name: commit_ancestors_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE commit_ancestors_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: commit_ancestors_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE commit_ancestors_id_seq OWNED BY commit_ancestors.id;
+
+
+--
+-- Name: commits; Type: TABLE; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE TABLE commits (
+    id integer NOT NULL,
+    repository_name character varying(255),
+    sha1 character varying(255),
+    message character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL
+);
+
+
+--
+-- Name: commits_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE commits_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: commits_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE commits_id_seq OWNED BY commits.id;
+
+
+--
+-- Name: groups; Type: TABLE; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE TABLE groups (
+    id integer NOT NULL,
+    uuid character varying(255),
+    owner_uuid character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    name character varying(255) NOT NULL,
+    description text,
+    updated_at timestamp without time zone NOT NULL,
+    group_class character varying(255)
+);
+
+
+--
+-- Name: groups_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE groups_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: groups_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE groups_id_seq OWNED BY groups.id;
+
+
+--
+-- Name: humans; Type: TABLE; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE TABLE humans (
+    id integer NOT NULL,
+    uuid character varying(255) NOT NULL,
+    owner_uuid character varying(255) NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    properties text,
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL
+);
+
+
+--
+-- Name: humans_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE humans_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: humans_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE humans_id_seq OWNED BY humans.id;
+
+
+--
+-- Name: job_tasks; Type: TABLE; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE TABLE job_tasks (
+    id integer NOT NULL,
+    uuid character varying(255),
+    owner_uuid character varying(255),
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    job_uuid character varying(255),
+    sequence integer,
+    parameters text,
+    output text,
+    progress double precision,
+    success boolean,
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL,
+    created_by_job_task_uuid character varying(255),
+    qsequence bigint,
+    started_at timestamp without time zone,
+    finished_at timestamp without time zone
+);
+
+
+--
+-- Name: job_tasks_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE job_tasks_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: job_tasks_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE job_tasks_id_seq OWNED BY job_tasks.id;
+
+
+--
+-- Name: job_tasks_qsequence_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE job_tasks_qsequence_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: job_tasks_qsequence_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE job_tasks_qsequence_seq OWNED BY job_tasks.qsequence;
+
+
+--
+-- Name: jobs; Type: TABLE; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE TABLE jobs (
+    id integer NOT NULL,
+    uuid character varying(255),
+    owner_uuid character varying(255),
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    submit_id character varying(255),
+    script character varying(255),
+    script_version character varying(255),
+    script_parameters text,
+    cancelled_by_client_uuid character varying(255),
+    cancelled_by_user_uuid character varying(255),
+    cancelled_at timestamp without time zone,
+    started_at timestamp without time zone,
+    finished_at timestamp without time zone,
+    running boolean,
+    success boolean,
+    output character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL,
+    is_locked_by_uuid character varying(255),
+    log character varying(255),
+    tasks_summary text,
+    runtime_constraints text,
+    nondeterministic boolean,
+    repository character varying(255),
+    supplied_script_version character varying(255),
+    docker_image_locator character varying(255),
+    priority integer DEFAULT 0 NOT NULL,
+    description text,
+    state character varying(255),
+    arvados_sdk_version character varying(255)
+);
+
+
+--
+-- Name: jobs_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE jobs_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: jobs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE jobs_id_seq OWNED BY jobs.id;
+
+
+--
+-- Name: keep_disks; Type: TABLE; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE TABLE keep_disks (
+    id integer NOT NULL,
+    uuid character varying(255) NOT NULL,
+    owner_uuid character varying(255) NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    ping_secret character varying(255) NOT NULL,
+    node_uuid character varying(255),
+    filesystem_uuid character varying(255),
+    bytes_total integer,
+    bytes_free integer,
+    is_readable boolean DEFAULT true NOT NULL,
+    is_writable boolean DEFAULT true NOT NULL,
+    last_read_at timestamp without time zone,
+    last_write_at timestamp without time zone,
+    last_ping_at timestamp without time zone,
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL,
+    keep_service_uuid character varying(255)
+);
+
+
+--
+-- Name: keep_disks_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE keep_disks_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: keep_disks_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE keep_disks_id_seq OWNED BY keep_disks.id;
+
+
+--
+-- Name: keep_services; Type: TABLE; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE TABLE keep_services (
+    id integer NOT NULL,
+    uuid character varying(255) NOT NULL,
+    owner_uuid character varying(255) NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    service_host character varying(255),
+    service_port integer,
+    service_ssl_flag boolean,
+    service_type character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL
+);
+
+
+--
+-- Name: keep_services_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE keep_services_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: keep_services_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE keep_services_id_seq OWNED BY keep_services.id;
+
+
+--
+-- Name: links; Type: TABLE; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE TABLE links (
+    id integer NOT NULL,
+    uuid character varying(255),
+    owner_uuid character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    tail_uuid character varying(255),
+    link_class character varying(255),
+    name character varying(255),
+    head_uuid character varying(255),
+    properties text,
+    updated_at timestamp without time zone NOT NULL
+);
+
+
+--
+-- Name: links_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE links_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: links_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE links_id_seq OWNED BY links.id;
+
+
+--
+-- Name: logs; Type: TABLE; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE TABLE logs (
+    id integer NOT NULL,
+    uuid character varying(255),
+    owner_uuid character varying(255),
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    object_uuid character varying(255),
+    event_at timestamp without time zone,
+    event_type character varying(255),
+    summary text,
+    properties text,
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL,
+    modified_at timestamp without time zone,
+    object_owner_uuid character varying(255)
+);
+
+
+--
+-- Name: logs_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE logs_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: logs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE logs_id_seq OWNED BY logs.id;
+
+
+--
+-- Name: nodes; Type: TABLE; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE TABLE nodes (
+    id integer NOT NULL,
+    uuid character varying(255),
+    owner_uuid character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    slot_number integer,
+    hostname character varying(255),
+    domain character varying(255),
+    ip_address character varying(255),
+    first_ping_at timestamp without time zone,
+    last_ping_at timestamp without time zone,
+    info text,
+    updated_at timestamp without time zone NOT NULL,
+    properties text,
+    job_uuid character varying(255)
+);
+
+
+--
+-- Name: nodes_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE nodes_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: nodes_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE nodes_id_seq OWNED BY nodes.id;
+
+
+--
+-- Name: pipeline_instances; Type: TABLE; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE TABLE pipeline_instances (
+    id integer NOT NULL,
+    uuid character varying(255),
+    owner_uuid character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    pipeline_template_uuid character varying(255),
+    name character varying(255),
+    components text,
+    updated_at timestamp without time zone NOT NULL,
+    properties text,
+    state character varying(255),
+    components_summary text,
+    started_at timestamp without time zone,
+    finished_at timestamp without time zone,
+    description text
+);
+
+
+--
+-- Name: pipeline_instances_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE pipeline_instances_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: pipeline_instances_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE pipeline_instances_id_seq OWNED BY pipeline_instances.id;
+
+
+--
+-- Name: pipeline_templates; Type: TABLE; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE TABLE pipeline_templates (
+    id integer NOT NULL,
+    uuid character varying(255),
+    owner_uuid character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    name character varying(255),
+    components text,
+    updated_at timestamp without time zone NOT NULL,
+    description text
+);
+
+
+--
+-- Name: pipeline_templates_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE pipeline_templates_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: pipeline_templates_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE pipeline_templates_id_seq OWNED BY pipeline_templates.id;
+
+
+--
+-- Name: repositories; Type: TABLE; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE TABLE repositories (
+    id integer NOT NULL,
+    uuid character varying(255) NOT NULL,
+    owner_uuid character varying(255) NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    name character varying(255),
+    fetch_url character varying(255),
+    push_url character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL
+);
+
+
+--
+-- Name: repositories_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE repositories_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: repositories_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE repositories_id_seq OWNED BY repositories.id;
+
+
+--
+-- Name: schema_migrations; Type: TABLE; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE TABLE schema_migrations (
+    version character varying(255) NOT NULL
+);
+
+
+--
+-- Name: specimens; Type: TABLE; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE TABLE specimens (
+    id integer NOT NULL,
+    uuid character varying(255),
+    owner_uuid character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    material character varying(255),
+    updated_at timestamp without time zone NOT NULL,
+    properties text
+);
+
+
+--
+-- Name: specimens_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE specimens_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: specimens_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE specimens_id_seq OWNED BY specimens.id;
+
+
+--
+-- Name: traits; Type: TABLE; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE TABLE traits (
+    id integer NOT NULL,
+    uuid character varying(255) NOT NULL,
+    owner_uuid character varying(255) NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    name character varying(255),
+    properties text,
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL
+);
+
+
+--
+-- Name: traits_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE traits_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: traits_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE traits_id_seq OWNED BY traits.id;
+
+
+--
+-- Name: users; Type: TABLE; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE TABLE users (
+    id integer NOT NULL,
+    uuid character varying(255),
+    owner_uuid character varying(255) NOT NULL,
+    created_at timestamp without time zone NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    email character varying(255),
+    first_name character varying(255),
+    last_name character varying(255),
+    identity_url character varying(255),
+    is_admin boolean,
+    prefs text,
+    updated_at timestamp without time zone NOT NULL,
+    default_owner_uuid character varying(255),
+    is_active boolean DEFAULT false
+);
+
+
+--
+-- Name: users_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE users_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: users_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE users_id_seq OWNED BY users.id;
+
+
+--
+-- Name: virtual_machines; Type: TABLE; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE TABLE virtual_machines (
+    id integer NOT NULL,
+    uuid character varying(255) NOT NULL,
+    owner_uuid character varying(255) NOT NULL,
+    modified_by_client_uuid character varying(255),
+    modified_by_user_uuid character varying(255),
+    modified_at timestamp without time zone,
+    hostname character varying(255),
+    created_at timestamp without time zone NOT NULL,
+    updated_at timestamp without time zone NOT NULL
+);
+
+
+--
+-- Name: virtual_machines_id_seq; Type: SEQUENCE; Schema: public; Owner: -
+--
+
+CREATE SEQUENCE virtual_machines_id_seq
+    START WITH 1
+    INCREMENT BY 1
+    NO MINVALUE
+    NO MAXVALUE
+    CACHE 1;
+
+
+--
+-- Name: virtual_machines_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -
+--
+
+ALTER SEQUENCE virtual_machines_id_seq OWNED BY virtual_machines.id;
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY api_client_authorizations ALTER COLUMN id SET DEFAULT nextval('api_client_authorizations_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY api_clients ALTER COLUMN id SET DEFAULT nextval('api_clients_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY authorized_keys ALTER COLUMN id SET DEFAULT nextval('authorized_keys_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY collections ALTER COLUMN id SET DEFAULT nextval('collections_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY commit_ancestors ALTER COLUMN id SET DEFAULT nextval('commit_ancestors_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY commits ALTER COLUMN id SET DEFAULT nextval('commits_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY groups ALTER COLUMN id SET DEFAULT nextval('groups_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY humans ALTER COLUMN id SET DEFAULT nextval('humans_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY job_tasks ALTER COLUMN id SET DEFAULT nextval('job_tasks_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY jobs ALTER COLUMN id SET DEFAULT nextval('jobs_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY keep_disks ALTER COLUMN id SET DEFAULT nextval('keep_disks_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY keep_services ALTER COLUMN id SET DEFAULT nextval('keep_services_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY links ALTER COLUMN id SET DEFAULT nextval('links_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY logs ALTER COLUMN id SET DEFAULT nextval('logs_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY nodes ALTER COLUMN id SET DEFAULT nextval('nodes_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY pipeline_instances ALTER COLUMN id SET DEFAULT nextval('pipeline_instances_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY pipeline_templates ALTER COLUMN id SET DEFAULT nextval('pipeline_templates_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY repositories ALTER COLUMN id SET DEFAULT nextval('repositories_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY specimens ALTER COLUMN id SET DEFAULT nextval('specimens_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY traits ALTER COLUMN id SET DEFAULT nextval('traits_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY users ALTER COLUMN id SET DEFAULT nextval('users_id_seq'::regclass);
+
+
+--
+-- Name: id; Type: DEFAULT; Schema: public; Owner: -
+--
+
+ALTER TABLE ONLY virtual_machines ALTER COLUMN id SET DEFAULT nextval('virtual_machines_id_seq'::regclass);
+
+
+--
+-- Name: api_client_authorizations_pkey; Type: CONSTRAINT; Schema: public; Owner: -; Tablespace: 
+--
+
+ALTER TABLE ONLY api_client_authorizations
+    ADD CONSTRAINT api_client_authorizations_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: api_clients_pkey; Type: CONSTRAINT; Schema: public; Owner: -; Tablespace: 
+--
+
+ALTER TABLE ONLY api_clients
+    ADD CONSTRAINT api_clients_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: authorized_keys_pkey; Type: CONSTRAINT; Schema: public; Owner: -; Tablespace: 
+--
+
+ALTER TABLE ONLY authorized_keys
+    ADD CONSTRAINT authorized_keys_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: collections_pkey; Type: CONSTRAINT; Schema: public; Owner: -; Tablespace: 
+--
+
+ALTER TABLE ONLY collections
+    ADD CONSTRAINT collections_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: commit_ancestors_pkey; Type: CONSTRAINT; Schema: public; Owner: -; Tablespace: 
+--
+
+ALTER TABLE ONLY commit_ancestors
+    ADD CONSTRAINT commit_ancestors_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: commits_pkey; Type: CONSTRAINT; Schema: public; Owner: -; Tablespace: 
+--
+
+ALTER TABLE ONLY commits
+    ADD CONSTRAINT commits_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: groups_pkey; Type: CONSTRAINT; Schema: public; Owner: -; Tablespace: 
+--
+
+ALTER TABLE ONLY groups
+    ADD CONSTRAINT groups_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: humans_pkey; Type: CONSTRAINT; Schema: public; Owner: -; Tablespace: 
+--
+
+ALTER TABLE ONLY humans
+    ADD CONSTRAINT humans_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: job_tasks_pkey; Type: CONSTRAINT; Schema: public; Owner: -; Tablespace: 
+--
+
+ALTER TABLE ONLY job_tasks
+    ADD CONSTRAINT job_tasks_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: jobs_pkey; Type: CONSTRAINT; Schema: public; Owner: -; Tablespace: 
+--
+
+ALTER TABLE ONLY jobs
+    ADD CONSTRAINT jobs_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: keep_disks_pkey; Type: CONSTRAINT; Schema: public; Owner: -; Tablespace: 
+--
+
+ALTER TABLE ONLY keep_disks
+    ADD CONSTRAINT keep_disks_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: keep_services_pkey; Type: CONSTRAINT; Schema: public; Owner: -; Tablespace: 
+--
+
+ALTER TABLE ONLY keep_services
+    ADD CONSTRAINT keep_services_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: links_pkey; Type: CONSTRAINT; Schema: public; Owner: -; Tablespace: 
+--
+
+ALTER TABLE ONLY links
+    ADD CONSTRAINT links_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: logs_pkey; Type: CONSTRAINT; Schema: public; Owner: -; Tablespace: 
+--
+
+ALTER TABLE ONLY logs
+    ADD CONSTRAINT logs_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: nodes_pkey; Type: CONSTRAINT; Schema: public; Owner: -; Tablespace: 
+--
+
+ALTER TABLE ONLY nodes
+    ADD CONSTRAINT nodes_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: pipeline_instances_pkey; Type: CONSTRAINT; Schema: public; Owner: -; Tablespace: 
+--
+
+ALTER TABLE ONLY pipeline_instances
+    ADD CONSTRAINT pipeline_instances_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: pipeline_templates_pkey; Type: CONSTRAINT; Schema: public; Owner: -; Tablespace: 
+--
+
+ALTER TABLE ONLY pipeline_templates
+    ADD CONSTRAINT pipeline_templates_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: repositories_pkey; Type: CONSTRAINT; Schema: public; Owner: -; Tablespace: 
+--
+
+ALTER TABLE ONLY repositories
+    ADD CONSTRAINT repositories_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: specimens_pkey; Type: CONSTRAINT; Schema: public; Owner: -; Tablespace: 
+--
+
+ALTER TABLE ONLY specimens
+    ADD CONSTRAINT specimens_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: traits_pkey; Type: CONSTRAINT; Schema: public; Owner: -; Tablespace: 
+--
+
+ALTER TABLE ONLY traits
+    ADD CONSTRAINT traits_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: users_pkey; Type: CONSTRAINT; Schema: public; Owner: -; Tablespace: 
+--
+
+ALTER TABLE ONLY users
+    ADD CONSTRAINT users_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: virtual_machines_pkey; Type: CONSTRAINT; Schema: public; Owner: -; Tablespace: 
+--
+
+ALTER TABLE ONLY virtual_machines
+    ADD CONSTRAINT virtual_machines_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: collection_owner_uuid_name_unique; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX collection_owner_uuid_name_unique ON collections USING btree (owner_uuid, name);
+
+
+--
+-- Name: groups_owner_uuid_name_unique; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX groups_owner_uuid_name_unique ON groups USING btree (owner_uuid, name);
+
+
+--
+-- Name: index_api_client_authorizations_on_api_client_id; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_api_client_authorizations_on_api_client_id ON api_client_authorizations USING btree (api_client_id);
+
+
+--
+-- Name: index_api_client_authorizations_on_api_token; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX index_api_client_authorizations_on_api_token ON api_client_authorizations USING btree (api_token);
+
+
+--
+-- Name: index_api_client_authorizations_on_expires_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_api_client_authorizations_on_expires_at ON api_client_authorizations USING btree (expires_at);
+
+
+--
+-- Name: index_api_client_authorizations_on_user_id; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_api_client_authorizations_on_user_id ON api_client_authorizations USING btree (user_id);
+
+
+--
+-- Name: index_api_clients_on_created_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_api_clients_on_created_at ON api_clients USING btree (created_at);
+
+
+--
+-- Name: index_api_clients_on_modified_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_api_clients_on_modified_at ON api_clients USING btree (modified_at);
+
+
+--
+-- Name: index_api_clients_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_api_clients_on_owner_uuid ON api_clients USING btree (owner_uuid);
+
+
+--
+-- Name: index_api_clients_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX index_api_clients_on_uuid ON api_clients USING btree (uuid);
+
+
+--
+-- Name: index_authkeys_on_user_and_expires_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_authkeys_on_user_and_expires_at ON authorized_keys USING btree (authorized_user_uuid, expires_at);
+
+
+--
+-- Name: index_authorized_keys_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_authorized_keys_on_owner_uuid ON authorized_keys USING btree (owner_uuid);
+
+
+--
+-- Name: index_authorized_keys_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX index_authorized_keys_on_uuid ON authorized_keys USING btree (uuid);
+
+
+--
+-- Name: index_collections_on_created_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_collections_on_created_at ON collections USING btree (created_at);
+
+
+--
+-- Name: index_collections_on_modified_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_collections_on_modified_at ON collections USING btree (modified_at);
+
+
+--
+-- Name: index_collections_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_collections_on_owner_uuid ON collections USING btree (owner_uuid);
+
+
+--
+-- Name: index_collections_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX index_collections_on_uuid ON collections USING btree (uuid);
+
+
+--
+-- Name: index_commit_ancestors_on_descendant_and_ancestor; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX index_commit_ancestors_on_descendant_and_ancestor ON commit_ancestors USING btree (descendant, ancestor);
+
+
+--
+-- Name: index_commits_on_repository_name_and_sha1; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX index_commits_on_repository_name_and_sha1 ON commits USING btree (repository_name, sha1);
+
+
+--
+-- Name: index_groups_on_created_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_groups_on_created_at ON groups USING btree (created_at);
+
+
+--
+-- Name: index_groups_on_group_class; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_groups_on_group_class ON groups USING btree (group_class);
+
+
+--
+-- Name: index_groups_on_modified_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_groups_on_modified_at ON groups USING btree (modified_at);
+
+
+--
+-- Name: index_groups_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_groups_on_owner_uuid ON groups USING btree (owner_uuid);
+
+
+--
+-- Name: index_groups_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX index_groups_on_uuid ON groups USING btree (uuid);
+
+
+--
+-- Name: index_humans_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_humans_on_owner_uuid ON humans USING btree (owner_uuid);
+
+
+--
+-- Name: index_humans_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX index_humans_on_uuid ON humans USING btree (uuid);
+
+
+--
+-- Name: index_job_tasks_on_created_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_job_tasks_on_created_at ON job_tasks USING btree (created_at);
+
+
+--
+-- Name: index_job_tasks_on_job_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_job_tasks_on_job_uuid ON job_tasks USING btree (job_uuid);
+
+
+--
+-- Name: index_job_tasks_on_modified_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_job_tasks_on_modified_at ON job_tasks USING btree (modified_at);
+
+
+--
+-- Name: index_job_tasks_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_job_tasks_on_owner_uuid ON job_tasks USING btree (owner_uuid);
+
+
+--
+-- Name: index_job_tasks_on_sequence; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_job_tasks_on_sequence ON job_tasks USING btree (sequence);
+
+
+--
+-- Name: index_job_tasks_on_success; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_job_tasks_on_success ON job_tasks USING btree (success);
+
+
+--
+-- Name: index_job_tasks_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX index_job_tasks_on_uuid ON job_tasks USING btree (uuid);
+
+
+--
+-- Name: index_jobs_on_created_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_jobs_on_created_at ON jobs USING btree (created_at);
+
+
+--
+-- Name: index_jobs_on_finished_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_jobs_on_finished_at ON jobs USING btree (finished_at);
+
+
+--
+-- Name: index_jobs_on_modified_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_jobs_on_modified_at ON jobs USING btree (modified_at);
+
+
+--
+-- Name: index_jobs_on_output; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_jobs_on_output ON jobs USING btree (output);
+
+
+--
+-- Name: index_jobs_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_jobs_on_owner_uuid ON jobs USING btree (owner_uuid);
+
+
+--
+-- Name: index_jobs_on_script; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_jobs_on_script ON jobs USING btree (script);
+
+
+--
+-- Name: index_jobs_on_started_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_jobs_on_started_at ON jobs USING btree (started_at);
+
+
+--
+-- Name: index_jobs_on_submit_id; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX index_jobs_on_submit_id ON jobs USING btree (submit_id);
+
+
+--
+-- Name: index_jobs_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX index_jobs_on_uuid ON jobs USING btree (uuid);
+
+
+--
+-- Name: index_keep_disks_on_filesystem_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_keep_disks_on_filesystem_uuid ON keep_disks USING btree (filesystem_uuid);
+
+
+--
+-- Name: index_keep_disks_on_last_ping_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_keep_disks_on_last_ping_at ON keep_disks USING btree (last_ping_at);
+
+
+--
+-- Name: index_keep_disks_on_node_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_keep_disks_on_node_uuid ON keep_disks USING btree (node_uuid);
+
+
+--
+-- Name: index_keep_disks_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_keep_disks_on_owner_uuid ON keep_disks USING btree (owner_uuid);
+
+
+--
+-- Name: index_keep_disks_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX index_keep_disks_on_uuid ON keep_disks USING btree (uuid);
+
+
+--
+-- Name: index_keep_services_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_keep_services_on_owner_uuid ON keep_services USING btree (owner_uuid);
+
+
+--
+-- Name: index_keep_services_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX index_keep_services_on_uuid ON keep_services USING btree (uuid);
+
+
+--
+-- Name: index_links_on_created_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_links_on_created_at ON links USING btree (created_at);
+
+
+--
+-- Name: index_links_on_head_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_links_on_head_uuid ON links USING btree (head_uuid);
+
+
+--
+-- Name: index_links_on_modified_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_links_on_modified_at ON links USING btree (modified_at);
+
+
+--
+-- Name: index_links_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_links_on_owner_uuid ON links USING btree (owner_uuid);
+
+
+--
+-- Name: index_links_on_tail_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_links_on_tail_uuid ON links USING btree (tail_uuid);
+
+
+--
+-- Name: index_links_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX index_links_on_uuid ON links USING btree (uuid);
+
+
+--
+-- Name: index_logs_on_created_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_logs_on_created_at ON logs USING btree (created_at);
+
+
+--
+-- Name: index_logs_on_event_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_logs_on_event_at ON logs USING btree (event_at);
+
+
+--
+-- Name: index_logs_on_event_type; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_logs_on_event_type ON logs USING btree (event_type);
+
+
+--
+-- Name: index_logs_on_modified_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_logs_on_modified_at ON logs USING btree (modified_at);
+
+
+--
+-- Name: index_logs_on_object_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_logs_on_object_uuid ON logs USING btree (object_uuid);
+
+
+--
+-- Name: index_logs_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_logs_on_owner_uuid ON logs USING btree (owner_uuid);
+
+
+--
+-- Name: index_logs_on_summary; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_logs_on_summary ON logs USING btree (summary);
+
+
+--
+-- Name: index_logs_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX index_logs_on_uuid ON logs USING btree (uuid);
+
+
+--
+-- Name: index_nodes_on_created_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_nodes_on_created_at ON nodes USING btree (created_at);
+
+
+--
+-- Name: index_nodes_on_hostname; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_nodes_on_hostname ON nodes USING btree (hostname);
+
+
+--
+-- Name: index_nodes_on_modified_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_nodes_on_modified_at ON nodes USING btree (modified_at);
+
+
+--
+-- Name: index_nodes_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_nodes_on_owner_uuid ON nodes USING btree (owner_uuid);
+
+
+--
+-- Name: index_nodes_on_slot_number; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX index_nodes_on_slot_number ON nodes USING btree (slot_number);
+
+
+--
+-- Name: index_nodes_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX index_nodes_on_uuid ON nodes USING btree (uuid);
+
+
+--
+-- Name: index_pipeline_instances_on_created_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_pipeline_instances_on_created_at ON pipeline_instances USING btree (created_at);
+
+
+--
+-- Name: index_pipeline_instances_on_modified_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_pipeline_instances_on_modified_at ON pipeline_instances USING btree (modified_at);
+
+
+--
+-- Name: index_pipeline_instances_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_pipeline_instances_on_owner_uuid ON pipeline_instances USING btree (owner_uuid);
+
+
+--
+-- Name: index_pipeline_instances_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX index_pipeline_instances_on_uuid ON pipeline_instances USING btree (uuid);
+
+
+--
+-- Name: index_pipeline_templates_on_created_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_pipeline_templates_on_created_at ON pipeline_templates USING btree (created_at);
+
+
+--
+-- Name: index_pipeline_templates_on_modified_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_pipeline_templates_on_modified_at ON pipeline_templates USING btree (modified_at);
+
+
+--
+-- Name: index_pipeline_templates_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_pipeline_templates_on_owner_uuid ON pipeline_templates USING btree (owner_uuid);
+
+
+--
+-- Name: index_pipeline_templates_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX index_pipeline_templates_on_uuid ON pipeline_templates USING btree (uuid);
+
+
+--
+-- Name: index_repositories_on_name; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX index_repositories_on_name ON repositories USING btree (name);
+
+
+--
+-- Name: index_repositories_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_repositories_on_owner_uuid ON repositories USING btree (owner_uuid);
+
+
+--
+-- Name: index_repositories_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX index_repositories_on_uuid ON repositories USING btree (uuid);
+
+
+--
+-- Name: index_specimens_on_created_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_specimens_on_created_at ON specimens USING btree (created_at);
+
+
+--
+-- Name: index_specimens_on_modified_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_specimens_on_modified_at ON specimens USING btree (modified_at);
+
+
+--
+-- Name: index_specimens_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_specimens_on_owner_uuid ON specimens USING btree (owner_uuid);
+
+
+--
+-- Name: index_specimens_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX index_specimens_on_uuid ON specimens USING btree (uuid);
+
+
+--
+-- Name: index_traits_on_name; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_traits_on_name ON traits USING btree (name);
+
+
+--
+-- Name: index_traits_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_traits_on_owner_uuid ON traits USING btree (owner_uuid);
+
+
+--
+-- Name: index_traits_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX index_traits_on_uuid ON traits USING btree (uuid);
+
+
+--
+-- Name: index_users_on_created_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_users_on_created_at ON users USING btree (created_at);
+
+
+--
+-- Name: index_users_on_modified_at; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_users_on_modified_at ON users USING btree (modified_at);
+
+
+--
+-- Name: index_users_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_users_on_owner_uuid ON users USING btree (owner_uuid);
+
+
+--
+-- Name: index_users_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX index_users_on_uuid ON users USING btree (uuid);
+
+
+--
+-- Name: index_virtual_machines_on_hostname; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_virtual_machines_on_hostname ON virtual_machines USING btree (hostname);
+
+
+--
+-- Name: index_virtual_machines_on_owner_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE INDEX index_virtual_machines_on_owner_uuid ON virtual_machines USING btree (owner_uuid);
+
+
+--
+-- Name: index_virtual_machines_on_uuid; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX index_virtual_machines_on_uuid ON virtual_machines USING btree (uuid);
+
+
+--
+-- Name: links_tail_name_unique_if_link_class_name; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX links_tail_name_unique_if_link_class_name ON links USING btree (tail_uuid, name) WHERE ((link_class)::text = 'name'::text);
+
+
+--
+-- Name: pipeline_template_owner_uuid_name_unique; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX pipeline_template_owner_uuid_name_unique ON pipeline_templates USING btree (owner_uuid, name);
+
+
+--
+-- Name: unique_schema_migrations; Type: INDEX; Schema: public; Owner: -; Tablespace: 
+--
+
+CREATE UNIQUE INDEX unique_schema_migrations ON schema_migrations USING btree (version);
+
+
+--
+-- PostgreSQL database dump complete
+--
+
+SET search_path TO "$user",public;
+
+INSERT INTO schema_migrations (version) VALUES ('20121016005009');
+
+INSERT INTO schema_migrations (version) VALUES ('20130105203021');
+
+INSERT INTO schema_migrations (version) VALUES ('20130105224358');
+
+INSERT INTO schema_migrations (version) VALUES ('20130105224618');
+
+INSERT INTO schema_migrations (version) VALUES ('20130107181109');
+
+INSERT INTO schema_migrations (version) VALUES ('20130107212832');
+
+INSERT INTO schema_migrations (version) VALUES ('20130109175700');
+
+INSERT INTO schema_migrations (version) VALUES ('20130109220548');
+
+INSERT INTO schema_migrations (version) VALUES ('20130113214204');
+
+INSERT INTO schema_migrations (version) VALUES ('20130116024233');
+
+INSERT INTO schema_migrations (version) VALUES ('20130116215213');
+
+INSERT INTO schema_migrations (version) VALUES ('20130118002239');
+
+INSERT INTO schema_migrations (version) VALUES ('20130122020042');
+
+INSERT INTO schema_migrations (version) VALUES ('20130122201442');
+
+INSERT INTO schema_migrations (version) VALUES ('20130122221616');
+
+INSERT INTO schema_migrations (version) VALUES ('20130123174514');
+
+INSERT INTO schema_migrations (version) VALUES ('20130123180224');
+
+INSERT INTO schema_migrations (version) VALUES ('20130123180228');
+
+INSERT INTO schema_migrations (version) VALUES ('20130125220425');
+
+INSERT INTO schema_migrations (version) VALUES ('20130128202518');
+
+INSERT INTO schema_migrations (version) VALUES ('20130128231343');
+
+INSERT INTO schema_migrations (version) VALUES ('20130130205749');
+
+INSERT INTO schema_migrations (version) VALUES ('20130203104818');
+
+INSERT INTO schema_migrations (version) VALUES ('20130203104824');
+
+INSERT INTO schema_migrations (version) VALUES ('20130203115329');
+
+INSERT INTO schema_migrations (version) VALUES ('20130207195855');
+
+INSERT INTO schema_migrations (version) VALUES ('20130218181504');
+
+INSERT INTO schema_migrations (version) VALUES ('20130226170000');
+
+INSERT INTO schema_migrations (version) VALUES ('20130313175417');
+
+INSERT INTO schema_migrations (version) VALUES ('20130315155820');
+
+INSERT INTO schema_migrations (version) VALUES ('20130315183626');
+
+INSERT INTO schema_migrations (version) VALUES ('20130315213205');
+
+INSERT INTO schema_migrations (version) VALUES ('20130318002138');
+
+INSERT INTO schema_migrations (version) VALUES ('20130319165853');
+
+INSERT INTO schema_migrations (version) VALUES ('20130319180730');
+
+INSERT INTO schema_migrations (version) VALUES ('20130319194637');
+
+INSERT INTO schema_migrations (version) VALUES ('20130319201431');
+
+INSERT INTO schema_migrations (version) VALUES ('20130319235957');
+
+INSERT INTO schema_migrations (version) VALUES ('20130320000107');
+
+INSERT INTO schema_migrations (version) VALUES ('20130326173804');
+
+INSERT INTO schema_migrations (version) VALUES ('20130326182917');
+
+INSERT INTO schema_migrations (version) VALUES ('20130415020241');
+
+INSERT INTO schema_migrations (version) VALUES ('20130425024459');
+
+INSERT INTO schema_migrations (version) VALUES ('20130425214427');
+
+INSERT INTO schema_migrations (version) VALUES ('20130523060112');
+
+INSERT INTO schema_migrations (version) VALUES ('20130523060213');
+
+INSERT INTO schema_migrations (version) VALUES ('20130524042319');
+
+INSERT INTO schema_migrations (version) VALUES ('20130528134100');
+
+INSERT INTO schema_migrations (version) VALUES ('20130606183519');
+
+INSERT INTO schema_migrations (version) VALUES ('20130608053730');
+
+INSERT INTO schema_migrations (version) VALUES ('20130610202538');
+
+INSERT INTO schema_migrations (version) VALUES ('20130611163736');
+
+INSERT INTO schema_migrations (version) VALUES ('20130612042554');
+
+INSERT INTO schema_migrations (version) VALUES ('20130617150007');
+
+INSERT INTO schema_migrations (version) VALUES ('20130626002829');
+
+INSERT INTO schema_migrations (version) VALUES ('20130626022810');
+
+INSERT INTO schema_migrations (version) VALUES ('20130627154537');
+
+INSERT INTO schema_migrations (version) VALUES ('20130627184333');
+
+INSERT INTO schema_migrations (version) VALUES ('20130708163414');
+
+INSERT INTO schema_migrations (version) VALUES ('20130708182912');
+
+INSERT INTO schema_migrations (version) VALUES ('20130708185153');
+
+INSERT INTO schema_migrations (version) VALUES ('20130724153034');
+
+INSERT INTO schema_migrations (version) VALUES ('20131007180607');
+
+INSERT INTO schema_migrations (version) VALUES ('20140117231056');
+
+INSERT INTO schema_migrations (version) VALUES ('20140124222114');
+
+INSERT INTO schema_migrations (version) VALUES ('20140129184311');
+
+INSERT INTO schema_migrations (version) VALUES ('20140317135600');
+
+INSERT INTO schema_migrations (version) VALUES ('20140319160547');
+
+INSERT INTO schema_migrations (version) VALUES ('20140321191343');
+
+INSERT INTO schema_migrations (version) VALUES ('20140324024606');
+
+INSERT INTO schema_migrations (version) VALUES ('20140325175653');
+
+INSERT INTO schema_migrations (version) VALUES ('20140402001908');
+
+INSERT INTO schema_migrations (version) VALUES ('20140407184311');
+
+INSERT INTO schema_migrations (version) VALUES ('20140421140924');
+
+INSERT INTO schema_migrations (version) VALUES ('20140421151939');
+
+INSERT INTO schema_migrations (version) VALUES ('20140421151940');
+
+INSERT INTO schema_migrations (version) VALUES ('20140422011506');
+
+INSERT INTO schema_migrations (version) VALUES ('20140423132913');
+
+INSERT INTO schema_migrations (version) VALUES ('20140423133559');
+
+INSERT INTO schema_migrations (version) VALUES ('20140501165548');
+
+INSERT INTO schema_migrations (version) VALUES ('20140519205916');
+
+INSERT INTO schema_migrations (version) VALUES ('20140527152921');
+
+INSERT INTO schema_migrations (version) VALUES ('20140530200539');
+
+INSERT INTO schema_migrations (version) VALUES ('20140601022548');
+
+INSERT INTO schema_migrations (version) VALUES ('20140602143352');
+
+INSERT INTO schema_migrations (version) VALUES ('20140607150616');
+
+INSERT INTO schema_migrations (version) VALUES ('20140611173003');
+
+INSERT INTO schema_migrations (version) VALUES ('20140627210837');
+
+INSERT INTO schema_migrations (version) VALUES ('20140709172343');
+
+INSERT INTO schema_migrations (version) VALUES ('20140714184006');
+
+INSERT INTO schema_migrations (version) VALUES ('20140811184643');
+
+INSERT INTO schema_migrations (version) VALUES ('20140817035914');
+
+INSERT INTO schema_migrations (version) VALUES ('20140818125735');
+
+INSERT INTO schema_migrations (version) VALUES ('20140826180337');
+
+INSERT INTO schema_migrations (version) VALUES ('20140828141043');
+
+INSERT INTO schema_migrations (version) VALUES ('20140909183946');
+
+INSERT INTO schema_migrations (version) VALUES ('20140911221252');
+
+INSERT INTO schema_migrations (version) VALUES ('20140918141529');
+
+INSERT INTO schema_migrations (version) VALUES ('20140918153541');
+
+INSERT INTO schema_migrations (version) VALUES ('20140918153705');
+
+INSERT INTO schema_migrations (version) VALUES ('20140924091559');
+
+INSERT INTO schema_migrations (version) VALUES ('20141111133038');
+
+INSERT INTO schema_migrations (version) VALUES ('20141208164553');
\ No newline at end of file
diff --git a/services/api/lib/assets/.gitkeep b/services/api/lib/assets/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/api/lib/can_be_an_owner.rb b/services/api/lib/can_be_an_owner.rb
new file mode 100644 (file)
index 0000000..16a8783
--- /dev/null
@@ -0,0 +1,47 @@
+# Protect referential integrity of owner_uuid columns in other tables
+# that can refer to the uuid column in this table.
+
+module CanBeAnOwner
+
+  def self.included(base)
+    # Rails' "has_many" can prevent us from destroying the owner
+    # record when other objects refer to it.
+    ActiveRecord::Base.connection.tables.each do |t|
+      next if t == base.table_name
+      next if t == 'schema_migrations'
+      klass = t.classify.constantize
+      next unless klass and 'owner_uuid'.in?(klass.columns.collect(&:name))
+      base.has_many(t.to_sym,
+                    foreign_key: :owner_uuid,
+                    primary_key: :uuid,
+                    dependent: :restrict)
+    end
+    # We need custom protection for changing an owner's primary
+    # key. (Apart from this restriction, admins are allowed to change
+    # UUIDs.)
+    base.validate :restrict_uuid_change_breaking_associations
+  end
+
+  protected
+
+  def restrict_uuid_change_breaking_associations
+    return true if new_record? or not uuid_changed?
+
+    # Check for objects that have my old uuid listed as their owner.
+    self.class.reflect_on_all_associations(:has_many).each do |assoc|
+      next unless assoc.foreign_key == :owner_uuid
+      if assoc.klass.where(owner_uuid: uuid_was).any?
+        errors.add(:uuid,
+                   "cannot be changed on a #{self.class} that owns objects")
+        return false
+      end
+    end
+
+    # if I owned myself before, I'll just continue to own myself with
+    # my new uuid.
+    if owner_uuid == uuid_was
+      self.owner_uuid = uuid
+    end
+  end
+
+end
diff --git a/services/api/lib/common_api_template.rb b/services/api/lib/common_api_template.rb
new file mode 100644 (file)
index 0000000..67c7f8c
--- /dev/null
@@ -0,0 +1,39 @@
+module CommonApiTemplate
+  def self.included(base)
+    base.acts_as_api
+    base.class_eval do
+      alias_method :as_api_response_orig, :as_api_response
+      include InstanceMethods
+    end
+    base.extend(ClassMethods)
+    base.api_accessible :common do |t|
+      t.add :href
+      t.add :kind
+      t.add :etag
+      t.add :uuid
+      t.add :owner_uuid
+      t.add :created_at
+      t.add :modified_by_client_uuid
+      t.add :modified_by_user_uuid
+      t.add :modified_at
+    end
+  end
+
+  module InstanceMethods
+    # choose template based on opts[:for_user]
+    def as_api_response(template=nil, opts={})
+      if template.nil?
+        user = opts[:for_user] || current_user
+        if user.andand.is_admin and self.respond_to? :api_accessible_superuser
+          template = :superuser
+        else
+          template = :user
+        end
+      end
+      self.as_api_response_orig(template, opts)
+    end
+  end
+
+  module ClassMethods
+  end
+end
diff --git a/services/api/lib/current_api_client.rb b/services/api/lib/current_api_client.rb
new file mode 100644 (file)
index 0000000..9f78587
--- /dev/null
@@ -0,0 +1,205 @@
+module CurrentApiClient
+  def current_user
+    Thread.current[:user]
+  end
+
+  def current_api_client
+    Thread.current[:api_client]
+  end
+
+  def current_api_client_authorization
+    Thread.current[:api_client_authorization]
+  end
+
+  def current_api_base
+    Thread.current[:api_url_base]
+  end
+
+  def current_default_owner
+    # owner_uuid for newly created objects
+    ((current_api_client_authorization &&
+      current_api_client_authorization.default_owner_uuid) ||
+     (current_user && current_user.default_owner_uuid) ||
+     (current_user && current_user.uuid) ||
+     nil)
+  end
+
+  # Where is the client connecting from?
+  def current_api_client_ip_address
+    Thread.current[:api_client_ip_address]
+  end
+
+  def system_user_uuid
+    [Server::Application.config.uuid_prefix,
+     User.uuid_prefix,
+     '000000000000000'].join('-')
+  end
+
+  def system_group_uuid
+    [Server::Application.config.uuid_prefix,
+     Group.uuid_prefix,
+     '000000000000000'].join('-')
+  end
+
+  def anonymous_group_uuid
+    [Server::Application.config.uuid_prefix,
+     Group.uuid_prefix,
+     'anonymouspublic'].join('-')
+  end
+
+  def anonymous_user_uuid
+    [Server::Application.config.uuid_prefix,
+     User.uuid_prefix,
+     'anonymouspublic'].join('-')
+  end
+
+  def system_user
+    if not $system_user
+      real_current_user = Thread.current[:user]
+      Thread.current[:user] = User.new(is_admin: true,
+                                       is_active: true,
+                                       uuid: system_user_uuid)
+      $system_user = User.where('uuid=?', system_user_uuid).first
+      if !$system_user
+        $system_user = User.new(uuid: system_user_uuid,
+                                is_active: true,
+                                is_admin: true,
+                                email: 'root',
+                                first_name: 'root',
+                                last_name: '')
+        $system_user.save!
+        $system_user.reload
+      end
+      Thread.current[:user] = real_current_user
+    end
+    $system_user
+  end
+
+  def system_group
+    if not $system_group
+      act_as_system_user do
+        ActiveRecord::Base.transaction do
+          $system_group = Group.
+            where(uuid: system_group_uuid).first_or_create do |g|
+            g.update_attributes(name: "System group",
+                                description: "System group")
+            User.all.collect(&:uuid).each do |user_uuid|
+              Link.create(link_class: 'permission',
+                          name: 'can_manage',
+                          tail_kind: 'arvados#group',
+                          tail_uuid: system_group_uuid,
+                          head_kind: 'arvados#user',
+                          head_uuid: user_uuid)
+            end
+          end
+        end
+      end
+    end
+    $system_group
+  end
+
+  def all_users_group_uuid
+    [Server::Application.config.uuid_prefix,
+     Group.uuid_prefix,
+     'fffffffffffffff'].join('-')
+  end
+
+  def all_users_group
+    if not $all_users_group
+      act_as_system_user do
+        ActiveRecord::Base.transaction do
+          $all_users_group = Group.
+            where(uuid: all_users_group_uuid).first_or_create do |g|
+            g.update_attributes(name: "All users",
+                                description: "All users",
+                                group_class: "role")
+          end
+        end
+      end
+    end
+    $all_users_group
+  end
+
+  def act_as_system_user
+    if block_given?
+      act_as_user system_user do
+        yield
+      end
+    else
+      Thread.current[:user] = system_user
+    end
+  end
+
+  def act_as_user user
+    user_was = Thread.current[:user]
+    Thread.current[:user] = user
+    begin
+      yield
+    ensure
+      Thread.current[:user] = user_was
+    end
+  end
+
+  def anonymous_group
+    if not $anonymous_group
+      act_as_system_user do
+        ActiveRecord::Base.transaction do
+          $anonymous_group = Group.
+          where(uuid: anonymous_group_uuid).first_or_create do |g|
+            g.update_attributes(name: "Anonymous group",
+                                description: "Anonymous group")
+          end
+        end
+      end
+    end
+    $anonymous_group
+  end
+
+  def anonymous_user
+    if not $anonymous_user
+      act_as_system_user do
+        $anonymous_user = User.where('uuid=?', anonymous_user_uuid).first
+        if !$anonymous_user
+          $anonymous_user = User.new(uuid: anonymous_user_uuid,
+                                     is_active: false,
+                                     is_admin: false,
+                                     email: 'anonymouspublic',
+                                     first_name: 'anonymouspublic',
+                                     last_name: 'anonymouspublic')
+          $anonymous_user.save!
+          $anonymous_user.reload
+        end
+
+        group_perms = Link.where(tail_uuid: anonymous_user_uuid,
+                                 head_uuid: anonymous_group_uuid,
+                                 link_class: 'permission',
+                                 name: 'can_read')
+
+        if !group_perms.any?
+          group_perm = Link.create!(tail_uuid: anonymous_user_uuid,
+                                    head_uuid: anonymous_group_uuid,
+                                    link_class: 'permission',
+                                    name: 'can_read')
+        end
+      end
+    end
+    $anonymous_user
+  end
+
+  def empty_collection_uuid
+    'd41d8cd98f00b204e9800998ecf8427e+0'
+  end
+
+  def empty_collection
+    if not $empty_collection
+      act_as_system_user do
+        ActiveRecord::Base.transaction do
+          $empty_collection = Collection.
+            where(portable_data_hash: empty_collection_uuid).
+            first_or_create!(manifest_text: '', owner_uuid: anonymous_group.uuid)
+        end
+      end
+    end
+    $empty_collection
+  end
+end
diff --git a/services/api/lib/eventbus.rb b/services/api/lib/eventbus.rb
new file mode 100644 (file)
index 0000000..35671d6
--- /dev/null
@@ -0,0 +1,283 @@
+# If any threads raise an unhandled exception, make them all die.
+# We trust a supervisor like runit to restart the server in this case.
+Thread.abort_on_exception = true
+
+require 'eventmachine'
+require 'oj'
+require 'faye/websocket'
+require 'record_filters'
+require 'load_param'
+
+# Patch in user, last_log_id and filters fields into the Faye::Websocket class.
+module Faye
+  class WebSocket
+    attr_accessor :user
+    attr_accessor :last_log_id
+    attr_accessor :filters
+  end
+end
+
+# Store the filters supplied by the user that will be applied to the logs table
+# to determine which events to return to the listener.
+class Filter
+  include LoadParam
+
+  attr_accessor :filters
+
+  def initialize p
+    @params = p
+    load_filters_param
+  end
+
+  def params
+    @params
+  end
+end
+
+# Manages websocket connections, accepts subscription messages and publishes
+# log table events.
+class EventBus
+  include CurrentApiClient
+  include RecordFilters
+
+  # used in RecordFilters
+  def model_class
+    Log
+  end
+
+  # Initialize EventBus.  Takes no parameters.
+  def initialize
+    @channel = EventMachine::Channel.new
+    @mtx = Mutex.new
+    @bgthread = false
+  end
+
+  # Push out any pending events to the connection +ws+
+  # +notify_id+  the id of the most recent row in the log table, may be nil
+  #
+  # This accepts a websocket and a notify_id (this is the row id from Postgres
+  # LISTEN/NOTIFY, it may be nil if called from somewhere else)
+  #
+  # It queries the database for log rows that are either
+  #  a) greater than ws.last_log_id, which is the last log id which was a candidate to be sent out
+  #  b) if ws.last_log_id is nil, then it queries rows starting with notify_id
+  #
+  # Regular Arvados permissions are applied using readable_by() and filters using record_filters()
+  # To avoid clogging up the database, queries are limited to small batches
+  # (see +limit+ below).  It will schedule a new
+  # push_events call if there are more log rows to send.
+  def push_events ws, notify_id
+    begin
+      if !notify_id.nil? and !ws.last_log_id.nil? and notify_id <= ws.last_log_id
+        # This notify is for a row we've handled already.
+        return
+      end
+
+      # Must have at least one filter set up to receive events
+      if ws.filters.length > 0
+        # Start with log rows readable by user, sorted in ascending order
+        logs = Log.readable_by(ws.user).order("id asc")
+
+        cond_id = nil
+        cond_out = []
+        param_out = []
+
+        if !ws.last_log_id.nil?
+          # Client is only interested in log rows that are newer than the
+          # last log row seen by the client.
+          cond_id = "logs.id > ?"
+          param_out << ws.last_log_id
+        elsif !notify_id.nil?
+          # No last log id, so look at rows starting with notify id
+          cond_id = "logs.id >= ?"
+          param_out << notify_id
+        else
+          # No log id to start from, nothing to do, return
+          return
+        end
+
+        # Now build filters provided by client
+        ws.filters.each do |filter|
+          ft = record_filters filter.filters, Log
+          if ft[:cond_out].any?
+            # Join the clauses within a single subscription filter with AND
+            # so it is consistent with regular queries
+            cond_out << "(#{ft[:cond_out].join ') AND ('})"
+            param_out += ft[:param_out]
+          end
+        end
+
+        # Add filters to query
+        if cond_out.any?
+          # Join subscriptions with OR
+          logs = logs.where(cond_id + " AND ((#{cond_out.join ') OR ('}))", *param_out)
+        else
+          logs = logs.where(cond_id, *param_out)
+        end
+
+        # Execute query and actually send the matching log rows
+        count = 0
+        # Batch size: cap each query so one busy connection cannot hold a
+        # database connection for long.
+        limit = 20
+
+        logs.limit(limit).each do |l|
+          ws.send(l.as_api_response.to_json)
+          ws.last_log_id = l.id
+          count += 1
+        end
+
+        if count == limit
+          # Number of rows returned was capped by limit(), we need to schedule
+          # another query to get more logs (will start from last_log_id
+          # reported by current query)
+          EventMachine::schedule do
+            push_events ws, nil
+          end
+        elsif !notify_id.nil? and (ws.last_log_id.nil? or notify_id > ws.last_log_id)
+          # Number of rows returned was less than cap, but the notify id is
+          # higher than the last id visible to the client, so update last_log_id
+          ws.last_log_id = notify_id
+        end
+      elsif !notify_id.nil?
+        # No filters set up, so just record the sequence number
+        ws.last_log_id = notify_id
+      end
+    rescue => e
+      Rails.logger.warn "Error publishing event: #{$!}"
+      Rails.logger.warn "Backtrace:\n\t#{e.backtrace.join("\n\t")}"
+      ws.send ({status: 500, message: 'error'}.to_json)
+      ws.close
+      # These exceptions typically indicate serious server trouble:
+      # out of memory issues, database connection problems, etc.  Go ahead and
+      # crash; we expect that a supervisor service like runit will restart us.
+      raise
+    end
+  end
+
+  # Handle inbound subscribe or unsubscribe message.
+  def handle_message ws, event
+    begin
+      begin
+        # Parse event data as JSON
+        # NOTE(review): if the payload parses to something other than a Hash,
+        # symbolize_keys raises and is reported as a 500 by the outer rescue;
+        # likewise a malformed :filters value raises ArgumentError inside
+        # Filter.new.  Confirm whether those cases should be 400s instead.
+        p = (Oj.load event.data).symbolize_keys
+        filter = Filter.new(p)
+      rescue Oj::Error => e
+        ws.send ({status: 400, message: "malformed request"}.to_json)
+        return
+      end
+
+      if p[:method] == 'subscribe'
+        # Handle subscribe event
+
+        if p[:last_log_id]
+          # Set or reset the last_log_id.  The event bus only reports events
+          # for rows that come after last_log_id.
+          ws.last_log_id = p[:last_log_id].to_i
+        end
+
+        if ws.filters.length < MAX_FILTERS
+          # Add a filter.  This gets the :filters field which is the same
+          # format as used for regular index queries.
+          ws.filters << filter
+          ws.send ({status: 200, message: 'subscribe ok', filter: p}.to_json)
+
+          # Send any pending events
+          push_events ws, nil
+        else
+          ws.send ({status: 403, message: "maximum of #{MAX_FILTERS} filters allowed per connection"}.to_json)
+        end
+
+      elsif p[:method] == 'unsubscribe'
+        # Handle unsubscribe event
+
+        len = ws.filters.length
+        ws.filters.select! { |f| not ((f.filters == p[:filters]) or (f.filters.empty? and p[:filters].nil?)) }
+        if ws.filters.length < len
+          ws.send ({status: 200, message: 'unsubscribe ok'}.to_json)
+        else
+          ws.send ({status: 404, message: 'filter not found'}.to_json)
+        end
+
+      else
+        ws.send ({status: 400, message: "missing or unrecognized method"}.to_json)
+      end
+    rescue => e
+      Rails.logger.warn "Error handling message: #{$!}"
+      Rails.logger.warn "Backtrace:\n\t#{e.backtrace.join("\n\t")}"
+      ws.send ({status: 500, message: 'error'}.to_json)
+      ws.close
+    end
+  end
+
+  # Constant maximum number of filters, to avoid silly huge database queries.
+  MAX_FILTERS = 16
+
+  # Called by RackSocket when a new websocket connection has been established.
+  def on_connect ws
+
+    # Disconnect if no valid API token.
+    # current_user is included from CurrentApiClient
+    if not current_user
+      ws.send ({status: 401, message: "Valid API token required"}.to_json)
+      ws.close
+      return
+    end
+
+    # Initialize our custom fields on the websocket connection object.
+    ws.user = current_user
+    ws.filters = []
+    ws.last_log_id = nil
+
+    # Subscribe to internal postgres notifications through @channel.  This will
+    # call push_events when a notification comes through.
+    sub = @channel.subscribe do |msg|
+      push_events ws, msg
+    end
+
+    # Set up callback for inbound message dispatch.
+    ws.on :message do |event|
+      handle_message ws, event
+    end
+
+    # Set up socket close callback
+    ws.on :close do |event|
+      @channel.unsubscribe sub
+      ws = nil
+    end
+
+    # Start up thread to monitor the Postgres database, if none exists already.
+    @mtx.synchronize do
+      unless @bgthread
+        @bgthread = true
+        Thread.new do
+          # from http://stackoverflow.com/questions/16405520/postgres-listen-notify-rails
+          ActiveRecord::Base.connection_pool.with_connection do |connection|
+            conn = connection.instance_variable_get(:@connection)
+            begin
+              conn.async_exec "LISTEN logs"
+              while true
+                # wait_for_notify will block until there is a change
+                # notification from Postgres about the logs table, then push
+                # the notification into the EventMachine channel.  Each
+                # websocket connection subscribes to the other end of the
+                # channel and calls #push_events to actually dispatch the
+                # events to the client.
+                conn.wait_for_notify do |channel, pid, payload|
+                  @channel.push payload.to_i
+                end
+              end
+            ensure
+              # Don't want the connection to still be listening once we return
+              # it to the pool - could result in weird behavior for the next
+              # thread to check it out.
+              conn.async_exec "UNLISTEN *"
+            end
+          end
+          @bgthread = false
+        end
+      end
+    end
+
+    # Since EventMachine is an asynchronous event based dispatcher, #on_connect
+    # does not block but instead returns immediately after having set up the
+    # websocket and notification channel callbacks.
+  end
+end
diff --git a/services/api/lib/has_uuid.rb b/services/api/lib/has_uuid.rb
new file mode 100644 (file)
index 0000000..e0a5613
--- /dev/null
@@ -0,0 +1,72 @@
+module HasUuid
+
+  UUID_REGEX = /^[0-9a-z]{5}-([0-9a-z]{5})-[0-9a-z]{15}$/
+
+  def self.included(base)
+    base.extend(ClassMethods)
+    base.validate :validate_uuid
+    base.before_create :assign_uuid
+    base.before_destroy :destroy_permission_links
+    base.has_many :links_via_head, class_name: 'Link', foreign_key: :head_uuid, primary_key: :uuid, conditions: "not (link_class = 'permission')", dependent: :restrict
+    base.has_many :links_via_tail, class_name: 'Link', foreign_key: :tail_uuid, primary_key: :uuid, conditions: "not (link_class = 'permission')", dependent: :restrict
+  end
+
+  module ClassMethods
+    def uuid_prefix
+      Digest::MD5.hexdigest(self.to_s).to_i(16).to_s(36)[-5..-1]
+    end
+    def generate_uuid
+      [Server::Application.config.uuid_prefix,
+       self.uuid_prefix,
+       rand(2**256).to_s(36)[-15..-1]].
+        join '-'
+    end
+  end
+
+  protected
+
+  def respond_to_uuid?
+    self.respond_to? :uuid
+  end
+
+  def validate_uuid
+    if self.respond_to_uuid? and self.uuid_changed?
+      if current_user.andand.is_admin and self.uuid.is_a?(String)
+        if (re = self.uuid.match HasUuid::UUID_REGEX)
+          if re[1] == self.class.uuid_prefix
+            return true
+          else
+            self.errors.add(:uuid, "type field is '#{re[1]}', expected '#{self.class.uuid_prefix}'")
+            return false
+          end
+        else
+          self.errors.add(:uuid, "not a valid Arvados uuid '#{self.uuid}'")
+          return false
+        end
+      else
+        if self.new_record?
+          self.errors.add(:uuid, "assignment not permitted")
+        else
+          self.errors.add(:uuid, "change not permitted")
+        end
+        return false
+      end
+    else
+      return true
+    end
+  end
+
+  def assign_uuid
+    if self.respond_to_uuid? and self.uuid.nil? or self.uuid.empty?
+      self.uuid = self.class.generate_uuid
+    end
+    true
+  end
+
+  def destroy_permission_links
+    if uuid
+      Link.destroy_all(['link_class=? and (head_uuid=? or tail_uuid=?)',
+                        'permission', uuid, uuid])
+    end
+  end
+end
diff --git a/services/api/lib/josh_id.rb b/services/api/lib/josh_id.rb
new file mode 100644 (file)
index 0000000..a63b251
--- /dev/null
@@ -0,0 +1,53 @@
+require 'omniauth-oauth2'
+# OmniAuth strategy for the "josh_id" OAuth2 provider.  All endpoint URLs
+# are derived at request time from the :custom_provider_url option.
+module OmniAuth
+  module Strategies
+    class JoshId < OmniAuth::Strategies::OAuth2
+
+      args [:client_id, :client_secret, :custom_provider_url]
+
+      option :custom_provider_url, ''
+
+      # Unique account id, taken from the provider's user record.
+      uid { raw_info['id'] }
+
+      option :client_options, {}
+
+      # Standard omniauth info hash, mapped from the provider's 'info' payload.
+      info do
+        {
+          :first_name => raw_info['info']['first_name'],
+          :last_name => raw_info['info']['last_name'],
+          :email => raw_info['info']['email'],
+          :identity_url => raw_info['info']['identity_url'],
+        }
+      end
+
+      extra do
+        {
+          'raw_info' => raw_info
+        }
+      end
+
+      def authorize_params
+        # Forward the caller-selected auth_provider to the authorize endpoint.
+        options.authorize_params[:auth_provider] = request.params['auth_provider']
+        super
+      end
+
+      # Build the OAuth2 client, pointing the site/authorize/token URLs at
+      # the configured provider.
+      def client
+        options.client_options[:site] = options[:custom_provider_url]
+        options.client_options[:authorize_url] = "#{options[:custom_provider_url]}/auth/josh_id/authorize"
+        options.client_options[:access_token_url] = "#{options[:custom_provider_url]}/auth/josh_id/access_token"
+        if Rails.configuration.sso_insecure
+          # Deliberately disables TLS certificate verification when
+          # sso_insecure is configured.
+          options.client_options[:ssl] = {verify_mode: OpenSSL::SSL::VERIFY_NONE}
+        end
+        ::OAuth2::Client.new(options.client_id, options.client_secret, deep_symbolize(options.client_options))
+      end
+
+      def callback_url
+        # NOTE(review): CGI.escape raises TypeError if the 'return_to' request
+        # param is absent -- confirm every login flow supplies it.
+        full_host + script_name + callback_path + "?return_to=" + CGI.escape(request.params['return_to'])
+      end
+
+      # Fetch (and memoize) the provider's user record.
+      def raw_info
+        @raw_info ||= access_token.get("/auth/josh_id/user.json?oauth_token=#{access_token.token}").parsed
+      end
+    end
+  end
+end
diff --git a/services/api/lib/kind_and_etag.rb b/services/api/lib/kind_and_etag.rb
new file mode 100644 (file)
index 0000000..89c01ef
--- /dev/null
@@ -0,0 +1,20 @@
+module KindAndEtag
+
+  def self.included(base)
+    base.extend(ClassMethods)
+  end
+
+  module ClassMethods
+    def kind
+      'arvados#' + self.to_s.camelcase(:lower)
+    end
+  end
+
+  def kind
+    self.class.kind
+  end
+
+  def etag
+    Digest::MD5.hexdigest(self.inspect).to_i(16).to_s(36)
+  end
+end
diff --git a/services/api/lib/load_param.rb b/services/api/lib/load_param.rb
new file mode 100644 (file)
index 0000000..3f1a3b2
--- /dev/null
@@ -0,0 +1,145 @@
+# Mixin module for reading out query parameters from request params.
+#
+# Expects:
+#   +params+ Hash
+# Sets:
+#   @where, @filters, @limit, @offset, @orders, @select, @distinct
+module LoadParam
+
+  # Default number of rows to return in a single query.
+  DEFAULT_LIMIT = 100
+
+  # Maximum number of rows to return in a single query, even if the client asks for more.
+  MAX_LIMIT = 1000
+
+  # Load params[:where] into @where
+  def load_where_param
+    if params[:where].nil? or params[:where] == ""
+      @where = {}
+    elsif params[:where].is_a? Hash
+      @where = params[:where]
+    elsif params[:where].is_a? String
+      begin
+        @where = Oj.load(params[:where])
+        raise unless @where.is_a? Hash
+      rescue
+        raise ArgumentError.new("Could not parse \"where\" param as an object")
+      end
+    end
+    @where = @where.with_indifferent_access
+  end
+
+  # Load params[:filters] into @filters
+  def load_filters_param
+    @filters ||= []
+    if params[:filters].is_a? Array
+      @filters += params[:filters]
+    elsif params[:filters].is_a? String and !params[:filters].empty?
+      begin
+        f = Oj.load params[:filters]
+        if not f.nil?
+          raise unless f.is_a? Array
+          @filters += f
+        end
+      rescue
+        raise ArgumentError.new("Could not parse \"filters\" param as an array")
+      end
+    end
+  end
+
+  # Ordering used when the client does not supply one: newest first.
+  def default_orders
+    ["#{table_name}.modified_at desc"]
+  end
+
+  # Load params[:limit], params[:offset] and params[:order]
+  # into @limit, @offset, @orders
+  def load_limit_offset_order_params
+    if params[:limit]
+      unless params[:limit].to_s.match(/^\d+$/)
+        raise ArgumentError.new("Invalid value for limit parameter")
+      end
+      @limit = [params[:limit].to_i, MAX_LIMIT].min
+    else
+      @limit = DEFAULT_LIMIT
+    end
+
+    if params[:offset]
+      unless params[:offset].to_s.match(/^\d+$/)
+        raise ArgumentError.new("Invalid value for offset parameter")
+      end
+      @offset = params[:offset].to_i
+    else
+      @offset = 0
+    end
+
+    @orders = []
+    if (params[:order].is_a?(Array) && !params[:order].empty?) || !params[:order].blank?
+      od = []
+      (case params[:order]
+       when String
+         if params[:order].starts_with? '['
+           od = Oj.load(params[:order])
+           raise unless od.is_a? Array
+           od
+         else
+           params[:order].split(',')
+         end
+       when Array
+         params[:order]
+       else
+         []
+       end).each do |order|
+        order = order.to_s
+        attr, direction = order.strip.split " "
+        direction ||= 'asc'
+        # The attr can have its table unspecified if it happens to be for the current "model_class" (the first case)
+        # or it can be fully specified with the database tablename (the second case) (e.g. "collections.name").
+        # NB that the security check for the second case table_name will not work if the model
+        # has used set_table_name to use an alternate table name from the Rails standard.
+        # I could not find a perfect way to handle this well, but ActiveRecord::Base.send(:descendants)
+        # would be a place to start if this ever becomes necessary.
+        if attr.match /^[a-z][_a-z0-9]+$/ and
+            model_class.columns.collect(&:name).index(attr) and
+            ['asc','desc'].index direction.downcase
+          @orders << "#{table_name}.#{attr} #{direction.downcase}"
+        elsif attr.match /^([a-z][_a-z0-9]+)\.([a-z][_a-z0-9]+)$/ and
+            ['asc','desc'].index(direction.downcase) and
+            ActiveRecord::Base.connection.tables.include?($1) and
+            $1.classify.constantize.columns.collect(&:name).index($2)
+          # $1 in the above checks references the first match from the regular expression, which is expected to be the database table name
+          # $2 is of course the actual database column name
+          @orders << "#{attr} #{direction.downcase}"
+        end
+      end
+    end
+
+    if @orders.empty?
+      @orders = default_orders
+    end
+
+    # Load params[:select] (list of column names to return) into @select.
+    case params[:select]
+    when Array
+      @select = params[:select]
+    when String
+      begin
+        @select = Oj.load params[:select]
+        raise unless @select.is_a? Array or @select.nil?
+      rescue
+        raise ArgumentError.new("Could not parse \"select\" param as an array")
+      end
+    end
+
+    if @select
+      # Any ordering columns must be selected when doing select,
+      # otherwise it is an SQL error, so filter out invalid orderings.
+      @orders.select! { |o|
+        # match select column against order array entry
+        # NOTE(review): the "." after the table name is an unescaped regex
+        # dot (matches any character) -- harmless in practice, but worth
+        # tightening to "\." at some point.
+        @select.select { |s| /^#{table_name}.#{s}( (asc|desc))?$/.match o }.any?
+      }
+    end
+
+    # @distinct stays unset unless the client explicitly passes true/false.
+    @distinct = true if (params[:distinct] == true || params[:distinct] == "true")
+    @distinct = false if (params[:distinct] == false || params[:distinct] == "false")
+  end
+
+end
diff --git a/services/api/lib/record_filters.rb b/services/api/lib/record_filters.rb
new file mode 100644 (file)
index 0000000..9408dcf
--- /dev/null
@@ -0,0 +1,114 @@
+# Mixin module providing a method to convert filters into a list of SQL
+# fragments suitable to be fed to ActiveRecord #where.
+#
+# Expects:
+#   model_class
+# Operates on:
+#   @objects
+module RecordFilters
+
+  # Input:
+  # +filters+        array of conditions, each being [column, operator, operand]
+  # +model_class+    subclass of ActiveRecord being filtered
+  #
+  # Output:
+  # Hash with two keys:
+  # :cond_out  array of SQL fragments for each filter expression
+  # :param_out  array of values for parameter substitution in cond_out
+  def record_filters filters, model_class
+    conds_out = []
+    param_out = []
+
+    ar_table_name = model_class.table_name
+    filters.each do |filter|
+      attrs_in, operator, operand = filter
+      if attrs_in == 'any'
+        attrs = model_class.searchable_columns(operator)
+      elsif attrs_in.is_a? Array
+        attrs = attrs_in
+      else
+        attrs = [attrs_in]
+      end
+      if !filter.is_a? Array
+        raise ArgumentError.new("Invalid element in filters array: #{filter.inspect} is not an array")
+      elsif !operator.is_a? String
+        raise ArgumentError.new("Invalid operator '#{operator}' (#{operator.class}) in filter")
+      end
+      cond_out = []
+      attrs.each do |attr|
+        if !model_class.searchable_columns(operator).index attr.to_s
+          raise ArgumentError.new("Invalid attribute '#{attr}' in filter")
+        end
+        case operator.downcase
+        when '=', '<', '<=', '>', '>=', '!=', 'like', 'ilike'
+          attr_type = model_class.attribute_column(attr).type
+          operator = '<>' if operator == '!='
+          if operand.is_a? String
+            if attr_type == :boolean
+              if not ['=', '<>'].include?(operator)
+                raise ArgumentError.new("Invalid operator '#{operator}' for " \
+                                        "boolean attribute '#{attr}'")
+              end
+              case operand.downcase
+              when '1', 't', 'true', 'y', 'yes'
+                operand = true
+              when '0', 'f', 'false', 'n', 'no'
+                operand = false
+              else
+                raise ArgumentError("Invalid operand '#{operand}' for " \
+                                    "boolean attribute '#{attr}'")
+              end
+            end
+            cond_out << "#{ar_table_name}.#{attr} #{operator} ?"
+            if (# any operator that operates on value rather than
+                # representation:
+                operator.match(/[<=>]/) and (attr_type == :datetime))
+              operand = Time.parse operand
+            end
+            param_out << operand
+          elsif operand.nil? and operator == '='
+            cond_out << "#{ar_table_name}.#{attr} is null"
+          elsif operand.nil? and operator == '<>'
+            cond_out << "#{ar_table_name}.#{attr} is not null"
+          elsif (attr_type == :boolean) and ['=', '<>'].include?(operator) and
+              [true, false].include?(operand)
+            cond_out << "#{ar_table_name}.#{attr} #{operator} ?"
+            param_out << operand
+          else
+            raise ArgumentError.new("Invalid operand type '#{operand.class}' "\
+                                    "for '#{operator}' operator in filters")
+          end
+        when 'in', 'not in'
+          if operand.is_a? Array
+            cond_out << "#{ar_table_name}.#{attr} #{operator} (?)"
+            param_out << operand
+            if operator == 'not in' and not operand.include?(nil)
+              # explicitly allow NULL
+              cond_out[-1] = "(#{cond_out[-1]} OR #{ar_table_name}.#{attr} IS NULL)"
+            end
+          else
+            raise ArgumentError.new("Invalid operand type '#{operand.class}' "\
+                                    "for '#{operator}' operator in filters")
+          end
+        when 'is_a'
+          operand = [operand] unless operand.is_a? Array
+          cond = []
+          operand.each do |op|
+            cl = ArvadosModel::kind_class op
+            if cl
+              cond << "#{ar_table_name}.#{attr} like ?"
+              param_out << cl.uuid_like_pattern
+            else
+              cond << "1=0"
+            end
+          end
+          cond_out << cond.join(' OR ')
+        end
+      end
+      conds_out << cond_out.join(' OR ') if cond_out.any?
+    end
+
+    {:cond_out => conds_out, :param_out => param_out}
+  end
+
+end
diff --git a/services/api/lib/simulate_job_log.rb b/services/api/lib/simulate_job_log.rb
new file mode 100644 (file)
index 0000000..fc124c8
--- /dev/null
@@ -0,0 +1,49 @@
+module SimulateJobLog
+  def replay(filename, multiplier = 1, simulated_job_uuid = nil)
+    raise "Environment must be development or test" unless [ 'test', 'development' ].include? ENV['RAILS_ENV']
+
+    multiplier = multiplier.to_f
+    multiplier = 1.0 if multiplier <= 0
+
+    actual_start_time = Time.now
+    log_start_time = nil
+
+    act_as_system_user do
+      File.open(filename).each.with_index do |line, index|
+        cols = {}
+        cols[:timestamp], rest_of_line = line.split(' ', 2)
+        begin
+          cols[:timestamp] = Time.strptime( cols[:timestamp], "%Y-%m-%d_%H:%M:%S" )
+        rescue ArgumentError
+          if line =~ /^((?:Sun|Mon|Tue|Wed|Thu|Fri|Sat) (?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) \d{1,2} \d\d:\d\d:\d\d \d{4}) (.*)/
+            # Wed Nov 19 07:12:39 2014
+            cols[:timestamp] = Time.strptime( $1, "%a %b %d %H:%M:%S %Y" )
+            rest_of_line = $2
+          else
+              STDERR.puts "Ignoring log line because of unknown time format: #{line}"
+          end
+        end
+        cols[:job_uuid], cols[:pid], cols[:task], cols[:event_type], cols[:message] = rest_of_line.split(' ', 5)
+        # Override job uuid with a simulated one if specified
+        cols[:job_uuid] = simulated_job_uuid || cols[:job_uuid]
+        # determine when we want to simulate this log being created, based on the time multiplier
+        log_start_time = cols[:timestamp] if log_start_time.nil?
+        log_time = cols[:timestamp]
+        actual_elapsed_time = Time.now - actual_start_time
+        log_elapsed_time = log_time - log_start_time
+        modified_elapsed_time = log_elapsed_time / multiplier
+        pause_time = modified_elapsed_time - actual_elapsed_time
+        sleep pause_time if pause_time > 0
+        # output log entry for debugging and create it in the current environment's database
+        puts "#{index} #{cols.to_yaml}\n"
+        Log.new({
+          event_at:    Time.zone.local_to_utc(cols[:timestamp]),
+          object_uuid: cols[:job_uuid],
+          event_type:  cols[:event_type],
+          properties:  { 'text' => line }
+        }).save!
+      end
+    end
+
+  end
+end
diff --git a/services/api/lib/tasks/.gitkeep b/services/api/lib/tasks/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/api/lib/tasks/config_check.rake b/services/api/lib/tasks/config_check.rake
new file mode 100644 (file)
index 0000000..1b38655
--- /dev/null
@@ -0,0 +1,23 @@
+namespace :config do
+  desc 'Ensure site configuration has all required settings'
+  task check: :environment do
+    # Walk every key declared in $application_config; merely referencing
+    # Rails.configuration.<key> performs the existence check.  eval is used
+    # because the key names come from the app's own config files -- not
+    # user input -- and (presumably) may be dotted paths; confirm before
+    # replacing with a plain send().
+    $application_config.sort.each do |k, v|
+      if ENV.has_key?('QUIET') then
+        # Make sure we still check for the variable to exist
+        eval("Rails.configuration.#{k}")
+      else
+        if /(password|secret)/.match(k) then
+          # Make sure we still check for the variable to exist, but don't print the value
+          eval("Rails.configuration.#{k}")
+          $stderr.puts "%-32s %s" % [k, '*********']
+        else
+          $stderr.puts "%-32s %s" % [k, eval("Rails.configuration.#{k}")]
+        end
+      end
+    end
+    # default_trash_lifetime cannot be less than 24 hours
+    if Rails.configuration.default_trash_lifetime < 86400 then
+      raise "default_trash_lifetime is %d, must be at least 86400" % Rails.configuration.default_trash_lifetime
+    end
+  end
+end
diff --git a/services/api/lib/tasks/replay_job_log.rake b/services/api/lib/tasks/replay_job_log.rake
new file mode 100644 (file)
index 0000000..14aa3be
--- /dev/null
@@ -0,0 +1,7 @@
+require 'simulate_job_log'
+desc 'Simulate job logging from a file. Three arguments: log filename, time multipler (optional), simulated job uuid (optional). E.g. (use quotation marks if using spaces between args): rake "replay_job_log[log.txt, 2.0, qr1hi-8i9sb-nf3qk0xzwwz3lre]"'
+task :replay_job_log, [:filename, :multiplier, :uuid] => :environment do |t, args|
+  include SimulateJobLog
+  abort("No filename specified.") if args[:filename].blank?
+  replay( args[:filename], args[:multiplier].to_f, args[:uuid] )
+end
diff --git a/services/api/log/.gitkeep b/services/api/log/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/api/public/404.html b/services/api/public/404.html
new file mode 100644 (file)
index 0000000..9a48320
--- /dev/null
@@ -0,0 +1,26 @@
+<!DOCTYPE html>
+<html>
+<head>
+  <title>The page you were looking for doesn't exist (404)</title>
+  <style type="text/css">
+    body { background-color: #fff; color: #666; text-align: center; font-family: arial, sans-serif; }
+    div.dialog {
+      width: 25em;
+      padding: 0 4em;
+      margin: 4em auto 0 auto;
+      border: 1px solid #ccc;
+      border-right-color: #999;
+      border-bottom-color: #999;
+    }
+    h1 { font-size: 100%; color: #f00; line-height: 1.5em; }
+  </style>
+</head>
+
+<body>
+  <!-- This file lives in public/404.html -->
+  <div class="dialog">
+    <h1>The page you were looking for doesn't exist.</h1>
+    <p>You may have mistyped the address or the page may have moved.</p>
+  </div>
+</body>
+</html>
diff --git a/services/api/public/422.html b/services/api/public/422.html
new file mode 100644 (file)
index 0000000..83660ab
--- /dev/null
@@ -0,0 +1,26 @@
+<!DOCTYPE html>
+<html>
+<head>
+  <title>The change you wanted was rejected (422)</title>
+  <style type="text/css">
+    body { background-color: #fff; color: #666; text-align: center; font-family: arial, sans-serif; }
+    div.dialog {
+      width: 25em;
+      padding: 0 4em;
+      margin: 4em auto 0 auto;
+      border: 1px solid #ccc;
+      border-right-color: #999;
+      border-bottom-color: #999;
+    }
+    h1 { font-size: 100%; color: #f00; line-height: 1.5em; }
+  </style>
+</head>
+
+<body>
+  <!-- This file lives in public/422.html -->
+  <div class="dialog">
+    <h1>The change you wanted was rejected.</h1>
+    <p>Maybe you tried to change something you didn't have access to.</p>
+  </div>
+</body>
+</html>
diff --git a/services/api/public/500.html b/services/api/public/500.html
new file mode 100644 (file)
index 0000000..b80307f
--- /dev/null
@@ -0,0 +1,26 @@
+<!DOCTYPE html>
+<html>
+<head>
+  <title>We're sorry, but something went wrong (500)</title>
+  <style type="text/css">
+    body { background-color: #fff; color: #666; text-align: center; font-family: arial, sans-serif; }
+    div.dialog {
+      width: 25em;
+      padding: 0 4em;
+      margin: 4em auto 0 auto;
+      border: 1px solid #ccc;
+      border-right-color: #999;
+      border-bottom-color: #999;
+    }
+    h1 { font-size: 100%; color: #f00; line-height: 1.5em; }
+  </style>
+</head>
+
+<body>
+  <!-- This file lives in public/500.html -->
+  <div class="dialog">
+    <h1>We're sorry, but something went wrong.</h1>
+    <p>We've been notified about this issue and we'll take a look at it shortly.</p>
+  </div>
+</body>
+</html>
diff --git a/services/api/public/favicon.ico b/services/api/public/favicon.ico
new file mode 100644 (file)
index 0000000..4c763b6
Binary files /dev/null and b/services/api/public/favicon.ico differ
diff --git a/services/api/public/robots.txt b/services/api/public/robots.txt
new file mode 100644 (file)
index 0000000..085187f
--- /dev/null
@@ -0,0 +1,5 @@
+# See http://www.robotstxt.org/wc/norobots.html for documentation on how to use the robots.txt file
+#
+# To ban all spiders from the entire site uncomment the next two lines:
+# User-Agent: *
+# Disallow: /
diff --git a/services/api/script/cancel_stale_jobs.rb b/services/api/script/cancel_stale_jobs.rb
new file mode 100755 (executable)
index 0000000..4949ec0
--- /dev/null
@@ -0,0 +1,44 @@
+#!/usr/bin/env ruby
+
+
+if ENV["CRUNCH_DISPATCH_LOCKFILE"]
+  lockfilename = ENV.delete "CRUNCH_DISPATCH_LOCKFILE"
+  lockfile = File.open(lockfilename, File::RDWR|File::CREAT, 0644)
+  unless lockfile.flock File::LOCK_EX|File::LOCK_NB
+    abort "Lock unavailable on #{lockfilename} - exit"
+  end
+end
+
+ENV["RAILS_ENV"] = ARGV[0] || ENV["RAILS_ENV"] || "development"
+
+require File.dirname(__FILE__) + '/../config/boot'
+require File.dirname(__FILE__) + '/../config/environment'
+
+class CancelJobs
+  include ApplicationHelper
+
+  def cancel_stale_jobs
+    act_as_system_user do
+      Job.running.each do |jobrecord|
+        f = Log.where("object_uuid=?", jobrecord.uuid).limit(1).order("created_at desc").first
+        if f
+          age = (Time.now - f.created_at)
+          if age > 300
+            $stderr.puts "dispatch: failing orphan job #{jobrecord.uuid}, last log is #{age} seconds old"
+            # job is marked running, but not known to crunch-dispatcher, and
+            # hasn't produced any log entries for 5 minutes, so mark it as failed.
+            jobrecord.running = false
+            jobrecord.cancelled_at ||= Time.now
+            jobrecord.finished_at ||= Time.now
+            if jobrecord.success.nil?
+              jobrecord.success = false
+            end
+            jobrecord.save!
+          end
+        end
+      end
+    end
+  end
+end
+
+CancelJobs.new.cancel_stale_jobs
diff --git a/services/api/script/create_superuser_token.rb b/services/api/script/create_superuser_token.rb
new file mode 100755 (executable)
index 0000000..d119f8a
--- /dev/null
@@ -0,0 +1,35 @@
+#!/usr/bin/env ruby
+
+# Install the supplied string (or a randomly generated token, if none
+# is given) as an API token that authenticates to the system user
+# account.
+#
+# Print the token on stdout.
+
+supplied_token = ARGV[0]
+
+require File.dirname(__FILE__) + '/../config/boot'
+require File.dirname(__FILE__) + '/../config/environment'
+
+include ApplicationHelper
+act_as_system_user
+
+if supplied_token
+  api_client_auth = ApiClientAuthorization.
+    where(api_token: supplied_token).
+    first
+  if api_client_auth && !api_client_auth.user.uuid.match(/-000000000000000$/)
+    raise ActiveRecord::RecordNotUnique.new("Token already exists but is not a superuser token.")
+  end
+end
+
+if !api_client_auth
+  api_client_auth = ApiClientAuthorization.
+    new(user: system_user,
+        api_client_id: 0,
+        created_by_ip_address: '::1',
+        api_token: supplied_token)
+  api_client_auth.save!
+end
+
+puts api_client_auth.api_token
diff --git a/services/api/script/crunch-dispatch.rb b/services/api/script/crunch-dispatch.rb
new file mode 100755 (executable)
index 0000000..ab4f70e
--- /dev/null
@@ -0,0 +1,761 @@
+#!/usr/bin/env ruby
+
+require 'shellwords'
+include Process
+
+$options = {}
+(ARGV.any? ? ARGV : ['--jobs', '--pipelines']).each do |arg|
+  case arg
+  when '--jobs'
+    $options[:jobs] = true
+  when '--pipelines'
+    $options[:pipelines] = true
+  else
+    abort "Unrecognized command line option '#{arg}'"
+  end
+end
+if not ($options[:jobs] or $options[:pipelines])
+  abort "Nothing to do. Please specify at least one of: --jobs, --pipelines."
+end
+
+ARGV.reject! { |a| a =~ /--jobs|--pipelines/ }
+
+$warned = {}
+$signal = {}
+%w{TERM INT}.each do |sig|
+  signame = sig
+  Signal.trap(sig) do
+    $stderr.puts "Received #{signame} signal"
+    $signal[:term] = true
+  end
+end
+
+if ENV["CRUNCH_DISPATCH_LOCKFILE"]
+  lockfilename = ENV.delete "CRUNCH_DISPATCH_LOCKFILE"
+  lockfile = File.open(lockfilename, File::RDWR|File::CREAT, 0644)
+  unless lockfile.flock File::LOCK_EX|File::LOCK_NB
+    abort "Lock unavailable on #{lockfilename} - exit"
+  end
+end
+
+ENV["RAILS_ENV"] = ARGV[0] || ENV["RAILS_ENV"] || "development"
+
+require File.dirname(__FILE__) + '/../config/boot'
+require File.dirname(__FILE__) + '/../config/environment'
+require 'open3'
+
+class LogTime < Time
+  def to_s
+    self.utc.strftime "%Y-%m-%d_%H:%M:%S"
+  end
+end
+
+class Dispatcher
+  include ApplicationHelper
+
+  def initialize
+    @crunch_job_bin = (ENV['CRUNCH_JOB_BIN'] || `which arv-crunch-job`.strip)
+    if @crunch_job_bin.empty?
+      raise "No CRUNCH_JOB_BIN env var, and crunch-job not in path."
+    end
+
+    @arvados_internal = Rails.configuration.git_internal_dir
+    if not File.exists? @arvados_internal
+      $stderr.puts `mkdir -p #{@arvados_internal.shellescape} && git init --bare #{@arvados_internal.shellescape}`
+      raise "No internal git repository available" unless ($? == 0)
+    end
+
+    @repo_root = Rails.configuration.git_repositories_dir
+    @authorizations = {}
+    @did_recently = {}
+    @fetched_commits = {}
+    @git_tags = {}
+    @node_state = {}
+    @pipe_auth_tokens = {}
+    @running = {}
+    @todo = []
+    @todo_pipelines = []
+  end
+
+  def sysuser
+    return act_as_system_user
+  end
+
+  def refresh_todo
+    if $options[:jobs]
+      @todo = Job.queue.select(&:repository)
+    end
+    if $options[:pipelines]
+      @todo_pipelines = PipelineInstance.queue
+    end
+  end
+
+  def each_slurm_line(cmd, outfmt, max_fields=nil)
+    max_fields ||= outfmt.split(":").size
+    max_fields += 1  # To accommodate the node field we add
+    @@slurm_version ||= Gem::Version.new(`sinfo --version`.match(/\b[\d\.]+\b/)[0])
+    if Gem::Version.new('2.3') <= @@slurm_version
+      `#{cmd} --noheader -o '%n:#{outfmt}'`.each_line do |line|
+        yield line.chomp.split(":", max_fields)
+      end
+    else
+      # Expand rows with hostname ranges (like "foo[1-3,5,9-12]:idle")
+      # into multiple rows with one hostname each.
+      `#{cmd} --noheader -o '%N:#{outfmt}'`.each_line do |line|
+        tokens = line.chomp.split(":", max_fields)
+        if (re = tokens[0].match /^(.*?)\[([-,\d]+)\]$/)
+          tokens.shift
+          re[2].split(",").each do |range|
+            range = range.split("-").collect(&:to_i)
+            (range[0]..range[-1]).each do |n|
+              yield [re[1] + n.to_s] + tokens
+            end
+          end
+        else
+          yield tokens
+        end
+      end
+    end
+  end
+
+  def slurm_status
+    slurm_nodes = {}
+    each_slurm_line("sinfo", "%t") do |hostname, state|
+      # Treat nodes in idle* state as down, because the * means that slurm
+      # hasn't been able to communicate with it recently.
+      state.sub!(/^idle\*/, "down")
+      state.sub!(/\W+$/, "")
+      state = "down" unless %w(idle alloc down).include?(state)
+      slurm_nodes[hostname] = {state: state, job: nil}
+    end
+    each_slurm_line("squeue", "%j") do |hostname, job_uuid|
+      slurm_nodes[hostname][:job] = job_uuid if slurm_nodes[hostname]
+    end
+    slurm_nodes
+  end
+
+  def update_node_status
+    return unless Server::Application.config.crunch_job_wrapper.to_s.match /^slurm/
+    slurm_status.each_pair do |hostname, slurmdata|
+      next if @node_state[hostname] == slurmdata
+      begin
+        node = Node.where('hostname=?', hostname).order(:last_ping_at).last
+        if node
+          $stderr.puts "dispatch: update #{hostname} state to #{slurmdata}"
+          node.info["slurm_state"] = slurmdata[:state]
+          node.job_uuid = slurmdata[:job]
+          if node.save
+            @node_state[hostname] = slurmdata
+          else
+            $stderr.puts "dispatch: failed to update #{node.uuid}: #{node.errors.messages}"
+          end
+        elsif slurmdata[:state] != 'down'
+          $stderr.puts "dispatch: SLURM reports '#{hostname}' is not down, but no node has that name"
+        end
+      rescue => error
+        $stderr.puts "dispatch: error updating #{hostname} node status: #{error}"
+      end
+    end
+  end
+
+  def positive_int(raw_value, default=nil)
+    value = begin raw_value.to_i rescue 0 end
+    if value > 0
+      value
+    else
+      default
+    end
+  end
+
+  NODE_CONSTRAINT_MAP = {
+    # Map Job runtime_constraints keys to the corresponding Node info key.
+    'min_ram_mb_per_node' => 'total_ram_mb',
+    'min_scratch_mb_per_node' => 'total_scratch_mb',
+    'min_cores_per_node' => 'total_cpu_cores',
+  }
+
+  def nodes_available_for_job_now(job)
+    # Find Nodes that satisfy a Job's runtime constraints (by building
+    # a list of Procs and using them to test each Node).  If there
+    # enough to run the Job, return an array of their names.
+    # Otherwise, return nil.
+    need_procs = NODE_CONSTRAINT_MAP.each_pair.map do |job_key, node_key|
+      Proc.new do |node|
+        positive_int(node.info[node_key], 0) >=
+          positive_int(job.runtime_constraints[job_key], 0)
+      end
+    end
+    min_node_count = positive_int(job.runtime_constraints['min_nodes'], 1)
+    usable_nodes = []
+    Node.find_each do |node|
+      good_node = (node.info['slurm_state'] == 'idle')
+      need_procs.each { |node_test| good_node &&= node_test.call(node) }
+      if good_node
+        usable_nodes << node
+        if usable_nodes.count >= min_node_count
+          return usable_nodes.map { |node| node.hostname }
+        end
+      end
+    end
+    nil
+  end
+
+  def nodes_available_for_job(job)
+    # Check if there are enough idle nodes with the Job's minimum
+    # hardware requirements to run it.  If so, return an array of
+    # their names.  If not, up to once per hour, signal start_jobs to
+    # hold off launching Jobs.  This delay is meant to give the Node
+    # Manager an opportunity to make new resources available for new
+    # Jobs.
+    #
+    # The exact timing parameters here might need to be adjusted for
+    # the best balance between helping the longest-waiting Jobs run,
+    # and making efficient use of immediately available resources.
+    # These are all just first efforts until we have more data to work
+    # with.
+    nodelist = nodes_available_for_job_now(job)
+    if nodelist.nil? and not did_recently(:wait_for_available_nodes, 3600)
+      $stderr.puts "dispatch: waiting for nodes for #{job.uuid}"
+      @node_wait_deadline = Time.now + 5.minutes
+    end
+    nodelist
+  end
+
+  def fail_job job, message
+    $stderr.puts "dispatch: #{job.uuid}: #{message}"
+    begin
+      Log.new(object_uuid: job.uuid,
+              event_type: 'dispatch',
+              owner_uuid: job.owner_uuid,
+              summary: message,
+              properties: {"text" => message}).save!
+    rescue
+      $stderr.puts "dispatch: log.create failed"
+    end
+
+    begin
+      job.lock @authorizations[job.uuid].user.uuid
+      job.state = "Failed"
+      if not job.save
+        $stderr.puts "dispatch: save failed setting job #{job.uuid} to failed"
+      end
+    rescue ArvadosModel::AlreadyLockedError
+      $stderr.puts "dispatch: tried to mark job #{job.uuid} as failed but it was already locked by someone else"
+    end
+  end
+
+  def stdout_s(cmd_a, opts={})
+    IO.popen(cmd_a, "r", opts) do |pipe|
+      return pipe.read.chomp
+    end
+  end
+
+  def git_cmd(*cmd_a)
+    ["git", "--git-dir=#{@arvados_internal}"] + cmd_a
+  end
+
+  def get_authorization(job)
+    if @authorizations[job.uuid] and
+        @authorizations[job.uuid].user.uuid != job.modified_by_user_uuid
+      # We already made a token for this job, but we need a new one
+      # because modified_by_user_uuid has changed (the job will run
+      # as a different user).
+      @authorizations[job.uuid].update_attributes expires_at: Time.now
+      @authorizations[job.uuid] = nil
+    end
+    if not @authorizations[job.uuid]
+      auth = ApiClientAuthorization.
+        new(user: User.where('uuid=?', job.modified_by_user_uuid).first,
+            api_client_id: 0)
+      if not auth.save
+        $stderr.puts "dispatch: auth.save failed for #{job.uuid}"
+      else
+        @authorizations[job.uuid] = auth
+      end
+    end
+    @authorizations[job.uuid]
+  end
+
+  def get_commit(repo_name, commit_hash)
+    # @fetched_commits[V]==true if we know commit V exists in the
+    # arvados_internal git repository.
+    if !@fetched_commits[commit_hash]
+      src_repo = File.join(@repo_root, "#{repo_name}.git")
+      if not File.exists? src_repo
+        src_repo = File.join(@repo_root, repo_name, '.git')
+        if not File.exists? src_repo
+          fail_job job, "No #{repo_name}.git or #{repo_name}/.git at #{@repo_root}"
+          return nil
+        end
+      end
+
+      # check if the commit needs to be fetched or not
+      commit_rev = stdout_s(git_cmd("rev-list", "-n1", commit_hash),
+                            err: "/dev/null")
+      unless $? == 0 and commit_rev == commit_hash
+        # commit does not exist in internal repository, so import the source repository using git fetch-pack
+        cmd = git_cmd("fetch-pack", "--no-progress", "--all", src_repo)
+        $stderr.puts "dispatch: #{cmd}"
+        $stderr.puts(stdout_s(cmd))
+        unless $? == 0
+          fail_job job, "git fetch-pack failed"
+          return nil
+        end
+      end
+      @fetched_commits[commit_hash] = true
+    end
+    @fetched_commits[commit_hash]
+  end
+
+  def tag_commit(commit_hash, tag_name)
+    # @git_tags[T]==V if we know commit V has been tagged T in the
+    # arvados_internal repository.
+    if not @git_tags[tag_name]
+      cmd = git_cmd("tag", tag_name, commit_hash)
+      $stderr.puts "dispatch: #{cmd}"
+      $stderr.puts(stdout_s(cmd, err: "/dev/null"))
+      unless $? == 0
+        # git tag failed.  This may be because the tag already exists, so check for that.
+        tag_rev = stdout_s(git_cmd("rev-list", "-n1", tag_name))
+        if $? == 0
+          # We got a revision back
+          if tag_rev != commit_hash
+            # Uh oh, the tag doesn't point to the revision we were expecting.
+            # Someone has been monkeying with the job record and/or git.
+            fail_job job, "Existing tag #{tag_name} points to commit #{tag_rev} but expected commit #{commit_hash}"
+            return nil
+          end
+          # we're okay (fall through to setting @git_tags below)
+        else
+          # git rev-list failed for some reason.
+          fail_job job, "'git tag' for #{tag_name} failed but did not find any existing tag using 'git rev-list'"
+          return nil
+        end
+      end
+      # 'git tag' was successful, or there is an existing tag that points to the same revision.
+      @git_tags[tag_name] = commit_hash
+    elsif @git_tags[tag_name] != commit_hash
+      fail_job job, "Existing tag #{tag_name} points to commit #{@git_tags[tag_name]} but this job uses commit #{commit_hash}"
+      return nil
+    end
+    @git_tags[tag_name]
+  end
+
+  def start_jobs
+    @todo.each do |job|
+      next if @running[job.uuid]
+
+      cmd_args = nil
+      case Server::Application.config.crunch_job_wrapper
+      when :none
+        if @running.size > 0
+            # Don't run more than one at a time.
+            return
+        end
+        cmd_args = []
+      when :slurm_immediate
+        nodelist = nodes_available_for_job(job)
+        if nodelist.nil?
+          if Time.now < @node_wait_deadline
+            break
+          else
+            next
+          end
+        end
+        cmd_args = ["salloc",
+                    "--chdir=/",
+                    "--immediate",
+                    "--exclusive",
+                    "--no-kill",
+                    "--job-name=#{job.uuid}",
+                    "--nodelist=#{nodelist.join(',')}"]
+      else
+        raise "Unknown crunch_job_wrapper: #{Server::Application.config.crunch_job_wrapper}"
+      end
+
+      if Server::Application.config.crunch_job_user
+        cmd_args.unshift("sudo", "-E", "-u",
+                         Server::Application.config.crunch_job_user,
+                         "PATH=#{ENV['PATH']}",
+                         "PERLLIB=#{ENV['PERLLIB']}",
+                         "PYTHONPATH=#{ENV['PYTHONPATH']}",
+                         "RUBYLIB=#{ENV['RUBYLIB']}",
+                         "GEM_PATH=#{ENV['GEM_PATH']}")
+      end
+
+      ready = (get_authorization(job) and
+               get_commit(job.repository, job.script_version) and
+               tag_commit(job.script_version, job.uuid))
+      if ready and job.arvados_sdk_version
+        ready = (get_commit("arvados", job.arvados_sdk_version) and
+                 tag_commit(job.arvados_sdk_version, "#{job.uuid}-arvados-sdk"))
+      end
+      next unless ready
+
+      cmd_args += [@crunch_job_bin,
+                   '--job-api-token', @authorizations[job.uuid].api_token,
+                   '--job', job.uuid,
+                   '--git-dir', @arvados_internal]
+
+      $stderr.puts "dispatch: #{cmd_args.join ' '}"
+
+      begin
+        i, o, e, t = Open3.popen3(*cmd_args)
+      rescue
+        $stderr.puts "dispatch: popen3: #{$!}"
+        sleep 1
+        next
+      end
+
+      $stderr.puts "dispatch: job #{job.uuid}"
+      start_banner = "dispatch: child #{t.pid} start #{LogTime.now}"
+      $stderr.puts start_banner
+
+      @running[job.uuid] = {
+        stdin: i,
+        stdout: o,
+        stderr: e,
+        wait_thr: t,
+        job: job,
+        buf: {stderr: '', stdout: ''},
+        started: false,
+        sent_int: 0,
+        job_auth: @authorizations[job.uuid],
+        stderr_buf_to_flush: '',
+        stderr_flushed_at: Time.new(0),
+        bytes_logged: 0,
+        events_logged: 0,
+        log_throttle_is_open: true,
+        log_throttle_reset_time: Time.now + Rails.configuration.crunch_log_throttle_period,
+        log_throttle_bytes_so_far: 0,
+        log_throttle_lines_so_far: 0,
+        log_throttle_bytes_skipped: 0,
+      }
+      i.close
+      update_node_status
+    end
+  end
+
+  # Test for hard cap on total output and for log throttling.  Returns whether
+  # the log line should go to output or not.  Modifies "line" in place to
+  # replace it with an error if a logging limit is tripped.
+  def rate_limit running_job, line
+    message = false
+    linesize = line.size
+    if running_job[:log_throttle_is_open]
+      running_job[:log_throttle_lines_so_far] += 1
+      running_job[:log_throttle_bytes_so_far] += linesize
+      running_job[:bytes_logged] += linesize
+
+      if (running_job[:bytes_logged] >
+          Rails.configuration.crunch_limit_log_bytes_per_job)
+        message = "Exceeded log limit #{Rails.configuration.crunch_limit_log_bytes_per_job} bytes (crunch_limit_log_bytes_per_job). Log will be truncated."
+        running_job[:log_throttle_reset_time] = Time.now + 100.years
+        running_job[:log_throttle_is_open] = false
+
+      elsif (running_job[:log_throttle_bytes_so_far] >
+             Rails.configuration.crunch_log_throttle_bytes)
+        remaining_time = running_job[:log_throttle_reset_time] - Time.now
+        message = "Exceeded rate #{Rails.configuration.crunch_log_throttle_bytes} bytes per #{Rails.configuration.crunch_log_throttle_period} seconds (crunch_log_throttle_bytes). Logging will be silenced for the next #{remaining_time.round} seconds.\n"
+        running_job[:log_throttle_is_open] = false
+
+      elsif (running_job[:log_throttle_lines_so_far] >
+             Rails.configuration.crunch_log_throttle_lines)
+        remaining_time = running_job[:log_throttle_reset_time] - Time.now
+        message = "Exceeded rate #{Rails.configuration.crunch_log_throttle_lines} lines per #{Rails.configuration.crunch_log_throttle_period} seconds (crunch_log_throttle_lines), logging will be silenced for the next #{remaining_time.round} seconds.\n"
+        running_job[:log_throttle_is_open] = false
+      end
+    end
+
+    if not running_job[:log_throttle_is_open]
+      # Don't log anything if any limit has been exceeded. Just count lossage.
+      running_job[:log_throttle_bytes_skipped] += linesize
+    end
+
+    if message
+      # Yes, write to logs, but use our "rate exceeded" message
+      # instead of the log message that exceeded the limit.
+      line.replace message
+      true
+    else
+      running_job[:log_throttle_is_open]
+    end
+  end
+
+  def read_pipes
+    @running.each do |job_uuid, j|
+      job = j[:job]
+
+      now = Time.now
+      if now > j[:log_throttle_reset_time]
+        # It has been more than throttle_period seconds since the last
+        # checkpoint so reset the throttle
+        if j[:log_throttle_bytes_skipped] > 0
+          message = "#{job_uuid} ! Skipped #{j[:log_throttle_bytes_skipped]} bytes of log"
+          $stderr.puts message
+          j[:stderr_buf_to_flush] << "#{LogTime.now} #{message}\n"
+        end
+
+        j[:log_throttle_reset_time] = now + Rails.configuration.crunch_log_throttle_period
+        j[:log_throttle_bytes_so_far] = 0
+        j[:log_throttle_lines_so_far] = 0
+        j[:log_throttle_bytes_skipped] = 0
+        j[:log_throttle_is_open] = true
+      end
+
+      j[:buf].each do |stream, streambuf|
+        # Read some data from the child stream
+        buf = ''
+        begin
+          # It's important to use a big enough buffer here. When we're
+          # being flooded with logs, we must read and discard many
+          # bytes at once. Otherwise, we can easily peg a CPU with
+          # time-checking and other loop overhead. (Quick tests show a
+          # 1MiB buffer working 2.5x as fast as a 64 KiB buffer.)
+          #
+          # So don't reduce this buffer size!
+          buf = j[stream].read_nonblock(2**20)
+        rescue Errno::EAGAIN, EOFError
+        end
+
+        # Short circuit the counting code if we're just going to throw
+        # away the data anyway.
+        if not j[:log_throttle_is_open]
+          j[:log_throttle_bytes_skipped] += streambuf.size + buf.size
+          streambuf.replace ''
+          next
+        elsif buf == ''
+          next
+        end
+
+        # Append to incomplete line from previous read, if any
+        streambuf << buf
+
+        bufend = ''
+        streambuf.each_line do |line|
+          if not line.end_with? $/
+            if line.size > Rails.configuration.crunch_log_throttle_bytes
+              # Without a limit here, we'll use 2x an arbitrary amount
+              # of memory, and waste a lot of time copying strings
+              # around, all without providing any feedback to anyone
+              # about what's going on _or_ hitting any of our throttle
+              # limits.
+              #
+              # Here we leave "line" alone, knowing it will never be
+              # sent anywhere: rate_limit() will reach
+              # crunch_log_throttle_bytes immediately. However, we'll
+              # leave [...] in bufend: if the trailing end of the long
+              # line does end up getting sent anywhere, it will have
+              # some indication that it is incomplete.
+              bufend = "[...]"
+            else
+              # If line length is sane, we'll wait for the rest of the
+              # line to appear in the next read_pipes() call.
+              bufend = line
+              break
+            end
+          end
+          # rate_limit returns true or false as to whether to actually log
+          # the line or not.  It also modifies "line" in place to replace
+          # it with an error if a logging limit is tripped.
+          if rate_limit j, line
+            $stderr.print "#{job_uuid} ! " unless line.index(job_uuid)
+            $stderr.puts line
+            pub_msg = "#{LogTime.now} #{line.strip}\n"
+            j[:stderr_buf_to_flush] << pub_msg
+          end
+        end
+
+        # Leave the trailing incomplete line (if any) in streambuf for
+        # next time.
+        streambuf.replace bufend
+      end
+      # Flush buffered logs to the logs table, if appropriate. We have
+      # to do this even if we didn't collect any new logs this time:
+      # otherwise, buffered data older than seconds_between_events
+      # won't get flushed until new data arrives.
+      write_log j
+    end
+  end
+
+  def reap_children
+    return if 0 == @running.size
+    pid_done = nil
+    j_done = nil
+
+    if false
+      begin
+        pid_done = waitpid(-1, Process::WNOHANG | Process::WUNTRACED)
+        if pid_done
+          j_done = @running.values.
+            select { |j| j[:wait_thr].pid == pid_done }.
+            first
+        end
+      rescue SystemCallError
+        # I have @running processes but system reports I have no
+        # children. This is likely to happen repeatedly if it happens at
+        # all; I will log this no more than once per child process I
+        # start.
+        if 0 < @running.select { |uuid,j| j[:warned_waitpid_error].nil? }.size
+          children = @running.values.collect { |j| j[:wait_thr].pid }.join ' '
+          $stderr.puts "dispatch: IPC bug: waitpid() error (#{$!}), but I have children #{children}"
+        end
+        @running.each do |uuid,j| j[:warned_waitpid_error] = true end
+      end
+    else
+      @running.each do |uuid, j|
+        if j[:wait_thr].status == false
+          pid_done = j[:wait_thr].pid
+          j_done = j
+        end
+      end
+    end
+
+    return if !pid_done
+
+    job_done = j_done[:job]
+    $stderr.puts "dispatch: child #{pid_done} exit"
+    $stderr.puts "dispatch: job #{job_done.uuid} end"
+
+    # Ensure every last drop of stdout and stderr is consumed.
+    read_pipes
+    # Reset flush timestamp to make sure log gets written.
+    j_done[:stderr_flushed_at] = Time.new(0)
+    # Write any remaining logs.
+    write_log j_done
+
+    j_done[:buf].each do |stream, streambuf|
+      if streambuf != ''
+        $stderr.puts streambuf + "\n"
+      end
+    end
+
+    # Wait the thread (returns a Process::Status)
+    exit_status = j_done[:wait_thr].value.exitstatus
+
+    jobrecord = Job.find_by_uuid(job_done.uuid)
+    if exit_status != 75 and jobrecord.state == "Running"
+      # crunch-job did not return exit code 75 (see below) and left the job in
+      # the "Running" state, which means there was an unhandled error.  Fail
+      # the job.
+      jobrecord.state = "Failed"
+      if not jobrecord.save
+        $stderr.puts "dispatch: jobrecord.save failed"
+      end
+    else
+      # Don't fail the job if crunch-job didn't even get as far as
+      # starting it. If the job failed to run due to an infrastructure
+      # issue with crunch-job or slurm, we want the job to stay in the
+      # queue. If crunch-job exited after losing a race to another
+      # crunch-job process, it exits 75 and we should leave the job
+      # record alone so the winner of the race can do its thing.
+      #
+      # There is still an unhandled race condition: If our crunch-job
+      # process is about to lose a race with another crunch-job
+      # process, but crashes before getting to its "exit 75" (for
+      # example, "cannot fork" or "cannot reach API server") then we
+      # will assume incorrectly that it's our process's fault that
+      # jobrecord.started_at is non-nil, and mark the job as failed
+      # even though the winner of the race is probably still doing
+      # fine.
+    end
+
+    # Invalidate the per-job auth token, unless the job is still queued and we
+    # might want to try it again.
+    if jobrecord.state != "Queued"
+      j_done[:job_auth].update_attributes expires_at: Time.now
+    end
+
+    @running.delete job_done.uuid
+  end
+
+  def update_pipelines
+    expire_tokens = @pipe_auth_tokens.dup
+    @todo_pipelines.each do |p|
+      pipe_auth = (@pipe_auth_tokens[p.uuid] ||= ApiClientAuthorization.
+                   create(user: User.where('uuid=?', p.modified_by_user_uuid).first,
+                          api_client_id: 0))
+      puts `export ARVADOS_API_TOKEN=#{pipe_auth.api_token} && arv-run-pipeline-instance --run-pipeline-here --no-wait --instance #{p.uuid}`
+      expire_tokens.delete p.uuid
+    end
+
+    expire_tokens.each do |k, v|
+      v.update_attributes expires_at: Time.now
+      @pipe_auth_tokens.delete k
+    end
+  end
+
+  def run
+    act_as_system_user
+    $stderr.puts "dispatch: ready"
+    while !$signal[:term] or @running.size > 0
+      read_pipes
+      if $signal[:term]
+        @running.each do |uuid, j|
+          if !j[:started] and j[:sent_int] < 2
+            begin
+              Process.kill 'INT', j[:wait_thr].pid
+            rescue Errno::ESRCH
+              # No such pid = race condition + desired result is
+              # already achieved
+            end
+            j[:sent_int] += 1
+          end
+        end
+      else
+        refresh_todo unless did_recently(:refresh_todo, 1.0)
+        update_node_status unless did_recently(:update_node_status, 1.0)
+        unless @todo.empty? or did_recently(:start_jobs, 1.0) or $signal[:term]
+          start_jobs
+        end
+        unless (@todo_pipelines.empty? and @pipe_auth_tokens.empty?) or did_recently(:update_pipelines, 5.0)
+          update_pipelines
+        end
+      end
+      reap_children
+      select(@running.values.collect { |j| [j[:stdout], j[:stderr]] }.flatten,
+             [], [], 1)
+    end
+  end
+
+  protected
+
+  def did_recently(thing, min_interval)
+    if !@did_recently[thing] or @did_recently[thing] < Time.now - min_interval
+      @did_recently[thing] = Time.now
+      false
+    else
+      true
+    end
+  end
+
+  # send message to log table. we want these records to be transient
+  def write_log running_job
+    return if running_job[:stderr_buf_to_flush] == ''
+
+    # Send out to log event if buffer size exceeds the bytes per event or if
+    # it has been at least crunch_log_seconds_between_events seconds since
+    # the last flush.
+    if running_job[:stderr_buf_to_flush].size > Rails.configuration.crunch_log_bytes_per_event or
+        (Time.now - running_job[:stderr_flushed_at]) >= Rails.configuration.crunch_log_seconds_between_events
+      begin
+        log = Log.new(object_uuid: running_job[:job].uuid,
+                      event_type: 'stderr',
+                      owner_uuid: running_job[:job].owner_uuid,
+                      properties: {"text" => running_job[:stderr_buf_to_flush]})
+        log.save!
+        running_job[:events_logged] += 1
+      rescue => exception
+        $stderr.puts "Failed to write logs"
+        $stderr.puts exception.backtrace
+      end
+      running_job[:stderr_buf_to_flush] = ''
+      running_job[:stderr_flushed_at] = Time.now
+    end
+  end
+end
+
+# This is how crunch-job child procs know where the "refresh" trigger file is
+ENV["CRUNCH_REFRESH_TRIGGER"] = Rails.configuration.crunch_refresh_trigger
+
+Dispatcher.new.run
diff --git a/services/api/script/crunch_failure_report.py b/services/api/script/crunch_failure_report.py
new file mode 100755 (executable)
index 0000000..31ad0fe
--- /dev/null
@@ -0,0 +1,219 @@
+#! /usr/bin/env python
+
+import argparse
+import datetime
+import json
+import re
+import sys
+
+import arvados
+
# Useful configuration variables:

# Number of log lines to use as context in diagnosing failure.
LOG_CONTEXT_LINES = 10

# Regex that signifies a failed task.
# Applied with .search(), so it matches anywhere in a crunch log line
# (e.g. " 12 failure ... permanent").
FAILED_TASK_REGEX = re.compile(' \d+ failure (.*permanent)')

# Regular expressions used to classify failure types.
# Keys are the failure-class labels shown in the report summary; values are
# patterns searched (re.search) in the log context around a failed task.
JOB_FAILURE_TYPES = {
    'sys/docker': 'Cannot destroy container',
    'crunch/node': 'User not found on host',
    'slurm/comm':  'Communication connection failure'
}
+
def parse_arguments(arguments):
    """Parse command-line options for the failure report.

    Raises ValueError (carrying the offending string) when --start or
    --end is present but not a valid API timestamp.
    """
    parser = argparse.ArgumentParser(
        description='Produce a report of Crunch failures within a specified time range')

    parser.add_argument(
        '--start',
        help='Start date and time')
    parser.add_argument(
        '--end',
        help='End date and time')

    parsed = parser.parse_args(arguments)

    # Validate --start first, then --end, so the error matches the original.
    for stamp in (parsed.start, parsed.end):
        if stamp and not is_valid_timestamp(stamp):
            raise ValueError(stamp)

    return parsed
+
+
def api_timestamp(when=None):
    """Return *when* formatted for the API server (YYYY-MM-DDTHH:MM:SSZ).

    Defaults to the current UTC time when *when* is None.
    """
    target = datetime.datetime.utcnow() if when is None else when
    return target.strftime("%Y-%m-%dT%H:%M:%SZ")
+
+
def is_valid_timestamp(ts):
    """Return a truthy match object if *ts* is exactly an API timestamp.

    The expected format is YYYY-MM-DDTHH:MM:SSZ.  The pattern is anchored at
    the end ($) so strings with trailing garbage (which the API server would
    reject or misinterpret) do not validate; the old pattern accepted them.
    """
    return re.match(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z$', ts)
+
+
def jobs_created_between_dates(api, start, end):
    """Return every job whose created_at lies in the inclusive [start, end] window."""
    window_filters = [['created_at', '>=', start],
                      ['created_at', '<=', end]]
    return arvados.util.list_all(api.jobs().list,
                                 filters=json.dumps(window_filters))
+
+
def job_logs(api, job):
    """Return the job's log contents as a list of lines ([] when no log exists)."""
    if not job['log']:
        return []
    # The log collection contains a single "<job uuid>.log.txt" file.
    reader = arvados.CollectionReader(job['log'], api)
    return reader.open("{}.log.txt".format(job['uuid'])).readlines()
+
+
# Memoization cache: user uuid -> full name (or uuid when lookup failed).
user_names = {}
def job_user_name(api, user_uuid):
    """Return the full name of the user with *user_uuid*.

    Falls back to the uuid itself when the API lookup fails.  Results
    (including failures) are cached in the module-level user_names dict.
    """
    if user_uuid not in user_names:
        try:
            name = api.users().get(uuid=user_uuid).execute()['full_name']
        except arvados.errors.ApiError:
            name = user_uuid
        user_names[user_uuid] = name
    return user_names[user_uuid]
+
+
# Memoization cache: job uuid -> pipeline name ("" when none was found).
job_pipeline_names = {}
def job_pipeline_name(api, job_uuid):
    """Return the name of the pipeline instance containing *job_uuid*.

    When the instance has no name of its own, the name of its pipeline
    template is used instead; "" is returned when no pipeline can be found.
    Results are cached in the module-level job_pipeline_names dict.
    """
    if job_uuid in job_pipeline_names:
        return job_pipeline_names[job_uuid]
    try:
        listing = api.pipeline_instances().list(
            filters='[["components", "like", "%{}%"]]'.format(job_uuid)).execute()
        instance = listing['items'][0]
        name = instance['name']
        if not name:
            # Unnamed instance: fall back to the pipeline template's name.
            template = api.pipeline_templates().get(
                uuid=instance['pipeline_template_uuid']).execute()
            name = template['name']
    except (TypeError, ValueError, IndexError):
        name = ""
    job_pipeline_names[job_uuid] = name
    return name
+
+
def is_failed_task(logline):
    """Return True if *logline* records a permanent task failure.

    Uses the idiomatic `is not None` instead of `!= None` (PEP 8: comparisons
    to singletons should use identity).
    """
    return FAILED_TASK_REGEX.search(logline) is not None
+
+
def main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):
    """Print a report of Crunch job failures in the requested time window.

    Fixes over the original:
    * jobs_by_state['Failed'] raised KeyError when the window contained no
      failed jobs; now uses .get('Failed', []) so an empty window produces
      an empty failure report instead of a crash.
    * dict.iteritems() (Python-2-only) replaced by .items(), and print
      statements rewritten as single-argument print(...) calls — both forms
      behave identically under Python 2 and also run under Python 3.

    NOTE(review): the stdout/stderr parameters are accepted but currently
    unused (output goes to the real sys.stdout); kept for interface
    compatibility.  Returns 0 on success.
    """
    args = parse_arguments(arguments)

    api = arvados.api('v1')

    # Default window: the last 24 hours, ending now.
    now = datetime.datetime.utcnow()
    start_time = args.start or api_timestamp(now - datetime.timedelta(days=1))
    end_time = args.end or api_timestamp(now)

    # Find all jobs created within the specified window,
    # and group them by state.
    jobs_created = jobs_created_between_dates(api, start_time, end_time)
    jobs_by_state = {}
    for job in jobs_created:
        jobs_by_state.setdefault(job['state'], []).append(job)

    # Find failed jobs and record the job failure text.

    # failure_stats maps failure types (e.g. "sys/docker") to
    # a set of job UUIDs that failed for that reason.
    failure_stats = {}
    # .get() so a window with no failed jobs doesn't raise KeyError.
    jobs_failed = jobs_by_state.get('Failed', [])
    for job in jobs_failed:
        job_uuid = job['uuid']
        logs = job_logs(api, job)
        # Find the first permanent task failure, and collect the
        # preceding log lines.
        failure_type = None
        for i, lg in enumerate(logs):
            if is_failed_task(lg):
                # Get preceding log records to provide context.
                log_start = max(i - LOG_CONTEXT_LINES, 0)
                lastlogs = ''.join(logs[log_start:i + 1])
                # Try to identify the type of failure.
                for key, rgx in JOB_FAILURE_TYPES.items():
                    if re.search(rgx, lastlogs):
                        failure_type = key
                        break
            if failure_type is not None:
                break
        if failure_type is None:
            failure_type = 'unknown'
        failure_stats.setdefault(failure_type, set()).add(job_uuid)

    # Report percentages of successful, failed and unfinished jobs.
    print("Start: {:20s}".format(start_time))
    print("End:   {:20s}".format(end_time))
    print("")

    print("Overview")
    print("")

    job_start_count = len(jobs_created)
    print("  {: <25s} {:4d}".format('Started', job_start_count))
    for state in ['Complete', 'Failed', 'Queued', 'Cancelled', 'Running']:
        if state in jobs_by_state:
            # job_start_count >= 1 here: this state has at least one job.
            job_count = len(jobs_by_state[state])
            job_percentage = job_count / float(job_start_count)
            print("  {: <25s} {:4d} ({: >4.0%})".format(state,
                                                        job_count,
                                                        job_percentage))
    print("")

    # Report failure types.
    failure_summary = ""
    failure_detail = ""

    # Generate a mapping from failed job uuids to job records, to assist
    # in generating detailed statistics for job failures.
    jobs_failed_map = {job['uuid']: job for job in jobs_failed}

    # Sort the failure stats in descending order by occurrence.
    sorted_failures = sorted(failure_stats,
                             reverse=True,
                             key=lambda failure_type: len(failure_stats[failure_type]))
    for failtype in sorted_failures:
        # len(jobs_failed) >= 1 whenever this loop runs, so no ZeroDivisionError.
        job_uuids = failure_stats[failtype]
        failstat = "  {: <25s} {:4d} ({: >4.0%})\n".format(
            failtype,
            len(job_uuids),
            len(job_uuids) / float(len(jobs_failed)))
        failure_summary = failure_summary + failstat
        failure_detail = failure_detail + failstat
        for j in job_uuids:
            job_info = jobs_failed_map[j]
            job_owner = job_user_name(api, job_info['modified_by_user_uuid'])
            job_name = job_pipeline_name(api, job_info['uuid'])
            failure_detail = failure_detail + "    {}  {: <15.15s}  {:29.29s}\n".format(j, job_owner, job_name)
        failure_detail = failure_detail + "\n"

    print("Failures by class")
    print("")
    print(failure_summary)

    print("Failures by class (detail)")
    print("")
    print(failure_detail)

    return 0
+
+
# Script entry point: exit status is main()'s return value (0 on success).
if __name__ == "__main__":
    sys.exit(main())
diff --git a/services/api/script/get_anonymous_user_token.rb b/services/api/script/get_anonymous_user_token.rb
new file mode 100755 (executable)
index 0000000..6964af0
--- /dev/null
@@ -0,0 +1,50 @@
#!/usr/bin/env ruby

# Get or Create an anonymous user token.
# If get option is used, an existing anonymous user token is returned. If none exist, one is created.
# If the get option is omitted, a new token is created and returned.

require 'trollop'

opts = Trollop::options do
  banner ''
  banner "Usage: get_anonymous_user_token "
  banner ''
  opt :get, <<-eos
Get an existing anonymous user token. If no such token exists \
or if this option is omitted, a new token is created and returned.
  eos
end

get_existing = opts[:get]

# Load the full Rails environment so models and helpers are available.
require File.dirname(__FILE__) + '/../config/environment'

include ApplicationHelper
act_as_system_user

# Create a read-only ("GET /") token for the anonymous user with an
# effectively non-expiring lifetime (100 years).
def create_api_client_auth
  api_client_auth = ApiClientAuthorization.
    new(user: anonymous_user,
        api_client_id: 0,
        expires_at: Time.now + 100.years,
        scopes: ['GET /'])
  api_client_auth.save!
  api_client_auth.reload
end

if get_existing
  # Look for an unexpired anonymous-user token whose scopes are exactly ['GET /'].
  api_client_auth = ApiClientAuthorization.
    where('user_id=?', anonymous_user.id.to_i).
    where('expires_at>?', Time.now).
    select { |auth| auth.scopes == ['GET /'] }.
    first
end

# either not a get or no api_client_auth was found
if !api_client_auth
  api_client_auth = create_api_client_auth
end

# print it to the console
puts api_client_auth.api_token
diff --git a/services/api/script/rails b/services/api/script/rails
new file mode 100755 (executable)
index 0000000..901460c
--- /dev/null
@@ -0,0 +1,34 @@
#!/usr/bin/env ruby
# This command will automatically be run when you run "rails" with Rails 3 gems installed from the root of your application.


##### SSL - ward, 2012-10-15
# Monkey-patch Rails::Server defaults so `script/rails server` starts WEBrick
# on port 3030 with an auto-generated self-signed SSL certificate
# (development convenience only; clients are not verified).
require 'rubygems'
require 'rails/commands/server'
require 'rack'
require 'webrick'
require 'webrick/https'

module Rails
    class Server < ::Rack::Server
        def default_options
            super.merge({
                :Port => 3030,
                :environment => (ENV['RAILS_ENV'] || "development").dup,
                :daemonize => false,
                :debugger => false,
                :pid => File.expand_path("tmp/pids/server.pid"),
                :config => File.expand_path("config.ru"),
                :SSLEnable => true,
                # Self-signed certificate named after the host; no client certs.
                :SSLVerifyClient => OpenSSL::SSL::VERIFY_NONE,
                :SSLCertName => [["CN", "#{WEBrick::Utils::getservername} #{Time.now().to_s}"]]
            })
        end
    end
end
######### /SSL


APP_PATH = File.expand_path('../../config/application',  __FILE__)
require File.expand_path('../../config/boot',  __FILE__)
require 'rails/commands'
diff --git a/services/api/script/rake_test.sh b/services/api/script/rake_test.sh
new file mode 100755 (executable)
index 0000000..ab91aec
--- /dev/null
@@ -0,0 +1,13 @@
#! /bin/sh

# This script invokes `rake test' in a fresh Docker instance of the
# API server, e.g.:
#   docker run -t -i arvados/api /usr/src/arvados/services/api/script/rake_test.sh

# The test suite needs a running database.
/etc/init.d/postgresql start

export RAILS_ENV=test
cd /usr/src/arvados/services/api
# Use the example test configuration shipped with the source tree.
cp config/environments/test.rb.example config/environments/test.rb
bundle exec rake db:setup
bundle exec rake test
diff --git a/services/api/script/restart-dns-server b/services/api/script/restart-dns-server
new file mode 100755 (executable)
index 0000000..061856c
--- /dev/null
@@ -0,0 +1,34 @@
#!/usr/bin/env bash

# usage:
# "restart-dns-server <path-to-restart.txt>" (restart now if needed)
# or
# "restart-dns-server <path-to-restart.txt> -d" (wait for restart to be needed, restart, repeat)

RESTART_TXT_PATH=$1

if [[ "$RESTART_TXT_PATH" == "" ]]; then
  echo
  echo "Usage: "
  echo "   $0 <path-to-restart.txt>      # restart now if needed"
  echo "   $0 <path-to-restart.txt> -d   # wait for restart to be needed, restart, repeat"
  echo
  exit 1
fi

while :
do
  # Quote the path everywhere: an unquoted expansion breaks (or worse,
  # globs) when the path contains spaces or shell metacharacters.
  if [ -e "$RESTART_TXT_PATH" ]; then
    RESTART_COMMAND=$(cat "$RESTART_TXT_PATH")
    echo "restart command: $RESTART_COMMAND"
    # Remove the trigger before running the command so another restart can
    # be requested while this one is in progress.
    rm -f "$RESTART_TXT_PATH"
    echo restarting
    # Intentionally unquoted: the file contains a whole command line that
    # must undergo word splitting to be executed.
    $RESTART_COMMAND
  fi
  if [ "-d" = "$2" ]
  then
    sleep 2
  else
    exit 0
  fi
done
diff --git a/services/api/script/setup-new-user.rb b/services/api/script/setup-new-user.rb
new file mode 100755 (executable)
index 0000000..af0de13
--- /dev/null
@@ -0,0 +1,71 @@
#!/usr/bin/env ruby

abort 'Error: Ruby >= 1.9.3 required.' if RUBY_VERSION < '1.9.3'

require 'logger'
require 'trollop'

log = Logger.new STDERR
log.progname = $0.split('/').last

opts = Trollop::options do
  banner ''
  banner "Usage: #{log.progname} " +
    "{user_uuid_or_email} {user_and_repo_name} {vm_uuid}"
  banner ''
  opt :debug, <<-eos
Show debug messages.
  eos
  opt :openid_prefix, <<-eos, default: 'https://www.google.com/accounts/o8/id'
If creating a new user record, require authentication from an OpenID \
with this OpenID prefix *and* a matching email address in order to \
claim the account.
  eos
  opt :send_notification_email, <<-eos, default: 'true'
Send notification email after successfully setting up the user.
  eos
end

log.level = (ENV['DEBUG'] || opts.debug) ? Logger::DEBUG : Logger::WARN

if ARGV.count != 3
  Trollop::die "required arguments are missing"
end

user_arg, user_repo_name, vm_uuid = ARGV

require 'arvados'
arv = Arvados.new(api_version: 'v1')

# Look up the given user by uuid or, failing that, email address.
begin
  found_user = arv.user.get(uuid: user_arg)
rescue Arvados::TransactionFailedError
  # Not a valid uuid (or the lookup failed); fall back to an email search.
  found = arv.user.list(where: {email: user_arg})[:items]

  if found.count == 0
    # No match: a brand-new user will be created below, but only if the
    # argument at least looks like an email address.
    if !user_arg.match(/\w\@\w+\.\w+/)
      abort "About to create new user, but #{user_arg.inspect} " +
               "does not look like an email address. Stop."
    end
  elsif found.count != 1
    # Refuse to guess when the email matches several accounts.
    abort "Found #{found.count} users with email. Stop."
  else
    found_user = found.first
  end
end

# Invoke user setup method
# (found_user is nil when the email matched no account; in that case a new
# user record is created from the email address.)
if (found_user)
  user = arv.user.setup uuid: found_user[:uuid], repo_name: user_repo_name,
          vm_uuid: vm_uuid, openid_prefix: opts.openid_prefix,
          send_notification_email: opts.send_notification_email
else
  user = arv.user.setup user: {email: user_arg}, repo_name: user_repo_name,
          vm_uuid: vm_uuid, openid_prefix: opts.openid_prefix,
          send_notification_email: opts.send_notification_email
end

log.info {"user uuid: " + user[:uuid]}

puts user.inspect
diff --git a/services/api/test/factories/api_client.rb b/services/api/test/factories/api_client.rb
new file mode 100644 (file)
index 0000000..7921c35
--- /dev/null
@@ -0,0 +1,10 @@
# Factory for ApiClient records.  Saves while acting as the system user,
# since the record creation bypasses normal permission checks.
FactoryGirl.define do
  factory :api_client do
    is_trusted false
    to_create do |instance|
      act_as_system_user do
        instance.save!
      end
    end
  end
end
diff --git a/services/api/test/factories/api_client_authorization.rb b/services/api/test/factories/api_client_authorization.rb
new file mode 100644 (file)
index 0000000..8bd569e
--- /dev/null
@@ -0,0 +1,19 @@
# Factory for ApiClientAuthorization (API tokens), full 'all' scope by default.
FactoryGirl.define do
  factory :api_client_authorization do
    api_client
    scopes ['all']

    # Token issued through a trusted API client.
    trait :trusted do
      association :api_client, factory: :api_client, is_trusted: true
    end
    factory :token do
      # Just provides shorthand for "create :api_client_authorization"
    end

    # Save while acting as the token's own user.
    to_create do |instance|
      act_as_user instance.user do
        instance.save!
      end
    end
  end
end
diff --git a/services/api/test/factories/group.rb b/services/api/test/factories/group.rb
new file mode 100644 (file)
index 0000000..70358e6
--- /dev/null
@@ -0,0 +1,4 @@
# Minimal Group factory: every attribute takes the model's default.
FactoryGirl.define do
  factory :group do
  end
end
diff --git a/services/api/test/factories/link.rb b/services/api/test/factories/link.rb
new file mode 100644 (file)
index 0000000..8a4649d
--- /dev/null
@@ -0,0 +1,7 @@
# Link factory; the :permission_link variant presets link_class to
# 'permission' for permission-grant links.
FactoryGirl.define do
  factory :link do
    factory :permission_link do
      link_class 'permission'
    end
  end
end
diff --git a/services/api/test/factories/user.rb b/services/api/test/factories/user.rb
new file mode 100644 (file)
index 0000000..56e9125
--- /dev/null
@@ -0,0 +1,44 @@
include CurrentApiClient

FactoryGirl.define do
  factory :user do
    # Transient attribute: groups the new user should share read access with.
    ignore do
      join_groups []
    end
    # Create reciprocal can_read permission links between the user and each
    # group listed in join_groups.
    after :create do |user, evaluator|
      act_as_system_user do
        evaluator.join_groups.each do |g|
          Link.create!(tail_uuid: user.uuid,
                       head_uuid: g.uuid,
                       link_class: 'permission',
                       name: 'can_read')
          Link.create!(tail_uuid: g.uuid,
                       head_uuid: user.uuid,
                       link_class: 'permission',
                       name: 'can_read')
        end
      end
    end
    first_name "Factory"
    last_name "Factory"
    identity_url do
      "https://example.com/#{rand(2**24).to_s(36)}"
    end
    # Activated user variant: also granted read access to the group whose
    # uuid ends in 'f' characters (presumably the "all users" group —
    # TODO confirm against the fixture conventions).
    factory :active_user do
      is_active true
      after :create do |user|
        act_as_system_user do
          Link.create!(tail_uuid: user.uuid,
                       head_uuid: Group.where('uuid ~ ?', '-f+$').first.uuid,
                       link_class: 'permission',
                       name: 'can_read')
        end
      end
    end
    # Saving a user requires system-user privileges.
    to_create do |instance|
      act_as_system_user do
        instance.save!
      end
    end
  end
end
diff --git a/services/api/test/fixtures/.gitkeep b/services/api/test/fixtures/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/api/test/fixtures/api_client_authorizations.yml b/services/api/test/fixtures/api_client_authorizations.yml
new file mode 100644 (file)
index 0000000..0b4d874
--- /dev/null
@@ -0,0 +1,207 @@
+# Read about fixtures at http://api.rubyonrails.org/classes/ActiveRecord/Fixtures.html
+
+admin:
+  api_client: untrusted
+  user: admin
+  api_token: 4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h
+  expires_at: 2038-01-01 00:00:00
+
+admin_trustedclient:
+  api_client: trusted_workbench
+  user: admin
+  api_token: 1a9ffdcga2o7cw8q12dndskomgs1ygli3ns9k2o9hgzgmktc78
+  expires_at: 2038-01-01 00:00:00
+
+miniadmin:
+  api_client: untrusted
+  user: miniadmin
+  api_token: 2zb2y9pw3e70270te7oe3ewaantea3adyxjascvkz0zob7q7xb
+  expires_at: 2038-01-01 00:00:00
+
+rominiadmin:
+  api_client: untrusted
+  user: rominiadmin
+  api_token: 5tsb2pc3zlatn1ortl98s2tqsehpby88wmmnzmpsjmzwa6payh
+  expires_at: 2038-01-01 00:00:00
+
+active:
+  api_client: untrusted
+  user: active
+  api_token: 3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi
+  expires_at: 2038-01-01 00:00:00
+
+active_trustedclient:
+  api_client: trusted_workbench
+  user: active
+  api_token: 27bnddk6x2nmq00a1e3gq43n9tsl5v87a3faqar2ijj8tud5en
+  expires_at: 2038-01-01 00:00:00
+
+active_noscope:
+  api_client: untrusted
+  user: active
+  api_token: activenoscopeabcdefghijklmnopqrstuvwxyz12345678901
+  expires_at: 2038-01-01 00:00:00
+  scopes: []
+
+project_viewer:
+  api_client: untrusted
+  user: project_viewer
+  api_token: projectviewertoken1234567890abcdefghijklmnopqrstuv
+  expires_at: 2038-01-01 00:00:00
+
+project_viewer_trustedclient:
+  api_client: trusted_workbench
+  user: project_viewer
+  api_token: projectviewertrustedtoken1234567890abcdefghijklmno
+  expires_at: 2038-01-01 00:00:00
+
+subproject_admin:
+  api_client: untrusted
+  user: subproject_admin
+  api_token: subprojectadmintoken1234567890abcdefghijklmnopqrst
+  expires_at: 2038-01-01 00:00:00
+
+admin_vm:
+  api_client: untrusted
+  user: admin
+  api_token: adminvirtualmachineabcdefghijklmnopqrstuvwxyz12345
+  expires_at: 2038-01-01 00:00:00
+  # scope refers to the testvm fixture.
+  scopes: ["GET /arvados/v1/virtual_machines/zzzzz-2x53u-382brsig8rp3064/logins"]
+
+admin_noscope:
+  api_client: untrusted
+  user: admin
+  api_token: adminnoscopeabcdefghijklmnopqrstuvwxyz123456789012
+  expires_at: 2038-01-01 00:00:00
+  scopes: []
+
+active_all_collections:
+  api_client: untrusted
+  user: active
+  api_token: activecollectionsabcdefghijklmnopqrstuvwxyz1234567
+  expires_at: 2038-01-01 00:00:00
+  scopes: ["GET /arvados/v1/collections/", "GET /arvados/v1/keep_disks"]
+
+active_userlist:
+  api_client: untrusted
+  user: active
+  api_token: activeuserlistabcdefghijklmnopqrstuvwxyz1234568900
+  expires_at: 2038-01-01 00:00:00
+  scopes: ["GET /arvados/v1/users"]
+
+active_specimens:
+  api_client: untrusted
+  user: active
+  api_token: activespecimensabcdefghijklmnopqrstuvwxyz123456890
+  expires_at: 2038-01-01 00:00:00
+  scopes: ["GET /arvados/v1/specimens/"]
+
+active_apitokens:
+  api_client: trusted_workbench
+  user: active
+  api_token: activeapitokensabcdefghijklmnopqrstuvwxyz123456789
+  expires_at: 2038-01-01 00:00:00
+  scopes: ["GET /arvados/v1/api_client_authorizations",
+           "POST /arvados/v1/api_client_authorizations"]
+
+active_readonly:
+  api_client: untrusted
+  user: active
+  api_token: activereadonlyabcdefghijklmnopqrstuvwxyz1234568790
+  expires_at: 2038-01-01 00:00:00
+  scopes: ["GET /"]
+
+spectator:
+  api_client: untrusted
+  user: spectator
+  api_token: zw2f4gwx8hw8cjre7yp6v1zylhrhn3m5gvjq73rtpwhmknrybu
+  expires_at: 2038-01-01 00:00:00
+
+spectator_specimens:
+  api_client: untrusted
+  user: spectator
+  api_token: spectatorspecimensabcdefghijklmnopqrstuvwxyz123245
+  expires_at: 2038-01-01 00:00:00
+  scopes: ["GET /arvados/v1/specimens", "GET /arvados/v1/specimens/",
+           "POST /arvados/v1/specimens"]
+
+inactive:
+  api_client: untrusted
+  user: inactive
+  api_token: 5s29oj2hzmcmpq80hx9cta0rl5wuf3xfd6r7disusaptz7h9m0
+  expires_at: 2038-01-01 00:00:00
+
+inactive_uninvited:
+  api_client: untrusted
+  user: inactive_uninvited
+  api_token: 62mhllc0otp78v08e3rpa3nsmf8q8ogk47f7u5z4erp5gpj9al
+  expires_at: 2038-01-01 00:00:00
+
+inactive_but_signed_user_agreement:
+  api_client: untrusted
+  user: inactive_but_signed_user_agreement
+  api_token: 64k3bzw37iwpdlexczj02rw3m333rrb8ydvn2qq99ohv68so5k
+  expires_at: 2038-01-01 00:00:00
+
+expired:
+  api_client: untrusted
+  user: active
+  api_token: 2ym314ysp27sk7h943q6vtc378srb06se3pq6ghurylyf3pdmx
+  expires_at: 1970-01-01 00:00:00
+
+expired_trustedclient:
+  api_client: trusted_workbench
+  user: active
+  api_token: 5hpni7izokzcatku2896xxwqdbt5ptomn04r6auc7fohnli82v
+  expires_at: 1970-01-01 00:00:00
+
+valid_token_deleted_user:
+  api_client: trusted_workbench
+  user_id: 1234567
+  api_token: tewfa58099sndckyqhlgd37za6e47o6h03r9l1vpll23hudm8b
+  expires_at: 2038-01-01 00:00:00
+
+anonymous:
+  api_client: untrusted
+  user: anonymous
+  api_token: 4kg6k6lzmp9kj4cpkcoxie964cmvjahbt4fod9zru44k4jqdmi
+  expires_at: 2038-01-01 00:00:00
+  scopes: ["GET /"]
+
+job_reader:
+  api_client: untrusted
+  user: job_reader
+  api_token: e99512cdc0f3415c2428b9758f33bdfb07bc3561b00e86e7e6
+  expires_at: 2038-01-01 00:00:00
+
+active_no_prefs:
+  api_client: untrusted
+  user: active_no_prefs
+  api_token: 3kg612cdc0f3415c2428b9758f33bdfb07bc3561b00e86qdmi
+  expires_at: 2038-01-01 00:00:00
+
+active_no_prefs_profile:
+  api_client: untrusted
+  user: active_no_prefs_profile
+  api_token: 3kg612cdc0f3415c242856758f33bdfb07bc3561b00e86qdmi
+  expires_at: 2038-01-01 00:00:00
+
+user_foo_in_sharing_group:
+  api_client: untrusted
+  user: user_foo_in_sharing_group
+  api_token: 2p1pou8p4ls208mcbedeewlotghppenobcyrmyhq8pyf51xd8u
+  expires_at: 2038-01-01 00:00:00
+
+user1_with_load:
+  api_client: untrusted
+  user: user1_with_load
+  api_token: 1234k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi
+  expires_at: 2038-01-01 00:00:00
+
+fuse:
+  api_client: untrusted
+  user: fuse
+  api_token: 4nagbkv8eap0uok7pxm72nossq5asihls3yn5p4xmvqx5t5e7p
+  expires_at: 2038-01-01 00:00:00
+
diff --git a/services/api/test/fixtures/api_clients.yml b/services/api/test/fixtures/api_clients.yml
new file mode 100644 (file)
index 0000000..79bddf0
--- /dev/null
@@ -0,0 +1,15 @@
+# Read about fixtures at http://api.rubyonrails.org/classes/ActiveRecord/Fixtures.html
+
+trusted_workbench:
+  uuid: zzzzz-ozdt8-teyxzyd8qllg11h
+  owner_uuid: zzzzz-tpzed-000000000000000
+  name: Official Workbench
+  url_prefix: https://official-workbench.local/
+  is_trusted: true
+
+untrusted:
+  uuid: zzzzz-ozdt8-obw7foaks3qjyej
+  owner_uuid: zzzzz-tpzed-000000000000000
+  name: Untrusted
+  url_prefix: https://untrusted.local/
+  is_trusted: false
diff --git a/services/api/test/fixtures/authorized_keys.yml b/services/api/test/fixtures/authorized_keys.yml
new file mode 100644 (file)
index 0000000..b0103fa
--- /dev/null
@@ -0,0 +1,31 @@
+active:
+  uuid: zzzzz-fngyi-12nc9ov4osp8nae
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  authorized_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  key_type: SSH
+  name: active
+  public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCo+8pc/xNohU3Mo2pAieLohLJcWy9OmNOnsEWlegYYoeynkczimicKRmB2iP50v2oKrtshIXwigfU26b0rGEJayFvsA7FCstz5G/tJy3YJGnQUDmrQBuB8SsQDL/O0Nnh8B8XmKSlxuv3FxLyPhUmcxxjIUIEMWVMlIKAfzmySsPby/QREJffUkFPa+luNkOVd5cyvwd6dnl0SLbrqZgcF3fbkOLDVgv3oceIYLjcy/SjqGR4wtGWHFFuna0M2/5YEvWpxD/HNO3WkFEdlAUEEWpvd/u3bmHq2p7ADbaX9ZaNDb8YbjFIOUxaJh+Vf0V6nDhEnUPylzM07F3fnvXQM53Xu5oYA6cp0Com61MBaXUDwM/w6PS2RtF8CG3ICMs5AsIy+Cnsuowj3fRlK29dgZ7K2pYRV2SlQj4vxjwpUcQCL/TFv31VnCMFKQBqmqh8iwZV3U6LLc3cwL9COXnIPF4lXjODL3geWsBNXo3hfoj6qD+2/+9/zOZUtGbQXlBmNC/wG/cK1A1L4S9docZT4QAiaSCdwcLB68hIvQMEOpffoeQhNZj0SddLLdEyjJY6rfWjbmnV68TzXoDz26hoPtagD+wvHOxz3D8BQ9RIqfNI1jNlwVkoKNVfszIPmESwJCu99+6TnyJl4923MTEXNOrJ7LgVUemWchOlkTDINuw== active-user@arvados.local
+
+admin:
+  uuid: zzzzz-fngyi-g290j3i3u701duh
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  authorized_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  key_type: SSH
+  name: admin
+  public_key: ssh-dss AAAAB3NzaC1kc3MAAACBAKy1IDMGwa7/Yjas77vLSShBE3SzpPXqXu6nRMC9zdIoMdctjhfP+GOOyQQP12rMs16NYmfdOxX+sa2t9syI/8NhDxTmNbHVw2jHimC6SL02v8WHDIw2vaBCVN+CHdeYbZsBB/8/M+2PO3uUWbr0TjoXcxrKYScS/aTTjSAWRg4ZAAAAFQDR/xAdrewj1ORNIQs+kWWdjmiO0wAAAIBC+G92r2ZeGaHLCMI0foKnfuQzg9fKp5krEvE6tvRNju7iOqtB9xe1qsAqr6GPZQjfSrNPac6T1pxMoh+an4PfNs5xgBIpvy93oqALd4maQt6483vsIyVCw6nQD7s/8IpIHpwxFEFs5/5moYxzY64eY0ldSXJwvPsrBTruhuUdugAAAIBut96rWQYTnYUdngyUK9EoJzgKn3l7gg0IQoFC4hS96D8vUm0wIdSEQHt01pSc0KR1Nnb4JrnNz/qCH45wOy5oB9msQ/2Pq2brTDZJcIPcN1LbMCps9PetUruz1OjK1NzDuLmvsrP3GBLxJrtmrCoKHLzPZ6QSefW0OymFgaDFGg==
+
+spectator:
+  uuid: zzzzz-fngyi-3uze1ipbnz2c2c2
+  owner_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
+  authorized_user_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
+  key_type: SSH
+  name: spectator
+  public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDJK4hxmgXzg1gty+91JfkpgikAZxTvFTQoaFUJYTHIygz2V3FgU64NkK3yfwh+bhs7n8YIMftuCHfojKEJTtedbiv/mYpItetzdOwYONCGSEk1VnfipGhnFvL7FZDESTxLN9KNve3ZmZh8HvO6s8fdlTlqTTNKpsdwLiQn2s3W1TWvru/NP504MD5qPeZ4+8jZEh/uiuRaeXqPDAlE9QGPV4FRAA1xo0dBZIrRMwQC8kOttq/i2pLgHq1xW9p4J23oV68O/kkeBb7VwrX3Av/M61kvRsP8tA5gqh+HMKVO2qTP4yG6eGkAobIokQAcyZetPQIDmfVeoB0NzwPfAy4r
+
+project_viewer:
+  uuid: zzzzz-fngyi-5d3av1396niwcej
+  owner_uuid: zzzzz-tpzed-projectviewer1a
+  authorized_user_uuid: zzzzz-tpzed-projectviewer1a
+  key_type: SSH
+  name: project_viewer
+  public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPkOJMQzT9n6RousrLMU7c/KFKTI7I5JifDIEtGJJ1MMZW0GVoxtXALU90HcaRjEOwGPvQPxj7IDYqXs2N9uvm8SUWJMiz6c8NIjhGTkUoOnTFl4E9YTvkkKNs0P+3eT1Y+6zfTcFJHKP3AR4kZX+oiPHowRpCIlnLjXCFxX+E+YI554A7bS4yfOZO9lf6vtiT9I+6EqxC8a0hzZauPC1ZC3d/AFgBnrXJ2fBlAEySznru39quHN1u3v4qHTyaO2pDbG6vdI6O3JDCXCJKRv/B2FLuLTlzB0YesM1FiE6w8QgPxqb42B+uWTZb969UZliH8Pzw/mscOLAjmARDC02z
diff --git a/services/api/test/fixtures/collections.yml b/services/api/test/fixtures/collections.yml
new file mode 100644 (file)
index 0000000..f28606a
--- /dev/null
@@ -0,0 +1,395 @@
+user_agreement:
+  uuid: zzzzz-4zz18-t68oksiu9m80s4y
+  portable_data_hash: b519d9cb706a29fc7ea24dbea2f05851+93
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2013-12-26T19:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2013-12-26T19:22:54Z
+  updated_at: 2013-12-26T19:22:54Z
+  manifest_text: ". 6a4ff0499484c6c79c95cd8c566bd25f+249025 0:249025:GNU_General_Public_License,_version_3.pdf\n"
+  name: user_agreement
+
+collection_owned_by_active:
+  uuid: zzzzz-4zz18-bv31uwvy3neko21
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: owned_by_active
+
+foo_file:
+  uuid: zzzzz-4zz18-znfnqtbbv4spc3w
+  portable_data_hash: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
+  name: foo_file
+
+bar_file:
+  uuid: zzzzz-4zz18-ehbhgtheo8909or
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: bar_file
+
+baz_file:
+  uuid: zzzzz-4zz18-y9vne9npefyxh8g
+  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\n"
+  name: baz_file
+
+multilevel_collection_1:
+  uuid: zzzzz-4zz18-pyw8yp9g3pr7irn
+  portable_data_hash: 1fd08fc162a5c6413070a8bd0bffc818+150
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". 0:0:file1 0:0:file2 0:0:file3\n./dir1 0:0:file1 0:0:file2 0:0:file3\n./dir1/subdir 0:0:file1 0:0:file2 0:0:file3\n./dir2 0:0:file1 0:0:file2 0:0:file3\n"
+  name: multilevel_collection_1
+
+multilevel_collection_2:
+  uuid: zzzzz-4zz18-45xf9hw1sxkhl6q
+  # All of this collection's files are deep in subdirectories.
+  portable_data_hash: 80cf6dd2cf079dd13f272ec4245cb4a8+48
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: "./dir1/sub1 0:0:a 0:0:b\n./dir2/sub2 0:0:c 0:0:d\n"
+  name: multilevel_collection_2
+
+docker_image:
+  uuid: zzzzz-4zz18-1v45jub259sjjgb
+  # This Collection has links with Docker image metadata.
+  portable_data_hash: fa3c1a9cb6783f85f2ecda037e07b8c3+167
+  owner_uuid: qr1hi-tpzed-000000000000000
+  created_at: 2014-06-11T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-06-11T17:22:54Z
+  updated_at: 2014-06-11T17:22:54Z
+  manifest_text: ". d21353cfe035e3e384563ee55eadbb2f+67108864 5c77a43e329b9838cbec18ff42790e57+55605760 0:122714624:d8309758b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678.tar\n"
+  name: docker_image
+
+unlinked_docker_image:
+  uuid: zzzzz-4zz18-d0d8z5wofvfgwad
+  # This Collection contains a file that looks like a Docker image,
+  # but has no Docker metadata links pointing to it.
+  portable_data_hash: 9ae44d5792468c58bcf85ce7353c7027+124
+  owner_uuid: qr1hi-tpzed-000000000000000
+  created_at: 2014-06-11T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-06-11T17:22:54Z
+  updated_at: 2014-06-11T17:22:54Z
+  manifest_text: ". fca529cfe035e3e384563ee55eadbb2f+67108863 0:67108863:bcd02158b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678.tar\n"
+  name: unlinked_docker_image
+
+empty:
+  uuid: zzzzz-4zz18-gs9ooj1h9sd5mde
+  # Empty collection owned by anonymous_group is added with rake db:seed.
+  portable_data_hash: d41d8cd98f00b204e9800998ecf8427e+0
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-06-11T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-06-11T17:22:54Z
+  updated_at: 2014-06-11T17:22:54Z
+  manifest_text: ""
+  name: empty_collection
+
+foo_collection_in_aproject:
+  uuid: zzzzz-4zz18-fy296fx3hot09f7
+  portable_data_hash: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  manifest_text: ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
+  name: "zzzzz-4zz18-fy296fx3hot09f7 added sometime"
+
+user_agreement_in_anonymously_accessible_project:
+  uuid: zzzzz-4zz18-uukreo9rbgwsujr
+  portable_data_hash: b519d9cb706a29fc7ea24dbea2f05851+93
+  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  created_at: 2014-06-13 20:42:26 -0800
+  modified_at: 2014-06-13 20:42:26 -0800
+  updated_at: 2014-06-13 20:42:26 -0800
+  manifest_text: ". 6a4ff0499484c6c79c95cd8c566bd25f+249025 0:249025:GNU_General_Public_License,_version_3.pdf\n"
+  name: GNU General Public License, version 3
+
+baz_collection_name_in_asubproject:
+  uuid: zzzzz-4zz18-lsitwcf548ui4oe
+  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
+  owner_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  manifest_text: ". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\n"
+  name: "zzzzz-4zz18-lsitwcf548ui4oe added sometime"
+
+empty_collection_name_in_active_user_home_project:
+  uuid: zzzzz-4zz18-5qa38qghh1j3nvv
+  portable_data_hash: d41d8cd98f00b204e9800998ecf8427e+0
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-08-06 22:11:51.242392533 Z
+  modified_at: 2014-08-06 22:11:51.242150425 Z
+  manifest_text: ""
+  name: Empty collection
+
+baz_file_in_asubproject:
+  uuid: zzzzz-4zz18-0mri2x4u7ftngez
+  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
+  owner_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\n"
+  name: baz_file
+
+collection_to_move_around_in_aproject:
+  uuid: zzzzz-4zz18-0mri2x4u7ft1234
+  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\n"
+  name: collection_to_move_around
+
+expired_collection:
+  uuid: zzzzz-4zz18-mto52zx1s7sn3ih
+  portable_data_hash: 0b21a217243bfce5617fb9224b95bcb9+49
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  expires_at: 2001-01-01T00:00:00Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:expired\n"
+  name: expired_collection
+
+collection_expires_in_future:
+  uuid: zzzzz-4zz18-padkqo7yb8d9i3j
+  portable_data_hash: 0b21a217243bfce5617fb9224b95bcb9+49
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  expires_at: 2038-01-01T00:00:00Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:expired\n"
+  name: collection_expires_in_future
+
+# a collection with a log file that can be parsed by the log viewer
+# This collection hash matches the following log text:
+#    2014-01-01_12:00:01 zzzzz-8i9sb-abcdefghijklmno 0  log message 1
+#    2014-01-01_12:00:02 zzzzz-8i9sb-abcdefghijklmno 0  log message 2
+#    2014-01-01_12:00:03 zzzzz-8i9sb-abcdefghijklmno 0  log message 3
+#
+real_log_collection:
+  uuid: zzzzz-4zz18-op4e2lbej01tcvu
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-09-01 12:00:00
+  portable_data_hash: 0b9a7787660e1fce4a93f33e01376ba6+81
+  manifest_text: ". cdd549ae79fe6640fa3d5c6261d8303c+195 0:195:zzzzz-8i9sb-0vsrcqi7whchuil.log.txt\n"
+  name: real_log_collection
+
+collection_in_home_project_with_same_name_as_in_aproject:
+  uuid: zzzzz-4zz18-12342x4u7ftabcd
+  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\n"
+  name: collection_with_same_name_in_aproject_and_home_project
+
+collection_in_aproject_with_same_name_as_in_home_project:
+  uuid: zzzzz-4zz18-56782x4u7ftefgh
+  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\n"
+  name: collection_with_same_name_in_aproject_and_home_project
+
+collection_owned_by_foo:
+  uuid: zzzzz-4zz18-50surkhkbhsp31b
+  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
+  manifest_text: ". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\n"
+  owner_uuid: zzzzz-tpzed-81hsbo6mk8nl05c
+  created_at: 2014-02-03T17:22:54Z
+  name: collection_owned_by_foo
+
+collection_to_remove_from_subproject:
+  # The Workbench tests remove this from subproject.
+  uuid: zzzzz-4zz18-subprojgonecoll
+  portable_data_hash: 2386ca6e3fffd4be5e197a72c6c80fb2+51
+  manifest_text: ". 8258b505536a9ab47baa2f4281cb932a+9 0:9:missingno\n"
+  owner_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
+  created_at: 2014-10-15T10:45:00
+  name: Collection to remove from subproject
+
+collection_with_files_in_subdir:
+  uuid: zzzzz-4zz18-filesinsubdir00
+  name: collection_files_in_subdir
+  portable_data_hash: 85877ca2d7e05498dd3d109baf2df106+95
+  owner_uuid: zzzzz-tpzed-user1withloadab
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-user1withloadab
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". 85877ca2d7e05498dd3d109baf2df106+95+A3a4e26a366ee7e4ed3e476ccf05354761be2e4ae@545a9920 0:95:file_in_subdir1\n./subdir2/subdir3 2bbc341c702df4d8f42ec31f16c10120+64+A315d7e7bad2ce937e711fc454fae2d1194d14d64@545a9920 0:32:file1_in_subdir3.txt 32:32:file2_in_subdir3.txt\n./subdir2/subdir3/subdir4 2bbc341c702df4d8f42ec31f16c10120+64+A315d7e7bad2ce937e711fc454fae2d1194d14d64@545a9920 0:32:file1_in_subdir4.txt 32:32:file2_in_subdir4.txt"
+
+graph_test_collection1:
+  uuid: zzzzz-4zz18-bv31uwvy3neko22
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: bar_file
+
+graph_test_collection2:
+  uuid: zzzzz-4zz18-uukreo9rbgwsujx
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  portable_data_hash: 65b17c95fdbc9800fc48acda4e9dcd0b+93
+  manifest_text: ". 6a4ff0499484c6c79c95cd8c566bd25f+249025 0:249025:FOO_General_Public_License,_version_3.pdf\n"
+  name: "FOO General Public License, version 3"
+
+graph_test_collection3:
+  uuid: zzzzz-4zz18-uukreo9rbgwsujj
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
+  manifest_text: ". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\n"
+  name: "baz file"
+
+collection_1_owned_by_fuse:
+  uuid: zzzzz-4zz18-ovx05bfzormx3bg
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  owner_uuid: zzzzz-tpzed-0fusedrivertest
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: "collection #1 owned by FUSE"
+
+collection_2_owned_by_fuse:
+  uuid: zzzzz-4zz18-8ubpy4w74twtwzr
+  portable_data_hash: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+  owner_uuid: zzzzz-tpzed-0fusedrivertest
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
+  name: "collection #2 owned by FUSE"
+
+collection_in_fuse_project:
+  uuid: zzzzz-4zz18-vx4mtkjqfrb534f
+  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
+  owner_uuid: zzzzz-j7d0g-0000ownedbyfuse
+  created_at: 2014-02-03T17:22:54Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-02-03T17:22:54Z
+  updated_at: 2014-02-03T17:22:54Z
+  manifest_text: ". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\n"
+  name: "collection in FUSE project"
+
+collection_with_no_name_in_aproject:
+  uuid: zzzzz-4zz18-00000nonamecoll
+  portable_data_hash: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  manifest_text: ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
+
+collection_to_search_for_in_aproject:
+  uuid: zzzzz-4zz18-abcd6fx123409f7
+  portable_data_hash: 5bd9c1ad0bc8c7f34be170a7b7b39089+45
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  manifest_text: ". juku76584cc2f85cedef654fjyhtgimh+3 0:3:foo\n"
+  name: "zzzzz-4zz18-abcd6fx123409f7 used to search with any"
+
+upload_sandbox:
+  uuid: zzzzz-4zz18-js48y3ykkfdfjd3
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-12-09 15:03:16
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-12-09 15:03:16
+  portable_data_hash: d41d8cd98f00b204e9800998ecf8427e+0
+  updated_at: 2014-12-09 15:03:16
+  manifest_text: ''
+  name: upload sandbox
+
+# Test Helper trims the rest of the file
+
+# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper
+
+# collections in project_with_10_collections
+<% for i in 1..10 do %>
+collection_<%=i%>_of_10:
+  name: Collection_<%= i %>
+  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
+  manifest_text: ". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\n"
+  uuid: zzzzz-4zz18-10gneyn6brkx<%= i.to_s.rjust(3, '0') %>
+  owner_uuid: zzzzz-j7d0g-0010collections
+  created_at: <%= i.minute.ago.to_s(:db) %>
+<% end %>
+
+# collections in project_with_201_collections
+<% for i in 1..201 do %>
+collection_<%=i%>_of_201:
+  name: Collection_<%= i %>
+  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
+  manifest_text: ". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\n"
+  uuid: zzzzz-4zz18-201gneyn6brd<%= i.to_s.rjust(3, '0') %>
+  owner_uuid: zzzzz-j7d0g-0201collections
+  created_at: <%= i.minute.ago.to_s(:db) %>
+<% end %>
+
+# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper
diff --git a/services/api/test/fixtures/groups.yml b/services/api/test/fixtures/groups.yml
new file mode 100644 (file)
index 0000000..86815c0
--- /dev/null
@@ -0,0 +1,238 @@
+public:
+  uuid: zzzzz-j7d0g-it30l961gq3t0oi
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  name: Public
+  description: Public Group
+  group_class: role
+
+private:
+  uuid: zzzzz-j7d0g-rew6elm53kancon
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: Private
+  description: Private Group
+  group_class: role
+
+private_and_can_read_foofile:
+  uuid: zzzzz-j7d0g-22xp1wpjul508rk
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: Private and Can Read Foofile
+  description: Another Private Group
+  group_class: role
+
+activeandfriends:
+  uuid: zzzzz-j7d0g-swqu6hmi4pa7bk7
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-08-22 14:02:18.481582707 Z
+  modified_by_client_uuid:
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-08-22 14:02:18.481319501 Z
+  name: Active User and friends
+  description:
+  updated_at: 2014-08-22 14:02:18.481166435 Z
+  group_class: role
+
+system_group:
+  uuid: zzzzz-j7d0g-000000000000000
+  owner_uuid: zzzzz-tpzed-000000000000000
+  name: System Private
+  description: System-owned Group
+  group_class: role
+
+empty_lonely_group:
+  uuid: zzzzz-j7d0g-jtp06ulmvsezgyu
+  owner_uuid: zzzzz-tpzed-000000000000000
+  name: Empty
+  description: Empty Group
+  group_class: role
+
+all_users:
+  uuid: zzzzz-j7d0g-fffffffffffffff
+  owner_uuid: zzzzz-tpzed-000000000000000
+  name: All users
+  description: All users
+  group_class: role
+
+testusergroup_admins:
+  uuid: zzzzz-j7d0g-48foin4vonvc2at
+  owner_uuid: zzzzz-tpzed-000000000000000
+  name: Administrators of a subset of users
+
+aproject:
+  uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  name: A Project
+  description: Test project belonging to active user
+  group_class: project
+
+asubproject:
+  uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  name: A Subproject
+  description: "Test project belonging to active user's first test project"
+  group_class: project
+
+future_project_viewing_group:
+  uuid: zzzzz-j7d0g-futrprojviewgrp
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  name: Future Project Viewing Group
+  description: "Group used to test granting Group Project viewing"
+  group_class: role
+
+bad_group_has_ownership_cycle_a:
+  uuid: zzzzz-j7d0g-cx2al9cqkmsf1hs
+  owner_uuid: zzzzz-j7d0g-0077nzts8c178lw
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-05-03 18:50:08 -0400
+  modified_at: 2014-05-03 18:50:08 -0400
+  updated_at: 2014-05-03 18:50:08 -0400
+  name: Owned by bad group b
+
+bad_group_has_ownership_cycle_b:
+  uuid: zzzzz-j7d0g-0077nzts8c178lw
+  owner_uuid: zzzzz-j7d0g-cx2al9cqkmsf1hs
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-05-03 18:50:08 -0400
+  modified_at: 2014-05-03 18:50:08 -0400
+  updated_at: 2014-05-03 18:50:08 -0400
+  name: Owned by bad group a
+
+anonymous_group:
+  uuid: zzzzz-j7d0g-anonymouspublic
+  owner_uuid: zzzzz-tpzed-000000000000000
+  name: Anonymous group
+  description: Anonymous group
+
+anonymously_accessible_project:
+  uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  name: Unrestricted public data
+  group_class: project
+  description: An anonymously accessible project
+
+active_user_has_can_manage:
+  uuid: zzzzz-j7d0g-ptt1ou6a9lxrv07
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  name: Active user has can_manage
+
+# Group for testing granting permission between users who share a group.
+group_for_sharing_tests:
+  uuid: zzzzz-j7d0g-t4ucgncwteul7zt
+  owner_uuid: zzzzz-tpzed-000000000000000
+  name: Group for sharing tests
+  description: Users who can share objects with each other
+  group_class: role
+
+empty_project:
+  uuid: zzzzz-j7d0g-9otoxmrksam74q6
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-12-16 15:56:27.967534940 Z
+  modified_by_client_uuid: ~
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-12-16 15:56:27.967358199 Z
+  name: Empty project
+  description: ~
+  updated_at: 2014-12-16 15:56:27.967242142 Z
+  group_class: project
+
+project_with_10_collections:
+  uuid: zzzzz-j7d0g-0010collections
+  owner_uuid: zzzzz-tpzed-user1withloadab
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-user1withloadab
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  name: project with 10 collections
+  description: This will result in one page in the display
+  group_class: project
+
+project_with_201_collections:
+  uuid: zzzzz-j7d0g-0201collections
+  owner_uuid: zzzzz-tpzed-user1withloadab
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-user1withloadab
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  name: project with 201 collections
+  description: This will result in two pages in the display
+  group_class: project
+
+project_with_10_pipelines:
+  uuid: zzzzz-j7d0g-000010pipelines
+  owner_uuid: zzzzz-tpzed-user1withloadab
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-user1withloadab
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  name: project with 10 pipelines
+  description: project with 10 pipelines
+  group_class: project
+
+project_with_2_pipelines_and_60_jobs:
+  uuid: zzzzz-j7d0g-nnjobspipelines
+  owner_uuid: zzzzz-tpzed-user1withloadab
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-user1withloadab
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  name: project with 2 pipelines and 60 jobs
+  description: This will result in two pages in the display
+  group_class: project
+
+project_with_25_pipelines:
+  uuid: zzzzz-j7d0g-000025pipelines
+  owner_uuid: zzzzz-tpzed-user1withloadab
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-user1withloadab
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  name: project with 25 pipelines
+  description: project with 25 pipelines
+  group_class: project
+
+fuse_owned_project:
+  uuid: zzzzz-j7d0g-0000ownedbyfuse
+  owner_uuid: zzzzz-tpzed-0fusedrivertest
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-0fusedrivertest
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  name: FUSE Test Project
+  description: Test project belonging to FUSE test user
+  group_class: project
+
+# This wouldn't pass model validation, but it enables a workbench
+# infinite-loop test. See #4389
+project_owns_itself:
+  uuid: zzzzz-j7d0g-7rqh7hdshd5yp5t
+  owner_uuid: zzzzz-j7d0g-7rqh7hdshd5yp5t
+  created_at: 2014-11-05 22:31:24.258424340 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: 6pbr1-tpzed-000000000000000
+  modified_at: 2014-11-05 22:31:24.258242890 Z
+  name: zzzzz-j7d0g-7rqh7hdshd5yp5t
+  description: ~
+  updated_at: 2014-11-05 22:31:24.258093171 Z
+  group_class: project
diff --git a/services/api/test/fixtures/jobs.yml b/services/api/test/fixtures/jobs.yml
new file mode 100644 (file)
index 0000000..888cb2a
--- /dev/null
@@ -0,0 +1,389 @@
+running:
+  uuid: zzzzz-8i9sb-pshmckwoma9plh7
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  cancelled_at: ~
+  cancelled_by_user_uuid: ~
+  cancelled_by_client_uuid: ~
+  created_at: <%= 3.minute.ago.to_s(:db) %>
+  started_at: <%= 3.minute.ago.to_s(:db) %>
+  finished_at: ~
+  script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
+  running: true
+  success: ~
+  output: ~
+  priority: 0
+  log: ~
+  is_locked_by_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  tasks_summary:
+    failed: 0
+    todo: 3
+    running: 1
+    done: 1
+  runtime_constraints: {}
+  state: Running
+
+running_cancelled:
+  uuid: zzzzz-8i9sb-4cf0nhn6xte809j
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  cancelled_at: <%= 1.minute.ago.to_s(:db) %>
+  cancelled_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  cancelled_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
+  created_at: <%= 4.minute.ago.to_s(:db) %>
+  started_at: <%= 3.minute.ago.to_s(:db) %>
+  finished_at: ~
+  script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
+  running: true
+  success: ~
+  output: ~
+  priority: 0
+  log: ~
+  is_locked_by_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  tasks_summary:
+    failed: 0
+    todo: 3
+    running: 1
+    done: 1
+  runtime_constraints: {}
+  state: Cancelled
+
+uses_nonexistent_script_version:
+  uuid: zzzzz-8i9sb-7m339pu0x9mla88
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  cancelled_at: ~
+  cancelled_by_user_uuid: ~
+  cancelled_by_client_uuid: ~
+  script_version: 7def43a4d3f20789dda4700f703b5514cc3ed250
+  created_at: <%= 5.minute.ago.to_s(:db) %>
+  started_at: <%= 3.minute.ago.to_s(:db) %>
+  finished_at: <%= 2.minute.ago.to_s(:db) %>
+  running: false
+  success: true
+  output: d41d8cd98f00b204e9800998ecf8427e+0
+  priority: 0
+  log: d41d8cd98f00b204e9800998ecf8427e+0
+  is_locked_by_uuid: ~
+  tasks_summary:
+    failed: 0
+    todo: 0
+    running: 0
+    done: 1
+  runtime_constraints: {}
+  state: Complete
+
+foobar:
+  uuid: zzzzz-8i9sb-aceg2bnq7jt7kon
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  cancelled_at: ~
+  cancelled_by_user_uuid: ~
+  cancelled_by_client_uuid: ~
+  script_version: 7def43a4d3f20789dda4700f703b5514cc3ed250
+  script_parameters:
+    input: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+  created_at: <%= 4.minute.ago.to_s(:db) %>
+  started_at: <%= 3.minute.ago.to_s(:db) %>
+  finished_at: <%= 2.minute.ago.to_s(:db) %>
+  running: false
+  success: true
+  output: fa7aeb5140e2848d39b416daeef4ffc5+45
+  priority: 0
+  log: ea10d51bcf88862dbcc36eb292017dfd+45
+  is_locked_by_uuid: ~
+  tasks_summary:
+    failed: 0
+    todo: 0
+    running: 0
+    done: 1
+  runtime_constraints: {}
+  state: Complete
+
+barbaz:
+  uuid: zzzzz-8i9sb-cjs4pklxxjykyuq
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  cancelled_at: ~
+  cancelled_by_user_uuid: ~
+  cancelled_by_client_uuid: ~
+  script_version: 7def43a4d3f20789dda4700f703b5514cc3ed250
+  script_parameters:
+    input: fa7aeb5140e2848d39b416daeef4ffc5+45
+    an_integer: 1
+  created_at: <%= 4.minute.ago.to_s(:db) %>
+  started_at: <%= 3.minute.ago.to_s(:db) %>
+  finished_at: <%= 2.minute.ago.to_s(:db) %>
+  running: false
+  success: true
+  repository: foo
+  output: ea10d51bcf88862dbcc36eb292017dfd+45
+  priority: 0
+  log: d41d8cd98f00b204e9800998ecf8427e+0
+  is_locked_by_uuid: ~
+  tasks_summary:
+    failed: 0
+    todo: 0
+    running: 0
+    done: 1
+  runtime_constraints: {}
+  state: Complete
+
+runningbarbaz:
+  uuid: zzzzz-8i9sb-cjs4pklxxjykyuj
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  cancelled_at: ~
+  cancelled_by_user_uuid: ~
+  cancelled_by_client_uuid: ~
+  script_version: 7def43a4d3f20789dda4700f703b5514cc3ed250
+  script_parameters:
+    input: fa7aeb5140e2848d39b416daeef4ffc5+45
+    an_integer: 1
+  created_at: <%= 4.minute.ago.to_s(:db) %>
+  started_at: <%= 3.minute.ago.to_s(:db) %>
+  finished_at: <%= 2.minute.ago.to_s(:db) %>
+  running: true
+  success: ~
+  repository: foo
+  output: ea10d51bcf88862dbcc36eb292017dfd+45
+  priority: 0
+  log: d41d8cd98f00b204e9800998ecf8427e+0
+  is_locked_by_uuid: ~
+  tasks_summary:
+    failed: 0
+    todo: 0
+    running: 1
+    done: 0
+  runtime_constraints: {}
+  state: Running
+
+previous_job_run:
+  uuid: zzzzz-8i9sb-cjs4pklxxjykqqq
+  created_at: <%= 14.minute.ago.to_s(:db) %>
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  repository: foo
+  script: hash
+  script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
+  script_parameters:
+    input: fa7aeb5140e2848d39b416daeef4ffc5+45
+    an_integer: "1"
+  success: true
+  output: ea10d51bcf88862dbcc36eb292017dfd+45
+  state: Complete
+
+previous_docker_job_run:
+  uuid: zzzzz-8i9sb-k6emstgk4kw4yhi
+  created_at: <%= 14.minute.ago.to_s(:db) %>
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  repository: foo
+  script: hash
+  script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
+  script_parameters:
+    input: fa7aeb5140e2848d39b416daeef4ffc5+45
+    an_integer: "1"
+  runtime_constraints:
+    docker_image: arvados/test
+  success: true
+  output: ea10d51bcf88862dbcc36eb292017dfd+45
+  docker_image_locator: fa3c1a9cb6783f85f2ecda037e07b8c3+167
+  state: Complete
+
+previous_job_run_with_arvados_sdk_version:
+  uuid: zzzzz-8i9sb-eoo0321or2dw2jg
+  created_at: <%= 14.minute.ago.to_s(:db) %>
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  repository: foo
+  script: hash
+  script_version: 31ce37fe365b3dc204300a3e4c396ad333ed0556
+  script_parameters:
+    input: fa7aeb5140e2848d39b416daeef4ffc5+45
+    an_integer: "1"
+  runtime_constraints:
+    arvados_sdk_version: commit2
+  arvados_sdk_version: 00634b2b8a492d6f121e3cf1d6587b821136a9a7
+  success: true
+  output: ea10d51bcf88862dbcc36eb292017dfd+45
+  state: Complete
+
+previous_job_run_no_output:
+  uuid: zzzzz-8i9sb-cjs4pklxxjykppp
+  created_at: <%= 14.minute.ago.to_s(:db) %>
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  repository: foo
+  script: hash
+  script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
+  script_parameters:
+    input: fa7aeb5140e2848d39b416daeef4ffc5+45
+    an_integer: "2"
+  success: true
+  output: ~
+  state: Complete
+
+nondeterminisic_job_run:
+  uuid: zzzzz-8i9sb-cjs4pklxxjykyyy
+  created_at: <%= 14.minute.ago.to_s(:db) %>
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  repository: foo
+  script: hash2
+  script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
+  script_parameters:
+    input: fa7aeb5140e2848d39b416daeef4ffc5+45
+    an_integer: "1"
+  success: true
+  nondeterministic: true
+  state: Complete
+
+nearly_finished_job:
+  uuid: zzzzz-8i9sb-2gx6rz0pjl033w3
+  created_at: <%= 14.minute.ago.to_s(:db) %>
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  repository: arvados
+  script: doesnotexist
+  script_version: 309e25a64fe994867db8459543af372f850e25b9
+  script_parameters:
+    input: b519d9cb706a29fc7ea24dbea2f05851+249025
+  started_at: <%= 3.minute.ago.to_s(:db) %>
+  finished_at: ~
+  running: true
+  success: ~
+  tasks_summary:
+    failed: 0
+    todo: 0
+    running: 1
+    done: 0
+  runtime_constraints: {}
+  state: Complete
+
+queued:
+  uuid: zzzzz-8i9sb-grx15v5mjnsyxk7
+  created_at: <%= 1.minute.ago.to_s(:db) %>
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  cancelled_at: ~
+  cancelled_by_user_uuid: ~
+  cancelled_by_client_uuid: ~
+  started_at: ~
+  finished_at: ~
+  script: foo
+  script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
+  script_parameters: {}
+  running: ~
+  success: ~
+  output: ~
+  priority: 0
+  log: ~
+  is_locked_by_uuid: ~
+  tasks_summary: {}
+  runtime_constraints: {}
+  state: Queued
+
+# A job with a log collection that can be parsed by the log viewer.
+job_with_real_log:
+  uuid: zzzzz-8i9sb-0vsrcqi7whchuil
+  created_at: 2014-09-01 12:00:00
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  log: 0b9a7787660e1fce4a93f33e01376ba6+81
+  script_version: 7def43a4d3f20789dda4700f703b5514cc3ed250
+  state: Complete
+
+cancelled:
+  uuid: zzzzz-8i9sb-4cf0abc123e809j
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  cancelled_at: <%= 1.minute.ago.to_s(:db) %>
+  cancelled_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  cancelled_by_client_uuid: zzzzz-ozdt8-obw7foaks3qjyej
+  created_at: <%= 4.minute.ago.to_s(:db) %>
+  started_at: <%= 3.minute.ago.to_s(:db) %>
+  finished_at: ~
+  script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
+  running: false
+  success: ~
+  output: ~
+  priority: 0
+  log: ~
+  is_locked_by_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  tasks_summary:
+    failed: 0
+    todo: 3
+    running: 1
+    done: 1
+  runtime_constraints: {}
+  state: Cancelled
+
+job_in_subproject:
+  uuid: zzzzz-8i9sb-subprojectjob01
+  created_at: 2014-10-15 12:00:00
+  owner_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
+  log: ~
+  repository: foo
+  script: hash
+  script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
+  state: Complete
+
+running_will_be_completed:
+  uuid: zzzzz-8i9sb-rshmckwoma9pjh8
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  cancelled_at: ~
+  cancelled_by_user_uuid: ~
+  cancelled_by_client_uuid: ~
+  created_at: <%= 3.minute.ago.to_s(:db) %>
+  started_at: <%= 3.minute.ago.to_s(:db) %>
+  finished_at: ~
+  script_version: 1de84a854e2b440dc53bf42f8548afa4c17da332
+  running: true
+  success: ~
+  output: ~
+  priority: 0
+  log: ~
+  is_locked_by_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  tasks_summary:
+    failed: 0
+    todo: 3
+    running: 1
+    done: 1
+  runtime_constraints: {}
+  state: Running
+
+graph_stage1:
+  uuid: zzzzz-8i9sb-graphstage10000
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  repository: foo
+  script: hash
+  script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
+  state: Complete
+  output: fa7aeb5140e2848d39b416daeef4ffc5+45
+
+graph_stage2:
+  uuid: zzzzz-8i9sb-graphstage20000
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  repository: foo
+  script: hash2
+  script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
+  state: Complete
+  script_parameters:
+    input: fa7aeb5140e2848d39b416daeef4ffc5+45
+    input2: "stuff"
+  output: 65b17c95fdbc9800fc48acda4e9dcd0b+93
+
+graph_stage3:
+  uuid: zzzzz-8i9sb-graphstage30000
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  repository: foo
+  script: hash2
+  script_version: 4fe459abe02d9b365932b8f5dc419439ab4e2577
+  state: Complete
+  script_parameters:
+    input: fa7aeb5140e2848d39b416daeef4ffc5+45
+    input2: "stuff2"
+  output: ea10d51bcf88862dbcc36eb292017dfd+45
+
+
+# Test Helper trims the rest of the file
+
+# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper
+
+# jobs in project_with_2_pipelines_and_60_jobs
+<% for i in 1..60 do %>
+job_<%=i%>_of_60:
+  uuid: zzzzz-8i9sb-oneof100jobs<%= i.to_s.rjust(3, '0') %>
+  created_at: <%= ((i+5)/5).minute.ago.to_s(:db) %>
+  owner_uuid: zzzzz-j7d0g-nnjobspipelines
+  script_version: 7def43a4d3f20789dda4700f703b5514cc3ed250
+  state: Complete
+<% end %>
+
+# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper
diff --git a/services/api/test/fixtures/keep_disks.yml b/services/api/test/fixtures/keep_disks.yml
new file mode 100644 (file)
index 0000000..462b244
--- /dev/null
@@ -0,0 +1,29 @@
+nonfull:
+  uuid: zzzzz-penuu-5w2o2t1q5wy7fhn
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  node_uuid: zzzzz-7ekkf-53y36l1lu5ijveb
+  keep_service_uuid: zzzzz-bi6l4-6zhilxar6r8ey90
+  last_read_at: <%= 1.minute.ago.to_s(:db) %>
+  last_write_at: <%= 2.minute.ago.to_s(:db) %>
+  last_ping_at: <%= 3.minute.ago.to_s(:db) %>
+  ping_secret: z9xz2tc69dho51g1dmkdy5fnupdhsprahcwxdbjs0zms4eo6i
+
+full:
+  uuid: zzzzz-penuu-4kmq58ui07xuftx
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  node_uuid: zzzzz-7ekkf-53y36l1lu5ijveb
+  keep_service_uuid: zzzzz-bi6l4-6zhilxar6r8ey90
+  last_read_at: <%= 1.minute.ago.to_s(:db) %>
+  last_write_at: <%= 2.day.ago.to_s(:db) %>
+  last_ping_at: <%= 3.minute.ago.to_s(:db) %>
+  ping_secret: xx3ieejcufbjy4lli6yt5ig4e8w5l2hhgmbyzpzuq38gri6lj
+
+nonfull2:
+  uuid: zzzzz-penuu-1ydrih9k2er5j11
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  node_uuid: zzzzz-7ekkf-2z3mc76g2q73aio
+  keep_service_uuid: zzzzz-bi6l4-rsnj3c76ndxb7o0
+  last_read_at: <%= 1.minute.ago.to_s(:db) %>
+  last_write_at: <%= 2.minute.ago.to_s(:db) %>
+  last_ping_at: <%= 3.minute.ago.to_s(:db) %>
+  ping_secret: 4rs260ibhdum1d242xy23qv320rlerc0j7qg9vyqnchbgmjeek
diff --git a/services/api/test/fixtures/keep_services.yml b/services/api/test/fixtures/keep_services.yml
new file mode 100644 (file)
index 0000000..f668cbc
--- /dev/null
@@ -0,0 +1,23 @@
+keep0:
+  uuid: zzzzz-bi6l4-6zhilxar6r8ey90
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  service_host: keep0.zzzzz.arvadosapi.com
+  service_port: 25107
+  service_ssl_flag: false
+  service_type: disk
+
+keep1:
+  uuid: zzzzz-bi6l4-rsnj3c76ndxb7o0
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  service_host: keep1.zzzzz.arvadosapi.com
+  service_port: 25107
+  service_ssl_flag: false
+  service_type: disk
+
+proxy:
+  uuid: zzzzz-bi6l4-h0a0xwut9qa6g3a
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  service_host: keep.zzzzz.arvadosapi.com
+  service_port: 25333
+  service_ssl_flag: true
+  service_type: proxy
diff --git a/services/api/test/fixtures/links.yml b/services/api/test/fixtures/links.yml
new file mode 100644 (file)
index 0000000..4d576a2
--- /dev/null
@@ -0,0 +1,793 @@
+user_agreement_required:
+  uuid: zzzzz-o0j2j-j2qe76q7s3c8aro
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2013-12-26T19:52:21Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2013-12-26T19:52:21Z
+  updated_at: 2013-12-26T19:52:21Z
+  tail_uuid: zzzzz-tpzed-000000000000000
+  link_class: signature
+  name: require
+  head_uuid: zzzzz-4zz18-t68oksiu9m80s4y
+  properties: {}
+
+user_agreement_readable:
+  uuid: zzzzz-o0j2j-qpf60gg4fwjlmex
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-j7d0g-fffffffffffffff
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-4zz18-t68oksiu9m80s4y
+  properties: {}
+
+active_user_member_of_all_users_group:
+  uuid: zzzzz-o0j2j-ctbysaduejxfrs5
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-j7d0g-fffffffffffffff
+  properties: {}
+
+active_user_can_manage_group:
+  uuid: zzzzz-o0j2j-3sa30nd3bqn1msh
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-02-03 15:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-02-03 15:42:26 -0800
+  updated_at: 2014-02-03 15:42:26 -0800
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: permission
+  name: can_manage
+  head_uuid: zzzzz-j7d0g-ptt1ou6a9lxrv07
+  properties: {}
+
+user_agreement_signed_by_active:
+  uuid: zzzzz-o0j2j-4x85a69tqlrud1z
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2013-12-26T20:52:21Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2013-12-26T20:52:21Z
+  updated_at: 2013-12-26T20:52:21Z
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: signature
+  name: click
+  head_uuid: zzzzz-4zz18-t68oksiu9m80s4y
+  properties: {}
+
+user_agreement_signed_by_inactive:
+  uuid: zzzzz-o0j2j-lh7er2o3k6bmetw
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2013-12-26T20:52:21Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-7sg468ezxwnodxs
+  modified_at: 2013-12-26T20:52:21Z
+  updated_at: 2013-12-26T20:52:21Z
+  tail_uuid: zzzzz-tpzed-7sg468ezxwnodxs
+  link_class: signature
+  name: click
+  head_uuid: zzzzz-4zz18-t68oksiu9m80s4y
+  properties: {}
+
+spectator_user_member_of_all_users_group:
+  uuid: zzzzz-o0j2j-0s8ql1redzf8kvn
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-j7d0g-fffffffffffffff
+  properties: {}
+
+inactive_user_member_of_all_users_group:
+  uuid: zzzzz-o0j2j-osckxpy5hl5fjk5
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2013-12-26T20:52:21Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-7sg468ezxwnodxs
+  modified_at: 2013-12-26T20:52:21Z
+  updated_at: 2013-12-26T20:52:21Z
+  tail_uuid: zzzzz-tpzed-x9kqpd79egh49c7
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-j7d0g-fffffffffffffff
+  properties: {}
+
+inactive_signed_ua_user_member_of_all_users_group:
+  uuid: zzzzz-o0j2j-qkhyjcr6tidk652
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2013-12-26T20:52:21Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-7sg468ezxwnodxs
+  modified_at: 2013-12-26T20:52:21Z
+  updated_at: 2013-12-26T20:52:21Z
+  tail_uuid: zzzzz-tpzed-7sg468ezxwnodxs
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-j7d0g-fffffffffffffff
+  properties: {}
+
+foo_file_readable_by_active:
+  uuid: zzzzz-o0j2j-dp1d8395ldqw22r
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-4zz18-znfnqtbbv4spc3w
+  properties: {}
+
+foo_file_readable_by_active_duplicate_permission:
+  uuid: zzzzz-o0j2j-2qlmhgothiur55r
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-000000000000000
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-4zz18-znfnqtbbv4spc3w
+  properties: {}
+
+foo_file_readable_by_active_redundant_permission_via_private_group:
+  uuid: zzzzz-o0j2j-5s8ry7sn6bwxb7w
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-000000000000000
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-j7d0g-22xp1wpjul508rk
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-4zz18-znfnqtbbv4spc3w
+  properties: {}
+
+foo_file_readable_by_aproject:
+  uuid: zzzzz-o0j2j-fp1d8395ldqw22p
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-4zz18-znfnqtbbv4spc3w
+  properties: {}
+
+bar_file_readable_by_active:
+  uuid: zzzzz-o0j2j-8hppiuduf8eqdng
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-4zz18-ehbhgtheo8909or
+  properties: {}
+
+bar_file_readable_by_spectator:
+  uuid: zzzzz-o0j2j-0mhldkqozsltcli
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-4zz18-ehbhgtheo8909or
+  properties: {}
+
+baz_file_publicly_readable:
+  uuid: zzzzz-o0j2j-132ne3lk954vtoc
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-j7d0g-fffffffffffffff
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-4zz18-y9vne9npefyxh8g
+  properties: {}
+
+barbaz_job_readable_by_spectator:
+  uuid: zzzzz-o0j2j-cpy7p41hpk531e1
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-8i9sb-cjs4pklxxjykyuq
+  properties: {}
+
+runningbarbaz_job_readable_by_spectator:
+  uuid: zzzzz-o0j2j-cpy7p41hpk531e2
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-8i9sb-cjs4pklxxjykyuj
+  properties: {}
+
+arvados_repository_readable_by_all_users:
+  uuid: zzzzz-o0j2j-allcanreadarvrp
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-j7d0g-fffffffffffffff
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-s0uqq-arvadosrepo0123
+  properties: {}
+
+foo_repository_readable_by_spectator:
+  uuid: zzzzz-o0j2j-cpy7p41hpk5xxx
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-s0uqq-382brsig8rp3666
+  properties: {}
+
+foo_repository_manageable_by_active:
+  uuid: zzzzz-o0j2j-8tdfjd8g0s4rn1k
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: permission
+  name: can_manage
+  head_uuid: zzzzz-s0uqq-382brsig8rp3666
+  properties: {}
+
+repository3_readable_by_active:
+  uuid: zzzzz-o0j2j-43iem9bdtefa76g
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-09-23 13:52:46 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-09-23 13:52:46 -0400
+  updated_at: 2014-09-23 13:52:46 -0400
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-s0uqq-38orljkqpyo1j61
+  properties: {}
+
+miniadmin_user_is_a_testusergroup_admin:
+  uuid: zzzzz-o0j2j-38vvkciz7qc12j9
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-04-01 13:53:33 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-04-01 13:53:33 -0400
+  updated_at: 2014-04-01 13:53:33 -0400
+  tail_uuid: zzzzz-tpzed-2bg9x0oeydcw5hm
+  link_class: permission
+  name: can_manage
+  head_uuid: zzzzz-j7d0g-48foin4vonvc2at
+  properties: {}
+
+rominiadmin_user_is_a_testusergroup_admin:
+  uuid: zzzzz-o0j2j-6b0hz5hr107mc90
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-04-01 13:53:33 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-04-01 13:53:33 -0400
+  updated_at: 2014-04-01 13:53:33 -0400
+  tail_uuid: zzzzz-tpzed-4hvxm4n25emegis
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-j7d0g-48foin4vonvc2at
+  properties: {}
+
+testusergroup_can_manage_active_user:
+  uuid: zzzzz-o0j2j-2vaqhxz6hsf4k1d
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-04-01 13:56:10 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-04-01 13:56:10 -0400
+  updated_at: 2014-04-01 13:56:10 -0400
+  tail_uuid: zzzzz-j7d0g-48foin4vonvc2at
+  link_class: permission
+  name: can_manage
+  head_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  properties: {}
+
+test_timestamps:
+  uuid: zzzzz-o0j2j-4abnk2w5t86x4uc
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-04-15 13:17:14 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-04-15 13:17:14 -0400
+  updated_at: 2014-04-15 13:17:14 -0400
+  link_class: test
+  name: test
+  properties: {}
+
+admin_can_write_aproject:
+  # Yes, this permission is effectively redundant.
+  # We use it to test that other project admins can see
+  # all the project's sharing.
+  uuid: zzzzz-o0j2j-adminmgsproject
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  link_class: permission
+  name: can_write
+  head_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  properties: {}
+
+project_viewer_can_read_project:
+  uuid: zzzzz-o0j2j-projviewerreadp
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-projectviewer1a
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  properties: {}
+
+subproject_admin_can_manage_subproject:
+  uuid: zzzzz-o0j2j-subprojadminlnk
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-10-15 10:00:00 -0000
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-10-15 10:00:00 -0000
+  updated_at: 2014-10-15 10:00:00 -0000
+  tail_uuid: zzzzz-tpzed-subprojectadmin
+  link_class: permission
+  name: can_manage
+  head_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
+  properties: {}
+
+foo_collection_tag:
+  uuid: zzzzz-o0j2j-eedahfaho8aphiv
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  tail_uuid: ~
+  head_uuid: zzzzz-4zz18-fy296fx3hot09f7
+  link_class: tag
+  name: foo_tag
+  properties: {}
+
+active_user_can_manage_bad_group_cx2al9cqkmsf1hs:
+  uuid: zzzzz-o0j2j-ezv55ahzc9lvjwe
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-05-03 18:50:08 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-05-03 18:50:08 -0400
+  updated_at: 2014-05-03 18:50:08 -0400
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: permission
+  name: can_manage
+  head_uuid: zzzzz-j7d0g-cx2al9cqkmsf1hs
+  properties: {}
+
+multilevel_collection_1_readable_by_active:
+  uuid: zzzzz-o0j2j-dp1d8395ldqw22j
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-4zz18-pyw8yp9g3pr7irn
+  properties: {}
+
+has_symbol_keys_in_database_somehow:
+  uuid: zzzzz-o0j2j-enl1wg58310loc6
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-05-28 16:24:02.314722162 Z
+  modified_by_client_uuid:
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-05-28 16:24:02.314484982 Z
+  tail_uuid: ~
+  link_class: test
+  name: ~
+  head_uuid: ~
+  properties:
+    :foo: "bar"
+    baz:
+      - waz
+      - :waz
+      - :waz
+      - 1
+      - ~
+      - false
+      - true
+  updated_at: 2014-05-28 16:24:02.314296411 Z
+
+bug2931_link_with_null_head_uuid:
+  uuid: zzzzz-o0j2j-uru66qok2wruasb
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-05-30 14:30:00.184389725 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-05-30 14:30:00.184019565 Z
+  updated_at: 2014-05-30 14:30:00.183829316 Z
+  link_class: permission
+  name: bug2931
+  tail_uuid: ~
+  head_uuid: ~
+  properties: {}
+
+anonymous_group_can_read_anonymously_accessible_project:
+  uuid: zzzzz-o0j2j-15gpzezqjg4bc4z
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-05-30 14:30:00.184389725 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-05-30 14:30:00.184019565 Z
+  updated_at: 2014-05-30 14:30:00.183829316 Z
+  link_class: permission
+  name: can_read
+  tail_uuid: zzzzz-j7d0g-anonymouspublic
+  head_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  properties: {}
+
+user_agreement_readable_by_anonymously_accessible_project:
+  uuid: zzzzz-o0j2j-o5ds5gvhkztdc8h
+  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0
+  created_at: 2014-06-13 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-06-13 20:42:26 -0800
+  updated_at: 2014-06-13 20:42:26 -0800
+  link_class: permission
+  name: can_read
+
+active_user_permission_to_docker_image_collection:
+  uuid: zzzzz-o0j2j-dp1d8395ldqw33s
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-4zz18-1v45jub259sjjgb
+  properties: {}
+
+active_user_permission_to_unlinked_docker_image_collection:
+  uuid: zzzzz-o0j2j-g5i0sa8cr3b1psf
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-4zz18-d0d8z5wofvfgwad
+  properties: {}
+
+docker_image_collection_hash:
+  uuid: zzzzz-o0j2j-dockercollhasha
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-06-11 14:30:00.184389725 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-06-11 14:30:00.184019565 Z
+  updated_at: 2014-06-11 14:30:00.183829316 Z
+  link_class: docker_image_hash
+  name: d8309758b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678
+  tail_uuid: ~
+  head_uuid: zzzzz-4zz18-1v45jub259sjjgb
+  properties:
+    image_timestamp: "2014-06-10T14:30:00.184019565Z"
+
+docker_image_collection_tag:
+  uuid: zzzzz-o0j2j-dockercolltagbb
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-06-11 14:30:00.184389725 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-06-11 14:30:00.184019565 Z
+  updated_at: 2014-06-11 14:30:00.183829316 Z
+  link_class: docker_image_repo+tag
+  name: arvados/apitestfixture:latest
+  tail_uuid: ~
+  head_uuid: zzzzz-4zz18-1v45jub259sjjgb
+  properties:
+    image_timestamp: "2014-06-10T14:30:00.184019565Z"
+
+docker_image_collection_tag2:
+  uuid: zzzzz-o0j2j-dockercolltagbc
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-06-11 14:30:00.184389725 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-06-11 14:30:00.184019565 Z
+  updated_at: 2014-06-11 14:30:00.183829316 Z
+  link_class: docker_image_repo+tag
+  name: arvados/apitestfixture:june10
+  tail_uuid: ~
+  head_uuid: zzzzz-4zz18-1v45jub259sjjgb
+  properties:
+    image_timestamp: "2014-06-10T14:30:00.184019565Z"
+
+ancient_docker_image_collection_hash:
+  # This image helps test that searches for Docker images find
+  # the latest available image: the hash is the same as
+  # docker_image_collection_hash, but it points to a different
+  # Collection and has an older image timestamp.
+  uuid: zzzzz-o0j2j-dockercollhashz
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-06-12 14:30:00.184389725 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-06-12 14:30:00.184019565 Z
+  updated_at: 2014-06-12 14:30:00.183829316 Z
+  link_class: docker_image_hash
+  name: d8309758b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678
+  tail_uuid: ~
+  head_uuid: zzzzz-4zz18-t68oksiu9m80s4y
+  properties:
+    image_timestamp: "2010-06-10T14:30:00.184019565Z"
+
+job_reader_can_read_previous_job_run:
+  # Permission link giving job_reader permission
+  # to read previous_job_run
+  uuid: zzzzz-o0j2j-8bbd851795ebafd
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-06-13 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-tpzed-000000000000000
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-06-13 20:42:26 -0800
+  updated_at: 2014-06-13 20:42:26 -0800
+  link_class: permission
+  name: can_read
+  tail_uuid: zzzzz-tpzed-905b42d1dd4a354
+  head_uuid: zzzzz-8i9sb-cjs4pklxxjykqqq
+
+job_reader_can_read_foo_repo:
+  # Permission link giving job_reader permission
+  # to read foo_repo
+  uuid: zzzzz-o0j2j-072ec05dc9487f8
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-06-13 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-tpzed-000000000000000
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-06-13 20:42:26 -0800
+  updated_at: 2014-06-13 20:42:26 -0800
+  link_class: permission
+  name: can_read
+  tail_uuid: zzzzz-tpzed-905b42d1dd4a354
+  head_uuid: zzzzz-s0uqq-382brsig8rp3666
+
+baz_collection_name_in_asubproject:
+  uuid: zzzzz-o0j2j-bazprojectname2
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-04-21 15:37:48 -0400
+  updated_at: 2014-04-21 15:37:48 -0400
+  tail_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
+  head_uuid: ea10d51bcf88862dbcc36eb292017dfd+45
+  link_class: name
+  # This should resemble the default name assigned when a
+  # Collection is added to a Project.
+  name: "ea10d51bcf88862dbcc36eb292017dfd+45 added sometime"
+  properties: {}
+
+empty_collection_name_in_active_user_home_project:
+  uuid: zzzzz-o0j2j-i3n6m552x6tmoi4
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-08-06 22:11:51.242392533 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_at: 2014-08-06 22:11:51.242150425 Z
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: name
+  name: Empty collection
+  head_uuid: d41d8cd98f00b204e9800998ecf8427e+0
+  properties: {}
+  updated_at: 2014-08-06 22:11:51.242010312 Z
+
+active_user_can_read_activeandfriends:
+  uuid: zzzzz-o0j2j-8184f5vk8c851ts
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-08-22 14:03:46.321059945 Z
+  modified_by_client_uuid:
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-08-22 14:03:46.320865926 Z
+  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-j7d0g-swqu6hmi4pa7bk7
+  properties: {}
+  updated_at: 2014-08-22 14:03:46.320743213 Z
+
+active_user_joined_activeandfriends:
+  uuid: zzzzz-o0j2j-t63rdd7vupqvnco
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-08-22 14:03:28.835064240 Z
+  modified_by_client_uuid:
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-08-22 14:03:28.834849409 Z
+  tail_uuid: zzzzz-j7d0g-swqu6hmi4pa7bk7
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  properties: {}
+  updated_at: 2014-08-22 14:03:28.834720558 Z
+
+future_project_can_read_activeandfriends:
+  uuid: zzzzz-o0j2j-bkdtnddpmwxqiza
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-08-22 14:04:18.811622057 Z
+  modified_by_client_uuid:
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-08-22 14:04:18.811463859 Z
+  tail_uuid: zzzzz-tpzed-futureprojview2
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-j7d0g-swqu6hmi4pa7bk7
+  properties: {}
+  updated_at: 2014-08-22 14:04:18.811387314 Z
+
+future_project_user_joined_activeandfriends:
+  uuid: zzzzz-o0j2j-ksl8bo92eokv332
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-08-22 14:04:24.182103355 Z
+  modified_by_client_uuid:
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-08-22 14:04:24.181939129 Z
+  tail_uuid: zzzzz-j7d0g-swqu6hmi4pa7bk7
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-tpzed-futureprojview2
+  properties: {}
+  updated_at: 2014-08-22 14:04:24.181799856 Z
+
+auto_setup_vm_login_username_can_login_to_test_vm:
+  uuid: zzzzz-o0j2j-i3n6m98766tmoi4
+  owner_uuid: zzzzz-tpzed-xabcdjxw79nv3jz
+  created_at: 2014-08-06 22:11:51.242392533 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xabcdjxw79nv3jz
+  modified_at: 2014-08-06 22:11:51.242150425 Z
+  tail_uuid: zzzzz-tpzed-xabcdjxw79nv3jz
+  link_class: permission
+  name: can_login
+  head_uuid: zzzzz-2x53u-382brsig8rp3064
+  properties: {username: 'auto_setup_vm_login'}
+  updated_at: 2014-08-06 22:11:51.242010312 Z
+
+user_foo_can_read_sharing_group:
+  uuid: zzzzz-o0j2j-gdpvwvpj9kjs5in
+  owner_uuid: zzzzz-tpzed-000000000000000
+  tail_uuid: zzzzz-tpzed-81hsbo6mk8nl05c
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-j7d0g-t4ucgncwteul7zt
+
+user_foo_is_in_sharing_group:
+  uuid: zzzzz-o0j2j-bwmcf9nqwomvtny
+  owner_uuid: zzzzz-tpzed-000000000000000
+  tail_uuid: zzzzz-j7d0g-t4ucgncwteul7zt
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-tpzed-81hsbo6mk8nl05c
+
+user_bar_can_read_sharing_group:
+  uuid: zzzzz-o0j2j-23djaoza9g2zvjx
+  owner_uuid: zzzzz-tpzed-000000000000000
+  tail_uuid: zzzzz-tpzed-n3oaj4sm5fcnwib
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-j7d0g-t4ucgncwteul7zt
+
+user_bar_is_in_sharing_group:
+  uuid: zzzzz-o0j2j-ga7fgy3xsz4hu28
+  owner_uuid: zzzzz-tpzed-000000000000000
+  tail_uuid: zzzzz-j7d0g-t4ucgncwteul7zt
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-tpzed-n3oaj4sm5fcnwib
+
+user1-with-load_member_of_all_users_group:
+  uuid: zzzzz-o0j2j-user1-with-load
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-01-24 20:42:26 -0800
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  modified_at: 2014-01-24 20:42:26 -0800
+  updated_at: 2014-01-24 20:42:26 -0800
+  tail_uuid: zzzzz-tpzed-user1withloadab
+  link_class: permission
+  name: can_read
+  head_uuid: zzzzz-j7d0g-fffffffffffffff
+  properties: {}
+
+empty_collection_name_in_fuse_user_home_project:
+  uuid: zzzzz-o0j2j-hw3mcg3c8pwo6ar
+  owner_uuid: zzzzz-tpzed-0fusedrivertest
+  created_at: 2014-08-06 22:11:51.242392533 Z
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-0fusedrivertest
+  modified_at: 2014-08-06 22:11:51.242150425 Z
+  tail_uuid: zzzzz-tpzed-0fusedrivertest
+  link_class: name
+  name: Empty collection
+  head_uuid: d41d8cd98f00b204e9800998ecf8427e+0
+  properties: {}
+  updated_at: 2014-08-06 22:11:51.242010312 Z
+
diff --git a/services/api/test/fixtures/logs.yml b/services/api/test/fixtures/logs.yml
new file mode 100644 (file)
index 0000000..058c387
--- /dev/null
@@ -0,0 +1,46 @@
+log1:
+  id: 1
+  uuid: zzzzz-xxxxx-pshmckwoma9plh7
+  object_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
+  event_at: <%= 1.minute.ago.to_s(:db) %>
+
+log2: # admin changes repository2, which is owned by active user
+  id: 2
+  uuid: zzzzz-xxxxx-pshmckwoma00002
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f # admin user
+  object_uuid: zzzzz-2x53u-382brsig8rp3667 # repository foo
+  object_owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
+  event_at: <%= 2.minute.ago.to_s(:db) %>
+
+log3: # admin changes specimen owned_by_spectator
+  id: 3
+  uuid: zzzzz-xxxxx-pshmckwoma00003
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f # admin user
+  object_uuid: zzzzz-2x53u-3b0xxwzlbzxq5yr # specimen owned_by_spectator
+  object_owner_uuid: zzzzz-tpzed-l1s2piq4t4mps8r # spectator user
+  event_at: <%= 3.minute.ago.to_s(:db) %>
+
+log4: # foo collection added, readable by active through link
+  id: 4
+  uuid: zzzzz-xxxxx-pshmckwoma00004
+  owner_uuid: zzzzz-tpzed-000000000000000 # system user
+  object_uuid: zzzzz-4zz18-znfnqtbbv4spc3w # foo file
+  object_owner_uuid: zzzzz-tpzed-000000000000000 # system user
+  event_at: <%= 4.minute.ago.to_s(:db) %>
+
+log5: # baz collection added, readable by active and spectator through group 'all users' group membership
+  id: 5
+  uuid: zzzzz-xxxxx-pshmckwoma00005
+  owner_uuid: zzzzz-tpzed-000000000000000 # system user
+  object_uuid: zzzzz-4zz18-y9vne9npefyxh8g # baz file
+  object_owner_uuid: zzzzz-tpzed-000000000000000 # system user
+  event_at: <%= 5.minute.ago.to_s(:db) %>
+
+log_owned_by_active:
+  id: 6
+  uuid: zzzzz-xxxxx-pshmckwoma12345
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
+  object_uuid: zzzzz-2x53u-382brsig8rp3667 # repository foo
+  object_owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
+  event_at: <%= 2.minute.ago.to_s(:db) %>
+  summary: non-admin user can read own logs
diff --git a/services/api/test/fixtures/nodes.yml b/services/api/test/fixtures/nodes.yml
new file mode 100644 (file)
index 0000000..1511501
--- /dev/null
@@ -0,0 +1,56 @@
+busy:
+  uuid: zzzzz-7ekkf-53y36l1lu5ijveb
+  owner_uuid: zzzzz-tpzed-000000000000000
+  hostname: compute0
+  slot_number: 0
+  domain: ""
+  ip_address: 172.17.2.172
+  last_ping_at: <%= 1.minute.ago.to_s(:db) %>
+  first_ping_at: <%= 23.hour.ago.to_s(:db) %>
+  job_uuid: zzzzz-8i9sb-2gx6rz0pjl033w3  # nearly_finished_job
+  info:
+    ping_secret: "48dpm3b8ijyj3jkr2yczxw0844dqd2752bhll7klodvgz9bg80"
+    slurm_state: "alloc"
+
+down:
+  uuid: zzzzz-7ekkf-2vbompg3ecc6e2s
+  owner_uuid: zzzzz-tpzed-000000000000000
+  hostname: compute1
+  slot_number: 1
+  domain: ""
+  ip_address: 172.17.2.173
+  last_ping_at: <%= 1.hour.ago.to_s(:db) %>
+  first_ping_at: <%= 23.hour.ago.to_s(:db) %>
+  job_uuid: ~
+  info:
+    ping_secret: "2k3i71depad36ugwmlgzilbi4e8n0illb2r8l4efg9mzkb3a1k"
+
+idle:
+  uuid: zzzzz-7ekkf-2z3mc76g2q73aio
+  owner_uuid: zzzzz-tpzed-000000000000000
+  hostname: compute2
+  slot_number: 2
+  domain: ""
+  ip_address: 172.17.2.174
+  last_ping_at: <%= 2.minute.ago.to_s(:db) %>
+  first_ping_at: <%= 23.hour.ago.to_s(:db) %>
+  job_uuid: ~
+  info:
+    ping_secret: "69udawxvn3zzj45hs8bumvndricrha4lcpi23pd69e44soanc0"
+    slurm_state: "idle"
+    total_cpu_cores: 16
+
+was_idle_now_down:
+  uuid: zzzzz-7ekkf-xuzpkdasl0uzwyz
+  owner_uuid: zzzzz-tpzed-000000000000000
+  hostname: compute3
+  slot_number: ~
+  domain: ""
+  ip_address: 172.17.2.173
+  last_ping_at: <%= 1.hour.ago.to_s(:db) %>
+  first_ping_at: <%= 23.hour.ago.to_s(:db) %>
+  job_uuid: ~
+  info:
+    ping_secret: "1bd1yi0x4lb5q4gzqqtrnq30oyj08r8dtdimmanbqw49z1anz2"
+    slurm_state: "idle"
+    total_cpu_cores: 16
diff --git a/services/api/test/fixtures/pipeline_instances.yml b/services/api/test/fixtures/pipeline_instances.yml
new file mode 100644 (file)
index 0000000..d42120a
--- /dev/null
@@ -0,0 +1,253 @@
+new_pipeline:
+  state: New
+  uuid: zzzzz-d1hrv-f4gneyn6br1xize
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: <%= 1.minute.ago.to_s(:db) %>
+
+new_pipeline_in_subproject:
+  state: New
+  uuid: zzzzz-d1hrv-subprojpipeline
+  owner_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
+  created_at: <%= 1.minute.ago.to_s(:db) %>
+
+has_component_with_no_script_parameters:
+  state: Ready
+  uuid: zzzzz-d1hrv-1xfj6xkicf2muk2
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: <%= 10.minute.ago.to_s(:db) %>
+  components:
+   foo:
+    script: foo
+    script_version: master
+    script_parameters: {}
+
+has_component_with_empty_script_parameters:
+  state: Ready
+  uuid: zzzzz-d1hrv-jq16l10gcsnyumo
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: <%= 3.minute.ago.to_s(:db) %>
+  components:
+   foo:
+    script: foo
+    script_version: master
+
+has_component_with_completed_jobs:
+  # Test that the job "started_at" and "finished_at" fields are
+  # parsed into Time fields when rendering. This job must *not*
+  # have its own fixture; the point is to force the
+  # pipeline_instances_controller_test in Workbench to parse
+  # the "components" field.
+  state: Complete
+  uuid: zzzzz-d1hrv-i3e77t9z5y8j9cc
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  components:
+   foo:
+    script: foo
+    script_version: master
+    script_parameters: {}
+    job:
+      uuid: zzzzz-8i9sb-rft1xdewxkwgxnz
+      script_version: master
+      started_at: <%= 10.minute.ago.to_s(:db) %>
+      finished_at: <%= 9.minute.ago.to_s(:db) %>
+
+has_job:
+  name: pipeline_with_job
+  state: Ready
+  uuid: zzzzz-d1hrv-1yfj6xkidf2muk3
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: <%= 3.1.minute.ago.to_s(:db) %>
+  components:
+   foo:
+    script: foo
+    script_version: master
+    script_parameters: {}
+    job: {
+            uuid: zzzzz-8i9sb-pshmckwoma9plh7,
+            script_version: master
+         }
+
+components_is_jobspec:
+  # Helps test that clients cope with funny-shaped components.
+  # For an example, see #3321.
+  uuid: zzzzz-d1hrv-jobspeccomponts
+  created_at: <%= 30.minute.ago.to_s(:db) %>
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-04-14 12:35:04 -0400
+  updated_at: 2014-04-14 12:35:04 -0400
+  modified_at: 2014-04-14 12:35:04 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  state: RunningOnServer
+  components:
+    script: foo
+    script_version: master
+    script_parameters:
+      input:
+        required: true
+        dataclass: Collection
+        title: "Foo/bar pair"
+        description: "Provide a collection containing at least two files."
+
+pipeline_with_tagged_collection_input:
+  name: pipeline_with_tagged_collection_input
+  state: Ready
+  uuid: zzzzz-d1hrv-1yfj61234abcdk3
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: <%= 3.1.minute.ago.to_s(:db) %>
+  components:
+    part-one:
+      script_parameters:
+        input:
+          value: zzzzz-4zz18-znfnqtbbv4spc3w
+
+pipeline_to_merge_params:
+  name: pipeline_to_merge_params
+  state: Ready
+  uuid: zzzzz-d1hrv-1yfj6dcba4321k3
+  pipeline_template_uuid: zzzzz-p5p6p-aox0k0ofxrystgw
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: <%= 3.1.minute.ago.to_s(:db) %>
+  components:
+    part-one:
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: "Foo/bar pair"
+          description: "Provide a collection containing at least two files."
+    part-two:
+      script_parameters:
+        input:
+          output_of: part-one
+        integer_with_default:
+          default: 123
+        integer_with_value:
+          value: 123
+        string_with_default:
+          default: baz
+        string_with_value:
+          value: baz
+        plain_string: qux
+        array_with_default:
+          default: [1,1,2,3,5]
+        array_with_value:
+          value: [1,1,2,3,5]
+
+pipeline_with_newer_template:
+  state: Complete
+  uuid: zzzzz-d1hrv-9fm8l10i9z2kqc6
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  pipeline_template_uuid: zzzzz-p5p6p-vq4wuvy84xvaq2r
+  created_at: 2014-09-15 12:00:00
+  components:
+    foo:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: foo instance input
+
+pipeline_instance_owned_by_fuse:
+  state: Complete
+  uuid: zzzzz-d1hrv-ri9dvgkgqs9y09j
+  owner_uuid: zzzzz-tpzed-0fusedrivertest
+  pipeline_template_uuid: zzzzz-p5p6p-vq4wuvy84xvaq2r
+  created_at: 2014-09-15 12:00:00
+  name: "pipeline instance owned by FUSE"
+  components:
+    foo:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: foo instance input
+
+pipeline_instance_in_fuse_project:
+  state: Complete
+  uuid: zzzzz-d1hrv-scarxiyajtshq3l
+  owner_uuid: zzzzz-j7d0g-0000ownedbyfuse
+  pipeline_template_uuid: zzzzz-p5p6p-vq4wuvy84xvaq2r
+  created_at: 2014-09-15 12:00:00
+  name: "pipeline instance in FUSE project"
+  components:
+    foo:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: foo instance input
+
+
+# Test Helper trims the rest of the file
+
+# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper
+
+# pipelines in project_with_10_pipelines
+<% for i in 1..10 do %>
+pipeline_<%=i%>_of_10:
+  name: pipeline_<%= i %>
+  state: Failed
+  uuid: zzzzz-d1hrv-10pipelines0<%= i.to_s.rjust(3, '0') %>
+  owner_uuid: zzzzz-j7d0g-000010pipelines
+  created_at: <%= (2*(i-1)).hour.ago.to_s(:db) %>
+  started_at: <%= (2*(i-1)).hour.ago.to_s(:db) %>
+  finished_at: <%= (i-1).minute.ago.to_s(:db) %>
+  components:
+    foo:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: foo instance input
+<% end %>
+
+# pipelines in project_with_2_pipelines_and_100_jobs
+<% for i in 1..2 do %>
+pipeline_<%=i%>_of_2_pipelines_and_100_jobs:
+  name: pipeline_<%= i %>
+  state: New
+  uuid: zzzzz-d1hrv-abcgneyn6brx<%= i.to_s.rjust(3, '0') %>
+  owner_uuid: zzzzz-j7d0g-nnjobspipelines
+  created_at: <%= i.minute.ago.to_s(:db) %>
+  components:
+    foo:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: foo instance input
+<% end %>
+
+# pipelines in project_with_25_pipelines
+<% for i in 1..25 do %>
+pipeline_<%=i%>_of_25:
+  name: pipeline_<%=i%>
+  state: Failed
+  uuid: zzzzz-d1hrv-25pipelines0<%= i.to_s.rjust(3, '0') %>
+  owner_uuid: zzzzz-j7d0g-000025pipelines
+  created_at: <%= i.hour.ago.to_s(:db) %>
+  started_at: <%= i.hour.ago.to_s(:db) %>
+  finished_at: <%= i.minute.ago.to_s(:db) %>
+  components:
+    foo:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: foo instance input
+<% end %>
+
+# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper
diff --git a/services/api/test/fixtures/pipeline_templates.yml b/services/api/test/fixtures/pipeline_templates.yml
new file mode 100644 (file)
index 0000000..260eab8
--- /dev/null
@@ -0,0 +1,166 @@
+two_part:
+  uuid: zzzzz-p5p6p-aox0k0ofxrystgw
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  created_at: 2014-04-14 12:35:04 -0400
+  updated_at: 2014-04-14 12:35:04 -0400
+  modified_at: 2014-04-14 12:35:04 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: Two Part Pipeline Template
+  components:
+    part-one:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: "Foo/bar pair"
+    part-two:
+      script: bar
+      script_version: master
+      script_parameters:
+        input:
+          output_of: part-one
+        integer_with_default:
+          default: 123
+        integer_with_value:
+          value: 123
+        string_with_default:
+          default: baz
+        string_with_value:
+          value: baz
+        plain_string: qux
+        array_with_default: # important to test repeating values in the array!
+          default: [1,1,2,3,5]
+        array_with_value: # important to test repeating values in the array!
+          value: [1,1,2,3,5]
+
+components_is_jobspec:
+  # Helps test that clients cope with funny-shaped components.
+  # For an example, see #3321.
+  uuid: zzzzz-p5p6p-jobspeccomponts
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-04-14 12:35:04 -0400
+  updated_at: 2014-04-14 12:35:04 -0400
+  modified_at: 2014-04-14 12:35:04 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: Pipeline Template with Jobspec Components
+  components:
+    script: foo
+    script_version: master
+    script_parameters:
+      input:
+        required: true
+        dataclass: Collection
+        title: "Foo/bar pair"
+        description: "Provide a collection containing at least two files."
+
+parameter_with_search:
+  uuid: zzzzz-p5p6p-paramwsearch345
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-04-14 12:35:04 -0400
+  updated_at: 2014-04-14 12:35:04 -0400
+  modified_at: 2014-04-14 12:35:04 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: Pipeline Template with Input Parameter with Search
+  components:
+    with-search:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: "Foo/bar pair"
+          description: "Provide a collection containing at least two files."
+          search_for: sometime  # Matches baz_collection_in_asubproject
+
+new_pipeline_template:
+  # This template must include components that are not
+  # present in the pipeline instance 'pipeline_with_newer_template',
+  # at least one of which has a script_parameter that is a hash
+  # with a 'dataclass' field (ticket #4000)
+  uuid: zzzzz-p5p6p-vq4wuvy84xvaq2r
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-09-14 12:00:00
+  modified_at: 2014-09-16 12:00:00
+  name: Pipeline Template Newer Than Instance
+  components:
+    foo:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: foo template input
+    bar:
+      script: bar
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: bar template input
+
+pipeline_template_in_fuse_project:
+  uuid: zzzzz-p5p6p-templinfuseproj
+  owner_uuid: zzzzz-j7d0g-0000ownedbyfuse
+  created_at: 2014-04-14 12:35:04 -0400
+  updated_at: 2014-04-14 12:35:04 -0400
+  modified_at: 2014-04-14 12:35:04 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-0fusedrivertest
+  name: pipeline template in FUSE project
+  components:
+    foo_component:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: Collection
+          title: "default input"
+          description: "input collection"
+
+template_with_dataclass_file:
+  uuid: zzzzz-p5p6p-k0xoa0ofxrystgw
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  created_at: 2014-04-14 12:35:04 -0400
+  updated_at: 2014-04-14 12:35:04 -0400
+  modified_at: 2014-04-14 12:35:04 -0400
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  name: Two Part Template with dataclass File
+  components:
+    part-one:
+      script: foo
+      script_version: master
+      script_parameters:
+        input:
+          required: true
+          dataclass: File
+          title: "Foo/bar pair"
+          description: "Provide an input file"
+    part-two:
+      script: bar
+      script_version: master
+      script_parameters:
+        input:
+          output_of: part-one
+        integer_with_default:
+          default: 123
+        integer_with_value:
+          value: 123
+        string_with_default:
+          default: baz
+        string_with_value:
+          value: baz
+        plain_string: qux
+        array_with_default: # important to test repeating values in the array!
+          default: [1,1,2,3,5]
+        array_with_value: # important to test repeating values in the array!
+          value: [1,1,2,3,5]
diff --git a/services/api/test/fixtures/repositories.yml b/services/api/test/fixtures/repositories.yml
new file mode 100644 (file)
index 0000000..5775f8a
--- /dev/null
@@ -0,0 +1,29 @@
+crunch_dispatch_test:
+  uuid: zzzzz-s0uqq-382brsig8rp3665
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
+  name: crunch_dispatch_test
+
+arvados:
+  uuid: zzzzz-s0uqq-arvadosrepo0123
+  owner_uuid: zzzzz-tpzed-000000000000000 # root
+  name: arvados
+
+foo:
+  uuid: zzzzz-s0uqq-382brsig8rp3666
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
+  name: foo
+
+repository2:
+  uuid: zzzzz-s0uqq-382brsig8rp3667
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
+  name: foo2
+
+repository3:
+  uuid: zzzzz-s0uqq-38orljkqpyo1j61
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f # admin user
+  name: foo3
+
+auto_setup_repository:
+  uuid: zzzzz-s0uqq-382brabc8rp3667
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user
+  name: auto_setup_repo
diff --git a/services/api/test/fixtures/specimens.yml b/services/api/test/fixtures/specimens.yml
new file mode 100644 (file)
index 0000000..074b88b
--- /dev/null
@@ -0,0 +1,29 @@
+owned_by_active_user:
+  uuid: zzzzz-j58dm-3zx463qyo0k4xrn
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_at: 2014-04-21 15:37:48 -0400
+
+owned_by_private_group:
+  uuid: zzzzz-j58dm-5m3qwg45g3nlpu6
+  owner_uuid: zzzzz-j7d0g-rew6elm53kancon
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_at: 2014-04-21 15:37:48 -0400
+
+owned_by_spectator:
+  uuid: zzzzz-j58dm-3b0xxwzlbzxq5yr
+  owner_uuid: zzzzz-tpzed-l1s2piq4t4mps8r
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_at: 2014-04-21 15:37:48 -0400
+
+in_aproject:
+  uuid: zzzzz-j58dm-7r18rnd5nzhg5yk
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_at: 2014-04-21 15:37:48 -0400
+
+in_asubproject:
+  uuid: zzzzz-j58dm-c40lddwcqqr1ffs
+  owner_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x
+  created_at: 2014-04-21 15:37:48 -0400
+  modified_at: 2014-04-21 15:37:48 -0400
diff --git a/services/api/test/fixtures/traits.yml b/services/api/test/fixtures/traits.yml
new file mode 100644 (file)
index 0000000..7628cd6
--- /dev/null
@@ -0,0 +1,5 @@
+owned_by_aproject_with_no_name:
+  uuid: zzzzz-q1cn2-ypsjlol9dofwijz
+  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso
+  created_at: 2014-05-05 04:11:52 -0400
+  modified_at: 2014-05-05 04:11:52 -0400
diff --git a/services/api/test/fixtures/users.yml b/services/api/test/fixtures/users.yml
new file mode 100644 (file)
index 0000000..c04aa47
--- /dev/null
@@ -0,0 +1,263 @@
+# Read about fixtures at http://api.rubyonrails.org/classes/ActiveRecord/Fixtures.html
+
+system_user:
+  uuid: zzzzz-tpzed-000000000000000
+  owner_uuid: zzzzz-tpzed-000000000000000
+  created_at: 2014-11-27 06:38:21.215463000 Z
+  modified_by_client_uuid: zzzzz-ozdt8-teyxzyd8qllg11h
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  modified_at: 2014-11-27 06:38:21.208036000 Z
+  email: root
+  first_name: root
+  last_name: ''
+  identity_url:
+  is_admin: true
+  prefs: {}
+  updated_at: 2014-11-27 06:38:21.207873000 Z
+  is_active: true
+
+admin:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-d9tiejq69daie8f
+  email: admin@arvados.local
+  first_name: TestCase
+  last_name: Administrator
+  identity_url: https://admin.openid.local
+  is_active: true
+  is_admin: true
+  prefs:
+    profile:
+      organization: example.com
+      role: IT
+
+miniadmin:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-2bg9x0oeydcw5hm
+  email: miniadmin@arvados.local
+  first_name: TestCase
+  last_name: User Group Administrator
+  identity_url: https://miniadmin.openid.local
+  is_active: true
+  is_admin: false
+  prefs:
+    profile:
+      organization: example.com
+      role: IT
+
+rominiadmin:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-4hvxm4n25emegis
+  email: rominiadmin@arvados.local
+  first_name: TestCase
+  last_name: Read-Only User Group Administrator
+  identity_url: https://rominiadmin.openid.local
+  is_active: true
+  is_admin: false
+  prefs:
+    profile:
+      organization: example.com
+      role: IT
+
+active:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  email: active-user@arvados.local
+  first_name: Active
+  last_name: User
+  identity_url: https://active-user.openid.local
+  is_active: true
+  is_admin: false
+  prefs:
+    profile:
+      organization: example.com
+      role: Computational biologist
+
+project_viewer:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-projectviewer1a
+  email: project-viewer@arvados.local
+  first_name: Project
+  last_name: Viewer
+  identity_url: https://project-viewer.openid.local
+  is_active: true
+  is_admin: false
+  prefs:
+    profile:
+      organization: example.com
+      role: Computational biologist
+
+future_project_user:
+  # Workbench tests give this user permission on aproject.
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-futureprojview2
+  email: future-project-user@arvados.local
+  first_name: Future Project
+  last_name: User
+  identity_url: https://future-project-user.openid.local
+  is_active: true
+  is_admin: false
+  prefs:
+    profile:
+      organization: example.com
+      role: Computational biologist
+
+subproject_admin:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-subprojectadmin
+  email: subproject-admin@arvados.local
+  first_name: Subproject
+  last_name: Admin
+  identity_url: https://subproject-admin.openid.local
+  is_active: true
+  is_admin: false
+  prefs:
+    profile:
+      organization: example.com
+      role: Computational biologist
+
+spectator:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-l1s2piq4t4mps8r
+  email: spectator@arvados.local
+  first_name: Spect
+  last_name: Ator
+  identity_url: https://spectator.openid.local
+  is_active: true
+  is_admin: false
+  prefs:
+    profile:
+      organization: example.com
+      role: Computational biologist
+
+inactive_uninvited:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-rf2ec3ryh4vb5ma
+  email: inactive-uninvited-user@arvados.local
+  first_name: Inactive and Uninvited
+  last_name: User
+  identity_url: https://inactive-uninvited-user.openid.local
+  is_active: false
+  is_admin: false
+  prefs: {}
+
+inactive:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-x9kqpd79egh49c7
+  email: inactive-user@arvados.local
+  first_name: Inactive
+  last_name: User
+  identity_url: https://inactive-user.openid.local
+  is_active: false
+  is_admin: false
+  prefs: {}
+
+inactive_but_signed_user_agreement:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-7sg468ezxwnodxs
+  email: inactive-user-signed-ua@arvados.local
+  first_name: Inactive But Agreeable
+  last_name: User
+  identity_url: https://inactive-but-agreeable-user.openid.local
+  is_active: false
+  is_admin: false
+  prefs:
+    profile:
+      organization: example.com
+      role: Computational biologist
+
+anonymous:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-anonymouspublic
+  email: anonymouspublic
+  first_name: anonymouspublic
+  last_name: anonymouspublic
+  is_active: false
+  is_admin: false
+  prefs: {}
+
+job_reader:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-905b42d1dd4a354
+  email: jobber@arvados.local
+  first_name: Job
+  last_name: Er
+  identity_url: https://spectator.openid.local
+  is_active: true
+  is_admin: false
+  prefs:
+    profile:
+      organization: example.com
+      role: Computational biologist
+
+active_no_prefs:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-a46c42d1td4aoj4
+  email: active_no_prefs@arvados.local
+  first_name: NoPrefs
+  last_name: NoProfile
+  identity_url: https://active_no_prefs.openid.local
+  is_active: true
+  is_admin: false
+  prefs: {}
+
+active_no_prefs_profile:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-a46c98d1td4aoj4
+  email: active_no_prefs_profile@arvados.local
+  first_name: HasPrefs
+  last_name: NoProfile
+  identity_url: https://active_no_prefs_profile.openid.local
+  is_active: true
+  is_admin: false
+  prefs:
+    test: abc
+
+# Fixtures to test granting and removing permissions.
+
+user_foo_in_sharing_group:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-81hsbo6mk8nl05c
+  email: user_foo_in_sharing_group@arvados.local
+  first_name: Foo
+  last_name: Sharing
+  identity_url: https://user_foo_in_sharing_group.openid.local
+  is_active: true
+  is_admin: false
+
+user_bar_in_sharing_group:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-n3oaj4sm5fcnwib
+  email: user_bar_in_sharing_group@arvados.local
+  first_name: Bar
+  last_name: Sharing
+  identity_url: https://user_bar_in_sharing_group.openid.local
+  is_active: true
+  is_admin: false
+
+user1_with_load:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-user1withloadab
+  email: user1_with_load@arvados.local
+  first_name: user1_with_load
+  last_name: User
+  identity_url: https://user1_with_load.openid.local
+  is_active: true
+  is_admin: false
+  prefs:
+    profile:
+      organization: example.com
+      role: IT
+
+fuse:
+  owner_uuid: zzzzz-tpzed-000000000000000
+  uuid: zzzzz-tpzed-0fusedrivertest
+  email: fuse@arvados.local
+  first_name: FUSE
+  last_name: User
+  identity_url: https://fuse.openid.local
+  is_active: true
+  is_admin: false
+  prefs:
+    profile:
+      organization: example.com
+      role: IT
diff --git a/services/api/test/fixtures/virtual_machines.yml b/services/api/test/fixtures/virtual_machines.yml
new file mode 100644 (file)
index 0000000..f5e0163
--- /dev/null
@@ -0,0 +1,9 @@
+testvm:
+  uuid: zzzzz-2x53u-382brsig8rp3064
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  hostname: testvm.shell
+
+testvm2:
+  uuid: zzzzz-2x53u-382brsig8rp3065
+  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  hostname: testvm2.shell
diff --git a/services/api/test/functional/.gitkeep b/services/api/test/functional/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/api/test/functional/application_controller_test.rb b/services/api/test/functional/application_controller_test.rb
new file mode 100644 (file)
index 0000000..3a4a244
--- /dev/null
@@ -0,0 +1,93 @@
+require 'test_helper'
+
+class ApplicationControllerTest < ActionController::TestCase
+  BAD_UUID = "zzzzz-zzzzz-zzzzzzzzzzzzzzz"
+
+  def now_timestamp
+    Time.now.utc.to_i
+  end
+
+  setup do
+    # These tests are meant to check behavior in ApplicationController.
+    # We instantiate a small concrete controller for convenience.
+    @controller = Arvados::V1::SpecimensController.new
+    @start_stamp = now_timestamp
+  end
+
+  def check_error_token
+    token = json_response['error_token']
+    assert_not_nil token
+    token_time = token.split('+', 2).first.to_i
+    assert_operator(token_time, :>=, @start_stamp, "error token too old")
+    assert_operator(token_time, :<=, now_timestamp, "error token too new")
+  end
+
+  def check_404(errmsg="Path not found")
+    assert_response 404
+    assert_equal([errmsg], json_response['errors'])
+    check_error_token
+  end
+
+  test "requesting nonexistent object returns 404 error" do
+    authorize_with :admin
+    get(:show, id: BAD_UUID)
+    check_404
+  end
+
+  test "requesting object without read permission returns 404 error" do
+    authorize_with :spectator
+    get(:show, id: specimens(:owned_by_active_user).uuid)
+    check_404
+  end
+
+  test "submitting bad object returns error" do
+    authorize_with :spectator
+    post(:create, specimen: {badattr: "badvalue"})
+    assert_response 422
+    check_error_token
+  end
+
+  ['foo', '', 'FALSE', 'TRUE', nil, [true], {a:true}, '"true"'].each do |bogus|
+    test "bogus boolean parameter #{bogus.inspect} returns error" do
+      @controller = Arvados::V1::GroupsController.new
+      authorize_with :active
+      post :create, {
+        group: {},
+        ensure_unique_name: bogus
+      }
+      assert_response 422
+      assert_match(/parameter must be a boolean/, json_response['errors'].first,
+                   'Helpful error message not found')
+    end
+  end
+
+  [[true, [true, 'true', 1, '1']],
+   [false, [false, 'false', 0, '0']]].each do |bool, boolparams|
+    boolparams.each do |boolparam|
+      # Ensure boolparam is acceptable as a boolean
+      test "boolean parameter #{boolparam.inspect} acceptable" do
+        @controller = Arvados::V1::GroupsController.new
+        authorize_with :active
+        post :create, {
+          group: {},
+          ensure_unique_name: boolparam
+        }
+        assert_response :success
+      end
+
+      # Ensure boolparam is acceptable as the _intended_ boolean
+      test "boolean parameter #{boolparam.inspect} accepted as #{bool.inspect}" do
+        @controller = Arvados::V1::GroupsController.new
+        authorize_with :active
+        post :create, {
+          group: {
+            name: groups(:aproject).name,
+            owner_uuid: groups(:aproject).owner_uuid
+          },
+          ensure_unique_name: boolparam
+        }
+        assert_response (bool ? :success : 422)
+      end
+    end
+  end
+end
diff --git a/services/api/test/functional/arvados/v1/api_client_authorizations_controller_test.rb b/services/api/test/functional/arvados/v1/api_client_authorizations_controller_test.rb
new file mode 100644 (file)
index 0000000..8877719
--- /dev/null
@@ -0,0 +1,69 @@
+require 'test_helper'
+
+class Arvados::V1::ApiClientAuthorizationsControllerTest < ActionController::TestCase
+  test "should get index" do
+    authorize_with :active_trustedclient
+    get :index
+    assert_response :success
+  end
+
+  test "should not get index with expired auth" do
+    authorize_with :expired
+    get :index, format: :json
+    assert_response 401
+  end
+
+  test "should not get index from untrusted client" do
+    authorize_with :active
+    get :index
+    assert_response 403
+  end
+
+  test "create system auth" do
+    authorize_with :admin_trustedclient
+    post :create_system_auth, scopes: '["test"]'
+    assert_response :success
+  end
+
+  test "prohibit create system auth with token from non-trusted client" do
+    authorize_with :admin
+    post :create_system_auth, scopes: '["test"]'
+    assert_response 403
+  end
+
+  test "prohibit create system auth by non-admin" do
+    authorize_with :active
+    post :create_system_auth, scopes: '["test"]'
+    assert_response 403
+  end
+
+  def assert_found_tokens(auth, search_params, *expected_tokens)
+    authorize_with auth
+    expected_tokens.map! { |name| api_client_authorizations(name).api_token }
+    get :index, search_params
+    assert_response :success
+    got_tokens = JSON.parse(@response.body)['items']
+      .map { |auth| auth['api_token'] }
+    assert_equal(expected_tokens.sort, got_tokens.sort,
+                 "wrong results for #{search_params.inspect}")
+  end
+
+  # Three-tuples with auth to use, scopes to find, and expected tokens.
+  # Make two tests for each tuple, one searching with where and the other
+  # with filter.
+  [[:admin_trustedclient, [], :admin_noscope],
+   [:active_trustedclient, ["GET /arvados/v1/users"], :active_userlist],
+   [:active_trustedclient,
+    ["POST /arvados/v1/api_client_authorizations",
+     "GET /arvados/v1/api_client_authorizations"],
+    :active_apitokens],
+  ].each do |auth, scopes, *expected|
+    test "#{auth.to_s} can find auths where scopes=#{scopes.inspect}" do
+      assert_found_tokens(auth, {where: {scopes: scopes}}, *expected)
+    end
+
+    test "#{auth.to_s} can find auths filtered with scopes=#{scopes.inspect}" do
+      assert_found_tokens(auth, {filters: [['scopes', '=', scopes]]}, *expected)
+    end
+  end
+end
diff --git a/services/api/test/functional/arvados/v1/authorized_keys_controller_test.rb b/services/api/test/functional/arvados/v1/authorized_keys_controller_test.rb
new file mode 100644 (file)
index 0000000..265eaef
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class Arvados::V1::AuthorizedKeysControllerTest < ActionController::TestCase
+end
diff --git a/services/api/test/functional/arvados/v1/collections_controller_test.rb b/services/api/test/functional/arvados/v1/collections_controller_test.rb
new file mode 100644 (file)
index 0000000..269474a
--- /dev/null
@@ -0,0 +1,664 @@
+require 'test_helper'
+
+class Arvados::V1::CollectionsControllerTest < ActionController::TestCase
+
+  def permit_unsigned_manifests isok=true
+    # Set security model for the life of a test.
+    Rails.configuration.permit_create_collection_with_unsigned_manifest = isok
+  end
+
+  def assert_signed_manifest manifest_text, label=''
+    assert_not_nil manifest_text, "#{label} manifest_text was nil"
+    manifest_text.scan(/ [[:xdigit:]]{32}\S*/) do |tok|
+      assert_match(/\+A[[:xdigit:]]+@[[:xdigit:]]{8}\b/, tok,
+                   "Locator in #{label} manifest_text was not signed")
+    end
+  end
+
+  test "should get index" do
+    authorize_with :active
+    get :index
+    assert_response :success
+    assert(assigns(:objects).andand.any?, "no Collections returned in index")
+    refute(json_response["items"].any? { |c| c.has_key?("manifest_text") },
+           "basic Collections index included manifest_text")
+  end
+
+  test "collections.get returns signed locators" do
+    permit_unsigned_manifests
+    authorize_with :active
+    get :show, {id: collections(:foo_file).uuid}
+    assert_response :success
+    assert_signed_manifest json_response['manifest_text'], 'foo_file'
+  end
+
+  test "index with manifest_text selected returns signed locators" do
+    columns = %w(uuid owner_uuid manifest_text)
+    authorize_with :active
+    get :index, select: columns
+    assert_response :success
+    assert(assigns(:objects).andand.any?,
+           "no Collections returned for index with columns selected")
+    json_response["items"].each do |coll|
+      assert_equal(columns, columns & coll.keys,
+                   "Collections index did not respect selected columns")
+      assert_signed_manifest coll['manifest_text'], coll['uuid']
+    end
+  end
+
+  [0,1,2].each do |limit|
+    test "get index with limit=#{limit}" do
+      authorize_with :active
+      get :index, limit: limit
+      assert_response :success
+      assert_equal limit, assigns(:objects).count
+      resp = JSON.parse(@response.body)
+      assert_equal limit, resp['limit']
+    end
+  end
+
+  test "items.count == items_available" do
+    authorize_with :active
+    get :index, limit: 100000
+    assert_response :success
+    resp = JSON.parse(@response.body)
+    assert_equal resp['items_available'], assigns(:objects).length
+    assert_equal resp['items_available'], resp['items'].count
+    unique_uuids = resp['items'].collect { |i| i['uuid'] }.compact.uniq
+    assert_equal unique_uuids.count, resp['items'].count
+  end
+
+  test "items.count == items_available with filters" do
+    authorize_with :active
+    get :index, {
+      limit: 100,
+      filters: [['uuid','=',collections(:foo_file).uuid]]
+    }
+    assert_response :success
+    assert_equal 1, assigns(:objects).length
+    assert_equal 1, json_response['items_available']
+    assert_equal 1, json_response['items'].count
+  end
+
+  test "get index with limit=2 offset=99999" do
+    # Assume there are not that many test fixtures.
+    authorize_with :active
+    get :index, limit: 2, offset: 99999
+    assert_response :success
+    assert_equal 0, assigns(:objects).count
+    resp = JSON.parse(@response.body)
+    assert_equal 2, resp['limit']
+    assert_equal 99999, resp['offset']
+  end
+
+  test "admin can create collection with unsigned manifest" do
+    authorize_with :admin
+    test_collection = {
+      manifest_text: <<-EOS
+. d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt
+. acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:bar.txt
+. acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:bar.txt
+./baz acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:bar.txt
+EOS
+    }
+    test_collection[:portable_data_hash] =
+      Digest::MD5.hexdigest(test_collection[:manifest_text]) +
+      '+' +
+      test_collection[:manifest_text].length.to_s
+
+    # post :create will modify test_collection in place, so we save a copy first.
+    # Hash.deep_dup is not sufficient because it preserves references to the original strings.
+    post_collection = Marshal.load(Marshal.dump(test_collection))
+    post :create, {
+      collection: post_collection
+    }
+
+    assert_response :success
+    assert_nil assigns(:objects)
+
+    response_collection = assigns(:object)
+
+    stored_collection = Collection.select([:uuid, :portable_data_hash, :manifest_text]).
+      where(portable_data_hash: response_collection['portable_data_hash']).first
+
+    assert_equal test_collection[:portable_data_hash], stored_collection['portable_data_hash']
+
+    # The manifest in the response will have had permission hints added.
+    # Remove any permission hints in the response before comparing it to the source.
+    stripped_manifest = stored_collection['manifest_text'].gsub(/\+A[A-Za-z0-9@_-]+/, '')
+    assert_equal test_collection[:manifest_text], stripped_manifest
+
+    # TBD: create action should add permission signatures to manifest_text in the response,
+    # and we need to check those permission signatures here.
+  end
+
+  [:admin, :active].each do |user|
+    test "#{user} can get collection using portable data hash" do
+      authorize_with user
+
+      foo_collection = collections(:foo_file)
+
+      # Get foo_file using its portable data hash
+      get :show, {
+        id: foo_collection[:portable_data_hash]
+      }
+      assert_response :success
+      assert_not_nil assigns(:object)
+      resp = assigns(:object)
+      assert_equal foo_collection[:portable_data_hash], resp['portable_data_hash']
+      assert_signed_manifest resp['manifest_text']
+
+      # The manifest in the response will have had permission hints added.
+      # Remove any permission hints in the response before comparing it to the source.
+      stripped_manifest = resp['manifest_text'].gsub(/\+A[A-Za-z0-9@_-]+/, '')
+      assert_equal foo_collection[:manifest_text], stripped_manifest
+    end
+  end
+
+  test "create with owner_uuid set to owned group" do
+    permit_unsigned_manifests
+    authorize_with :active
+    manifest_text = ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n"
+    post :create, {
+      collection: {
+        owner_uuid: 'zzzzz-j7d0g-rew6elm53kancon',
+        manifest_text: manifest_text,
+        portable_data_hash: "d30fe8ae534397864cb96c544f4cf102+47"
+      }
+    }
+    assert_response :success
+    resp = JSON.parse(@response.body)
+    assert_equal 'zzzzz-j7d0g-rew6elm53kancon', resp['owner_uuid']
+  end
+
+  test "create fails with duplicate name" do
+    permit_unsigned_manifests
+    authorize_with :admin
+    manifest_text = ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n"
+    post :create, {
+      collection: {
+        owner_uuid: 'zzzzz-tpzed-000000000000000',
+        manifest_text: manifest_text,
+        portable_data_hash: "d30fe8ae534397864cb96c544f4cf102+47",
+        name: "foo_file"
+      }
+    }
+    assert_response 422
+    response_errors = json_response['errors']
+    assert_not_nil response_errors, 'Expected error in response'
+    assert(response_errors.first.include?('duplicate key'),
+           "Expected 'duplicate key' error in #{response_errors.first}")
+  end
+
+  [false, true].each do |unsigned|
+    test "create with duplicate name, ensure_unique_name, unsigned=#{unsigned}" do
+      permit_unsigned_manifests unsigned
+      authorize_with :active
+      manifest_text = ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:0:foo.txt\n"
+      if !unsigned
+        manifest_text = Collection.sign_manifest manifest_text, api_token(:active)
+      end
+      post :create, {
+        collection: {
+          owner_uuid: users(:active).uuid,
+          manifest_text: manifest_text,
+          name: "owned_by_active"
+        },
+        ensure_unique_name: true
+      }
+      assert_response :success
+      assert_equal 'owned_by_active (2)', json_response['name']
+    end
+  end
+
+  test "create with owner_uuid set to group i can_manage" do
+    permit_unsigned_manifests
+    authorize_with :active
+    manifest_text = ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n"
+    post :create, {
+      collection: {
+        owner_uuid: groups(:active_user_has_can_manage).uuid,
+        manifest_text: manifest_text,
+        portable_data_hash: "d30fe8ae534397864cb96c544f4cf102+47"
+      }
+    }
+    assert_response :success
+    resp = JSON.parse(@response.body)
+    assert_equal groups(:active_user_has_can_manage).uuid, resp['owner_uuid']
+  end
+
+  test "create with owner_uuid fails on group with only can_read permission" do
+    permit_unsigned_manifests
+    authorize_with :active
+    manifest_text = ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n"
+    post :create, {
+      collection: {
+        owner_uuid: groups(:all_users).uuid,
+        manifest_text: manifest_text,
+        portable_data_hash: "d30fe8ae534397864cb96c544f4cf102+47"
+      }
+    }
+    assert_response 403
+  end
+
+  test "create with owner_uuid fails on group with no permission" do
+    permit_unsigned_manifests
+    authorize_with :active
+    manifest_text = ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n"
+    post :create, {
+      collection: {
+        owner_uuid: groups(:public).uuid,
+        manifest_text: manifest_text,
+        portable_data_hash: "d30fe8ae534397864cb96c544f4cf102+47"
+      }
+    }
+    assert_response 422
+  end
+
+  test "admin create with owner_uuid set to group with no permission" do
+    permit_unsigned_manifests
+    authorize_with :admin
+    manifest_text = ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n"
+    post :create, {
+      collection: {
+        owner_uuid: 'zzzzz-j7d0g-it30l961gq3t0oi',
+        manifest_text: manifest_text,
+        portable_data_hash: "d30fe8ae534397864cb96c544f4cf102+47"
+      }
+    }
+    assert_response :success
+  end
+
+  test "should create with collection passed as json" do
+    permit_unsigned_manifests
+    authorize_with :active
+    post :create, {
+      collection: <<-EOS
+      {
+        "manifest_text":". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n",\
+        "portable_data_hash":"d30fe8ae534397864cb96c544f4cf102+47"\
+      }
+      EOS
+    }
+    assert_response :success
+  end
+
+  test "should fail to create with checksum mismatch" do
+    permit_unsigned_manifests
+    authorize_with :active
+    post :create, {
+      collection: <<-EOS
+      {
+        "manifest_text":". d41d8cd98f00b204e9800998ecf8427e 0:0:bar.txt\n",\
+        "portable_data_hash":"d30fe8ae534397864cb96c544f4cf102+47"\
+      }
+      EOS
+    }
+    assert_response 422
+  end
+
+  test "collection UUID is normalized when created" do
+    permit_unsigned_manifests
+    authorize_with :active
+    post :create, {
+      collection: {
+        manifest_text: ". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\n",
+        portable_data_hash: "d30fe8ae534397864cb96c544f4cf102+47+Khint+Xhint+Zhint"
+      }
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    resp = JSON.parse(@response.body)
+    assert_equal "d30fe8ae534397864cb96c544f4cf102+47", resp['portable_data_hash']
+  end
+
+  test "get full provenance for baz file" do
+    authorize_with :active
+    get :provenance, id: 'ea10d51bcf88862dbcc36eb292017dfd+45'
+    assert_response :success
+    resp = JSON.parse(@response.body)
+    assert_not_nil resp['ea10d51bcf88862dbcc36eb292017dfd+45'] # baz
+    assert_not_nil resp['fa7aeb5140e2848d39b416daeef4ffc5+45'] # bar
+    assert_not_nil resp['1f4b0bc7583c2a7f9102c395f4ffc5e3+45'] # foo
+    assert_not_nil resp['zzzzz-8i9sb-cjs4pklxxjykyuq'] # bar->baz
+    assert_not_nil resp['zzzzz-8i9sb-aceg2bnq7jt7kon'] # foo->bar
+  end
+
+  test "get no provenance for foo file" do
+    # spectator user cannot even see the foo collection
+    authorize_with :spectator
+    get :provenance, id: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'
+    assert_response 404
+  end
+
+  test "get partial provenance for baz file" do
+    # spectator user can see bar->baz job, but not foo->bar job
+    authorize_with :spectator
+    get :provenance, id: 'ea10d51bcf88862dbcc36eb292017dfd+45'
+    assert_response :success
+    resp = JSON.parse(@response.body)
+    assert_not_nil resp['ea10d51bcf88862dbcc36eb292017dfd+45'] # baz
+    assert_not_nil resp['fa7aeb5140e2848d39b416daeef4ffc5+45'] # bar
+    assert_not_nil resp['zzzzz-8i9sb-cjs4pklxxjykyuq']     # bar->baz
+    assert_nil resp['zzzzz-8i9sb-aceg2bnq7jt7kon']         # foo->bar
+    assert_nil resp['1f4b0bc7583c2a7f9102c395f4ffc5e3+45'] # foo
+  end
+
+  test "search collections with 'any' operator" do
+    authorize_with :active
+    get :index, {
+      where: { any: ['contains', 'd0bc8c7f34be170a7b7b'] }
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:portable_data_hash)
+    assert_equal 1, found.count
+    assert_equal true, !!found.index('5bd9c1ad0bc8c7f34be170a7b7b39089+45')
+  end
+
+  [false, true].each do |permit_unsigned|
+    test "create collection with signed manifest, permit_unsigned=#{permit_unsigned}" do
+      permit_unsigned_manifests permit_unsigned
+      authorize_with :active
+      locators = %w(
+      d41d8cd98f00b204e9800998ecf8427e+0
+      acbd18db4cc2f85cedef654fccc4a4d8+3
+      ea10d51bcf88862dbcc36eb292017dfd+45)
+
+      unsigned_manifest = locators.map { |loc|
+        ". " + loc + " 0:0:foo.txt\n"
+      }.join()
+      manifest_uuid = Digest::MD5.hexdigest(unsigned_manifest) +
+        '+' +
+        unsigned_manifest.length.to_s
+
+      # Build a manifest with both signed and unsigned locators.
+      signing_opts = {
+        key: Rails.configuration.blob_signing_key,
+        api_token: api_token(:active),
+      }
+      signed_locators = locators.collect do |x|
+        Blob.sign_locator x, signing_opts
+      end
+      if permit_unsigned
+        # Leave a non-empty blob unsigned.
+        signed_locators[1] = locators[1]
+      else
+        # Leave the empty blob unsigned. This should still be allowed.
+        signed_locators[0] = locators[0]
+      end
+      signed_manifest =
+        ". " + signed_locators[0] + " 0:0:foo.txt\n" +
+        ". " + signed_locators[1] + " 0:0:foo.txt\n" +
+        ". " + signed_locators[2] + " 0:0:foo.txt\n"
+
+      post :create, {
+        collection: {
+          manifest_text: signed_manifest,
+          portable_data_hash: manifest_uuid,
+        }
+      }
+      assert_response :success
+      assert_not_nil assigns(:object)
+      resp = JSON.parse(@response.body)
+      assert_equal manifest_uuid, resp['portable_data_hash']
+      # All of the locators in the output must be signed.
+      resp['manifest_text'].lines.each do |entry|
+        m = /([[:xdigit:]]{32}\+\S+)/.match(entry)
+        if m
+          assert Blob.verify_signature m[0], signing_opts
+        end
+      end
+    end
+  end
+
+  test "create collection with signed manifest and explicit TTL" do
+    authorize_with :active
+    locators = %w(
+      d41d8cd98f00b204e9800998ecf8427e+0
+      acbd18db4cc2f85cedef654fccc4a4d8+3
+      ea10d51bcf88862dbcc36eb292017dfd+45)
+
+    unsigned_manifest = locators.map { |loc|
+      ". " + loc + " 0:0:foo.txt\n"
+    }.join()
+    manifest_uuid = Digest::MD5.hexdigest(unsigned_manifest) +
+      '+' +
+      unsigned_manifest.length.to_s
+
+    # build a manifest with both signed and unsigned locators.
+    # TODO(twp): in phase 4, all locators will need to be signed, so
+    # this test should break and will need to be rewritten. Issue #2755.
+    signing_opts = {
+      key: Rails.configuration.blob_signing_key,
+      api_token: api_token(:active),
+      ttl: 3600   # 1 hour
+    }
+    signed_manifest =
+      ". " + locators[0] + " 0:0:foo.txt\n" +
+      ". " + Blob.sign_locator(locators[1], signing_opts) + " 0:0:foo.txt\n" +
+      ". " + Blob.sign_locator(locators[2], signing_opts) + " 0:0:foo.txt\n"
+
+    post :create, {
+      collection: {
+        manifest_text: signed_manifest,
+        portable_data_hash: manifest_uuid,
+      }
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    resp = JSON.parse(@response.body)
+    assert_equal manifest_uuid, resp['portable_data_hash']
+    # All of the locators in the output must be signed.
+    resp['manifest_text'].lines.each do |entry|
+      m = /([[:xdigit:]]{32}\+\S+)/.match(entry)
+      if m
+        assert Blob.verify_signature m[0], signing_opts
+      end
+    end
+  end
+
+  test "create fails with invalid signature" do
+    authorize_with :active
+    signing_opts = {
+      key: Rails.configuration.blob_signing_key,
+      api_token: api_token(:active),
+    }
+
+    # Generate a locator with a bad signature.
+    unsigned_locator = "d41d8cd98f00b204e9800998ecf8427e+0"
+    bad_locator = unsigned_locator + "+Affffffff@ffffffff"
+    assert !Blob.verify_signature(bad_locator, signing_opts)
+
+    # Creating a collection with this locator should
+    # produce 403 Permission denied.
+    unsigned_manifest = ". #{unsigned_locator} 0:0:foo.txt\n"
+    manifest_uuid = Digest::MD5.hexdigest(unsigned_manifest) +
+      '+' +
+      unsigned_manifest.length.to_s
+
+    bad_manifest = ". #{bad_locator} 0:0:foo.txt\n"
+    post :create, {
+      collection: {
+        manifest_text: bad_manifest,
+        portable_data_hash: manifest_uuid
+      }
+    }
+
+    assert_response 403
+  end
+
+  test "create fails with uuid of signed manifest" do
+    authorize_with :active
+    signing_opts = {
+      key: Rails.configuration.blob_signing_key,
+      api_token: api_token(:active),
+    }
+
+    unsigned_locator = "d41d8cd98f00b204e9800998ecf8427e+0"
+    signed_locator = Blob.sign_locator(unsigned_locator, signing_opts)
+    signed_manifest = ". #{signed_locator} 0:0:foo.txt\n"
+    manifest_uuid = Digest::MD5.hexdigest(signed_manifest) +
+      '+' +
+      signed_manifest.length.to_s
+
+    post :create, {
+      collection: {
+        manifest_text: signed_manifest,
+        portable_data_hash: manifest_uuid
+      }
+    }
+
+    assert_response 422
+  end
+
+  test "multiple locators per line" do
+    permit_unsigned_manifests
+    authorize_with :active
+    locators = %w(
+      d41d8cd98f00b204e9800998ecf8427e+0
+      acbd18db4cc2f85cedef654fccc4a4d8+3
+      ea10d51bcf88862dbcc36eb292017dfd+45)
+
+    manifest_text = [".", *locators, "0:0:foo.txt\n"].join(" ")
+    manifest_uuid = Digest::MD5.hexdigest(manifest_text) +
+      '+' +
+      manifest_text.length.to_s
+
+    test_collection = {
+      manifest_text: manifest_text,
+      portable_data_hash: manifest_uuid,
+    }
+    post_collection = Marshal.load(Marshal.dump(test_collection))
+    post :create, {
+      collection: post_collection
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    resp = JSON.parse(@response.body)
+    assert_equal manifest_uuid, resp['portable_data_hash']
+
+    # The manifest in the response will have had permission hints added.
+    # Remove any permission hints in the response before comparing it to the source.
+    stripped_manifest = resp['manifest_text'].gsub(/\+A[A-Za-z0-9@_-]+/, '')
+    assert_equal manifest_text, stripped_manifest
+  end
+
+  test "multiple signed locators per line" do
+    permit_unsigned_manifests
+    authorize_with :active
+    locators = %w(
+      d41d8cd98f00b204e9800998ecf8427e+0
+      acbd18db4cc2f85cedef654fccc4a4d8+3
+      ea10d51bcf88862dbcc36eb292017dfd+45)
+
+    signing_opts = {
+      key: Rails.configuration.blob_signing_key,
+      api_token: api_token(:active),
+    }
+
+    unsigned_manifest = [".", *locators, "0:0:foo.txt\n"].join(" ")
+    manifest_uuid = Digest::MD5.hexdigest(unsigned_manifest) +
+      '+' +
+      unsigned_manifest.length.to_s
+
+    signed_locators = locators.map { |loc| Blob.sign_locator loc, signing_opts }
+    signed_manifest = [".", *signed_locators, "0:0:foo.txt\n"].join(" ")
+
+    post :create, {
+      collection: {
+        manifest_text: signed_manifest,
+        portable_data_hash: manifest_uuid,
+      }
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    resp = JSON.parse(@response.body)
+    assert_equal manifest_uuid, resp['portable_data_hash']
+    # All of the locators in the output must be signed.
+    # Each line is of the form "path locator locator ... 0:0:file.txt"
+    # entry.split[1..-2] will yield just the tokens in the middle of the line
+    returned_locator_count = 0
+    resp['manifest_text'].lines.each do |entry|
+      entry.split[1..-2].each do |tok|
+        returned_locator_count += 1
+        assert Blob.verify_signature tok, signing_opts
+      end
+    end
+    assert_equal locators.count, returned_locator_count
+  end
+
+  test 'Reject manifest with unsigned blob' do
+    permit_unsigned_manifests false
+    authorize_with :active
+    unsigned_manifest = ". 0cc175b9c0f1b6a831c399e269772661+1 0:1:a.txt\n"
+    manifest_uuid = Digest::MD5.hexdigest(unsigned_manifest)
+    post :create, {
+      collection: {
+        manifest_text: unsigned_manifest,
+        portable_data_hash: manifest_uuid,
+      }
+    }
+    assert_response 403,
+    "Creating a collection with unsigned blobs should respond 403"
+    assert_empty Collection.where('uuid like ?', manifest_uuid+'%'),
+    "Collection should not exist in database after failed create"
+  end
+
+  test 'List expired collection returns empty list' do
+    authorize_with :active
+    get :index, {
+      where: {name: 'expired_collection'},
+    }
+    assert_response :success
+    found = assigns(:objects)
+    assert_equal 0, found.count
+  end
+
+  test 'Show expired collection returns 404' do
+    authorize_with :active
+    get :show, {
+      id: 'zzzzz-4zz18-mto52zx1s7sn3ih',
+    }
+    assert_response 404
+  end
+
+  test 'Update expired collection returns 404' do
+    authorize_with :active
+    post :update, {
+      id: 'zzzzz-4zz18-mto52zx1s7sn3ih',
+      collection: {
+        name: "still expired"
+      }
+    }
+    assert_response 404
+  end
+
+  test 'List collection with future expiration time succeeds' do
+    authorize_with :active
+    get :index, {
+      where: {name: 'collection_expires_in_future'},
+    }
+    found = assigns(:objects)
+    assert_equal 1, found.count
+  end
+
+
+  test 'Show collection with future expiration time succeeds' do
+    authorize_with :active
+    get :show, {
+      id: 'zzzzz-4zz18-padkqo7yb8d9i3j',
+    }
+    assert_response :success
+  end
+
+  test 'Update collection with future expiration time succeeds' do
+    authorize_with :active
+    post :update, {
+      id: 'zzzzz-4zz18-padkqo7yb8d9i3j',
+      collection: {
+        name: "still not expired"
+      }
+    }
+    assert_response :success
+  end
+end
diff --git a/services/api/test/functional/arvados/v1/commits_controller_test.rb b/services/api/test/functional/arvados/v1/commits_controller_test.rb
new file mode 100644 (file)
index 0000000..ceaebff
--- /dev/null
@@ -0,0 +1,102 @@
+require 'test_helper'
+require 'helpers/git_test_helper'
+
+# NOTE: calling Commit.find_commit_range(user, nil, nil, 'rev') will produce
+# an error message "fatal: bad object 'rev'" on stderr if 'rev' does not exist
+# in a given repository.  Many of these tests report such errors; their presence
+# does not represent a fatal condition.
+#
+# TODO(twp): consider better error handling of these messages, or
+# decide to abandon it.
+
+class Arvados::V1::CommitsControllerTest < ActionController::TestCase
+  fixtures :repositories, :users
+
+  # See git_setup.rb for the commit log for test.git.tar
+  include GitTestHelper
+
+  test "test_find_commit_range" do
+    authorize_with :active
+
+  # single
+    a = Commit.find_commit_range(users(:active), nil, nil, '31ce37fe365b3dc204300a3e4c396ad333ed0556', nil)
+    assert_equal ['31ce37fe365b3dc204300a3e4c396ad333ed0556'], a
+
+  #test "test_branch1" do
+    # complains "fatal: bad object 077ba2ad3ea24a929091a9e6ce545c93199b8e57"
+    a = Commit.find_commit_range(users(:active), nil, nil, 'master', nil)
+    assert_includes(a, 'f35f99b7d32bac257f5989df02b9f12ee1a9b0d6')
+    assert_includes(a, '077ba2ad3ea24a929091a9e6ce545c93199b8e57')
+
+  #test "test_branch2" do
+    a = Commit.find_commit_range(users(:active), 'foo', nil, 'b1', nil)
+    assert_equal ['1de84a854e2b440dc53bf42f8548afa4c17da332'], a
+
+  #test "test_branch3" do
+    a = Commit.find_commit_range(users(:active), 'foo', nil, 'HEAD', nil)
+    assert_equal ['1de84a854e2b440dc53bf42f8548afa4c17da332'], a
+
+  #test "test_single_revision_repo" do
+    a = Commit.find_commit_range(users(:active), "foo", nil, '31ce37fe365b3dc204300a3e4c396ad333ed0556', nil)
+    assert_equal ['31ce37fe365b3dc204300a3e4c396ad333ed0556'], a
+    a = Commit.find_commit_range(users(:active), "bar", nil, '31ce37fe365b3dc204300a3e4c396ad333ed0556', nil)
+    assert_equal nil, a
+
+  #test "test_multi_revision" do
+    # complains "fatal: bad object 077ba2ad3ea24a929091a9e6ce545c93199b8e57"
+    a = Commit.find_commit_range(users(:active), nil, '31ce37fe365b3dc204300a3e4c396ad333ed0556', '077ba2ad3ea24a929091a9e6ce545c93199b8e57', nil)
+    assert_equal ['077ba2ad3ea24a929091a9e6ce545c93199b8e57', '4fe459abe02d9b365932b8f5dc419439ab4e2577', '31ce37fe365b3dc204300a3e4c396ad333ed0556'], a
+
+  #test "test_tag" do
+    # complains "fatal: ambiguous argument 'tag1': unknown revision or path
+    # not in the working tree."
+    a = Commit.find_commit_range(users(:active), nil, 'tag1', 'master', nil)
+    assert_equal ['077ba2ad3ea24a929091a9e6ce545c93199b8e57', '4fe459abe02d9b365932b8f5dc419439ab4e2577'], a
+
+  #test "test_multi_revision_exclude" do
+    a = Commit.find_commit_range(users(:active), nil, '31ce37fe365b3dc204300a3e4c396ad333ed0556', '077ba2ad3ea24a929091a9e6ce545c93199b8e57', ['4fe459abe02d9b365932b8f5dc419439ab4e2577'])
+    assert_equal ['077ba2ad3ea24a929091a9e6ce545c93199b8e57', '31ce37fe365b3dc204300a3e4c396ad333ed0556'], a
+
+  #test "test_multi_revision_tagged_exclude" do
+    # complains "fatal: bad object 077ba2ad3ea24a929091a9e6ce545c93199b8e57"
+    a = Commit.find_commit_range(users(:active), nil, '31ce37fe365b3dc204300a3e4c396ad333ed0556', '077ba2ad3ea24a929091a9e6ce545c93199b8e57', ['tag1'])
+    assert_equal ['077ba2ad3ea24a929091a9e6ce545c93199b8e57', '31ce37fe365b3dc204300a3e4c396ad333ed0556'], a
+
+    Dir.mktmpdir do |touchdir|
+      # invalid input to maximum
+      a = Commit.find_commit_range(users(:active), nil, nil, "31ce37fe365b3dc204300a3e4c396ad333ed0556 ; touch #{touchdir}/uh_oh", nil)
+      assert !File.exists?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'maximum' parameter of find_commit_range is exploitable"
+      assert_equal nil, a
+
+      # invalid input to maximum
+      a = Commit.find_commit_range(users(:active), nil, nil, "$(uname>#{touchdir}/uh_oh)", nil)
+      assert !File.exists?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'maximum' parameter of find_commit_range is exploitable"
+      assert_equal nil, a
+
+      # invalid input to minimum
+      a = Commit.find_commit_range(users(:active), nil, "31ce37fe365b3dc204300a3e4c396ad333ed0556 ; touch #{touchdir}/uh_oh", "31ce37fe365b3dc204300a3e4c396ad333ed0556", nil)
+      assert !File.exists?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'minimum' parameter of find_commit_range is exploitable"
+      assert_equal nil, a
+
+      # invalid input to minimum
+      a = Commit.find_commit_range(users(:active), nil, "$(uname>#{touchdir}/uh_oh)", "31ce37fe365b3dc204300a3e4c396ad333ed0556", nil)
+      assert !File.exists?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'minimum' parameter of find_commit_range is exploitable"
+      assert_equal nil, a
+
+      # invalid input to 'excludes'
+      # complains "fatal: bad object 077ba2ad3ea24a929091a9e6ce545c93199b8e57"
+      a = Commit.find_commit_range(users(:active), nil, "31ce37fe365b3dc204300a3e4c396ad333ed0556", "077ba2ad3ea24a929091a9e6ce545c93199b8e57", ["4fe459abe02d9b365932b8f5dc419439ab4e2577 ; touch #{touchdir}/uh_oh"])
+      assert !File.exists?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'excludes' parameter of find_commit_range is exploitable"
+      assert_equal nil, a
+
+      # invalid input to 'excludes'
+      # complains "fatal: bad object 077ba2ad3ea24a929091a9e6ce545c93199b8e57"
+      a = Commit.find_commit_range(users(:active), nil, "31ce37fe365b3dc204300a3e4c396ad333ed0556", "077ba2ad3ea24a929091a9e6ce545c93199b8e57", ["$(uname>#{touchdir}/uh_oh)"])
+      assert !File.exists?("#{touchdir}/uh_oh"), "#{touchdir}/uh_oh should not exist, 'excludes' parameter of find_commit_range is exploitable"
+      assert_equal nil, a
+
+    end
+
+  end
+
+end
diff --git a/services/api/test/functional/arvados/v1/filters_test.rb b/services/api/test/functional/arvados/v1/filters_test.rb
new file mode 100644 (file)
index 0000000..2e8e231
--- /dev/null
@@ -0,0 +1,16 @@
+require 'test_helper'
+
+class Arvados::V1::FiltersTest < ActionController::TestCase
+  test '"not in" filter passes null values' do
+    @controller = Arvados::V1::GroupsController.new
+    authorize_with :admin
+    get :index, {
+      filters: [ ['group_class', 'not in', ['project']] ],
+      controller: 'groups',
+    }
+    assert_response :success
+    found = assigns(:objects)
+    assert_includes(found.collect(&:group_class), nil,
+                    "'group_class not in ['project']' filter should pass null")
+  end
+end
diff --git a/services/api/test/functional/arvados/v1/groups_controller_test.rb b/services/api/test/functional/arvados/v1/groups_controller_test.rb
new file mode 100644 (file)
index 0000000..c974076
--- /dev/null
@@ -0,0 +1,371 @@
+require 'test_helper'
+
+class Arvados::V1::GroupsControllerTest < ActionController::TestCase
+
+  test "attempt to delete group without read or write access" do
+    authorize_with :active
+    post :destroy, id: groups(:empty_lonely_group).uuid
+    assert_response 404
+  end
+
+  test "attempt to delete group without write access" do
+    authorize_with :active
+    post :destroy, id: groups(:all_users).uuid
+    assert_response 403
+  end
+
+  test "get list of projects" do
+    authorize_with :active
+    get :index, filters: [['group_class', '=', 'project']], format: :json
+    assert_response :success
+    group_uuids = []
+    json_response['items'].each do |group|
+      assert_equal 'project', group['group_class']
+      group_uuids << group['uuid']
+    end
+    assert_includes group_uuids, groups(:aproject).uuid
+    assert_includes group_uuids, groups(:asubproject).uuid
+    assert_not_includes group_uuids, groups(:system_group).uuid
+    assert_not_includes group_uuids, groups(:private).uuid
+  end
+
+  test "get list of groups that are not projects" do
+    authorize_with :active
+    get :index, filters: [['group_class', '!=', 'project']], format: :json
+    assert_response :success
+    group_uuids = []
+    json_response['items'].each do |group|
+      assert_not_equal 'project', group['group_class']
+      group_uuids << group['uuid']
+    end
+    assert_not_includes group_uuids, groups(:aproject).uuid
+    assert_not_includes group_uuids, groups(:asubproject).uuid
+    assert_includes group_uuids, groups(:private).uuid
+  end
+
+  test "get list of groups with bogus group_class" do
+    authorize_with :active
+    get :index, {
+      filters: [['group_class', '=', 'nogrouphasthislittleclass']],
+      format: :json,
+    }
+    assert_response :success
+    assert_equal [], json_response['items']
+    assert_equal 0, json_response['items_available']
+  end
+
+  def check_project_contents_response
+    assert_response :success
+    assert_operator 2, :<=, json_response['items_available']
+    assert_operator 2, :<=, json_response['items'].count
+    kinds = json_response['items'].collect { |i| i['kind'] }.uniq
+    expect_kinds = %w'arvados#group arvados#specimen arvados#pipelineTemplate arvados#job'
+    assert_equal expect_kinds, (expect_kinds & kinds)
+
+    json_response['items'].each do |i|
+      if i['kind'] == 'arvados#group'
+        assert(i['group_class'] == 'project',
+               "group#contents returned a non-project group")
+      end
+    end
+  end
+
+  test 'get group-owned objects' do
+    authorize_with :active
+    get :contents, {
+      id: groups(:aproject).uuid,
+      format: :json,
+      include_linked: true,
+    }
+    check_project_contents_response
+  end
+
+  test "user with project read permission can see project objects" do
+    authorize_with :project_viewer
+    get :contents, {
+      id: groups(:aproject).uuid,
+      format: :json,
+      include_linked: true,
+    }
+    check_project_contents_response
+  end
+
+  test "list objects across projects" do
+    authorize_with :project_viewer
+    get :contents, {
+      format: :json,
+      filters: [['uuid', 'is_a', 'arvados#specimen']]
+    }
+    assert_response :success
+    found_uuids = json_response['items'].collect { |i| i['uuid'] }
+    [[:in_aproject, true],
+     [:in_asubproject, true],
+     [:owned_by_private_group, false]].each do |specimen_fixture, should_find|
+      if should_find
+        assert_includes found_uuids, specimens(specimen_fixture).uuid, "did not find specimen fixture '#{specimen_fixture}'"
+      else
+        refute_includes found_uuids, specimens(specimen_fixture).uuid, "found specimen fixture '#{specimen_fixture}'"
+      end
+    end
+  end
+
+  test "list objects in home project" do
+    authorize_with :active
+    get :contents, {
+      format: :json,
+      id: users(:active).uuid
+    }
+    assert_response :success
+    found_uuids = json_response['items'].collect { |i| i['uuid'] }
+    assert_includes found_uuids, specimens(:owned_by_active_user).uuid, "specimen did not appear in home project"
+    refute_includes found_uuids, specimens(:in_asubproject).uuid, "specimen appeared unexpectedly in home project"
+  end
+
+  test "user with project read permission can see project collections" do
+    authorize_with :project_viewer
+    get :contents, {
+      id: groups(:asubproject).uuid,
+      format: :json,
+    }
+    ids = json_response['items'].map { |item| item["uuid"] }
+    assert_includes ids, collections(:baz_file_in_asubproject).uuid
+  end
+
+  [['asc', :<=],
+   ['desc', :>=]].each do |order, operator|
+    test "user with project read permission can sort project collections #{order}" do
+      authorize_with :project_viewer
+      get :contents, {
+        id: groups(:asubproject).uuid,
+        format: :json,
+        filters: [['uuid', 'is_a', "arvados#collection"]],
+        order: "collections.name #{order}"
+      }
+      sorted_names = json_response['items'].collect { |item| item["name"] }
+      # Here we avoid assuming too much about the database
+      # collation. Both "alice"<"Bob" and "alice">"Bob" can be
+      # correct. Hopefully it _is_ safe to assume that if "a" comes
+      # before "b" in the ascii alphabet, "aX">"bY" is never true for
+      # any strings X and Y.
+      reliably_sortable_names = sorted_names.select do |name|
+        name[0] >= 'a' and name[0] <= 'z'
+      end.uniq do |name|
+        name[0]
+      end
+      # Preserve order of sorted_names. But do not use &=. If
+      # sorted_names has out-of-order duplicates, we want to preserve
+      # them here, so we can detect them and fail the test below.
+      sorted_names.select! do |name|
+        reliably_sortable_names.include? name
+      end
+      actually_checked_anything = false
+      previous = nil
+      sorted_names.each do |entry|
+        if previous
+          assert_operator(previous, operator, entry,
+                          "Entries sorted incorrectly.")
+          actually_checked_anything = true
+        end
+        previous = entry
+      end
+      assert actually_checked_anything, "Didn't even find two names to compare."
+    end
+  end
+
+  test 'list objects across multiple projects' do
+    authorize_with :project_viewer
+    get :contents, {
+      format: :json,
+      include_linked: false,
+      filters: [['uuid', 'is_a', 'arvados#specimen']]
+    }
+    assert_response :success
+    found_uuids = json_response['items'].collect { |i| i['uuid'] }
+    [[:in_aproject, true],
+     [:in_asubproject, true],
+     [:owned_by_private_group, false]].each do |specimen_fixture, should_find|
+      if should_find
+        assert_includes found_uuids, specimens(specimen_fixture).uuid, "did not find specimen fixture '#{specimen_fixture}'"
+      else
+        refute_includes found_uuids, specimens(specimen_fixture).uuid, "found specimen fixture '#{specimen_fixture}'"
+      end
+    end
+  end
+
+  # Even though the project_viewer tests go through other controllers,
+  # I'm putting them here so they're easy to find alongside the other
+  # project tests.
+  def check_new_project_link_fails(link_attrs)
+    @controller = Arvados::V1::LinksController.new
+    post :create, link: {
+      link_class: "permission",
+      name: "can_read",
+      head_uuid: groups(:aproject).uuid,
+    }.merge(link_attrs)
+    assert_includes(403..422, response.status)
+  end
+
+  test "user with project read permission can't add users to it" do
+    authorize_with :project_viewer
+    check_new_project_link_fails(tail_uuid: users(:spectator).uuid)
+  end
+
+  test "user with project read permission can't add items to it" do
+    authorize_with :project_viewer
+    check_new_project_link_fails(tail_uuid: collections(:baz_file).uuid)
+  end
+
+  test "user with project read permission can't rename items in it" do
+    authorize_with :project_viewer
+    @controller = Arvados::V1::LinksController.new
+    post :update, {
+      id: jobs(:running).uuid,
+      name: "Denied test name",
+    }
+    assert_includes(403..404, response.status)
+  end
+
+  test "user with project read permission can't remove items from it" do
+    @controller = Arvados::V1::PipelineTemplatesController.new
+    authorize_with :project_viewer
+    post :update, {
+      id: pipeline_templates(:two_part).uuid,
+      pipeline_template: {
+        owner_uuid: users(:project_viewer).uuid,
+      }
+    }
+    assert_response 403
+  end
+
+  test "user with project read permission can't delete it" do
+    authorize_with :project_viewer
+    post :destroy, {id: groups(:aproject).uuid}
+    assert_response 403
+  end
+
+  test 'get group-owned objects with limit' do
+    authorize_with :active
+    get :contents, {
+      id: groups(:aproject).uuid,
+      limit: 1,
+      format: :json,
+    }
+    assert_response :success
+    assert_operator 1, :<, json_response['items_available']
+    assert_equal 1, json_response['items'].count
+  end
+
+  test 'get group-owned objects with limit and offset' do
+    authorize_with :active
+    get :contents, {
+      id: groups(:aproject).uuid,
+      limit: 1,
+      offset: 12345,
+      format: :json,
+    }
+    assert_response :success
+    assert_operator 1, :<, json_response['items_available']
+    assert_equal 0, json_response['items'].count
+  end
+
+  test 'get group-owned objects with additional filter matching nothing' do
+    authorize_with :active
+    get :contents, {
+      id: groups(:aproject).uuid,
+      filters: [['uuid', 'in', ['foo_not_a_uuid','bar_not_a_uuid']]],
+      format: :json,
+    }
+    assert_response :success
+    assert_equal [], json_response['items']
+    assert_equal 0, json_response['items_available']
+  end
+
+  %w(offset limit).each do |arg|
+    ['foo', '', '1234five', '0x10', '-8'].each do |val|
+      test "Raise error on bogus #{arg} parameter #{val.inspect}" do
+        authorize_with :active
+        get :contents, {
+          :id => groups(:aproject).uuid,
+          :format => :json,
+          arg => val,
+        }
+        assert_response 422
+      end
+    end
+  end
+
+  test 'get writable_by list for owned group' do
+    authorize_with :active
+    get :show, {
+      id: groups(:aproject).uuid,
+      format: :json
+    }
+    assert_response :success
+    assert_not_nil(json_response['writable_by'],
+                   "Should receive uuid list in 'writable_by' field")
+    assert_includes(json_response['writable_by'], users(:active).uuid,
+                    "owner should be included in writable_by list")
+  end
+
+  test 'no writable_by list for group with read-only access' do
+    authorize_with :rominiadmin
+    get :show, {
+      id: groups(:testusergroup_admins).uuid,
+      format: :json
+    }
+    assert_response :success
+    assert_equal([json_response['owner_uuid']],
+                 json_response['writable_by'],
+                 "Should only see owner_uuid in 'writable_by' field")
+  end
+
+  test 'get writable_by list by admin user' do
+    authorize_with :admin
+    get :show, {
+      id: groups(:testusergroup_admins).uuid,
+      format: :json
+    }
+    assert_response :success
+    assert_not_nil(json_response['writable_by'],
+                   "Should receive uuid list in 'writable_by' field")
+    assert_includes(json_response['writable_by'],
+                    users(:admin).uuid,
+                    "Current user should be included in 'writable_by' field")
+  end
+
+  test 'creating subproject with duplicate name fails' do
+    authorize_with :active
+    post :create, {
+      group: {
+        name: 'A Project',
+        owner_uuid: users(:active).uuid,
+        group_class: 'project',
+      },
+    }
+    assert_response 422
+    response_errors = json_response['errors']
+    assert_not_nil response_errors, 'Expected error in response'
+    assert(response_errors.first.include?('duplicate key'),
+           "Expected 'duplicate key' error in #{response_errors.first}")
+  end
+
+  test 'creating duplicate named subproject succeeds with ensure_unique_name' do
+    authorize_with :active
+    post :create, {
+      group: {
+        name: 'A Project',
+        owner_uuid: users(:active).uuid,
+        group_class: 'project',
+      },
+      ensure_unique_name: true
+    }
+    assert_response :success
+    new_project = json_response
+    assert_not_equal(new_project['uuid'],
+                     groups(:aproject).uuid,
+                     "create returned same uuid as existing project")
+    assert_equal(new_project['name'],
+                 'A Project (2)',
+                 "new project name '#{new_project['name']}' was expected to be 'A Project (2)'")
+  end
+end
diff --git a/services/api/test/functional/arvados/v1/humans_controller_test.rb b/services/api/test/functional/arvados/v1/humans_controller_test.rb
new file mode 100644 (file)
index 0000000..8eec04f
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class Arvados::V1::HumansControllerTest < ActionController::TestCase
+end
diff --git a/services/api/test/functional/arvados/v1/job_reuse_controller_test.rb b/services/api/test/functional/arvados/v1/job_reuse_controller_test.rb
new file mode 100644 (file)
index 0000000..9b66851
--- /dev/null
@@ -0,0 +1,704 @@
+require 'test_helper'
+require 'helpers/git_test_helper'
+
+class Arvados::V1::JobReuseControllerTest < ActionController::TestCase
+  fixtures :repositories, :users, :jobs, :links, :collections
+
+  # See git_setup.rb for the commit log for test.git.tar
+  include GitTestHelper
+
+  setup do
+    @controller = Arvados::V1::JobsController.new
+    authorize_with :active
+  end
+
+  test "reuse job with no_reuse=false" do
+    post :create, job: {
+      no_reuse: false,
+      script: "hash",
+      script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+      repository: "foo",
+      script_parameters: {
+        input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+        an_integer: '1'
+      }
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+  end
+
+  test "reuse job with find_or_create=true" do
+    post :create, {
+      job: {
+        script: "hash",
+        script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+        repository: "foo",
+        script_parameters: {
+          input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+          an_integer: '1'
+        }
+      },
+      find_or_create: true
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+  end
+
+  test "reuse job with symbolic script_version" do
+    post :create, {
+      job: {
+        script: "hash",
+        script_version: "tag1",
+        repository: "foo",
+        script_parameters: {
+          input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+          an_integer: '1'
+        }
+      },
+      find_or_create: true
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+  end
+
+  test "do not reuse job because no_reuse=true" do
+    post :create, {
+      job: {
+        no_reuse: true,
+        script: "hash",
+        script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+        repository: "foo",
+        script_parameters: {
+          input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+          an_integer: '1'
+        }
+      }
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+  end
+
+  [false, "false"].each do |whichfalse|
+    test "do not reuse job because find_or_create=#{whichfalse.inspect}" do
+      post :create, {
+        job: {
+          script: "hash",
+          script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+          repository: "foo",
+          script_parameters: {
+            input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+            an_integer: '1'
+          }
+        },
+        find_or_create: whichfalse
+      }
+      assert_response :success
+      assert_not_nil assigns(:object)
+      new_job = JSON.parse(@response.body)
+      assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+      assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+    end
+  end
+
+  test "do not reuse job because output is not readable by user" do
+    authorize_with :job_reader
+    post :create, {
+      job: {
+        script: "hash",
+        script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+        repository: "foo",
+        script_parameters: {
+          input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+          an_integer: '1'
+        }
+      },
+      find_or_create: true
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+  end
+
+  test "test_cannot_reuse_job_no_output" do
+    post :create, job: {
+      no_reuse: false,
+      script: "hash",
+      script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+      repository: "foo",
+      script_parameters: {
+        input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+        an_integer: '2'
+      }
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykppp', new_job['uuid']
+  end
+
+  test "test_reuse_job_range" do
+    post :create, job: {
+      no_reuse: false,
+      script: "hash",
+      minimum_script_version: "tag1",
+      script_version: "master",
+      repository: "foo",
+      script_parameters: {
+        input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+        an_integer: '1'
+      }
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+  end
+
+  test "cannot_reuse_job_no_minimum_given_so_must_use_specified_commit" do
+    post :create, job: {
+      no_reuse: false,
+      script: "hash",
+      script_version: "master",
+      repository: "foo",
+      script_parameters: {
+        input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+        an_integer: '1'
+      }
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_equal '077ba2ad3ea24a929091a9e6ce545c93199b8e57', new_job['script_version']
+  end
+
+  test "test_cannot_reuse_job_different_input" do
+    post :create, job: {
+      no_reuse: false,
+      script: "hash",
+      script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+      repository: "foo",
+      script_parameters: {
+        input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+        an_integer: '2'
+      }
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+  end
+
+  test "test_cannot_reuse_job_different_version" do
+    post :create, job: {
+      no_reuse: false,
+      script: "hash",
+      script_version: "master",
+      repository: "foo",
+      script_parameters: {
+        input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+        an_integer: '2'
+      }
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_equal '077ba2ad3ea24a929091a9e6ce545c93199b8e57', new_job['script_version']
+  end
+
+  test "test_can_reuse_job_submitted_nondeterministic" do
+    post :create, job: {
+      no_reuse: false,
+      script: "hash",
+      script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+      repository: "foo",
+      script_parameters: {
+        input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+        an_integer: '1'
+      },
+      nondeterministic: true
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+  end
+
+  test "test_cannot_reuse_job_past_nondeterministic" do
+    post :create, job: {
+      no_reuse: false,
+      script: "hash2",
+      script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+      repository: "foo",
+      script_parameters: {
+        input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+        an_integer: '1'
+      }
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykyyy', new_job['uuid']
+    assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+  end
+
+  test "test_cannot_reuse_job_no_permission" do
+    authorize_with :spectator
+    post :create, job: {
+      no_reuse: false,
+      script: "hash",
+      script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+      repository: "foo",
+      script_parameters: {
+        input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+        an_integer: '1'
+      }
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+  end
+
+  test "test_cannot_reuse_job_excluded" do
+    post :create, job: {
+      no_reuse: false,
+      script: "hash",
+      minimum_script_version: "31ce37fe365b3dc204300a3e4c396ad333ed0556",
+      script_version: "master",
+      repository: "foo",
+      exclude_script_versions: ["tag1"],
+      script_parameters: {
+        input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+        an_integer: '1'
+      }
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_not_equal('4fe459abe02d9b365932b8f5dc419439ab4e2577',
+                     new_job['script_version'])
+  end
+
+  test "cannot reuse job with find_or_create but excluded version" do
+    post :create, {
+      job: {
+        script: "hash",
+        script_version: "master",
+        repository: "foo",
+        script_parameters: {
+          input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+          an_integer: '1'
+        }
+      },
+      find_or_create: true,
+      minimum_script_version: "31ce37fe365b3dc204300a3e4c396ad333ed0556",
+      exclude_script_versions: ["tag1"],
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_not_equal('4fe459abe02d9b365932b8f5dc419439ab4e2577',
+                     new_job['script_version'])
+  end
+
+  BASE_FILTERS = {
+    'repository' => ['=', 'foo'],
+    'script' => ['=', 'hash'],
+    'script_version' => ['in git', 'master'],
+    'docker_image_locator' => ['=', nil],
+    'arvados_sdk_version' => ['=', nil],
+  }
+
+  def filters_from_hash(hash)
+    hash.each_pair.map { |name, filter| [name] + filter }
+  end
+
+  test "can reuse a Job based on filters" do
+    filters_hash = BASE_FILTERS.
+      merge('script_version' => ['in git', 'tag1'])
+    post(:create, {
+           job: {
+             script: "hash",
+             script_version: "master",
+             repository: "foo",
+             script_parameters: {
+               input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+               an_integer: '1'
+             }
+           },
+           filters: filters_from_hash(filters_hash),
+           find_or_create: true,
+         })
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+  end
+
+  test "can not reuse a Job based on filters" do
+    filters = filters_from_hash(BASE_FILTERS
+                                  .reject { |k| k == 'script_version' })
+    filters += [["script_version", "in git",
+                 "31ce37fe365b3dc204300a3e4c396ad333ed0556"],
+                ["script_version", "not in git", ["tag1"]]]
+    post(:create, {
+           job: {
+             script: "hash",
+             script_version: "master",
+             repository: "foo",
+             script_parameters: {
+               input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+               an_integer: '1'
+             }
+           },
+           filters: filters,
+           find_or_create: true,
+         })
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_equal '077ba2ad3ea24a929091a9e6ce545c93199b8e57', new_job['script_version']
+  end
+
+  test "can not reuse a Job based on arbitrary filters" do
+    filters_hash = BASE_FILTERS.
+      merge("created_at" => ["<", "2010-01-01T00:00:00Z"])
+    post(:create, {
+           job: {
+             script: "hash",
+             script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+             repository: "foo",
+             script_parameters: {
+               input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+               an_integer: '1'
+             }
+           },
+           filters: filters_from_hash(filters_hash),
+           find_or_create: true,
+         })
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_not_equal 'zzzzz-8i9sb-cjs4pklxxjykqqq', new_job['uuid']
+    assert_equal '4fe459abe02d9b365932b8f5dc419439ab4e2577', new_job['script_version']
+  end
+
+  test "can reuse a Job with a Docker image" do
+    post(:create, {
+           job: {
+             script: "hash",
+             script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+             repository: "foo",
+             script_parameters: {
+               input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+               an_integer: '1'
+             },
+             runtime_constraints: {
+               docker_image: 'arvados/apitestfixture',
+             }
+           },
+           find_or_create: true,
+         })
+    assert_response :success
+    new_job = assigns(:object)
+    assert_not_nil new_job
+    target_job = jobs(:previous_docker_job_run)
+    [:uuid, :script_version, :docker_image_locator].each do |attr|
+      assert_equal(target_job.send(attr), new_job.send(attr))
+    end
+  end
+
+  test "can reuse a Job with a Docker image hash filter" do
+    filters_hash = BASE_FILTERS.
+      merge("script_version" =>
+              ["=", "4fe459abe02d9b365932b8f5dc419439ab4e2577"],
+            "docker_image_locator" =>
+              ["in docker", links(:docker_image_collection_hash).name])
+    post(:create, {
+           job: {
+             script: "hash",
+             script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+             repository: "foo",
+             script_parameters: {
+               input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+               an_integer: '1'
+             },
+           },
+           filters: filters_from_hash(filters_hash),
+           find_or_create: true,
+         })
+    assert_response :success
+    new_job = assigns(:object)
+    assert_not_nil new_job
+    target_job = jobs(:previous_docker_job_run)
+    [:uuid, :script_version, :docker_image_locator].each do |attr|
+      assert_equal(target_job.send(attr), new_job.send(attr))
+    end
+  end
+
+  test "reuse Job with Docker image repo+tag" do
+    filters_hash = BASE_FILTERS.
+      merge("script_version" =>
+              ["=", "4fe459abe02d9b365932b8f5dc419439ab4e2577"],
+            "docker_image_locator" =>
+              ["in docker", links(:docker_image_collection_tag2).name])
+    post(:create, {
+           job: {
+             script: "hash",
+             script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+             repository: "foo",
+             script_parameters: {
+               input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+               an_integer: '1'
+             },
+           },
+           filters: filters_from_hash(filters_hash),
+           find_or_create: true,
+         })
+    assert_response :success
+    new_job = assigns(:object)
+    assert_not_nil new_job
+    target_job = jobs(:previous_docker_job_run)
+    [:uuid, :script_version, :docker_image_locator].each do |attr|
+      assert_equal(target_job.send(attr), new_job.send(attr))
+    end
+  end
+
+  test "new job with unknown Docker image filter" do
+    filters_hash = BASE_FILTERS.
+      merge("docker_image_locator" => ["in docker", "_nonesuchname_"])
+    post(:create, {
+           job: {
+             script: "hash",
+             script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+             repository: "foo",
+             script_parameters: {
+               input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+               an_integer: '1'
+             },
+           },
+           filters: filters_from_hash(filters_hash),
+           find_or_create: true,
+         })
+    assert_response :success
+    new_job = assigns(:object)
+    assert_not_nil new_job
+    assert_not_equal(jobs(:previous_docker_job_run).uuid, new_job.uuid)
+  end
+
+  ["repository", "script"].each do |skip_key|
+    test "missing #{skip_key} filter raises an error" do
+      filters = filters_from_hash(BASE_FILTERS.reject { |k| k == skip_key })
+      post(:create, {
+             job: {
+               script: "hash",
+               script_version: "master",
+               repository: "foo",
+               script_parameters: {
+                 input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+                 an_integer: '1'
+               }
+             },
+             filters: filters,
+             find_or_create: true,
+           })
+      assert_includes(405..599, @response.code.to_i,
+                      "bad status code with missing #{skip_key} filter")
+    end
+  end
+
+  test "find Job with script version range" do
+    get :index, filters: [["repository", "=", "foo"],
+                          ["script", "=", "hash"],
+                          ["script_version", "in git", "tag1"]]
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    assert_includes(assigns(:objects).map { |job| job.uuid },
+                    jobs(:previous_job_run).uuid)
+  end
+
+  test "find Job with script version range exclusions" do
+    get :index, filters: [["repository", "=", "foo"],
+                          ["script", "=", "hash"],
+                          ["script_version", "not in git", "tag1"]]
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    refute_includes(assigns(:objects).map { |job| job.uuid },
+                    jobs(:previous_job_run).uuid)
+  end
+
+  test "find Job with Docker image range" do
+    get :index, filters: [["docker_image_locator", "in docker",
+                           "arvados/apitestfixture"]]
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    assert_includes(assigns(:objects).map { |job| job.uuid },
+                    jobs(:previous_docker_job_run).uuid)
+    refute_includes(assigns(:objects).map { |job| job.uuid },
+                    jobs(:previous_job_run).uuid)
+  end
+
+  test "find Job with Docker image using reader tokens" do
+    authorize_with :inactive
+    get(:index, {
+          filters: [["docker_image_locator", "in docker",
+                     "arvados/apitestfixture"]],
+          reader_tokens: [api_token(:active)],
+        })
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    assert_includes(assigns(:objects).map { |job| job.uuid },
+                    jobs(:previous_docker_job_run).uuid)
+    refute_includes(assigns(:objects).map { |job| job.uuid },
+                    jobs(:previous_job_run).uuid)
+  end
+
+  test "'in docker' filter accepts arrays" do
+    get :index, filters: [["docker_image_locator", "in docker",
+                           ["_nonesuchname_", "arvados/apitestfixture"]]]
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    assert_includes(assigns(:objects).map { |job| job.uuid },
+                    jobs(:previous_docker_job_run).uuid)
+    refute_includes(assigns(:objects).map { |job| job.uuid },
+                    jobs(:previous_job_run).uuid)
+  end
+
+  test "'not in docker' filter accepts arrays" do
+    get :index, filters: [["docker_image_locator", "not in docker",
+                           ["_nonesuchname_", "arvados/apitestfixture"]]]
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    assert_includes(assigns(:objects).map { |job| job.uuid },
+                    jobs(:previous_job_run).uuid)
+    refute_includes(assigns(:objects).map { |job| job.uuid },
+                    jobs(:previous_docker_job_run).uuid)
+  end
+
+  def create_foo_hash_job_params(params)
+    if not params.has_key?(:find_or_create)
+      params[:find_or_create] = true
+    end
+    job_attrs = params.delete(:job) || {}
+    params[:job] = {
+      script: "hash",
+      script_version: "4fe459abe02d9b365932b8f5dc419439ab4e2577",
+      repository: "foo",
+      script_parameters: {
+        input: 'fa7aeb5140e2848d39b416daeef4ffc5+45',
+        an_integer: '1',
+      },
+    }.merge(job_attrs)
+    params
+  end
+
+  def check_new_job_created_from(params)
+    start_time = Time.now
+    post(:create, create_foo_hash_job_params(params))
+    assert_response :success
+    new_job = assigns(:object)
+    assert_not_nil new_job
+    assert_operator(start_time, :<=, new_job.created_at)
+    new_job
+  end
+
+  def check_errors_from(params)
+    post(:create, create_foo_hash_job_params(params))
+    assert_includes(405..499, @response.code.to_i)
+    errors = json_response.fetch("errors", [])
+    assert(errors.any?, "no errors assigned from #{params}")
+    refute(errors.any? { |msg| msg =~ /^#<[A-Za-z]+: / },
+           "errors include raw exception")
+    errors
+  end
+
+  # 1de84a8 is on the b1 branch, after master's tip.
+  test "new job created from unsatisfiable minimum version filter" do
+    filters_hash = BASE_FILTERS.merge("script_version" => ["in git", "1de84a8"])
+    check_new_job_created_from(filters: filters_from_hash(filters_hash))
+  end
+
+  test "new job created from unsatisfiable minimum version parameter" do
+    check_new_job_created_from(minimum_script_version: "1de84a8")
+  end
+
+  test "new job created from unsatisfiable minimum version attribute" do
+    check_new_job_created_from(job: {minimum_script_version: "1de84a8"})
+  end
+
+  test "graceful error from nonexistent minimum version filter" do
+    filters_hash = BASE_FILTERS.merge("script_version" =>
+                                      ["in git", "__nosuchbranch__"])
+    errors = check_errors_from(filters: filters_from_hash(filters_hash))
+    assert(errors.any? { |msg| msg.include? "__nosuchbranch__" },
+           "bad refspec not mentioned in error message")
+  end
+
+  test "graceful error from nonexistent minimum version parameter" do
+    errors = check_errors_from(minimum_script_version: "__nosuchbranch__")
+    assert(errors.any? { |msg| msg.include? "__nosuchbranch__" },
+           "bad refspec not mentioned in error message")
+  end
+
+  test "graceful error from nonexistent minimum version attribute" do
+    errors = check_errors_from(job: {minimum_script_version: "__nosuchbranch__"})
+    assert(errors.any? { |msg| msg.include? "__nosuchbranch__" },
+           "bad refspec not mentioned in error message")
+  end
+
+  test "can't reuse job with older Arvados SDK version" do
+    params = {
+      script_version: "31ce37fe365b3dc204300a3e4c396ad333ed0556",
+      runtime_constraints: {
+        "arvados_sdk_version" => "master",
+        "docker_image" => links(:docker_image_collection_tag).name,
+      },
+    }
+    check_new_job_created_from(job: params)
+  end
+
+  test "reuse job from arvados_sdk_version git filters" do
+    filters_hash = BASE_FILTERS.
+      merge("arvados_sdk_version" => ["in git", "commit2"])
+    filters_hash.delete("script_version")
+    params = create_foo_hash_job_params(filters:
+                                        filters_from_hash(filters_hash))
+    post(:create, params)
+    assert_response :success
+    assert_equal(jobs(:previous_job_run_with_arvados_sdk_version).uuid,
+                 assigns(:object).uuid)
+  end
+
+  test "create new job because of arvados_sdk_version 'not in git' filters" do
+    filters_hash = BASE_FILTERS.reject { |k| k == "script_version" }
+    filters = filters_from_hash(filters_hash)
+    # Allow anything from the root commit, but before commit 2.
+    filters += [["arvados_sdk_version", "in git", "436637c8"],
+                ["arvados_sdk_version", "not in git", "00634b2b"]]
+    check_new_job_created_from(filters: filters)
+  end
+end
diff --git a/services/api/test/functional/arvados/v1/job_tasks_controller_test.rb b/services/api/test/functional/arvados/v1/job_tasks_controller_test.rb
new file mode 100644 (file)
index 0000000..44a4c07
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class Arvados::V1::JobTasksControllerTest < ActionController::TestCase
+end
diff --git a/services/api/test/functional/arvados/v1/jobs_controller_test.rb b/services/api/test/functional/arvados/v1/jobs_controller_test.rb
new file mode 100644 (file)
index 0000000..07e7f84
--- /dev/null
@@ -0,0 +1,395 @@
+require 'test_helper'
+require 'helpers/git_test_helper'
+
+class Arvados::V1::JobsControllerTest < ActionController::TestCase
+
+  include GitTestHelper
+
+  test "submit a job" do
+    authorize_with :active
+    post :create, job: {
+      script: "hash",
+      script_version: "master",
+      repository: "foo",
+      script_parameters: {}
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = JSON.parse(@response.body)
+    assert_not_nil new_job['uuid']
+    assert_not_nil new_job['script_version'].match(/^[0-9a-f]{40}$/)
+    assert_equal 0, new_job['priority']
+  end
+
+  test "normalize output and log uuids when creating job" do
+    authorize_with :active
+    post :create, job: {
+      script: "hash",
+      script_version: "master",
+      script_parameters: {},
+      repository: "foo",
+      started_at: Time.now,
+      finished_at: Time.now,
+      running: false,
+      success: true,
+      output: 'd41d8cd98f00b204e9800998ecf8427e+0+K@xyzzy',
+      log: 'd41d8cd98f00b204e9800998ecf8427e+0+K@xyzzy'
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_job = assigns(:object)
+    assert_equal 'd41d8cd98f00b204e9800998ecf8427e+0', new_job['log']
+    assert_equal 'd41d8cd98f00b204e9800998ecf8427e+0', new_job['output']
+    version = new_job['script_version']
+
+    # Make sure version doesn't get mangled by normalize
+    assert_not_nil version.match(/^[0-9a-f]{40}$/)
+    assert_equal 'master', json_response['supplied_script_version']
+  end
+
+  test "normalize output and log uuids when updating job" do
+    authorize_with :active
+
+    foobar_job = jobs(:foobar)
+
+    new_output = 'd41d8cd98f00b204e9800998ecf8427e+0+K@xyzzy'
+    new_log = 'd41d8cd98f00b204e9800998ecf8427e+0+K@xyzzy'
+    put :update, {
+      id: foobar_job['uuid'],
+      job: {
+        output: new_output,
+        log: new_log
+      }
+    }
+
+    updated_job = json_response
+    assert_not_equal foobar_job['log'], updated_job['log']
+    assert_not_equal new_log, updated_job['log']  # normalized during update
+    assert_equal new_log[0,new_log.rindex('+')], updated_job['log']
+    assert_not_equal foobar_job['output'], updated_job['output']
+    assert_not_equal new_output, updated_job['output']  # normalized during update
+    assert_equal new_output[0,new_output.rindex('+')], updated_job['output']
+  end
+
+  test "cancel a running job" do
+    # We need to verify that "cancel" creates a trigger file, so first
+    # let's make sure there is no stale trigger file.
+    begin
+      File.unlink(Rails.configuration.crunch_refresh_trigger)
+    rescue Errno::ENOENT
+    end
+
+    authorize_with :active
+    put :update, {
+      id: jobs(:running).uuid,
+      job: {
+        cancelled_at: 4.day.ago
+      }
+    }
+    assert_response :success
+    assert_not_nil assigns(:object)
+    job = JSON.parse(@response.body)
+    assert_not_nil job['uuid']
+    assert_not_nil job['cancelled_at']
+    assert_not_nil job['cancelled_by_user_uuid']
+    assert_not_nil job['cancelled_by_client_uuid']
+    assert_equal(true, Time.parse(job['cancelled_at']) > 1.minute.ago,
+                 'server should correct bogus cancelled_at ' +
+                 job['cancelled_at'])
+    assert_equal(true,
+                 File.exist?(Rails.configuration.crunch_refresh_trigger),
+                 'trigger file should be created when job is cancelled')
+  end
+
+  [
+   [:put, :update, {job:{cancelled_at: Time.now}}, :success],
+   [:put, :update, {job:{cancelled_at: nil}}, :unprocessable_entity],
+   [:put, :update, {job:{state: 'Cancelled'}}, :success],
+   [:put, :update, {job:{state: 'Queued'}}, :unprocessable_entity],
+   [:put, :update, {job:{state: 'Running'}}, :unprocessable_entity],
+   [:put, :update, {job:{state: 'Failed'}}, :unprocessable_entity],
+   [:put, :update, {job:{state: 'Complete'}}, :unprocessable_entity],
+   [:post, :cancel, {}, :success],
+  ].each do |http_method, action, params, expected_response|
+    test "cancelled job stays cancelled after #{[http_method, action, params].inspect}" do
+      # We need to verify that "cancel" creates a trigger file, so first
+      # let's make sure there is no stale trigger file.
+      begin
+        File.unlink(Rails.configuration.crunch_refresh_trigger)
+      rescue Errno::ENOENT
+      end
+
+      authorize_with :active
+      self.send http_method, action, { id: jobs(:cancelled).uuid }.merge(params)
+      assert_response expected_response
+      if expected_response == :success
+        job = json_response
+        assert_not_nil job['cancelled_at'], "job cancelled again using #{params.inspect} did not have cancelled_at value"
+        assert_equal 'Cancelled', job['state'], "cancelled job state changed when updated using #{params.inspect}"
+      end
+      # Verify database record still says Cancelled
+      assert_equal 'Cancelled', Job.find(jobs(:cancelled).id).state, 'job was un-cancelled'
+    end
+  end
+
+  test "cancelled job updated to any other state change results in error" do
+    # We need to verify that "cancel" creates a trigger file, so first
+    # let's make sure there is no stale trigger file.
+    begin
+      File.unlink(Rails.configuration.crunch_refresh_trigger)
+    rescue Errno::ENOENT
+    end
+
+    authorize_with :active
+    put :update, {
+      id: jobs(:running_cancelled).uuid,
+      job: {
+        cancelled_at: nil
+      }
+    }
+    assert_response 422
+  end
+
+  ['abc.py', 'hash.py'].each do |script|
+    test "update job script attribute to #{script} without failing script_version check" do
+      authorize_with :admin
+      put :update, {
+        id: jobs(:uses_nonexistent_script_version).uuid,
+        job: {
+          script: script
+        }
+      }
+      assert_response :success
+      resp = assigns(:object)
+      assert_equal jobs(:uses_nonexistent_script_version).script_version, resp['script_version']
+    end
+  end
+
+  test "search jobs by uuid with >= query" do
+    authorize_with :active
+    get :index, {
+      filters: [['uuid', '>=', 'zzzzz-8i9sb-pshmckwoma9plh7']]
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:uuid)
+    assert_equal true, !!found.index('zzzzz-8i9sb-pshmckwoma9plh7')
+    assert_equal false, !!found.index('zzzzz-8i9sb-4cf0nhn6xte809j')
+  end
+
+  test "search jobs by uuid with <= query" do
+    authorize_with :active
+    get :index, {
+      filters: [['uuid', '<=', 'zzzzz-8i9sb-pshmckwoma9plh7']]
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:uuid)
+    assert_equal true, !!found.index('zzzzz-8i9sb-pshmckwoma9plh7')
+    assert_equal true, !!found.index('zzzzz-8i9sb-4cf0nhn6xte809j')
+  end
+
+  test "search jobs by uuid with >= and <= query" do
+    authorize_with :active
+    get :index, {
+      filters: [['uuid', '>=', 'zzzzz-8i9sb-pshmckwoma9plh7'],
+              ['uuid', '<=', 'zzzzz-8i9sb-pshmckwoma9plh7']]
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:uuid)
+    assert_equal found, ['zzzzz-8i9sb-pshmckwoma9plh7']
+  end
+
+  test "search jobs by uuid with < query" do
+    authorize_with :active
+    get :index, {
+      filters: [['uuid', '<', 'zzzzz-8i9sb-pshmckwoma9plh7']]
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:uuid)
+    assert_equal false, !!found.index('zzzzz-8i9sb-pshmckwoma9plh7')
+    assert_equal true, !!found.index('zzzzz-8i9sb-4cf0nhn6xte809j')
+  end
+
+  test "search jobs by uuid with like query" do
+    authorize_with :active
+    get :index, {
+      filters: [['uuid', 'like', '%hmckwoma9pl%']]
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:uuid)
+    assert_equal found, ['zzzzz-8i9sb-pshmckwoma9plh7']
+  end
+
+  test "search jobs by uuid with 'in' query" do
+    authorize_with :active
+    get :index, {
+      filters: [['uuid', 'in', ['zzzzz-8i9sb-4cf0nhn6xte809j',
+                                'zzzzz-8i9sb-pshmckwoma9plh7']]]
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:uuid)
+    assert_equal found.sort, ['zzzzz-8i9sb-4cf0nhn6xte809j',
+                              'zzzzz-8i9sb-pshmckwoma9plh7']
+  end
+
+  test "search jobs by uuid with 'not in' query" do
+    exclude_uuids = [jobs(:running).uuid,
+                     jobs(:running_cancelled).uuid]
+    authorize_with :active
+    get :index, {
+      filters: [['uuid', 'not in', exclude_uuids]]
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:uuid)
+    assert_not_empty found, "'not in' query returned nothing"
+    assert_empty(found & exclude_uuids,
+                 "'not in' query returned uuids I asked not to get")
+  end
+
+  ['=', '!='].each do |operator|
+    [['uuid', 'zzzzz-8i9sb-pshmckwoma9plh7'],
+     ['output', nil]].each do |attr, operand|
+      test "search jobs with #{attr} #{operator} #{operand.inspect} query" do
+        authorize_with :active
+        get :index, {
+          filters: [[attr, operator, operand]]
+        }
+        assert_response :success
+        values = assigns(:objects).collect { |x| x.send(attr) }
+        assert_not_empty values, "query should return non-empty result"
+        if operator == '='
+          assert_empty values - [operand], "query results do not satisfy query"
+        else
+          assert_empty values & [operand], "query results do not satisfy query"
+        end
+      end
+    end
+  end
+
+  test "search jobs by started_at with < query" do
+    authorize_with :active
+    get :index, {
+      filters: [['started_at', '<', Time.now.to_s]]
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:uuid)
+    assert_equal true, !!found.index('zzzzz-8i9sb-pshmckwoma9plh7')
+  end
+
+  test "search jobs by started_at with > query" do
+    authorize_with :active
+    get :index, {
+      filters: [['started_at', '>', Time.now.to_s]]
+    }
+    assert_response :success
+    assert_equal 0, assigns(:objects).count
+  end
+
+  test "search jobs by started_at with >= query on metric date" do
+    authorize_with :active
+    get :index, {
+      filters: [['started_at', '>=', '2014-01-01']]
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:uuid)
+    assert_equal true, !!found.index('zzzzz-8i9sb-pshmckwoma9plh7')
+  end
+
+  test "search jobs by started_at with >= query on metric date and time" do
+    authorize_with :active
+    get :index, {
+      filters: [['started_at', '>=', '2014-01-01 01:23:45']]
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:uuid)
+    assert_equal true, !!found.index('zzzzz-8i9sb-pshmckwoma9plh7')
+  end
+
+  test "search jobs with 'any' operator" do
+    authorize_with :active
+    get :index, {
+      where: { any: ['contains', 'pshmckw'] }
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:uuid)
+    assert_equal 0, found.index('zzzzz-8i9sb-pshmckwoma9plh7')
+    assert_equal 1, found.count
+  end
+
+  test "search jobs by nonexistent column with < query" do
+    authorize_with :active
+    get :index, {
+      filters: [['is_borked', '<', 'fizzbuzz']]
+    }
+    assert_response 422
+  end
+
+  test "finish a job" do
+    authorize_with :active
+    put :update, {
+      id: jobs(:nearly_finished_job).uuid,
+      job: {
+        output: '551392cc37a317abf865b95f66f4ef94+101',
+        log: '9215de2a951a721f5f156bc08cf63ad7+93',
+        tasks_summary: {done: 1, running: 0, todo: 0, failed: 0},
+        success: true,
+        running: false,
+        finished_at: Time.now.to_s
+      }
+    }
+    assert_response :success
+  end
+
+  [:spectator, :admin].each_with_index do |which_token, i|
+    test "get job queue as #{which_token} user" do
+      authorize_with which_token
+      get :queue
+      assert_response :success
+      assert_equal i, assigns(:objects).count
+    end
+  end
+
+  test "get job queue as with a = filter" do
+    authorize_with :admin
+    get :queue, { filters: [['script','=','foo']] }
+    assert_response :success
+    assert_equal ['foo'], assigns(:objects).collect(&:script).uniq
+    assert_equal 0, assigns(:objects)[0].queue_position
+  end
+
+  test "get job queue as with a != filter" do
+    authorize_with :admin
+    get :queue, { filters: [['script','!=','foo']] }
+    assert_response :success
+    assert_equal 0, assigns(:objects).count
+  end
+
+  [:spectator, :admin].each do |which_token|
+    test "get queue_size as #{which_token} user" do
+      authorize_with which_token
+      get :queue_size
+      assert_response :success
+      assert_equal 1, JSON.parse(@response.body)["queue_size"]
+    end
+  end
+
+  test "job includes assigned nodes" do
+    authorize_with :active
+    get :show, {id: jobs(:nearly_finished_job).uuid}
+    assert_response :success
+    assert_equal([nodes(:busy).uuid], json_response["node_uuids"])
+  end
+
+  test "job lock success" do
+    authorize_with :active
+    post :lock, {id: jobs(:queued).uuid}
+    assert_response :success
+    job = Job.where(uuid: jobs(:queued).uuid).first
+    assert_equal "Running", job.state
+  end
+
+  test "job lock conflict" do
+    authorize_with :active
+    post :lock, {id: jobs(:running).uuid}
+    assert_response 403 # forbidden
+  end
+end
diff --git a/services/api/test/functional/arvados/v1/keep_disks_controller_test.rb b/services/api/test/functional/arvados/v1/keep_disks_controller_test.rb
new file mode 100644 (file)
index 0000000..82067b2
--- /dev/null
@@ -0,0 +1,100 @@
+require 'test_helper'
+
+class Arvados::V1::KeepDisksControllerTest < ActionController::TestCase
+
+  def default_ping_opts
+    {ping_secret: '', service_ssl_flag: false, service_port: 1234}
+  end
+
+  test "add keep disk with admin token" do
+    authorize_with :admin
+    post :ping, default_ping_opts.
+      merge(filesystem_uuid: 'eb1e77a1-db84-4193-b6e6-ca2894f67d5f')
+    assert_response :success
+    assert_not_nil assigns(:object)
+    new_keep_disk = JSON.parse(@response.body)
+    assert_not_nil new_keep_disk['uuid']
+    assert_not_nil new_keep_disk['ping_secret']
+    assert_not_equal '', new_keep_disk['ping_secret']
+  end
+
+  [
+    {},
+    {filesystem_uuid: ''},
+  ].each do |opts|
+    test "add keep disk with[out] filesystem_uuid #{opts}" do
+      authorize_with :admin
+      post :ping, default_ping_opts.merge(opts)
+      assert_response :success
+      assert_not_nil JSON.parse(@response.body)['uuid']
+    end
+  end
+
+  test "refuse to add keep disk without admin token" do
+    post :ping, default_ping_opts
+    assert_response 404
+  end
+
+  test "ping keep disk" do
+    post :ping, default_ping_opts.
+      merge(id: keep_disks(:nonfull).uuid,
+            ping_secret: keep_disks(:nonfull).ping_secret,
+            filesystem_uuid: keep_disks(:nonfull).filesystem_uuid)
+    assert_response :success
+    assert_not_nil assigns(:object)
+    keep_disk = JSON.parse(@response.body)
+    assert_not_nil keep_disk['uuid']
+    assert_not_nil keep_disk['ping_secret']
+  end
+
+  test "admin should get index with ping_secret" do
+    authorize_with :admin
+    get :index
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    items = JSON.parse(@response.body)['items']
+    assert_not_equal 0, items.size
+    assert_not_nil items[0]['ping_secret']
+  end
+
+  # inactive user sees keep disks
+  test "inactive user should get index" do
+    authorize_with :inactive
+    get :index
+    assert_response :success
+    items = JSON.parse(@response.body)['items']
+    assert_not_equal 0, items.size
+
+    # Check these are still included
+    assert items[0]['service_host']
+    assert items[0]['service_port']
+  end
+
+  # active user sees non-secret attributes of keep disks
+  test "active user should get non-empty index with no ping_secret" do
+    authorize_with :active
+    get :index
+    assert_response :success
+    items = JSON.parse(@response.body)['items']
+    assert_not_equal 0, items.size
+    items.each do |item|
+      assert_nil item['ping_secret']
+      assert_not_nil item['is_readable']
+      assert_not_nil item['is_writable']
+      assert_not_nil item['service_host']
+      assert_not_nil item['service_port']
+    end
+  end
+
+  test "search keep_disks with 'any' operator" do
+    authorize_with :active
+    get :index, {
+      where: { any: ['contains', 'o2t1q5w'] }
+    }
+    assert_response :success
+    found = assigns(:objects).collect(&:uuid)
+    assert_equal true, !!found.index('zzzzz-penuu-5w2o2t1q5wy7fhn')
+  end
+
+
+end
diff --git a/services/api/test/functional/arvados/v1/keep_services_controller_test.rb b/services/api/test/functional/arvados/v1/keep_services_controller_test.rb
new file mode 100644 (file)
index 0000000..bfa138d
--- /dev/null
@@ -0,0 +1,23 @@
+require 'test_helper'
+
+class Arvados::V1::KeepServicesControllerTest < ActionController::TestCase
+
+  test "search keep_services by service_port with < query" do
+    authorize_with :active
+    get :index, {
+      filters: [['service_port', '<', 25107]]
+    }
+    assert_response :success
+    assert_equal false, assigns(:objects).any?
+  end
+
+  test "search keep_services by service_port with >= query" do
+    authorize_with :active
+    get :index, {
+      filters: [['service_port', '>=', 25107]]
+    }
+    assert_response :success
+    assert_equal true, assigns(:objects).any?
+  end
+
+end
diff --git a/services/api/test/functional/arvados/v1/links_controller_test.rb b/services/api/test/functional/arvados/v1/links_controller_test.rb
new file mode 100644 (file)
index 0000000..9bf1b0b
--- /dev/null
@@ -0,0 +1,364 @@
+require 'test_helper'
+
+class Arvados::V1::LinksControllerTest < ActionController::TestCase
+
+  ['link', 'link_json'].each do |formatted_link|
+    test "no symbol keys in serialized hash #{formatted_link}" do
+      link = {
+        properties: {username: 'testusername'},
+        link_class: 'test',
+        name: 'encoding',
+        tail_uuid: users(:admin).uuid,
+        head_uuid: virtual_machines(:testvm).uuid
+      }
+      authorize_with :admin
+      if formatted_link == 'link_json'
+        post :create, link: link.to_json
+      else
+        post :create, link: link
+      end
+      assert_response :success
+      assert_not_nil assigns(:object)
+      assert_equal 'testusername', assigns(:object).properties['username']
+      assert_equal false, assigns(:object).properties.has_key?(:username)
+    end
+  end
+
+  %w(created_at modified_at).each do |attr|
+    {nil: nil, bogus: 2.days.ago}.each do |bogustype, bogusvalue|
+      test "cannot set #{bogustype} #{attr} in create" do
+        authorize_with :active
+        post :create, {
+          link: {
+            properties: {},
+            link_class: 'test',
+            name: 'test',
+          }.merge(attr => bogusvalue)
+        }
+        assert_response :success
+        resp = JSON.parse @response.body
+        assert_in_delta Time.now, Time.parse(resp[attr]), 3.0
+      end
+      test "cannot set #{bogustype} #{attr} in update" do
+        really_created_at = links(:test_timestamps).created_at
+        authorize_with :active
+        put :update, {
+          id: links(:test_timestamps).uuid,
+          link: {
+            :properties => {test: 'test'},
+            attr => bogusvalue
+          }
+        }
+        assert_response :success
+        resp = JSON.parse @response.body
+        case attr
+        when 'created_at'
+          assert_in_delta really_created_at, Time.parse(resp[attr]), 0.001
+        else
+          assert_in_delta Time.now, Time.parse(resp[attr]), 3.0
+        end
+      end
+    end
+  end
+
+  test "head must exist" do
+    link = {
+      link_class: 'test',
+      name: 'stuff',
+      tail_uuid: users(:active).uuid,
+      head_uuid: 'zzzzz-tpzed-xyzxyzxerrrorxx'
+    }
+    authorize_with :admin
+    post :create, link: link
+    assert_response 422
+  end
+
+  test "tail must exist" do
+    link = {
+      link_class: 'test',
+      name: 'stuff',
+      head_uuid: users(:active).uuid,
+      tail_uuid: 'zzzzz-tpzed-xyzxyzxerrrorxx'
+    }
+    authorize_with :admin
+    post :create, link: link
+    assert_response 422
+  end
+
+  test "head and tail exist, head_kind and tail_kind are returned" do
+    link = {
+      link_class: 'test',
+      name: 'stuff',
+      head_uuid: users(:active).uuid,
+      tail_uuid: users(:spectator).uuid,
+    }
+    authorize_with :admin
+    post :create, link: link
+    assert_response :success
+    l = JSON.parse(@response.body)
+    assert_equal 'arvados#user', l['head_kind']
+    assert_equal 'arvados#user', l['tail_kind']
+  end
+
+  test "can supply head_kind and tail_kind without error" do
+    link = {
+      link_class: 'test',
+      name: 'stuff',
+      head_uuid: users(:active).uuid,
+      tail_uuid: users(:spectator).uuid,
+      head_kind: "arvados#user",
+      tail_kind: "arvados#user",
+    }
+    authorize_with :admin
+    post :create, link: link
+    assert_response :success
+    l = JSON.parse(@response.body)
+    assert_equal 'arvados#user', l['head_kind']
+    assert_equal 'arvados#user', l['tail_kind']
+  end
+
+  test "tail must be visible by user" do
+    link = {
+      link_class: 'test',
+      name: 'stuff',
+      head_uuid: users(:active).uuid,
+      tail_uuid: virtual_machines(:testvm2).uuid
+    }
+    authorize_with :active
+    post :create, link: link
+    assert_response 422
+  end
+
+  test "filter links with 'is_a' operator" do
+    authorize_with :admin
+    get :index, {
+      filters: [ ['tail_uuid', 'is_a', 'arvados#user'] ]
+    }
+    assert_response :success
+    found = assigns(:objects)
+    assert_not_equal 0, found.count
+    assert_equal found.count, (found.select { |f| f.tail_uuid.match User.uuid_regex }).count
+  end
+
+  test "filter links with 'is_a' operator with more than one" do
+    authorize_with :admin
+    get :index, {
+      filters: [ ['tail_uuid', 'is_a', ['arvados#user', 'arvados#group'] ] ],
+    }
+    assert_response :success
+    found = assigns(:objects)
+    assert_not_equal 0, found.count
+    assert_equal found.count, (found.select { |f|
+                                 f.tail_uuid.match User.uuid_regex or
+                                 f.tail_uuid.match Group.uuid_regex
+                               }).count
+  end
+
+  test "filter links with 'is_a' operator with bogus type" do
+    authorize_with :admin
+    get :index, {
+      filters: [ ['tail_uuid', 'is_a', ['arvados#bogus'] ] ],
+    }
+    assert_response :success
+    found = assigns(:objects)
+    assert_equal 0, found.count
+  end
+
+  test "filter links with 'is_a' operator with collection" do
+    authorize_with :admin
+    get :index, {
+      filters: [ ['head_uuid', 'is_a', ['arvados#collection'] ] ],
+    }
+    assert_response :success
+    found = assigns(:objects)
+    assert_not_equal 0, found.count
+    assert_equal found.count, (found.select { |f| f.head_uuid.match Collection.uuid_regex}).count
+  end
+
+  test "test can still use where tail_kind" do
+    authorize_with :admin
+    get :index, {
+      where: { tail_kind: 'arvados#user' }
+    }
+    assert_response :success
+    found = assigns(:objects)
+    assert_not_equal 0, found.count
+    assert_equal found.count, (found.select { |f| f.tail_uuid.match User.uuid_regex }).count
+  end
+
+  test "test can still use where head_kind" do
+    authorize_with :admin
+    get :index, {
+      where: { head_kind: 'arvados#user' }
+    }
+    assert_response :success
+    found = assigns(:objects)
+    assert_not_equal 0, found.count
+    assert_equal found.count, (found.select { |f| f.head_uuid.match User.uuid_regex }).count
+  end
+
+  test "test can still use filter tail_kind" do
+    authorize_with :admin
+    get :index, {
+      filters: [ ['tail_kind', '=', 'arvados#user'] ]
+    }
+    assert_response :success
+    found = assigns(:objects)
+    assert_not_equal 0, found.count
+    assert_equal found.count, (found.select { |f| f.tail_uuid.match User.uuid_regex }).count
+  end
+
+  test "test can still use filter head_kind" do
+    authorize_with :admin
+    get :index, {
+      filters: [ ['head_kind', '=', 'arvados#user'] ]
+    }
+    assert_response :success
+    found = assigns(:objects)
+    assert_not_equal 0, found.count
+    assert_equal found.count, (found.select { |f| f.head_uuid.match User.uuid_regex }).count
+  end
+
+  test "head_kind matches head_uuid" do
+    link = {
+      link_class: 'test',
+      name: 'stuff',
+      head_uuid: groups(:public).uuid,
+      head_kind: "arvados#user",
+      tail_uuid: users(:spectator).uuid,
+      tail_kind: "arvados#user",
+    }
+    authorize_with :admin
+    post :create, link: link
+    assert_response 422
+  end
+
+  # A tail_kind of arvados#user with a group tail_uuid is inconsistent, so
+  # creation must fail with 422.
+  test "tail_kind matches tail_uuid" do
+    link = {
+      link_class: 'test',
+      name: 'stuff',
+      head_uuid: users(:active).uuid,
+      head_kind: "arvados#user",
+      tail_uuid: groups(:public).uuid,
+      tail_kind: "arvados#user",
+    }
+    authorize_with :admin
+    post :create, link: link
+    assert_response 422
+  end
+
+  # Snake-case kind "arvados#virtual_machine" is not a valid kind string for
+  # a VM head_uuid, so creation fails with 422 (contrast with the camelCase
+  # variant in the next test).
+  test "test with virtual_machine" do
+    link = {
+      tail_kind: "arvados#user",
+      tail_uuid: users(:active).uuid,
+      head_kind: "arvados#virtual_machine",
+      head_uuid: virtual_machines(:testvm).uuid,
+      link_class: "permission",
+      name: "can_login",
+      properties: {username: "repo_and_user_name"}
+    }
+    authorize_with :admin
+    post :create, link: link
+    assert_response 422
+  end
+
+  # The camelCase kind "arvados#virtualMachine" is the accepted form for a
+  # VM head_uuid, so the same payload succeeds.
+  test "test with virtualMachine" do
+    link = {
+      tail_kind: "arvados#user",
+      tail_uuid: users(:active).uuid,
+      head_kind: "arvados#virtualMachine",
+      head_uuid: virtual_machines(:testvm).uuid,
+      link_class: "permission",
+      name: "can_login",
+      properties: {username: "repo_and_user_name"}
+    }
+    authorize_with :admin
+    post :create, link: link
+    assert_response :success
+  end
+
+  # The project owner (:active) can fetch a permission link on their project.
+  test "project owner can show a project permission" do
+    uuid = links(:project_viewer_can_read_project).uuid
+    authorize_with :active
+    get :show, id: uuid
+    assert_response :success
+    assert_equal(uuid, assigns(:object).andand.uuid)
+  end
+
+  # Admins can fetch any project permission link.
+  test "admin can show a project permission" do
+    uuid = links(:project_viewer_can_read_project).uuid
+    authorize_with :admin
+    get :show, id: uuid
+    assert_response :success
+    assert_equal(uuid, assigns(:object).andand.uuid)
+  end
+
+  # Permission links belonging to other users are invisible (404, not 403,
+  # so their existence is not revealed).
+  test "project viewer can't show others' project permissions" do
+    authorize_with :project_viewer
+    get :show, id: links(:admin_can_write_aproject).uuid
+    assert_response 404
+  end
+
+  # A syntactically valid but nonexistent uuid yields 404.
+  test "requesting a nonexistent link returns 404" do
+    authorize_with :active
+    get :show, id: 'zzzzz-zzzzz-zzzzzzzzzzzzzzz'
+    assert_response 404
+  end
+
+  # Intentionally skipped; the comment below documents the missing feature
+  # this test would cover.
+  test "retrieve all permissions using generic links index api" do
+    skip "(not implemented)"
+    # Links.readable_by() does not return the full set of permission
+    # links that are visible to a user (i.e., all permission links
+    # whose head_uuid references an object for which the user has
+    # ownership or can_manage permission). Therefore, neither does
+    # /arvados/v1/links.
+    #
+    # It is possible to retrieve the full set of permissions for a
+    # single object via /arvados/v1/permissions.
+    authorize_with :active
+    get :index, filters: [['link_class', '=', 'permission'],
+                          ['head_uuid', '=', groups(:aproject).uuid]]
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    assert_includes(assigns(:objects).map(&:uuid),
+                    links(:project_viewer_can_read_project).uuid)
+  end
+
+  # Admins can list permission links on a project via the generic index.
+  test "admin can index project permissions" do
+    authorize_with :admin
+    get :index, filters: [['link_class', '=', 'permission'],
+                          ['head_uuid', '=', groups(:aproject).uuid]]
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    assert_includes(assigns(:objects).map(&:uuid),
+                    links(:project_viewer_can_read_project).uuid)
+  end
+
+  # Excluding the viewer's own permission link leaves nothing visible:
+  # the index must come back empty, not 403.
+  test "project viewer can't index others' project permissions" do
+    authorize_with :project_viewer
+    get :index, filters: [['link_class', '=', 'permission'],
+                          ['head_uuid', '=', groups(:aproject).uuid],
+                          ['tail_uuid', '!=', users(:project_viewer).uuid]]
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    assert_empty assigns(:objects)
+  end
+
+  # Granting permissions.
+  # A non-admin owner can grant can_read to another user in the same group.
+  # NOTE(review): despite the test name saying "project", the granted
+  # object is the collection_owned_by_foo fixture.
+  test "grant can_read on project to other users in group" do
+    authorize_with :user_foo_in_sharing_group
+
+    refute users(:user_bar_in_sharing_group).can?(read: collections(:collection_owned_by_foo).uuid)
+
+    post :create, {
+      link: {
+        tail_uuid: users(:user_bar_in_sharing_group).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: collections(:collection_owned_by_foo).uuid,
+      }
+    }
+    assert_response :success
+    assert users(:user_bar_in_sharing_group).can?(read: collections(:collection_owned_by_foo).uuid)
+  end
+end
diff --git a/services/api/test/functional/arvados/v1/logs_controller_test.rb b/services/api/test/functional/arvados/v1/logs_controller_test.rb
new file mode 100644 (file)
index 0000000..475e7d6
--- /dev/null
@@ -0,0 +1,48 @@
+require 'test_helper'
+
+# Functional tests for the v1 logs API: non-admin create/read of their own
+# logs, plus backward compatibility for the legacy object_kind attribute.
+class Arvados::V1::LogsControllerTest < ActionController::TestCase
+  fixtures :logs
+
+  test "non-admins can create their own logs" do
+    authorize_with :active
+    post :create, log: {summary: 'test log'}
+    assert_response :success
+    resp = assigns(:object)
+    assert_not_nil resp.uuid
+    assert_equal('test log', resp.summary, "loaded wrong log after creation")
+  end
+
+  test "non-admins can read their own logs" do
+    authorize_with :active
+    my_log = logs(:log_owned_by_active)
+    get :show, {id: my_log[:uuid]}
+    assert_response(:success, "failed to get log")
+    resp = assigns(:object)
+    assert_equal(my_log[:summary], resp.summary, "got wrong log")
+  end
+
+  # Legacy API: where:{object_kind:...} still filters, and object_kind is
+  # still present in the serialized response items.
+  test "test can still use where object_kind" do
+    authorize_with :admin
+    get :index, {
+      where: { object_kind: 'arvados#user' }
+    }
+    assert_response :success
+    found = assigns(:objects)
+    assert_not_equal 0, found.count
+    assert_equal found.count, (found.select { |f| f.object_uuid.match User.uuid_regex }).count
+    l = JSON.parse(@response.body)
+    assert_equal 'arvados#user', l['items'][0]['object_kind']
+  end
+
+  # Legacy API: the filters form ['object_kind','=',...] behaves the same.
+  test "test can still use filter object_kind" do
+    authorize_with :admin
+    get :index, {
+      filters: [ ['object_kind', '=', 'arvados#user'] ]
+    }
+    assert_response :success
+    found = assigns(:objects)
+    assert_not_equal 0, found.count
+    assert_equal found.count, (found.select { |f| f.object_uuid.match User.uuid_regex }).count
+  end
+
+end
diff --git a/services/api/test/functional/arvados/v1/nodes_controller_test.rb b/services/api/test/functional/arvados/v1/nodes_controller_test.rb
new file mode 100644 (file)
index 0000000..7ea231e
--- /dev/null
@@ -0,0 +1,176 @@
+require 'test_helper'
+
+class Arvados::V1::NodesControllerTest < ActionController::TestCase
+
+  test "should get index with ping_secret" do
+    authorize_with :admin
+    get :index
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    node_items = JSON.parse(@response.body)['items']
+    assert_not_equal 0, node_items.size
+    assert_not_nil node_items[0]['info'].andand['ping_secret']
+  end
+
+  # inactive user does not see any nodes
+  test "inactive user should get empty index" do
+    authorize_with :inactive
+    get :index
+    assert_response :success
+    node_items = JSON.parse(@response.body)['items']
+    assert_equal 0, node_items.size
+  end
+
+  # active user sees non-secret attributes of up and recently-up nodes
+  test "active user should get non-empty index with no ping_secret" do
+    authorize_with :active
+    get :index
+    assert_response :success
+    node_items = JSON.parse(@response.body)['items']
+    assert_not_equal 0, node_items.size
+    found_busy_node = false
+    node_items.each do |node|
+      assert_nil node['info'].andand['ping_secret']
+      assert_not_nil node['crunch_worker_state']
+      if node['uuid'] == nodes(:busy).uuid
+        found_busy_node = true
+        assert_equal 'busy', node['crunch_worker_state']
+      end
+    end
+    assert_equal true, found_busy_node
+  end
+
+  test "node should ping with ping_secret and no token" do
+    post :ping, {
+      id: 'zzzzz-7ekkf-2z3mc76g2q73aio',
+      instance_id: 'i-0000000',
+      local_ipv4: '172.17.2.174',
+      ping_secret: '69udawxvn3zzj45hs8bumvndricrha4lcpi23pd69e44soanc0'
+    }
+    assert_response :success
+    response = JSON.parse(@response.body)
+    assert_equal 'zzzzz-7ekkf-2z3mc76g2q73aio', response['uuid']
+    # Ensure we are getting the "superuser" attributes, too
+    assert_not_nil response['first_ping_at'], '"first_ping_at" attr missing'
+    assert_not_nil response['info'], '"info" attr missing'
+    assert_not_nil response['nameservers'], '"nameservers" attr missing'
+  end
+
+  test "node should fail ping with invalid ping_secret" do
+    post :ping, {
+      id: 'zzzzz-7ekkf-2z3mc76g2q73aio',
+      instance_id: 'i-0000000',
+      local_ipv4: '172.17.2.174',
+      ping_secret: 'dricrha4lcpi23pd69e44soanc069udawxvn3zzj45hs8bumvn'
+    }
+    assert_response 401
+  end
+
+  test "create node" do
+    authorize_with :admin
+    post :create, {node: {}}
+    assert_response :success
+    assert_not_nil json_response['uuid']
+    assert_not_nil json_response['info'].is_a? Hash
+    assert_not_nil json_response['info']['ping_secret']
+  end
+
+  test "ping adds node stats to info" do
+    authorize_with :admin
+    node = nodes(:idle)
+    post :ping, {
+      id: node.uuid,
+      ping_secret: node.info['ping_secret'],
+      total_cpu_cores: 32,
+      total_ram_mb: 1024,
+      total_scratch_mb: 2048
+    }
+    assert_response :success
+    info = JSON.parse(@response.body)['info']
+    properties = JSON.parse(@response.body)['properties']
+    assert_equal(node.info['ping_secret'], info['ping_secret'])
+    assert_equal(32, properties['total_cpu_cores'].to_i)
+    assert_equal(1024, properties['total_ram_mb'].to_i)
+    assert_equal(2048, properties['total_scratch_mb'].to_i)
+  end
+
+  test "active user can see their assigned job" do
+    authorize_with :active
+    get :show, {id: nodes(:busy).uuid}
+    assert_response :success
+    assert_equal(jobs(:nearly_finished_job).uuid, json_response["job_uuid"])
+  end
+
+  test "user without job read permission can't see job" do
+    authorize_with :spectator
+    get :show, {id: nodes(:busy).uuid}
+    assert_response :success
+    assert_nil(json_response["job"], "spectator can see node's assigned job")
+  end
+
+  [:admin, :spectator].each do |user|
+    test "select param does not break node list for #{user}" do
+      authorize_with user
+      get :index, {select: ['domain']}
+      assert_response :success
+    end
+  end
+
+  test "admin can associate a job with a node" do
+    changed_node = nodes(:idle)
+    assigned_job = jobs(:queued)
+    authorize_with :admin
+    post :update, {
+      id: changed_node.uuid,
+      node: {job_uuid: assigned_job.uuid},
+    }
+    assert_response :success
+    assert_equal(changed_node.hostname, json_response["hostname"],
+                 "hostname mismatch after defining job")
+    assert_equal(assigned_job.uuid, json_response["job_uuid"],
+                 "mismatch in node's assigned job UUID")
+  end
+
+  test "non-admin can't associate a job with a node" do
+    authorize_with :active
+    post :update, {
+      id: nodes(:idle).uuid,
+      node: {job_uuid: jobs(:queued).uuid},
+    }
+    assert_response 403
+  end
+
+  test "admin can unassign a job from a node" do
+    changed_node = nodes(:busy)
+    authorize_with :admin
+    post :update, {
+      id: changed_node.uuid,
+      node: {job_uuid: nil},
+    }
+    assert_response :success
+    assert_equal(changed_node.hostname, json_response["hostname"],
+                 "hostname mismatch after defining job")
+    assert_nil(json_response["job_uuid"],
+               "node still has job assignment after update")
+  end
+
+  test "non-admin can't unassign a job from a node" do
+    authorize_with :project_viewer
+    post :update, {
+      id: nodes(:busy).uuid,
+      node: {job_uuid: nil},
+    }
+    assert_response 403
+  end
+
+  test "job readable after updating other attributes" do
+    authorize_with :admin
+    post :update, {
+      id: nodes(:busy).uuid,
+      node: {last_ping_at: 1.second.ago},
+    }
+    assert_response :success
+    assert_equal(jobs(:nearly_finished_job).uuid, json_response["job_uuid"],
+                 "mismatched job UUID after ping update")
+  end
+end
diff --git a/services/api/test/functional/arvados/v1/pipeline_instances_controller_test.rb b/services/api/test/functional/arvados/v1/pipeline_instances_controller_test.rb
new file mode 100644 (file)
index 0000000..63c47ff
--- /dev/null
@@ -0,0 +1,28 @@
+require 'test_helper'
+
+# Tests for creating pipeline instances: components are copied from the
+# referenced template, or may be supplied directly with no template.
+class Arvados::V1::PipelineInstancesControllerTest < ActionController::TestCase
+
+  test 'create pipeline with components copied from template' do
+    authorize_with :active
+    post :create, {
+      pipeline_instance: {
+        pipeline_template_uuid: pipeline_templates(:two_part).uuid
+      }
+    }
+    assert_response :success
+    # Compare serialized forms so hash ordering differences don't matter.
+    assert_equal(pipeline_templates(:two_part).components.to_json,
+                 assigns(:object).components.to_json)
+  end
+
+  test 'create pipeline with no template' do
+    authorize_with :active
+    post :create, {
+      pipeline_instance: {
+        components: {}
+      }
+    }
+    assert_response :success
+    assert_equal({}, assigns(:object).components)
+  end
+
+end
diff --git a/services/api/test/functional/arvados/v1/pipeline_templates_controller_test.rb b/services/api/test/functional/arvados/v1/pipeline_templates_controller_test.rb
new file mode 100644 (file)
index 0000000..104c3c9
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class Arvados::V1::PipelineTemplatesControllerTest < ActionController::TestCase
+  # No controller-specific tests yet.
+end
diff --git a/services/api/test/functional/arvados/v1/repositories_controller_test.rb b/services/api/test/functional/arvados/v1/repositories_controller_test.rb
new file mode 100644 (file)
index 0000000..5304bca
--- /dev/null
@@ -0,0 +1,90 @@
+require 'test_helper'
+
+class Arvados::V1::RepositoriesControllerTest < ActionController::TestCase
+  test "should get_all_logins with admin token" do
+    authorize_with :admin
+    get :get_all_permissions
+    assert_response :success
+  end
+
+  test "should get_all_logins with non-admin token" do
+    authorize_with :active
+    get :get_all_permissions
+    assert_response 403
+  end
+
+  test "get_all_permissions gives RW to repository owner" do
+    authorize_with :admin
+    get :get_all_permissions
+    assert_response :success
+    ok = false
+    json_response['repositories'].each do |repo|
+      if repo['uuid'] == repositories(:repository2).uuid
+        if repo['user_permissions'][users(:active).uuid]['can_write']
+          ok = true
+        end
+      end
+    end
+    assert_equal(true, ok,
+                 "No permission on own repo '@{repositories(:repository2).uuid}'")
+  end
+
+  test "get_all_permissions takes into account is_admin flag" do
+    authorize_with :admin
+    get :get_all_permissions
+    assert_response :success
+    json_response['repositories'].each do |repo|
+      assert_not_nil(repo['user_permissions'][users(:admin).uuid],
+                     "Admin user is not listed in perms for #{repo['uuid']}")
+      assert_equal(true,
+                   repo['user_permissions'][users(:admin).uuid]['can_write'],
+                   "Admin has no perms for #{repo['uuid']}")
+    end
+  end
+
+  test "get_all_permissions does not give any access to user without permission" do
+    viewer_uuid = users(:project_viewer).uuid
+    assert_equal(authorized_keys(:project_viewer).authorized_user_uuid,
+                 viewer_uuid,
+                 "project_viewer must have an authorized_key for this test to work")
+    authorize_with :admin
+    get :get_all_permissions
+    assert_response :success
+    readable_repos = json_response["repositories"].select do |repo|
+      repo["user_permissions"].has_key?(viewer_uuid)
+    end
+    assert_equal(["arvados"], readable_repos.map { |r| r["name"] },
+                 "project_viewer should only have permissions on public repos")
+  end
+
+  test "get_all_permissions gives gitolite R to user with read-only access" do
+    authorize_with :admin
+    get :get_all_permissions
+    assert_response :success
+    found_it = false
+    assert_equal(authorized_keys(:spectator).authorized_user_uuid,
+                 users(:spectator).uuid,
+                 "spectator must have an authorized_key for this test to work")
+    json_response['repositories'].each do |repo|
+      next unless repo['uuid'] == repositories(:foo).uuid
+      assert_equal('R',
+                   repo['user_permissions'][users(:spectator).uuid]['gitolite_permissions'],
+                   "spectator user should have just R access to #{repo['uuid']}")
+      found_it = true
+    end
+    assert_equal true, found_it, "spectator user does not have R on foo repo"
+  end
+
+  test "get_all_permissions provides admin and active user keys" do
+    authorize_with :admin
+    get :get_all_permissions
+    assert_response :success
+    [:active, :admin].each do |u|
+      assert_equal(1, json_response['user_keys'][users(u).uuid].andand.count,
+                   "expected 1 key for #{u} (#{users(u).uuid})")
+      assert_equal(json_response['user_keys'][users(u).uuid][0]['public_key'],
+                   authorized_keys(u).public_key,
+                   "response public_key does not match fixture #{u}.")
+    end
+  end
+end
diff --git a/services/api/test/functional/arvados/v1/schema_controller_test.rb b/services/api/test/functional/arvados/v1/schema_controller_test.rb
new file mode 100644 (file)
index 0000000..520e36e
--- /dev/null
@@ -0,0 +1,23 @@
+require 'test_helper'
+
+# Tests for the discovery document served by the schema controller.
+class Arvados::V1::SchemaControllerTest < ActionController::TestCase
+
+  test "should get fresh discovery document" do
+    # NOTE(review): assigning a constant inside a test method warns
+    # "already initialized constant" on re-run; a local would be cleaner.
+    MAX_SCHEMA_AGE = 60
+    get :index
+    assert_response :success
+    discovery_doc = JSON.parse(@response.body)
+    assert_equal 'discovery#restDescription', discovery_doc['kind']
+    # NOTE(review): discovery_doc['generatedAt'] comes straight from the
+    # parsed JSON — confirm it compares cleanly against a Time here.
+    assert_equal(true,
+                 Time.now - MAX_SCHEMA_AGE.seconds < discovery_doc['generatedAt'],
+                 "discovery document was generated >#{MAX_SCHEMA_AGE}s ago")
+  end
+
+  test "discovery document has defaultTrashLifetime" do
+    get :index
+    assert_response :success
+    discovery_doc = JSON.parse(@response.body)
+    assert_includes discovery_doc, 'defaultTrashLifetime'
+    assert_equal discovery_doc['defaultTrashLifetime'], Rails.application.config.default_trash_lifetime
+  end
+end
diff --git a/services/api/test/functional/arvados/v1/specimens_controller_test.rb b/services/api/test/functional/arvados/v1/specimens_controller_test.rb
new file mode 100644 (file)
index 0000000..172501e
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class Arvados::V1::SpecimensControllerTest < ActionController::TestCase
+  # No controller-specific tests yet.
+end
diff --git a/services/api/test/functional/arvados/v1/traits_controller_test.rb b/services/api/test/functional/arvados/v1/traits_controller_test.rb
new file mode 100644 (file)
index 0000000..44b11db
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class Arvados::V1::TraitsControllerTest < ActionController::TestCase
+  # No controller-specific tests yet.
+end
diff --git a/services/api/test/functional/arvados/v1/user_agreements_controller_test.rb b/services/api/test/functional/arvados/v1/user_agreements_controller_test.rb
new file mode 100644 (file)
index 0000000..05bdef5
--- /dev/null
@@ -0,0 +1,46 @@
+require 'test_helper'
+
+# Tests for user agreement visibility: active and inactive (but invited)
+# users see agreements; uninvited users see none.
+class Arvados::V1::UserAgreementsControllerTest < ActionController::TestCase
+
+  test "active user get user agreements" do
+    authorize_with :active
+    get :index
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    agreements_list = JSON.parse(@response.body)
+    assert_not_nil agreements_list['items']
+    assert_not_nil agreements_list['items'][0]
+  end
+
+  test "active user get user agreement signatures" do
+    authorize_with :active
+    get :signatures
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    agreements_list = JSON.parse(@response.body)
+    assert_not_nil agreements_list['items']
+    assert_not_nil agreements_list['items'][0]
+    # The :active fixture has exactly one signature on record.
+    assert_equal 1, agreements_list['items'].count
+  end
+
+  # Inactive (but invited) users must still be able to read agreements,
+  # since signing them is part of activation.
+  test "inactive user get user agreements" do
+    authorize_with :inactive
+    get :index
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    agreements_list = JSON.parse(@response.body)
+    assert_not_nil agreements_list['items']
+    assert_not_nil agreements_list['items'][0]
+  end
+
+  test "uninvited user receives empty list of user agreements" do
+    authorize_with :inactive_uninvited
+    get :index
+    assert_response :success
+    assert_not_nil assigns(:objects)
+    agreements_list = JSON.parse(@response.body)
+    assert_not_nil agreements_list['items']
+    assert_nil agreements_list['items'][0]
+  end
+
+end
diff --git a/services/api/test/functional/arvados/v1/users_controller_test.rb b/services/api/test/functional/arvados/v1/users_controller_test.rb
new file mode 100644 (file)
index 0000000..2d26370
--- /dev/null
@@ -0,0 +1,870 @@
+require 'test_helper'
+require 'helpers/users_test_helper'
+
+class Arvados::V1::UsersControllerTest < ActionController::TestCase
+  include CurrentApiClient
+  include UsersTestHelper
+
+  setup do
+    @all_links_at_start = Link.all
+    @vm_uuid = virtual_machines(:testvm).uuid
+  end
+
+  test "activate a user after signing UA" do
+    authorize_with :inactive_but_signed_user_agreement
+    post :activate, id: users(:inactive_but_signed_user_agreement).uuid
+    assert_response :success
+    assert_not_nil assigns(:object)
+    me = JSON.parse(@response.body)
+    assert_equal true, me['is_active']
+  end
+
+  test "refuse to activate a user before signing UA" do
+    act_as_system_user do
+    required_uuids = Link.where("owner_uuid = ? and link_class = ? and name = ? and tail_uuid = ? and head_uuid like ?",
+                                system_user_uuid,
+                                'signature',
+                                'require',
+                                system_user_uuid,
+                                Collection.uuid_like_pattern).
+      collect(&:head_uuid)
+
+      assert required_uuids.length > 0
+
+      signed_uuids = Link.where(owner_uuid: system_user_uuid,
+                                link_class: 'signature',
+                                name: 'click',
+                                tail_uuid: users(:inactive).uuid,
+                                head_uuid: required_uuids).
+                          collect(&:head_uuid)
+
+      assert_equal 0, signed_uuids.length
+    end
+
+    authorize_with :inactive
+    assert_equal false, users(:inactive).is_active
+
+    post :activate, id: users(:inactive).uuid
+    assert_response 403
+
+    resp = json_response
+    assert resp['errors'].first.include? 'Cannot activate without user agreements'
+    assert_nil resp['is_active']
+  end
+
+  test "activate an already-active user" do
+    authorize_with :active
+    post :activate, id: users(:active).uuid
+    assert_response :success
+    me = JSON.parse(@response.body)
+    assert_equal true, me['is_active']
+  end
+
+  test "respond 401 if given token exists but user record is missing" do
+    authorize_with :valid_token_deleted_user
+    get :current, {format: :json}
+    assert_response 401
+  end
+
+  test "create new user with user as input" do
+    authorize_with :admin
+    post :create, user: {
+      first_name: "test_first_name",
+      last_name: "test_last_name",
+      email: "foo@example.com"
+    }
+    assert_response :success
+    created = JSON.parse(@response.body)
+    assert_equal 'test_first_name', created['first_name']
+    assert_not_nil created['uuid'], 'expected uuid for the newly created user'
+    assert_not_nil created['email'], 'expected non-nil email'
+    assert_nil created['identity_url'], 'expected no identity_url'
+  end
+
+  test "create user with user, vm and repo as input" do
+    authorize_with :admin
+    repo_name = 'test_repo'
+
+    post :setup, {
+      repo_name: repo_name,
+      openid_prefix: 'https://www.google.com/accounts/o8/id',
+      user: {
+        uuid: 'zzzzz-tpzed-abcdefghijklmno',
+        first_name: "in_create_test_first_name",
+        last_name: "test_last_name",
+        email: "foo@example.com"
+      }
+    }
+    assert_response :success
+    response_items = JSON.parse(@response.body)['items']
+
+    created = find_obj_in_resp response_items, 'User', nil
+
+    assert_equal 'in_create_test_first_name', created['first_name']
+    assert_not_nil created['uuid'], 'expected non-null uuid for the new user'
+    assert_equal 'zzzzz-tpzed-abcdefghijklmno', created['uuid']
+    assert_not_nil created['email'], 'expected non-nil email'
+    assert_nil created['identity_url'], 'expected no identity_url'
+
+    # arvados#user, repo link and link add user to 'All users' group
+    verify_num_links @all_links_at_start, 4
+
+    verify_link response_items, 'arvados#user', true, 'permission', 'can_login',
+        created['uuid'], created['email'], 'arvados#user', false, 'User'
+
+    verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
+        repo_name, created['uuid'], 'arvados#repository', true, 'Repository'
+
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+        'All users', created['uuid'], 'arvados#group', true, 'Group'
+
+    verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',
+        nil, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
+
+    verify_system_group_permission_link_for created['uuid']
+  end
+
+  test "setup user with bogus uuid and expect error" do
+    authorize_with :admin
+
+    post :setup, {
+      uuid: 'bogus_uuid',
+      repo_name: 'test_repo',
+      vm_uuid: @vm_uuid
+    }
+    response_body = JSON.parse(@response.body)
+    response_errors = response_body['errors']
+    assert_not_nil response_errors, 'Expected error in response'
+    assert (response_errors.first.include? 'Path not found'), 'Expected 404'
+  end
+
+  test "setup user with bogus uuid in user and expect error" do
+    authorize_with :admin
+
+    post :setup, {
+      user: {uuid: 'bogus_uuid'},
+      repo_name: 'test_repo',
+      vm_uuid: @vm_uuid,
+      openid_prefix: 'https://www.google.com/accounts/o8/id'
+    }
+    response_body = JSON.parse(@response.body)
+    response_errors = response_body['errors']
+    assert_not_nil response_errors, 'Expected error in response'
+    assert (response_errors.first.include? 'ArgumentError: Require user email'),
+      'Expected RuntimeError'
+  end
+
+  test "setup user with no uuid and user, expect error" do
+    authorize_with :admin
+
+    post :setup, {
+      repo_name: 'test_repo',
+      vm_uuid: @vm_uuid,
+      openid_prefix: 'https://www.google.com/accounts/o8/id'
+    }
+    response_body = JSON.parse(@response.body)
+    response_errors = response_body['errors']
+    assert_not_nil response_errors, 'Expected error in response'
+    assert (response_errors.first.include? 'Required uuid or user'),
+        'Expected ArgumentError'
+  end
+
+  test "setup user with no uuid and email, expect error" do
+    authorize_with :admin
+
+    post :setup, {
+      user: {},
+      repo_name: 'test_repo',
+      vm_uuid: @vm_uuid,
+      openid_prefix: 'https://www.google.com/accounts/o8/id'
+    }
+    response_body = JSON.parse(@response.body)
+    response_errors = response_body['errors']
+    assert_not_nil response_errors, 'Expected error in response'
+    assert (response_errors.first.include? '<ArgumentError: Require user email'),
+        'Expected ArgumentError'
+  end
+
+  test "invoke setup with existing uuid, vm and repo and verify links" do
+    authorize_with :admin
+    inactive_user = users(:inactive)
+
+    post :setup, {
+      uuid: users(:inactive).uuid,
+      repo_name: 'test_repo',
+      vm_uuid: @vm_uuid
+    }
+
+    assert_response :success
+
+    response_items = JSON.parse(@response.body)['items']
+    resp_obj = find_obj_in_resp response_items, 'User', nil
+
+    assert_not_nil resp_obj['uuid'], 'expected uuid for the new user'
+    assert_equal inactive_user['uuid'], resp_obj['uuid']
+    assert_equal inactive_user['email'], resp_obj['email'],
+        'expecting inactive user email'
+
+    # expect repo and vm links
+    verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
+        'test_repo', resp_obj['uuid'], 'arvados#repository', true, 'Repository'
+
+    verify_link response_items, 'arvados#virtualMachine', true, 'permission', 'can_login',
+        @vm_uuid, resp_obj['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
+  end
+
+  test "invoke setup with existing uuid in user, verify response" do
+    authorize_with :admin
+    inactive_user = users(:inactive)
+
+    post :setup, {
+      user: {uuid: inactive_user['uuid']},
+      openid_prefix: 'https://www.google.com/accounts/o8/id'
+    }
+
+    assert_response :success
+
+    response_items = JSON.parse(@response.body)['items']
+    resp_obj = find_obj_in_resp response_items, 'User', nil
+
+    assert_not_nil resp_obj['uuid'], 'expected uuid for the new user'
+    assert_equal inactive_user['uuid'], resp_obj['uuid']
+    assert_equal inactive_user['email'], resp_obj['email'],
+        'expecting inactive user email'
+  end
+
+  test "invoke setup with existing uuid but different email, expect original email" do
+    authorize_with :admin
+    inactive_user = users(:inactive)
+
+    post :setup, {
+      uuid: inactive_user['uuid'],
+      user: {email: 'junk_email'}
+    }
+
+    assert_response :success
+
+    response_items = JSON.parse(@response.body)['items']
+    resp_obj = find_obj_in_resp response_items, 'User', nil
+
+    assert_not_nil resp_obj['uuid'], 'expected uuid for the new user'
+    assert_equal inactive_user['uuid'], resp_obj['uuid']
+    assert_equal inactive_user['email'], resp_obj['email'],
+        'expecting inactive user email'
+  end
+
+  test "setup user with valid email and repo as input" do
+    authorize_with :admin
+
+    post :setup, {
+      repo_name: 'test_repo',
+      user: {email: 'foo@example.com'},
+      openid_prefix: 'https://www.google.com/accounts/o8/id'
+    }
+
+    assert_response :success
+    response_items = JSON.parse(@response.body)['items']
+    response_object = find_obj_in_resp response_items, 'User', nil
+    assert_not_nil response_object['uuid'], 'expected uuid for the new user'
+    assert_equal response_object['email'], 'foo@example.com', 'expected given email'
+
+    # four extra links; system_group, login, group and repo perms
+    verify_num_links @all_links_at_start, 4
+  end
+
+  test "setup user with fake vm and expect error" do
+    authorize_with :admin
+
+    post :setup, {
+      repo_name: 'test_repo',
+      vm_uuid: 'no_such_vm',
+      user: {email: 'foo@example.com'},
+      openid_prefix: 'https://www.google.com/accounts/o8/id'
+    }
+
+    response_body = JSON.parse(@response.body)
+    response_errors = response_body['errors']
+    assert_not_nil response_errors, 'Expected error in response'
+    assert (response_errors.first.include? "No vm found for no_such_vm"),
+          'Expected RuntimeError: No vm found for no_such_vm'
+  end
+
+  test "setup user with valid email, repo and real vm as input" do
+    authorize_with :admin
+
+    post :setup, {
+      repo_name: 'test_repo',
+      openid_prefix: 'https://www.google.com/accounts/o8/id',
+      vm_uuid: @vm_uuid,
+      user: {email: 'foo@example.com'}
+    }
+
+    assert_response :success
+    response_items = JSON.parse(@response.body)['items']
+    response_object = find_obj_in_resp response_items, 'User', nil
+    assert_not_nil response_object['uuid'], 'expected uuid for the new user'
+    assert_equal response_object['email'], 'foo@example.com', 'expected given email'
+
+    # five extra links; system_group, login, group, vm, repo
+    verify_num_links @all_links_at_start, 5
+  end
+
+  test "setup user with valid email, no vm and no repo as input" do
+    authorize_with :admin
+
+    post :setup, {
+      user: {email: 'foo@example.com'},
+      openid_prefix: 'https://www.google.com/accounts/o8/id'
+    }
+
+    assert_response :success
+    response_items = JSON.parse(@response.body)['items']
+    response_object = find_obj_in_resp response_items, 'User', nil
+    assert_not_nil response_object['uuid'], 'expected uuid for new user'
+    assert_equal response_object['email'], 'foo@example.com', 'expected given email'
+
+    # three extra links; system_group, login, and group
+    verify_num_links @all_links_at_start, 3
+
+    verify_link response_items, 'arvados#user', true, 'permission', 'can_login',
+        response_object['uuid'], response_object['email'], 'arvados#user', false, 'User'
+
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+        'All users', response_object['uuid'], 'arvados#group', true, 'Group'
+
+    verify_link response_items, 'arvados#repository', false, 'permission', 'can_manage',
+        'test_repo', response_object['uuid'], 'arvados#repository', true, 'Repository'
+
+    verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',
+        nil, response_object['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
+  end
+
+  test "setup user with email, first name, repo name and vm uuid" do
+    authorize_with :admin
+
+    post :setup, {
+      openid_prefix: 'https://www.google.com/accounts/o8/id',
+      repo_name: 'test_repo',
+      vm_uuid: @vm_uuid,
+      user: {
+        first_name: 'test_first_name',
+        email: 'foo@example.com'
+      }
+    }
+
+    assert_response :success
+    response_items = JSON.parse(@response.body)['items']
+    response_object = find_obj_in_resp response_items, 'User', nil
+    assert_not_nil response_object['uuid'], 'expected uuid for new user'
+    assert_equal response_object['email'], 'foo@example.com', 'expected given email'
+    assert_equal 'test_first_name', response_object['first_name'],
+        'expecting first name'
+
+    # five extra links; system_group, login, group, repo and vm
+    verify_num_links @all_links_at_start, 5
+  end
+
+  test "setup user with an existing user email and check different object is created" do
+    authorize_with :admin
+    inactive_user = users(:inactive)
+
+    post :setup, {
+      openid_prefix: 'https://www.google.com/accounts/o8/id',
+      repo_name: 'test_repo',
+      user: {
+        email: inactive_user['email']
+      }
+    }
+
+    assert_response :success
+    response_items = JSON.parse(@response.body)['items']
+    response_object = find_obj_in_resp response_items, 'User', nil
+    assert_not_nil response_object['uuid'], 'expected uuid for new user'
+    assert_not_equal response_object['uuid'], inactive_user['uuid'],
+        'expected different uuid after create operation'
+    assert_equal inactive_user['email'], response_object['email'], 'expected given email'
+    # four extra links; system_group, login, group and repo. No vm link.
+    verify_num_links @all_links_at_start, 4
+  end
+
+  test "setup user with openid prefix" do
+    authorize_with :admin
+
+    post :setup, {
+      repo_name: 'test_repo',
+      openid_prefix: 'http://www.example.com/account',
+      user: {
+        first_name: "in_create_test_first_name",
+        last_name: "test_last_name",
+        email: "foo@example.com"
+      }
+    }
+
+    assert_response :success
+
+    response_items = JSON.parse(@response.body)['items']
+    created = find_obj_in_resp response_items, 'User', nil
+
+    assert_equal 'in_create_test_first_name', created['first_name']
+    assert_not_nil created['uuid'], 'expected uuid for new user'
+    assert_not_nil created['email'], 'expected non-nil email'
+    assert_nil created['identity_url'], 'expected no identity_url'
+
+    # verify links
+    # four new links: system_group, arvados#user, repo, and 'All users' group.
+    verify_num_links @all_links_at_start, 4
+
+    verify_link response_items, 'arvados#user', true, 'permission', 'can_login',
+        created['uuid'], created['email'], 'arvados#user', false, 'User'
+
+    verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
+        'test_repo', created['uuid'], 'arvados#repository', true, 'Repository'
+
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+        'All users', created['uuid'], 'arvados#group', true, 'Group'
+
+    verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',
+        nil, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
+  end
+
+  test "invoke setup with no openid prefix, expect error" do
+    authorize_with :admin
+
+    post :setup, {
+      repo_name: 'test_repo',
+      user: {
+        first_name: "in_create_test_first_name",
+        last_name: "test_last_name",
+        email: "foo@example.com"
+      }
+    }
+
+    response_body = JSON.parse(@response.body)
+    response_errors = response_body['errors']
+    assert_not_nil response_errors, 'Expected error in response'
+    assert (response_errors.first.include? 'openid_prefix parameter is missing'),
+        'Expected ArgumentError'
+  end
+
+  test "setup user with user, vm and repo and verify links" do
+    authorize_with :admin
+
+    post :setup, {
+      user: {
+        first_name: "in_create_test_first_name",
+        last_name: "test_last_name",
+        email: "foo@example.com"
+      },
+      vm_uuid: @vm_uuid,
+      repo_name: 'test_repo',
+      openid_prefix: 'https://www.google.com/accounts/o8/id'
+    }
+
+    assert_response :success
+
+    response_items = JSON.parse(@response.body)['items']
+    created = find_obj_in_resp response_items, 'User', nil
+
+    assert_equal 'in_create_test_first_name', created['first_name']
+    assert_not_nil created['uuid'], 'expected uuid for new user'
+    assert_not_nil created['email'], 'expected non-nil email'
+    assert_nil created['identity_url'], 'expected no identity_url'
+
+    # five new links: system_group, arvados#user, repo, vm and 'All
+    # users' group link
+    verify_num_links @all_links_at_start, 5
+
+    verify_link response_items, 'arvados#user', true, 'permission', 'can_login',
+        created['uuid'], created['email'], 'arvados#user', false, 'User'
+
+    verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
+        'test_repo', created['uuid'], 'arvados#repository', true, 'Repository'
+
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+        'All users', created['uuid'], 'arvados#group', true, 'Group'
+
+    verify_link response_items, 'arvados#virtualMachine', true, 'permission', 'can_login',
+        @vm_uuid, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
+  end
+
+  test "create user as non admin user and expect error" do
+    authorize_with :active
+
+    post :create, {
+      user: {email: 'foo@example.com'}
+    }
+
+    response_body = JSON.parse(@response.body)
+    response_errors = response_body['errors']
+    assert_not_nil response_errors, 'Expected error in response'
+    assert (response_errors.first.include? 'PermissionDenied'),
+          'Expected PermissionDeniedError'
+  end
+
+  test "setup user as non admin user and expect error" do
+    authorize_with :active
+
+    post :setup, {
+      openid_prefix: 'https://www.google.com/accounts/o8/id',
+      user: {email: 'foo@example.com'}
+    }
+
+    response_body = JSON.parse(@response.body)
+    response_errors = response_body['errors']
+    assert_not_nil response_errors, 'Expected error in response'
+    assert (response_errors.first.include? 'Forbidden'),
+          'Expected Forbidden error'
+  end
+
+  test "setup active user with repo and no vm" do
+    authorize_with :admin
+    active_user = users(:active)
+
+    # invoke setup with a repository
+    post :setup, {
+      repo_name: 'new_repo',
+      uuid: active_user['uuid']
+    }
+
+    assert_response :success
+
+    response_items = JSON.parse(@response.body)['items']
+    created = find_obj_in_resp response_items, 'User', nil
+
+    assert_equal active_user[:email], created['email'], 'expected input email'
+
+    # verify links
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+        'All users', created['uuid'], 'arvados#group', true, 'Group'
+
+    verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
+        'new_repo', created['uuid'], 'arvados#repository', true, 'Repository'
+
+    verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',
+        nil, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
+  end
+
+  test "setup active user with vm and no repo" do
+    authorize_with :admin
+    active_user = users(:active)
+
+    # invoke setup with a virtual machine and no repository
+    post :setup, {
+      vm_uuid: @vm_uuid,
+      uuid: active_user['uuid'],
+      email: 'junk_email'
+    }
+
+    assert_response :success
+
+    response_items = JSON.parse(@response.body)['items']
+    created = find_obj_in_resp response_items, 'User', nil
+
+    assert_equal active_user['email'], created['email'], 'expected original email'
+
+    # verify links
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+        'All users', created['uuid'], 'arvados#group', true, 'Group'
+
+    verify_link response_items, 'arvados#repository', false, 'permission', 'can_manage',
+        'new_repo', created['uuid'], 'arvados#repository', true, 'Repository'
+
+    verify_link response_items, 'arvados#virtualMachine', true, 'permission', 'can_login',
+        @vm_uuid, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
+  end
+
+  test "unsetup active user" do
+    active_user = users(:active)
+    assert_not_nil active_user['uuid'], 'expected uuid for the active user'
+    assert active_user['is_active'], 'expected is_active for active user'
+
+    verify_link_existence active_user['uuid'], active_user['email'],
+          false, true, false, true, true
+
+    authorize_with :admin
+
+    # now unsetup this user
+    post :unsetup, id: active_user['uuid']
+    assert_response :success
+
+    response_user = JSON.parse(@response.body)
+    assert_not_nil response_user['uuid'], 'expected uuid for the unsetup user'
+    assert_equal active_user['uuid'], response_user['uuid'], 'expected uuid not found'
+    assert !response_user['is_active'], 'expected user to be inactive'
+    assert !response_user['is_invited'], 'expected user to be uninvited'
+
+    verify_link_existence response_user['uuid'], response_user['email'],
+          false, false, false, false, false
+
+    active_user = User.find_by_uuid(users(:active).uuid)
+    readable_groups = active_user.groups_i_can(:read)
+    all_users_group = Group.all.collect(&:uuid).select { |g| g.match /-f+$/ }
+    refute_includes(readable_groups, all_users_group,
+                    "active user can read All Users group after being deactivated")
+    assert_equal(false, active_user.is_invited,
+                 "active user is_invited after being deactivated & reloaded")
+  end
+
+  test "setup user with send notification param false and verify no email" do
+    authorize_with :admin
+
+    post :setup, {
+      openid_prefix: 'http://www.example.com/account',
+      send_notification_email: 'false',
+      user: {
+        email: "foo@example.com"
+      }
+    }
+
+    assert_response :success
+    response_items = JSON.parse(@response.body)['items']
+    created = find_obj_in_resp response_items, 'User', nil
+    assert_not_nil created['uuid'], 'expected uuid for the new user'
+    assert_equal created['email'], 'foo@example.com', 'expected given email'
+
+    setup_email = ActionMailer::Base.deliveries.last
+    assert_nil setup_email, 'expected no setup email'
+  end
+
+  test "setup user with send notification param true and verify email" do
+    authorize_with :admin
+
+    post :setup, {
+      openid_prefix: 'http://www.example.com/account',
+      send_notification_email: 'true',
+      user: {
+        email: "foo@example.com"
+      }
+    }
+
+    assert_response :success
+    response_items = JSON.parse(@response.body)['items']
+    created = find_obj_in_resp response_items, 'User', nil
+    assert_not_nil created['uuid'], 'expected uuid for the new user'
+    assert_equal created['email'], 'foo@example.com', 'expected given email'
+
+    setup_email = ActionMailer::Base.deliveries.last
+    assert_not_nil setup_email, 'Expected email after setup'
+
+    assert_equal Rails.configuration.user_notifier_email_from, setup_email.from[0]
+    assert_equal 'foo@example.com', setup_email.to[0]
+    assert_equal 'Welcome to Curoverse', setup_email.subject
+    assert (setup_email.body.to_s.include? 'Your Arvados account has been set up'),
+        'Expected Your Arvados account has been set up in email body'
+    assert (setup_email.body.to_s.include? 'foo@example.com'),
+        'Expected user email in email body'
+    assert (setup_email.body.to_s.include? Rails.configuration.workbench_address),
+        'Expected workbench url in email body'
+  end
+
+  test "non-admin user can get basic information about readable users" do
+    authorize_with :spectator
+    get(:index)
+    check_non_admin_index
+    check_readable_users_index [:spectator], [:inactive, :active]
+  end
+
+  test "non-admin user gets only safe attributes from users#show" do
+    g = act_as_system_user do
+      create :group
+    end
+    users = create_list :active_user, 2, join_groups: [g]
+    token = create :token, user: users[0]
+    authorize_with_token token
+    get :show, id: users[1].uuid
+    check_non_admin_show
+  end
+
+  [2, 4].each do |limit|
+    test "non-admin user can limit index to #{limit}" do
+      g = act_as_system_user do
+        create :group
+      end
+      users = create_list :active_user, 4, join_groups: [g]
+      token = create :token, user: users[0]
+
+      authorize_with_token token
+      get(:index, limit: limit)
+      check_non_admin_index
+      assert_equal(limit, json_response["items"].size,
+                   "non-admin index limit was ineffective")
+    end
+  end
+
+  test "admin has full index powers" do
+    authorize_with :admin
+    check_inactive_user_findable
+  end
+
+  test "reader token can grant admin index powers" do
+    authorize_with :spectator
+    check_inactive_user_findable(reader_tokens: [api_token(:admin)])
+  end
+
+  test "admin can filter on user.is_active" do
+    authorize_with :admin
+    get(:index, filters: [["is_active", "=", "true"]])
+    assert_response :success
+    check_readable_users_index [:active, :spectator], [:inactive]
+  end
+
+  test "admin can search where user.is_active" do
+    authorize_with :admin
+    get(:index, where: {is_active: true})
+    assert_response :success
+    check_readable_users_index [:active, :spectator], [:inactive]
+  end
+
+  test "update active_no_prefs user profile and expect notification email" do
+    authorize_with :admin
+
+    put :update, {
+      id: users(:active_no_prefs).uuid,
+      user: {
+        prefs: {:profile => {'organization' => 'example.com'}}
+      }
+    }
+    assert_response :success
+
+    found_email = false
+    ActionMailer::Base.deliveries.andand.each do |email|
+      if email.subject == "Profile created by #{users(:active_no_prefs).email}"
+        found_email = true
+        break
+      end
+    end
+    assert_equal true, found_email, 'Expected email after creating profile'
+  end
+
+  test "update active_no_prefs_profile user profile and expect notification email" do
+    authorize_with :admin
+
+    user = {}
+    user[:prefs] = users(:active_no_prefs_profile).prefs
+    user[:prefs][:profile] = {:profile => {'organization' => 'example.com'}}
+    put :update, {
+      id: users(:active_no_prefs_profile).uuid,
+      user: user
+    }
+    assert_response :success
+
+    found_email = false
+    ActionMailer::Base.deliveries.andand.each do |email|
+      if email.subject == "Profile created by #{users(:active_no_prefs_profile).email}"
+        found_email = true
+        break
+      end
+    end
+    assert_equal true, found_email, 'Expected email after creating profile'
+  end
+
+  test "update active user profile and expect no notification email" do
+    authorize_with :admin
+
+    put :update, {
+      id: users(:active).uuid,
+      user: {
+        prefs: {:profile => {'organization' => 'anotherexample.com'}}
+      }
+    }
+    assert_response :success
+
+    found_email = false
+    ActionMailer::Base.deliveries.andand.each do |email|
+      if email.subject == "Profile created by #{users(:active).email}"
+        found_email = true
+        break
+      end
+    end
+    assert_equal false, found_email, 'Expected no email after updating profile'
+  end
+
+  test "user API response includes writable_by" do
+    authorize_with :active
+    get :current
+    assert_response :success
+    assert_includes(json_response["writable_by"], users(:active).uuid,
+                    "user's writable_by should include self")
+    assert_includes(json_response["writable_by"], users(:active).owner_uuid,
+                    "user's writable_by should include its owner_uuid")
+  end
+
+
+  NON_ADMIN_USER_DATA = ["uuid", "kind", "is_active", "email", "first_name",
+                         "last_name"].sort
+
+  def check_non_admin_index
+    assert_response :success
+    response_items = json_response["items"]
+    assert_not_nil response_items
+    response_items.each do |user_data|
+      check_non_admin_item user_data
+      assert(user_data["is_active"], "non-admin index returned inactive user")
+    end
+  end
+
+  def check_non_admin_show
+    assert_response :success
+    check_non_admin_item json_response
+  end
+
+  def check_non_admin_item user_data
+    assert_equal(NON_ADMIN_USER_DATA, user_data.keys.sort,
+                 "data in response had missing or extra attributes")
+    assert_equal("arvados#user", user_data["kind"])
+  end
+
+
+  def check_readable_users_index expect_present, expect_missing
+    response_uuids = json_response["items"].map { |u| u["uuid"] }
+    expect_present.each do |user_key|
+      assert_includes(response_uuids, users(user_key).uuid,
+                      "#{user_key} missing from index")
+    end
+    expect_missing.each do |user_key|
+      refute_includes(response_uuids, users(user_key).uuid,
+                      "#{user_key} included in index")
+    end
+  end
+
+  def check_inactive_user_findable(params={})
+    inactive_user = users(:inactive)
+    get(:index, params.merge(filters: [["email", "=", inactive_user.email]]))
+    assert_response :success
+    user_list = json_response["items"]
+    assert_equal(1, user_list.andand.count)
+    # This test needs to check a column non-admins have no access to,
+    # to ensure that admins see all user information.
+    assert_equal(inactive_user.identity_url, user_list.first["identity_url"],
+                 "admin's filtered index did not return inactive user")
+  end
+
+  def verify_num_links (original_links, expected_additional_links)
+    links_now = Link.all
+    assert_equal expected_additional_links, Link.all.size-original_links.size,
+        "Expected #{expected_additional_links.inspect} more links"
+  end
+
+  def find_obj_in_resp (response_items, object_type, head_kind=nil)
+    return_obj = nil
+    response_items
+    response_items.each { |x|
+      if !x
+        next
+      end
+
+      if object_type == 'User'
+        if ArvadosModel::resource_class_for_uuid(x['uuid']) == User
+          return_obj = x
+          break
+        end
+      else  # looking for a link
+        if x['head_uuid'] and ArvadosModel::resource_class_for_uuid(x['head_uuid']).kind == head_kind
+          return_obj = x
+          break
+        end
+      end
+    }
+    return return_obj
+  end
+end
diff --git a/services/api/test/functional/arvados/v1/virtual_machines_controller_test.rb b/services/api/test/functional/arvados/v1/virtual_machines_controller_test.rb
new file mode 100644 (file)
index 0000000..fd7431d
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class Arvados::V1::VirtualMachinesControllerTest < ActionController::TestCase
+end
diff --git a/services/api/test/functional/database_controller_test.rb b/services/api/test/functional/database_controller_test.rb
new file mode 100644 (file)
index 0000000..4bda0d0
--- /dev/null
@@ -0,0 +1,47 @@
+require 'test_helper'
+
+class DatabaseControllerTest < ActionController::TestCase
+  include CurrentApiClient
+
+  test "reset fails with non-admin token" do
+    authorize_with :active
+    post :reset
+    assert_response 403
+  end
+
+  test "route not found when not in test mode" do
+    authorize_with :admin
+    env_was = Rails.env
+    begin
+      Rails.env = 'production'
+      Rails.application.reload_routes!
+      assert_raises ActionController::RoutingError do
+        post :reset
+      end
+    ensure
+      Rails.env = env_was
+      Rails.application.reload_routes!
+    end
+  end
+
+  test "reset fails when a non-test-fixture user exists" do
+    act_as_system_user do
+      User.create!(uuid: 'abcde-tpzed-123451234512345', email: 'bar@example.net')
+    end
+    authorize_with :admin
+    post :reset
+    assert_response 403
+  end
+
+  test "reset succeeds with admin token" do
+    new_uuid = nil
+    act_as_system_user do
+      new_uuid = Specimen.create.uuid
+    end
+    assert_not_empty Specimen.where(uuid: new_uuid)
+    authorize_with :admin
+    post :reset
+    assert_response 200
+    assert_empty Specimen.where(uuid: new_uuid)
+  end
+end
diff --git a/services/api/test/functional/user_sessions_controller_test.rb b/services/api/test/functional/user_sessions_controller_test.rb
new file mode 100644 (file)
index 0000000..1d85ef3
--- /dev/null
@@ -0,0 +1,16 @@
+require 'test_helper'
+
+class UserSessionsControllerTest < ActionController::TestCase
+
+  test "new user from new api client" do
+    authorize_with :inactive
+    api_client_page = 'http://client.example.com/home'
+    get :login, return_to: api_client_page
+    assert_response :redirect
+    assert_equal(0, @response.redirect_url.index(api_client_page + '?'),
+                 'Redirect url ' + @response.redirect_url +
+                 ' should start with ' + api_client_page + '?')
+    assert_not_nil assigns(:api_client)
+  end
+
+end
diff --git a/services/api/test/helpers/git_test_helper.rb b/services/api/test/helpers/git_test_helper.rb
new file mode 100644 (file)
index 0000000..67e99c1
--- /dev/null
@@ -0,0 +1,28 @@
+require 'fileutils'
+require 'tmpdir'
+
+# Commit log for "foo" repository in test.git.tar
+# master is the main branch
+# b1 is a branch off of master
+# tag1 is a tag
+#
+# 1de84a8 * b1
+# 077ba2a * master
+# 4fe459a * tag1
+# 31ce37f * foo
+
+module GitTestHelper
+  def self.included base
+    base.setup do
+      @tmpdir = Dir.mktmpdir()
+      system("tar", "-xC", @tmpdir, "-f", "test/test.git.tar")
+      Rails.configuration.git_repositories_dir = "#{@tmpdir}/test"
+      Commit.refresh_repositories
+    end
+
+    base.teardown do
+      FileUtils.remove_entry @tmpdir, true
+      Commit.refresh_repositories
+    end
+  end
+end
diff --git a/services/api/test/helpers/users_test_helper.rb b/services/api/test/helpers/users_test_helper.rb
new file mode 100644 (file)
index 0000000..2a61820
--- /dev/null
@@ -0,0 +1,100 @@
+module UsersTestHelper
+  def verify_link(response_items, link_object_name, expect_link, link_class,
+        link_name, head_uuid, tail_uuid, head_kind, fetch_object, class_name)
+    link = find_obj_in_resp response_items, 'arvados#link', link_object_name
+
+    if !expect_link
+      assert_nil link, "Expected no link for #{link_object_name}"
+      return
+    end
+
+    assert_not_nil link, "Expected link for #{link_object_name}"
+
+    if fetch_object
+      object = Object.const_get(class_name).where(name: head_uuid)
+      assert [] != object, "expected #{class_name} with name #{head_uuid}"
+      head_uuid = object.first[:uuid]
+    end
+    assert_equal link_class, link['link_class'],
+        "did not find expected link_class for #{link_object_name}"
+
+    assert_equal link_name, link['name'],
+        "did not find expected link_name for #{link_object_name}"
+
+    assert_equal tail_uuid, link['tail_uuid'],
+        "did not find expected tail_uuid for #{link_object_name}"
+
+    assert_equal head_kind, link['head_kind'],
+        "did not find expected head_kind for #{link_object_name}"
+
+    assert_equal head_uuid, link['head_uuid'],
+        "did not find expected head_uuid for #{link_object_name}"
+  end
+
+  def verify_system_group_permission_link_for user_uuid
+    assert_equal 1, Link.where(link_class: 'permission',
+                               name: 'can_manage',
+                               tail_uuid: system_group_uuid,
+                               head_uuid: user_uuid).count
+  end
+
+  def verify_link_existence uuid, email, expect_oid_login_perms,
+      expect_repo_perms, expect_vm_perms, expect_group_perms, expect_signatures
+    # verify that all links are deleted for the user
+    oid_login_perms = Link.where(tail_uuid: email,
+                                 link_class: 'permission',
+                                 name: 'can_login').where("head_uuid like ?", User.uuid_like_pattern)
+    if expect_oid_login_perms
+      assert oid_login_perms.any?, "expected oid_login_perms"
+    else
+      assert !oid_login_perms.any?, "expected all oid_login_perms deleted"
+    end
+
+    repo_perms = Link.where(tail_uuid: uuid,
+                            link_class: 'permission',
+                            name: 'can_manage').where("head_uuid like ?", Repository.uuid_like_pattern)
+    if expect_repo_perms
+      assert repo_perms.any?, "expected repo_perms"
+    else
+      assert !repo_perms.any?, "expected all repo_perms deleted"
+    end
+
+    vm_login_perms = Link.
+      where(tail_uuid: uuid,
+            link_class: 'permission',
+            name: 'can_login').
+      where("head_uuid like ?",
+            VirtualMachine.uuid_like_pattern).
+      where('uuid <> ?',
+            links(:auto_setup_vm_login_username_can_login_to_test_vm).uuid)
+    if expect_vm_perms
+      assert vm_login_perms.any?, "expected vm_login_perms"
+    else
+      assert !vm_login_perms.any?, "expected all vm_login_perms deleted"
+    end
+
+    group = Group.where(name: 'All users').select do |g|
+      g[:uuid].match /-f+$/
+    end.first
+    group_read_perms = Link.where(tail_uuid: uuid,
+                                  head_uuid: group[:uuid],
+                                  link_class: 'permission',
+                                  name: 'can_read')
+    if expect_group_perms
+      assert group_read_perms.any?, "expected all users group read perms"
+    else
+      assert !group_read_perms.any?, "expected all users group perm deleted"
+    end
+
+    signed_uuids = Link.where(link_class: 'signature',
+                              tail_uuid: uuid)
+
+    if expect_signatures
+      assert signed_uuids.any?, "expected signatures"
+    else
+      assert !signed_uuids.any?, "expected all signatures deleted"
+    end
+
+  end
+
+end
diff --git a/services/api/test/integration/.gitkeep b/services/api/test/integration/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/api/test/integration/api_client_authorizations_api_test.rb b/services/api/test/integration/api_client_authorizations_api_test.rb
new file mode 100644 (file)
index 0000000..78841fe
--- /dev/null
@@ -0,0 +1,47 @@
+require 'test_helper'
+
+class ApiClientAuthorizationsApiTest < ActionDispatch::IntegrationTest
+  fixtures :all
+
+  test "create system auth" do
+    post "/arvados/v1/api_client_authorizations/create_system_auth", {:format => :json, :scopes => ['test'].to_json}, {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:admin_trustedclient).api_token}"}
+    assert_response :success
+  end
+
+  test "create token for different user" do
+    post "/arvados/v1/api_client_authorizations", {
+      :format => :json,
+      :api_client_authorization => {
+        :owner_uuid => users(:spectator).uuid
+      }
+    }, {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:admin_trustedclient).api_token}"}
+    assert_response :success
+
+    get "/arvados/v1/users/current", {
+      :format => :json
+    }, {'HTTP_AUTHORIZATION' => "OAuth2 #{json_response['api_token']}"}
+    @json_response = nil
+    assert_equal users(:spectator).uuid, json_response['uuid']
+  end
+
+  test "refuse to create token for different user if not trusted client" do
+    post "/arvados/v1/api_client_authorizations", {
+      :format => :json,
+      :api_client_authorization => {
+        :owner_uuid => users(:spectator).uuid
+      }
+    }, {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:admin).api_token}"}
+    assert_response 403
+  end
+
+  test "refuse to create token for different user if not admin" do
+    post "/arvados/v1/api_client_authorizations", {
+      :format => :json,
+      :api_client_authorization => {
+        :owner_uuid => users(:spectator).uuid
+      }
+    }, {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:active_trustedclient).api_token}"}
+    assert_response 403
+  end
+
+end
diff --git a/services/api/test/integration/api_client_authorizations_scopes_test.rb b/services/api/test/integration/api_client_authorizations_scopes_test.rb
new file mode 100644 (file)
index 0000000..20f83dc
--- /dev/null
@@ -0,0 +1,85 @@
+# The v1 API uses token scopes to control access to the REST API at the path
+# level.  This is enforced in the base ApplicationController, making it a
+# functional test that we can run against many different controllers.
+
+require 'test_helper'
+
+class Arvados::V1::ApiTokensScopeTest < ActionController::IntegrationTest
+  fixtures :all
+
+  def v1_url(*parts)
+    (['arvados', 'v1'] + parts).join('/')
+  end
+
+  test "user list token can only list users" do
+    get_args = [{}, auth(:active_userlist)]
+    get(v1_url('users'), *get_args)
+    assert_response :success
+    get(v1_url('users', ''), *get_args)  # Add trailing slash.
+    assert_response :success
+    get(v1_url('users', 'current'), *get_args)
+    assert_response 403
+    get(v1_url('virtual_machines'), *get_args)
+    assert_response 403
+  end
+
+  test "specimens token can see exactly owned specimens" do
+    get_args = [{}, auth(:active_specimens)]
+    get(v1_url('specimens'), *get_args)
+    assert_response 403
+    get(v1_url('specimens', specimens(:owned_by_active_user).uuid), *get_args)
+    assert_response :success
+    get(v1_url('specimens', specimens(:owned_by_spectator).uuid), *get_args)
+    assert_includes(403..404, @response.status)
+  end
+
+  test "token with multiple scopes can use them all" do
+    def get_token_count
+      get(v1_url('api_client_authorizations'), {}, auth(:active_apitokens))
+      assert_response :success
+      token_count = JSON.parse(@response.body)['items_available']
+      assert_not_nil(token_count, "could not find token count")
+      token_count
+    end
+    # Test the GET scope.
+    token_count = get_token_count
+    # Test the POST scope.
+    post(v1_url('api_client_authorizations'),
+         {api_client_authorization: {user_id: users(:active).id}},
+         auth(:active_apitokens))
+    assert_response :success
+    assert_equal(token_count + 1, get_token_count,
+                 "token count suggests POST was not accepted")
+    # Test other requests are denied.
+    get(v1_url('api_client_authorizations',
+               api_client_authorizations(:active_apitokens).uuid),
+        {}, auth(:active_apitokens))
+    assert_response 403
+  end
+
+  test "token without scope has no access" do
+    # Logs are good for this test, because logs have relatively
+    # few access controls enforced at the model level.
+    req_args = [{}, auth(:admin_noscope)]
+    get(v1_url('logs'), *req_args)
+    assert_response 403
+    get(v1_url('logs', logs(:log1).uuid), *req_args)
+    assert_response 403
+    post(v1_url('logs'), *req_args)
+    assert_response 403
+  end
+
+  test "VM login scopes work" do
+    # A system administration script makes an API token with limited scope
+    # for virtual machines to let it see logins.
+    def vm_logins_url(name)
+      v1_url('virtual_machines', virtual_machines(name).uuid, 'logins')
+    end
+    get_args = [{}, auth(:admin_vm)]
+    get(vm_logins_url(:testvm), *get_args)
+    assert_response :success
+    get(vm_logins_url(:testvm2), *get_args)
+    assert_includes(400..419, @response.status,
+                    "getting testvm2 logins should have failed")
+  end
+end
diff --git a/services/api/test/integration/collections_api_test.rb b/services/api/test/integration/collections_api_test.rb
new file mode 100644 (file)
index 0000000..7680592
--- /dev/null
@@ -0,0 +1,137 @@
+require 'test_helper'
+
+class CollectionsApiTest < ActionDispatch::IntegrationTest
+  fixtures :all
+
+  test "should get index" do
+    get "/arvados/v1/collections", {:format => :json}, auth(:active)
+    assert_response :success
+    assert_equal "arvados#collectionList", json_response['kind']
+  end
+
+  test "get index with filters= (empty string)" do
+    get "/arvados/v1/collections", {:format => :json, :filters => ''}, auth(:active)
+    assert_response :success
+    assert_equal "arvados#collectionList", json_response['kind']
+  end
+
+  test "get index with invalid filters (array of strings) responds 422" do
+    get "/arvados/v1/collections", {
+      :format => :json,
+      :filters => ['uuid', '=', 'ad02e37b6a7f45bbe2ead3c29a109b8a+54'].to_json
+    }, auth(:active)
+    assert_response 422
+    assert_match /nvalid element.*not an array/, json_response['errors'].join(' ')
+  end
+
+  test "get index with invalid filters (unsearchable column) responds 422" do
+    get "/arvados/v1/collections", {
+      :format => :json,
+      :filters => [['this_column_does_not_exist', '=', 'bogus']].to_json
+    }, auth(:active)
+    assert_response 422
+    assert_match /nvalid attribute/, json_response['errors'].join(' ')
+  end
+
+  test "get index with invalid filters (invalid operator) responds 422" do
+    get "/arvados/v1/collections", {
+      :format => :json,
+      :filters => [['uuid', ':-(', 'displeased']].to_json
+    }, auth(:active)
+    assert_response 422
+    assert_match /nvalid operator/, json_response['errors'].join(' ')
+  end
+
+  test "get index with invalid filters (invalid operand type) responds 422" do
+    get "/arvados/v1/collections", {
+      :format => :json,
+      :filters => [['uuid', '=', {foo: 'bar'}]].to_json
+    }, auth(:active)
+    assert_response 422
+    assert_match /nvalid operand type/, json_response['errors'].join(' ')
+  end
+
+  test "get index with where= (empty string)" do
+    get "/arvados/v1/collections", {:format => :json, :where => ''}, auth(:active)
+    assert_response :success
+    assert_equal "arvados#collectionList", json_response['kind']
+  end
+
+  test "controller 404 response is json" do
+    get "/arvados/v1/thingsthatdonotexist", {:format => :xml}, auth(:active)
+    assert_response 404
+    assert_equal 1, json_response['errors'].length
+    assert_equal true, json_response['errors'][0].is_a?(String)
+  end
+
+  test "object 404 response is json" do
+    get "/arvados/v1/groups/zzzzz-j7d0g-o5ba971173cup4f", {}, auth(:active)
+    assert_response 404
+    assert_equal 1, json_response['errors'].length
+    assert_equal true, json_response['errors'][0].is_a?(String)
+  end
+
+  test "store collection as json" do
+    signing_opts = {
+      key: Rails.configuration.blob_signing_key,
+      api_token: api_token(:active),
+    }
+    signed_locator = Blob.sign_locator('bad42fa702ae3ea7d888fef11b46f450+44',
+                                       signing_opts)
+    post "/arvados/v1/collections", {
+      format: :json,
+      collection: "{\"manifest_text\":\". #{signed_locator} 0:44:md5sum.txt\\n\",\"portable_data_hash\":\"ad02e37b6a7f45bbe2ead3c29a109b8a+54\"}"
+    }, auth(:active)
+    assert_response 200
+    assert_equal 'ad02e37b6a7f45bbe2ead3c29a109b8a+54', json_response['portable_data_hash']
+  end
+
+  test "store collection with manifest_text only" do
+    signing_opts = {
+      key: Rails.configuration.blob_signing_key,
+      api_token: api_token(:active),
+    }
+    signed_locator = Blob.sign_locator('bad42fa702ae3ea7d888fef11b46f450+44',
+                                       signing_opts)
+    post "/arvados/v1/collections", {
+      format: :json,
+      collection: "{\"manifest_text\":\". #{signed_locator} 0:44:md5sum.txt\\n\"}"
+    }, auth(:active)
+    assert_response 200
+    assert_equal 'ad02e37b6a7f45bbe2ead3c29a109b8a+54', json_response['portable_data_hash']
+  end
+
+  test "store collection then update name" do
+    signing_opts = {
+      key: Rails.configuration.blob_signing_key,
+      api_token: api_token(:active),
+    }
+    signed_locator = Blob.sign_locator('bad42fa702ae3ea7d888fef11b46f450+44',
+                                       signing_opts)
+    post "/arvados/v1/collections", {
+      format: :json,
+      collection: "{\"manifest_text\":\". #{signed_locator} 0:44:md5sum.txt\\n\",\"portable_data_hash\":\"ad02e37b6a7f45bbe2ead3c29a109b8a+54\"}"
+    }, auth(:active)
+    assert_response 200
+    assert_equal 'ad02e37b6a7f45bbe2ead3c29a109b8a+54', json_response['portable_data_hash']
+
+    put "/arvados/v1/collections/#{json_response['uuid']}", {
+      format: :json,
+      collection: { name: "a name" }
+    }, auth(:active)
+
+    assert_response 200
+    assert_equal 'ad02e37b6a7f45bbe2ead3c29a109b8a+54', json_response['portable_data_hash']
+    assert_equal 'a name', json_response['name']
+
+    get "/arvados/v1/collections/#{json_response['uuid']}", {
+      format: :json,
+    }, auth(:active)
+
+    assert_response 200
+    assert_equal 'ad02e37b6a7f45bbe2ead3c29a109b8a+54', json_response['portable_data_hash']
+    assert_equal 'a name', json_response['name']
+  end
+
+
+end
diff --git a/services/api/test/integration/cross_origin_test.rb b/services/api/test/integration/cross_origin_test.rb
new file mode 100644 (file)
index 0000000..ebe7ce7
--- /dev/null
@@ -0,0 +1,76 @@
+require 'test_helper'
+
+class CrossOriginTest < ActionDispatch::IntegrationTest
+  def options *args
+    # Rails doesn't support OPTIONS the same way as GET, POST, etc.
+    reset! unless integration_session
+    integration_session.__send__(:process, :options, *args).tap do
+      copy_session_variables!
+    end
+  end
+
+  %w(/login /logout /auth/example/callback /auth/joshid).each do |path|
+    test "OPTIONS requests are refused at #{path}" do
+      options path, {}, {}
+      assert_no_cors_headers
+    end
+
+    test "CORS headers do not exist at GET #{path}" do
+      get path, {}, {}
+      assert_no_cors_headers
+    end
+  end
+
+  %w(/discovery/v1/apis/arvados/v1/rest).each do |path|
+    test "CORS headers are set at GET #{path}" do
+      get path, {}, {}
+      assert_response :success
+      assert_cors_headers
+    end
+  end
+
+  ['/arvados/v1/collections',
+   '/arvados/v1/users',
+   '/arvados/v1/api_client_authorizations'].each do |path|
+    test "CORS headers are set and body is stub at OPTIONS #{path}" do
+      options path, {}, {}
+      assert_response :success
+      assert_cors_headers
+      assert_equal '-', response.body
+    end
+
+    test "CORS headers are set at authenticated GET #{path}" do
+      get path, {}, auth(:active_trustedclient)
+      assert_response :success
+      assert_cors_headers
+    end
+
+    # CORS headers are OK only if cookies are *not* used to determine
+    # whether a transaction is allowed. The following is a (far from
+    # perfect) test that the usual Rails cookie->session mechanism
+    # does not grant access to any resources.
+    ['GET', 'POST'].each do |method|
+      test "Session does not work at #{method} #{path}" do
+        send method.downcase, path, {format: 'json'}, {user_id: 1}
+        assert_response 401
+        assert_cors_headers
+      end
+    end
+  end
+
+  protected
+  def assert_cors_headers
+    assert_equal '*', response.headers['Access-Control-Allow-Origin']
+    allowed = response.headers['Access-Control-Allow-Methods'].split(', ')
+    %w(GET HEAD POST PUT DELETE).each do |m|
+      assert_includes allowed, m, "A-C-A-Methods should include #{m}"
+    end
+    assert_equal 'Authorization', response.headers['Access-Control-Allow-Headers']
+  end
+
+  def assert_no_cors_headers
+    response.headers.keys.each do |h|
+      assert_no_match /^Access-Control-/i, h
+    end
+  end
+end
diff --git a/services/api/test/integration/crunch_dispatch_test.rb b/services/api/test/integration/crunch_dispatch_test.rb
new file mode 100644 (file)
index 0000000..81767af
--- /dev/null
@@ -0,0 +1,38 @@
+require 'test_helper'
+require 'helpers/git_test_helper'
+
+class CrunchDispatchTest < ActionDispatch::IntegrationTest
+  include GitTestHelper
+
+  fixtures :all
+
+  @@crunch_dispatch_pid = nil
+
+  def launch_crunch_dispatch
+    @@crunch_dispatch_pid = Process.fork {
+      ENV['PATH'] = ENV['HOME'] + '/arvados/services/crunch:' + ENV['PATH']
+      exec(ENV['HOME'] + '/arvados/services/api/script/crunch-dispatch.rb')
+    }
+  end
+
+  teardown do
+    if @@crunch_dispatch_pid
+      Process.kill "TERM", @@crunch_dispatch_pid
+      Process.wait
+      @@crunch_dispatch_pid = nil
+    end
+  end
+
+  test "job runs" do
+    post "/arvados/v1/jobs", {
+      format: "json",
+      job: {
+        script: "log",
+        repository: "crunch_dispatch_test",
+        script_version: "f35f99b7d32bac257f5989df02b9f12ee1a9b0d6",
+        script_parameters: "{}"
+      }
+    }, auth(:admin)
+    assert_response :success
+  end
+end
diff --git a/services/api/test/integration/database_reset_test.rb b/services/api/test/integration/database_reset_test.rb
new file mode 100644 (file)
index 0000000..58f2abf
--- /dev/null
@@ -0,0 +1,75 @@
+require 'test_helper'
+
+class DatabaseResetTest < ActionDispatch::IntegrationTest
+  self.use_transactional_fixtures = false
+
+  test "reset fails when Rails.env != 'test'" do
+    rails_env_was = Rails.env
+    begin
+      Rails.env = 'production'
+      Rails.application.reload_routes!
+      post '/database/reset', {}, auth(:admin)
+      assert_response 404
+    ensure
+      Rails.env = rails_env_was
+      Rails.application.reload_routes!
+    end
+  end
+
+  test "reset fails with non-admin token" do
+    post '/database/reset', {}, auth(:active)
+    assert_response 403
+  end
+
+  test "database reset doesn't break basic CRUD operations" do
+    active_auth = auth(:active)
+    admin_auth = auth(:admin)
+
+    authorize_with :admin
+    post '/database/reset', {}, admin_auth
+    assert_response :success
+
+    post '/arvados/v1/specimens', {specimen: '{}'}, active_auth
+    assert_response :success
+    new_uuid = json_response['uuid']
+
+    get '/arvados/v1/specimens/'+new_uuid, {}, active_auth
+    assert_response :success
+
+    put('/arvados/v1/specimens/'+new_uuid,
+        {specimen: '{"properties":{}}'}, active_auth)
+    assert_response :success
+
+    delete '/arvados/v1/specimens/'+new_uuid, {}, active_auth
+    assert_response :success
+
+    get '/arvados/v1/specimens/'+new_uuid, {}, active_auth
+    assert_response 404
+  end
+
+  test "roll back database change" do
+    active_auth = auth(:active)
+    admin_auth = auth(:admin)
+
+    old_uuid = specimens(:owned_by_active_user).uuid
+    authorize_with :admin
+    post '/database/reset', {}, admin_auth
+    assert_response :success
+
+    delete '/arvados/v1/specimens/' + old_uuid, {}, active_auth
+    assert_response :success
+    post '/arvados/v1/specimens', {specimen: '{}'}, active_auth
+    assert_response :success
+    new_uuid = json_response['uuid']
+
+    # Reset to fixtures.
+    post '/database/reset', {}, admin_auth
+    assert_response :success
+
+    # New specimen should disappear. Old specimen should reappear.
+    get '/arvados/v1/specimens/'+new_uuid, {}, active_auth
+    assert_response 404
+    get '/arvados/v1/specimens/'+old_uuid, {}, active_auth
+    assert_response :success
+  end
+end
diff --git a/services/api/test/integration/errors_test.rb b/services/api/test/integration/errors_test.rb
new file mode 100644 (file)
index 0000000..984f81f
--- /dev/null
@@ -0,0 +1,27 @@
+require 'test_helper'
+
+class ErrorsTest < ActionDispatch::IntegrationTest
+  fixtures :api_client_authorizations
+
+  %w(/arvados/v1/shoes /arvados/shoes /shoes /nodes /users).each do |path|
+    test "non-existent route #{path}" do
+      get path, {:format => :json}, auth(:active)
+      assert_nil assigns(:objects)
+      assert_nil assigns(:object)
+      assert_not_nil json_response['errors']
+      assert_response 404
+    end
+  end
+
+  n=0
+  Rails.application.routes.routes.each do |route|
+    test "route #{n += 1} '#{route.path.spec.to_s}' is not an accident" do
+      # Generally, new routes should appear under /arvados/v1/. If
+      # they appear elsewhere, that might have been caused by default
+      # rails generator behavior that we don't want.
+      assert_match(/^\/(|\*a|arvados\/v1\/.*|auth\/.*|login|logout|database\/reset|discovery\/.*|static\/.*|themes\/.*)(\(\.:format\))?$/,
+                   route.path.spec.to_s,
+                   "Unexpected new route: #{route.path.spec}")
+    end
+  end
+end
diff --git a/services/api/test/integration/groups_test.rb b/services/api/test/integration/groups_test.rb
new file mode 100644 (file)
index 0000000..0f6f93a
--- /dev/null
@@ -0,0 +1,42 @@
+require 'test_helper'
+
+class GroupsTest < ActionDispatch::IntegrationTest
+
+  test "get all pages of group-owned objects" do
+    limit = 5
+    offset = 0
+    items_available = nil
+    uuid_received = {}
+    owner_received = {}
+    while true
+      @json_response = nil
+
+      get "/arvados/v1/groups/contents", {
+        id: groups(:aproject).uuid,
+        limit: limit,
+        offset: offset,
+        format: :json,
+      }, auth(:active)
+
+      assert_response :success
+      assert_operator(0, :<, json_response['items'].count,
+                      "items_available=#{items_available} but received 0 "\
+                      "items with offset=#{offset}")
+      items_available ||= json_response['items_available']
+      assert_equal(items_available, json_response['items_available'],
+                   "items_available changed between page #{offset/limit} "\
+                   "and page #{1+offset/limit}")
+      json_response['items'].each do |item|
+        uuid = item['uuid']
+        assert_equal(nil, uuid_received[uuid],
+                     "Received '#{uuid}' again on page #{1+offset/limit}")
+        uuid_received[uuid] = true
+        owner_received[item['owner_uuid']] = true
+        offset += 1
+        assert_equal groups(:aproject).uuid, item['owner_uuid']
+      end
+      break if offset >= items_available
+    end
+  end
+
+end
diff --git a/services/api/test/integration/jobs_api_test.rb b/services/api/test/integration/jobs_api_test.rb
new file mode 100644 (file)
index 0000000..bf86b7d
--- /dev/null
@@ -0,0 +1,42 @@
+require 'test_helper'
+
+class JobsApiTest < ActionDispatch::IntegrationTest
+  fixtures :all
+
+  test "cancel job" do
+    post "/arvados/v1/jobs/#{jobs(:running).uuid}/cancel", {:format => :json}, {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:active).api_token}"}
+    assert_response :success
+    assert_equal "arvados#job", json_response['kind']
+    assert_not_nil json_response['cancelled_at']
+  end
+
+  test "cancel someone else's visible job" do
+    post "/arvados/v1/jobs/#{jobs(:runningbarbaz).uuid}/cancel", {:format => :json}, {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:spectator).api_token}"}
+    assert_response 403
+  end
+
+  test "cancel someone else's invisible job" do
+    post "/arvados/v1/jobs/#{jobs(:running).uuid}/cancel", {:format => :json}, {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:spectator).api_token}"}
+    assert_response 404
+  end
+
+  test "task qsequence values automatically increase monotonically" do
+    post_args = ["/arvados/v1/job_tasks",
+                 {job_task: {
+                     job_uuid: jobs(:running).uuid,
+                     sequence: 1,
+                   }},
+                 auth(:active)]
+    last_qsequence = -1
+    (1..3).each do |task_num|
+      @response = nil
+      post(*post_args)
+      assert_response :success
+      qsequence = json_response["qsequence"]
+      assert_not_nil(qsequence, "task not assigned qsequence")
+      assert_operator(qsequence, :>, last_qsequence,
+                      "qsequence did not increase between tasks")
+      last_qsequence = qsequence
+    end
+  end
+end
diff --git a/services/api/test/integration/keep_proxy_test.rb b/services/api/test/integration/keep_proxy_test.rb
new file mode 100644 (file)
index 0000000..aacda51
--- /dev/null
@@ -0,0 +1,28 @@
+require 'test_helper'
+
+class KeepProxyTest < ActionDispatch::IntegrationTest
+  test "request keep disks" do
+    get "/arvados/v1/keep_services/accessible", {:format => :json}, auth(:active)
+    assert_response :success
+    services = json_response['items']
+
+    assert_operator 2, :<=, services.length
+    services.each do |service|
+      assert_equal 'disk', service['service_type']
+    end
+  end
+
+  test "request keep proxy" do
+    get "/arvados/v1/keep_services/accessible", {:format => :json}, auth(:active).merge({'HTTP_X_EXTERNAL_CLIENT' => '1'})
+    assert_response :success
+    services = json_response['items']
+
+    assert_equal 1, services.length
+
+    assert_equal keep_services(:proxy).uuid, services[0]['uuid']
+    assert_equal keep_services(:proxy).service_host, services[0]['service_host']
+    assert_equal keep_services(:proxy).service_port, services[0]['service_port']
+    assert_equal keep_services(:proxy).service_ssl_flag, services[0]['service_ssl_flag']
+    assert_equal 'proxy', services[0]['service_type']
+  end
+end
diff --git a/services/api/test/integration/login_workflow_test.rb b/services/api/test/integration/login_workflow_test.rb
new file mode 100644 (file)
index 0000000..e0d6968
--- /dev/null
@@ -0,0 +1,25 @@
+require 'test_helper'
+
+class LoginWorkflowTest < ActionDispatch::IntegrationTest
+  test "default prompt to login is JSON" do
+    post('/arvados/v1/specimens', {specimen: {}},
+         {'HTTP_ACCEPT' => ''})
+    assert_response 401
+    assert_includes(json_response['errors'], "Not logged in")
+  end
+
+  test "login prompt respects JSON Accept header" do
+    post('/arvados/v1/specimens', {specimen: {}},
+         {'HTTP_ACCEPT' => 'application/json'})
+    assert_response 401
+    assert_includes(json_response['errors'], "Not logged in")
+  end
+
+  test "login prompt respects HTML Accept header" do
+    post('/arvados/v1/specimens', {specimen: {}},
+         {'HTTP_ACCEPT' => 'text/html'})
+    assert_response 302
+    assert_match(%r{/auth/joshid$}, @response.headers['Location'],
+                 "HTML login prompt did not include expected redirect")
+  end
+end
diff --git a/services/api/test/integration/permissions_test.rb b/services/api/test/integration/permissions_test.rb
new file mode 100644 (file)
index 0000000..44b5e6e
--- /dev/null
@@ -0,0 +1,374 @@
+require 'test_helper'
+
+class PermissionsTest < ActionDispatch::IntegrationTest
+  include CurrentApiClient  # for empty_collection
+  fixtures :users, :groups, :api_client_authorizations, :collections
+
+  test "adding and removing direct can_read links" do
+    # try to read collection as spectator
+    get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
+    assert_response 404
+
+    # try to add permission as spectator
+    post "/arvados/v1/links", {
+      :format => :json,
+      :link => {
+        tail_uuid: users(:spectator).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: collections(:foo_file).uuid,
+        properties: {}
+      }
+    }, auth(:spectator)
+    assert_response 422
+
+    # add permission as admin
+    post "/arvados/v1/links", {
+      :format => :json,
+      :link => {
+        tail_uuid: users(:spectator).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: collections(:foo_file).uuid,
+        properties: {}
+      }
+    }, auth(:admin)
+    u = json_response['uuid']
+    assert_response :success
+
+    # read collection as spectator
+    get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
+    assert_response :success
+
+    # try to delete permission as spectator
+    delete "/arvados/v1/links/#{u}", {:format => :json}, auth(:spectator)
+    assert_response 403
+
+    # delete permission as admin
+    delete "/arvados/v1/links/#{u}", {:format => :json}, auth(:admin)
+    assert_response :success
+
+    # try to read collection as spectator
+    get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
+    assert_response 404
+  end
+
+
+  test "adding can_read links from user to group, group to collection" do
+    # try to read collection as spectator
+    get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
+    assert_response 404
+
+    # add permission for spectator to read group
+    post "/arvados/v1/links", {
+      :format => :json,
+      :link => {
+        tail_uuid: users(:spectator).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: groups(:private).uuid,
+        properties: {}
+      }
+    }, auth(:admin)
+    assert_response :success
+
+    # try to read collection as spectator
+    get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
+    assert_response 404
+
+    # add permission for group to read collection
+    post "/arvados/v1/links", {
+      :format => :json,
+      :link => {
+        tail_uuid: groups(:private).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: collections(:foo_file).uuid,
+        properties: {}
+      }
+    }, auth(:admin)
+    u = json_response['uuid']
+    assert_response :success
+
+    # try to read collection as spectator
+    get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
+    assert_response :success
+
+    # delete permission for group to read collection
+    delete "/arvados/v1/links/#{u}", {:format => :json}, auth(:admin)
+    assert_response :success
+
+    # try to read collection as spectator
+    get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
+    assert_response 404
+
+  end
+
+
+  test "adding can_read links from group to collection, user to group" do
+    # try to read collection as spectator
+    get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
+    assert_response 404
+
+    # add permission for group to read collection
+    post "/arvados/v1/links", {
+      :format => :json,
+      :link => {
+        tail_uuid: groups(:private).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: collections(:foo_file).uuid,
+        properties: {}
+      }
+    }, auth(:admin)
+    assert_response :success
+
+    # try to read collection as spectator
+    get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
+    assert_response 404
+
+    # add permission for spectator to read group
+    post "/arvados/v1/links", {
+      :format => :json,
+      :link => {
+        tail_uuid: users(:spectator).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: groups(:private).uuid,
+        properties: {}
+      }
+    }, auth(:admin)
+    u = json_response['uuid']
+    assert_response :success
+
+    # try to read collection as spectator
+    get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
+    assert_response :success
+
+    # delete permission for spectator to read group
+    delete "/arvados/v1/links/#{u}", {:format => :json}, auth(:admin)
+    assert_response :success
+
+    # try to read collection as spectator
+    get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
+    assert_response 404
+
+  end
+
+  test "adding can_read links from user to group, group to group, group to collection" do
+    # try to read collection as spectator
+    get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
+    assert_response 404
+
+    # add permission for user to read group
+    post "/arvados/v1/links", {
+      :format => :json,
+      :link => {
+        tail_uuid: users(:spectator).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: groups(:private).uuid,
+        properties: {}
+      }
+    }, auth(:admin)
+    assert_response :success
+
+    # add permission for group to read group
+    post "/arvados/v1/links", {
+      :format => :json,
+      :link => {
+        tail_uuid: groups(:private).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: groups(:empty_lonely_group).uuid,
+        properties: {}
+      }
+    }, auth(:admin)
+    assert_response :success
+
+    # add permission for group to read collection
+    post "/arvados/v1/links", {
+      :format => :json,
+      :link => {
+        tail_uuid: groups(:empty_lonely_group).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: collections(:foo_file).uuid,
+        properties: {}
+      }
+    }, auth(:admin)
+    u = json_response['uuid']
+    assert_response :success
+
+    # try to read collection as spectator
+    get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
+    assert_response :success
+
+    # delete permission for group to read collection
+    delete "/arvados/v1/links/#{u}", {:format => :json}, auth(:admin)
+    assert_response :success
+
+    # try to read collection as spectator
+    get "/arvados/v1/collections/#{collections(:foo_file).uuid}", {:format => :json}, auth(:spectator)
+    assert_response 404
+  end
+
+  test "read-only group-admin cannot modify administered user" do
+    put "/arvados/v1/users/#{users(:active).uuid}", {
+      :user => {
+        first_name: 'KilroyWasHere'
+      },
+      :format => :json
+    }, auth(:rominiadmin)
+    assert_response 403
+  end
+
+  test "read-only group-admin cannot read or update non-administered user" do
+    get "/arvados/v1/users/#{users(:spectator).uuid}", {
+      :format => :json
+    }, auth(:rominiadmin)
+    assert_response 404
+
+    put "/arvados/v1/users/#{users(:spectator).uuid}", {
+      :user => {
+        first_name: 'KilroyWasHere'
+      },
+      :format => :json
+    }, auth(:rominiadmin)
+    assert_response 404
+  end
+
+  test "RO group-admin finds user's specimens, RW group-admin can update" do
+    [[:rominiadmin, false],
+     [:miniadmin, true]].each do |which_user, update_should_succeed|
+      get "/arvados/v1/specimens", {:format => :json}, auth(which_user)
+      assert_response :success
+      resp_uuids = json_response['items'].collect { |i| i['uuid'] }
+      [[true, specimens(:owned_by_active_user).uuid],
+       [true, specimens(:owned_by_private_group).uuid],
+       [false, specimens(:owned_by_spectator).uuid],
+      ].each do |should_find, uuid|
+        assert_equal(should_find, !resp_uuids.index(uuid).nil?,
+                     "%s should%s see %s in specimen list" %
+                     [which_user.to_s,
+                      should_find ? '' : 'not ',
+                      uuid])
+        put "/arvados/v1/specimens/#{uuid}", {
+          :specimen => {
+            properties: {
+              miniadmin_was_here: true
+            }
+          },
+          :format => :json
+        }, auth(which_user)
+        if !should_find
+          assert_response 404
+        elsif !update_should_succeed
+          assert_response 403
+        else
+          assert_response :success
+        end
+      end
+    end
+  end
+
+  test "get_permissions returns list" do
+    # First confirm that user :active cannot get permissions on group :public
+    get "/arvados/v1/permissions/#{groups(:public).uuid}", nil, auth(:active)
+    assert_response 404
+
+    # add some permissions, including can_manage
+    # permission for user :active
+    post "/arvados/v1/links", {
+      :format => :json,
+      :link => {
+        tail_uuid: users(:spectator).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: groups(:public).uuid,
+        properties: {}
+      }
+    }, auth(:admin)
+    assert_response :success
+    can_read_uuid = json_response['uuid']
+
+    post "/arvados/v1/links", {
+      :format => :json,
+      :link => {
+        tail_uuid: users(:inactive).uuid,
+        link_class: 'permission',
+        name: 'can_write',
+        head_uuid: groups(:public).uuid,
+        properties: {}
+      }
+    }, auth(:admin)
+    assert_response :success
+    can_write_uuid = json_response['uuid']
+
+    post "/arvados/v1/links", {
+      :format => :json,
+      :link => {
+        tail_uuid: users(:active).uuid,
+        link_class: 'permission',
+        name: 'can_manage',
+        head_uuid: groups(:public).uuid,
+        properties: {}
+      }
+    }, auth(:admin)
+    assert_response :success
+    can_manage_uuid = json_response['uuid']
+
+    # Now user :active should be able to retrieve permissions
+    # on group :public.
+    get("/arvados/v1/permissions/#{groups(:public).uuid}",
+        { :format => :json },
+        auth(:active))
+    assert_response :success
+
+    perm_uuids = json_response['items'].map { |item| item['uuid'] }
+    assert_includes perm_uuids, can_read_uuid, "can_read_uuid not found"
+    assert_includes perm_uuids, can_write_uuid, "can_write_uuid not found"
+    assert_includes perm_uuids, can_manage_uuid, "can_manage_uuid not found"
+  end
+
+  test "get_permissions returns 404 for nonexistent uuid" do
+    nonexistent = Group.generate_uuid
+    # make sure it really doesn't exist
+    get "/arvados/v1/groups/#{nonexistent}", nil, auth(:admin)
+    assert_response 404
+
+    get "/arvados/v1/permissions/#{nonexistent}", nil, auth(:active)
+    assert_response 404
+  end
+
+  test "get_permissions returns 404 for unreadable uuid" do
+    get "/arvados/v1/permissions/#{groups(:public).uuid}", nil, auth(:active)
+    assert_response 404
+  end
+
+  test "get_permissions returns 403 if user can read but not manage" do
+    post "/arvados/v1/links", {
+      :link => {
+        tail_uuid: users(:active).uuid,
+        link_class: 'permission',
+        name: 'can_read',
+        head_uuid: groups(:public).uuid,
+        properties: {}
+      }
+    }, auth(:admin)
+    assert_response :success
+
+    get "/arvados/v1/permissions/#{groups(:public).uuid}", nil, auth(:active)
+    assert_response 403
+  end
+
+  test "active user can read the empty collection" do
+    # The active user should be able to read the empty collection.
+
+    get("/arvados/v1/collections/#{empty_collection_uuid}",
+        { :format => :json },
+        auth(:active))
+    assert_response :success
+    assert_empty json_response['manifest_text'], "empty collection manifest_text is not empty"
+  end
+end
diff --git a/services/api/test/integration/pipeline_test.rb b/services/api/test/integration/pipeline_test.rb
new file mode 100644 (file)
index 0000000..a550246
--- /dev/null
@@ -0,0 +1,36 @@
+require 'test_helper'
+
+class PipelineTest < ActionDispatch::IntegrationTest
+  # These tests simulate the workflow of arv-run-pipeline-instance
+  # and other pipeline-running code.
+
+  def check_component_match(comp_key, comp_hash)
+    assert_response :success
+    built_json = json_response
+    built_component = built_json["components"][comp_key]
+    comp_hash.each_pair do |key, expected|
+      assert_equal(expected, built_component[key.to_s],
+                   "component's #{key} field changed")
+    end
+  end
+
+  test "creating a pipeline instance preserves required component parameters" do
+    comp_name = "test_component"
+    component = {
+      repository: "test_repo",
+      script: "test_script",
+      script_version: "test_refspec",
+      script_parameters: {},
+    }
+
+    post("/arvados/v1/pipeline_instances",
+         {pipeline_instance: {components: {comp_name => component}}.to_json},
+         auth(:active))
+    check_component_match(comp_name, component)
+    pi_uuid = json_response["uuid"]
+
+    @response = nil
+    get("/arvados/v1/pipeline_instances/#{pi_uuid}", {}, auth(:active))
+    check_component_match(comp_name, component)
+  end
+end
diff --git a/services/api/test/integration/reader_tokens_test.rb b/services/api/test/integration/reader_tokens_test.rb
new file mode 100644 (file)
index 0000000..6ed8461
--- /dev/null
@@ -0,0 +1,85 @@
+require 'test_helper'
+
+class Arvados::V1::ReaderTokensTest < ActionController::IntegrationTest
+  fixtures :all
+
+  def spectator_specimen
+    specimens(:owned_by_spectator).uuid
+  end
+
+  def get_specimens(main_auth, read_auth, formatter=:to_a)
+    params = {}
+    params[:reader_tokens] = [api_token(read_auth)].send(formatter) if read_auth
+    headers = {}
+    headers.merge!(auth(main_auth)) if main_auth
+    get('/arvados/v1/specimens', params, headers)
+  end
+
+  def get_specimen_uuids(main_auth, read_auth, formatter=:to_a)
+    get_specimens(main_auth, read_auth, formatter)
+    assert_response :success
+    json_response['items'].map { |spec| spec['uuid'] }
+  end
+
+  def assert_post_denied(main_auth, read_auth, formatter=:to_a)
+    if main_auth
+      headers = auth(main_auth)
+      expected = 403
+    else
+      headers = {}
+      expected = 401
+    end
+    post('/arvados/v1/specimens.json',
+         {specimen: {}, reader_tokens: [api_token(read_auth)].send(formatter)},
+         headers)
+    assert_response expected
+  end
+
+  test "active user can't see spectator specimen" do
+    # Other tests in this suite assume that the active user doesn't
+    # have read permission to the owned_by_spectator specimen.
+    # This test checks that this assumption still holds.
+    refute_includes(get_specimen_uuids(:active, nil), spectator_specimen,
+                    ["active user can read the owned_by_spectator specimen",
+                     "other tests will return false positives"].join(" - "))
+  end
+
+  [nil, :active_noscope].each do |main_auth|
+    [:spectator, :spectator_specimens].each do |read_auth|
+      test "#{main_auth} auth with reader token #{read_auth} can read" do
+        assert_includes(get_specimen_uuids(main_auth, read_auth),
+                        spectator_specimen, "did not find spectator specimen")
+      end
+
+      test "#{main_auth} auth with JSON read token #{read_auth} can read" do
+        assert_includes(get_specimen_uuids(main_auth, read_auth, :to_json),
+                        spectator_specimen, "did not find spectator specimen")
+      end
+
+      test "#{main_auth} auth with reader token #{read_auth} can't write" do
+        assert_post_denied(main_auth, read_auth)
+      end
+
+      test "#{main_auth} auth with JSON read token #{read_auth} can't write" do
+        assert_post_denied(main_auth, read_auth, :to_json)
+      end
+    end
+  end
+
+  test "scopes are still limited with reader tokens" do
+    get('/arvados/v1/collections',
+        {reader_tokens: [api_token(:spectator_specimens)]},
+        auth(:active_noscope))
+    assert_response 403
+  end
+
+  test "reader tokens grant no permissions when expired" do
+    get_specimens(:active_noscope, :expired)
+    assert_response 403
+  end
+
+  test "reader tokens grant no permissions outside their scope" do
+    refute_includes(get_specimen_uuids(:active, :admin_vm), spectator_specimen,
+                    "scoped reader token granted permissions out of scope")
+  end
+end
diff --git a/services/api/test/integration/select_test.rb b/services/api/test/integration/select_test.rb
new file mode 100644 (file)
index 0000000..a7bd545
--- /dev/null
@@ -0,0 +1,93 @@
+require 'test_helper'
+
+class SelectTest < ActionDispatch::IntegrationTest
+  test "should select just two columns" do
+    get "/arvados/v1/links", {:format => :json, :select => ['uuid', 'link_class']}, auth(:active)
+    assert_response :success
+    assert_equal json_response['items'].count, json_response['items'].select { |i|
+      i.count == 3 and i['uuid'] != nil and i['link_class'] != nil
+    }.count
+  end
+
+  test "fewer distinct than total count" do
+    get "/arvados/v1/links", {:format => :json, :select => ['link_class'], :distinct => false}, auth(:active)
+    assert_response :success
+    links = json_response['items']
+
+    get "/arvados/v1/links", {:format => :json, :select => ['link_class'], :distinct => true}, auth(:active)
+    assert_response :success
+    distinct = json_response['items']
+
+    assert_operator(distinct.count, :<, links.count,
+                    "distinct count should be less than link count")
+    assert_equal links.uniq.count, distinct.count
+  end
+
+  test "select with order" do
+    get "/arvados/v1/links", {:format => :json, :select => ['uuid'], :order => ["uuid asc"]}, auth(:active)
+    assert_response :success
+
+    assert json_response['items'].length > 0
+
+    p = ""
+    json_response['items'].each do |i|
+      assert i['uuid'] > p
+      p = i['uuid']
+    end
+  end
+
+  def assert_link_classes_ascend(current_class, prev_class)
+    # Databases and Ruby don't always agree about string ordering with
+    # punctuation.  If the strings aren't ascending normally, check
+    # that they're equal up to punctuation.
+    if current_class < prev_class
+      class_prefix = current_class.split(/\W/).first
+      assert prev_class.start_with?(class_prefix)
+    end
+  end
+
+  test "select two columns with order" do
+    get "/arvados/v1/links", {:format => :json, :select => ['link_class', 'uuid'], :order => ['link_class asc', "uuid desc"]}, auth(:active)
+    assert_response :success
+
+    assert json_response['items'].length > 0
+
+    prev_link_class = ""
+    prev_uuid = "zzzzz-zzzzz-zzzzzzzzzzzzzzz"
+
+    json_response['items'].each do |i|
+      if prev_link_class != i['link_class']
+        prev_uuid = "zzzzz-zzzzz-zzzzzzzzzzzzzzz"
+      end
+
+      assert_link_classes_ascend(i['link_class'], prev_link_class)
+      assert i['uuid'] < prev_uuid
+
+      prev_link_class = i['link_class']
+      prev_uuid = i['uuid']
+    end
+  end
+
+  test "select two columns with old-style order syntax" do
+    get "/arvados/v1/links", {:format => :json, :select => ['link_class', 'uuid'], :order => 'link_class asc, uuid desc'}, auth(:active)
+    assert_response :success
+
+    assert json_response['items'].length > 0
+
+    prev_link_class = ""
+    prev_uuid = "zzzzz-zzzzz-zzzzzzzzzzzzzzz"
+
+    json_response['items'].each do |i|
+      if prev_link_class != i['link_class']
+        prev_uuid = "zzzzz-zzzzz-zzzzzzzzzzzzzzz"
+      end
+
+      assert_link_classes_ascend(i['link_class'], prev_link_class)
+      assert i['uuid'] < prev_uuid
+
+      prev_link_class = i['link_class']
+      prev_uuid = i['uuid']
+    end
+  end
+
+end
diff --git a/services/api/test/integration/serialized_encoding_test.rb b/services/api/test/integration/serialized_encoding_test.rb
new file mode 100644 (file)
index 0000000..8a1cb10
--- /dev/null
@@ -0,0 +1,51 @@
+require 'test_helper'
+require 'helpers/git_test_helper'
+
+class SerializedEncodingTest < ActionDispatch::IntegrationTest
+  include GitTestHelper
+
+  fixtures :all
+
+  {
+    api_client_authorization: {scopes: []},
+
+    human: {properties: {eye_color: 'gray'}},
+
+    job: {
+      repository: 'foo',
+      runtime_constraints: {docker_image: 'arvados/apitestfixture'},
+      script: 'hash',
+      script_version: 'master',
+      script_parameters: {pattern: 'foobar'},
+      tasks_summary: {todo: 0},
+    },
+
+    job_task: {parameters: {pattern: 'foo'}},
+
+    link: {link_class: 'test', name: 'test', properties: {foo: :bar}},
+
+    node: {info: {uptime: 1234}},
+
+    pipeline_instance: {
+      components: {"job1" => {parameters: {pattern: "xyzzy"}}},
+      components_summary: {todo: 0},
+      properties: {test: true},
+    },
+
+    pipeline_template: {
+      components: {"job1" => {parameters: {pattern: "xyzzy"}}},
+    },
+
+    specimen: {properties: {eye_color: 'meringue'}},
+
+    trait: {properties: {eye_color: 'brown'}},
+
+    user: {prefs: {cookies: 'thin mint'}},
+  }.each_pair do |resource, postdata|
+    test "create json-encoded #{resource.to_s}" do
+      post("/arvados/v1/#{resource.to_s.pluralize}",
+           {resource => postdata.to_json}, auth(:admin_trustedclient))
+      assert_response :success
+    end
+  end
+end
diff --git a/services/api/test/integration/user_sessions_test.rb b/services/api/test/integration/user_sessions_test.rb
new file mode 100644 (file)
index 0000000..814e6eb
--- /dev/null
@@ -0,0 +1,108 @@
+require 'test_helper'
+
+class UserSessionsApiTest < ActionDispatch::IntegrationTest
+  def client_url
+    'https://wb.example.com'
+  end
+
+  def mock_auth_with_email email
+    mock = {
+      'provider' => 'josh_id',
+      'uid' => 'https://edward.example.com',
+      'info' => {
+        'identity_url' => 'https://edward.example.com',
+        'name' => 'Edward Example',
+        'first_name' => 'Edward',
+        'last_name' => 'Example',
+        'email' => email,
+      },
+    }
+    post('/auth/josh_id/callback',
+         {return_to: client_url},
+         {'omniauth.auth' => mock})
+    assert_response :redirect, 'Did not redirect to client with token'
+  end
+
+  test 'create new user during omniauth callback' do
+    mock_auth_with_email 'edward@example.com'
+    assert_equal(0, @response.redirect_url.index(client_url),
+                 'Redirected to wrong address after succesful login: was ' +
+                 @response.redirect_url + ', expected ' + client_url + '[...]')
+    assert_not_nil(@response.redirect_url.index('api_token='),
+                   'Expected api_token in query string of redirect url ' +
+                   @response.redirect_url)
+  end
+
+  # Test various combinations of auto_setup configuration and email
+  # address provided during a new user's first session setup.
+  [{result: :nope, email: nil, cfg: {auto: true, repo: true, vm: true}},
+   {result: :yup, email: nil, cfg: {auto: true}},
+   {result: :nope, email: '@example.com', cfg: {auto: true, repo: true, vm: true}},
+   {result: :yup, email: '@example.com', cfg: {auto: true}},
+   {result: :nope, email: 'root@', cfg: {auto: true, repo: true, vm: true}},
+   {result: :nope, email: 'root@', cfg: {auto: true, repo: true}},
+   {result: :nope, email: 'root@', cfg: {auto: true, vm: true}},
+   {result: :yup, email: 'root@', cfg: {auto: true}},
+   {result: :nope, email: 'gitolite@', cfg: {auto: true, repo: true}},
+   {result: :nope, email: '*_*@', cfg: {auto: true, vm: true}},
+   {result: :yup, email: 'toor@', cfg: {auto: true, vm: true, repo: true}},
+   {result: :yup, email: 'foo@', cfg: {auto: true, vm: true},
+     uniqprefix: 'foo'},
+   {result: :yup, email: 'foo@', cfg: {auto: true, repo: true},
+     uniqprefix: 'foo'},
+   {result: :yup, email: 'auto_setup_vm_login@', cfg: {auto: true, repo: true},
+     uniqprefix: 'auto_setup_vm_login'},
+   ].each do |testcase|
+    test "user auto-activate #{testcase.inspect}" do
+      # Configure auto_setup behavior according to testcase[:cfg]
+      Rails.configuration.auto_setup_new_users = testcase[:cfg][:auto]
+      Rails.configuration.auto_setup_new_users_with_vm_uuid =
+        (testcase[:cfg][:vm] ? virtual_machines(:testvm).uuid : false)
+      Rails.configuration.auto_setup_new_users_with_repository =
+        testcase[:cfg][:repo]
+
+      mock_auth_with_email testcase[:email]
+      u = assigns(:user)
+      vm_links = Link.where('link_class=? and tail_uuid=? and head_uuid like ?',
+                            'permission', u.uuid,
+                            '%-' + VirtualMachine.uuid_prefix + '-%')
+      repo_links = Link.where('link_class=? and tail_uuid=? and head_uuid like ?',
+                              'permission', u.uuid,
+                              '%-' + Repository.uuid_prefix + '-%')
+      repos = Repository.where('uuid in (?)', repo_links.collect(&:head_uuid))
+      case u[:result]
+      when :nope
+        assert_equal false, u.is_invited, "should not have been set up"
+        assert_empty vm_links, "should not have VM login permission"
+        assert_empty repo_links, "should not have repo permission"
+      when :yup
+        assert_equal true, u.is_invited
+        if testcase[:cfg][:vm]
+          assert_equal 1, vm_links.count, "wrong number of VM perm links"
+        else
+          assert_empty vm_links, "should not have VM login permission"
+        end
+        if testcase[:cfg][:repo]
+          assert_equal 1, repo_links.count, "wrong number of repo perm links"
+          assert_equal 1, repos.count, "wrong number of repos"
+          assert_equal 'can_manage', repo_links.first.name, "wrong perm type"
+        else
+          assert_empty repo_links, "should not have repo permission"
+        end
+      end
+      if (prefix = testcase[:uniqprefix])
+        # This email address conflicts with a test fixture. Make sure
+        # every VM login and repository name got digits added to make
+        # it unique.
+        (repos.collect(&:name) +
+         vm_links.collect { |link| link.properties['username'] }
+         ).each do |name|
+          r = name.match /^(.{#{prefix.length}})(\d+)$/
+          assert_not_nil r, "#{name.inspect} does not match {prefix}\\d+"
+          assert_equal(prefix, r[1],
+                       "#{name.inspect} was not {#{prefix.inspect} plus digits}")
+        end
+      end
+    end
+  end
+end
diff --git a/services/api/test/integration/users_test.rb b/services/api/test/integration/users_test.rb
new file mode 100644 (file)
index 0000000..0d6c0f3
--- /dev/null
@@ -0,0 +1,216 @@
+require 'test_helper'
+require 'helpers/users_test_helper'
+
+class UsersTest < ActionDispatch::IntegrationTest
+  include UsersTestHelper
+
+  test "setup user multiple times" do
+    repo_name = 'test_repo'
+
+    post "/arvados/v1/users/setup", {
+      repo_name: repo_name,
+      openid_prefix: 'https://www.google.com/accounts/o8/id',
+      user: {
+        uuid: 'zzzzz-tpzed-abcdefghijklmno',
+        first_name: "in_create_test_first_name",
+        last_name: "test_last_name",
+        email: "foo@example.com"
+      }
+    }, auth(:admin)
+
+    assert_response :success
+
+    response_items = json_response['items']
+
+    created = find_obj_in_resp response_items, 'arvados#user', nil
+
+    assert_equal 'in_create_test_first_name', created['first_name']
+    assert_not_nil created['uuid'], 'expected non-null uuid for the new user'
+    assert_equal 'zzzzz-tpzed-abcdefghijklmno', created['uuid']
+    assert_not_nil created['email'], 'expected non-nil email'
+    assert_nil created['identity_url'], 'expected no identity_url'
+
+    # arvados#user, repo link and link add user to 'All users' group
+    verify_link response_items, 'arvados#user', true, 'permission', 'can_login',
+        created['uuid'], created['email'], 'arvados#user', false, 'arvados#user'
+
+    verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
+        repo_name, created['uuid'], 'arvados#repository', true, 'Repository'
+
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+        'All users', created['uuid'], 'arvados#group', true, 'Group'
+
+    verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',
+        nil, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
+
+    verify_system_group_permission_link_for created['uuid']
+
+    # invoke setup again with the same data
+    post "/arvados/v1/users/setup", {
+      repo_name: repo_name,
+      vm_uuid: virtual_machines(:testvm).uuid,
+      openid_prefix: 'https://www.google.com/accounts/o8/id',
+      user: {
+        uuid: 'zzzzz-tpzed-abcdefghijklmno',
+        first_name: "in_create_test_first_name",
+        last_name: "test_last_name",
+        email: "foo@example.com"
+      }
+    }, auth(:admin)
+
+    assert_response :success
+
+    response_items = json_response['items']
+
+    created = find_obj_in_resp response_items, 'arvados#user', nil
+    assert_equal 'in_create_test_first_name', created['first_name']
+    assert_not_nil created['uuid'], 'expected non-null uuid for the new user'
+    assert_equal 'zzzzz-tpzed-abcdefghijklmno', created['uuid']
+    assert_not_nil created['email'], 'expected non-nil email'
+    assert_nil created['identity_url'], 'expected no identity_url'
+
+    # arvados#user, repo link and link add user to 'All users' group
+    verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
+        repo_name, created['uuid'], 'arvados#repository', true, 'Repository'
+
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+        'All users', created['uuid'], 'arvados#group', true, 'Group'
+
+    verify_link response_items, 'arvados#virtualMachine', true, 'permission', 'can_login',
+        virtual_machines(:testvm).uuid, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
+
+    verify_system_group_permission_link_for created['uuid']
+  end
+
+  test "setup user in multiple steps and verify response" do
+    post "/arvados/v1/users/setup", {
+      openid_prefix: 'http://www.example.com/account',
+      user: {
+        email: "foo@example.com"
+      }
+    }, auth(:admin)
+
+    assert_response :success
+    response_items = json_response['items']
+    created = find_obj_in_resp response_items, 'arvados#user', nil
+
+    assert_not_nil created['uuid'], 'expected uuid for new user'
+    assert_not_nil created['email'], 'expected non-nil email'
+    assert_equal created['email'], 'foo@example.com', 'expected input email'
+
+    # three new links: system_group, arvados#user, and 'All users' group.
+    verify_link response_items, 'arvados#user', true, 'permission', 'can_login',
+        created['uuid'], created['email'], 'arvados#user', false, 'arvados#user'
+
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+        'All users', created['uuid'], 'arvados#group', true, 'Group'
+
+    verify_link response_items, 'arvados#repository', false, 'permission', 'can_manage',
+        'test_repo', created['uuid'], 'arvados#repository', true, 'Repository'
+
+    verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',
+        nil, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
+
+   # invoke setup with a repository
+    post "/arvados/v1/users/setup", {
+      openid_prefix: 'http://www.example.com/account',
+      repo_name: 'new_repo',
+      uuid: created['uuid']
+    }, auth(:admin)
+
+    assert_response :success
+
+    response_items = json_response['items']
+    created = find_obj_in_resp response_items, 'arvados#user', nil
+
+    assert_equal 'foo@example.com', created['email'], 'expected input email'
+
+     # verify links
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+        'All users', created['uuid'], 'arvados#group', true, 'Group'
+
+    verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
+        'new_repo', created['uuid'], 'arvados#repository', true, 'Repository'
+
+    verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',
+        nil, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
+
+    # invoke setup with a vm_uuid
+    post "/arvados/v1/users/setup", {
+      vm_uuid: virtual_machines(:testvm).uuid,
+      openid_prefix: 'http://www.example.com/account',
+      user: {
+        email: 'junk_email'
+      },
+      uuid: created['uuid']
+    }, auth(:admin)
+
+    assert_response :success
+
+    response_items = json_response['items']
+    created = find_obj_in_resp response_items, 'arvados#user', nil
+
+    assert_equal created['email'], 'foo@example.com', 'expected original email'
+
+    # verify links
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+        'All users', created['uuid'], 'arvados#group', true, 'Group'
+
+    # since no repo name in input, we won't get any; even though user has one
+    verify_link response_items, 'arvados#repository', false, 'permission', 'can_manage',
+        'new_repo', created['uuid'], 'arvados#repository', true, 'Repository'
+
+    verify_link response_items, 'arvados#virtualMachine', true, 'permission', 'can_login',
+        virtual_machines(:testvm).uuid, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
+  end
+
+  test "setup and unsetup user" do
+    post "/arvados/v1/users/setup", {
+      repo_name: 'test_repo',
+      vm_uuid: virtual_machines(:testvm).uuid,
+      user: {email: 'foo@example.com'},
+      openid_prefix: 'https://www.google.com/accounts/o8/id'
+    }, auth(:admin)
+
+    assert_response :success
+    response_items = json_response['items']
+    created = find_obj_in_resp response_items, 'arvados#user', nil
+    assert_not_nil created['uuid'], 'expected uuid for the new user'
+    assert_equal created['email'], 'foo@example.com', 'expected given email'
+
+    # five extra links: system_group, login, group, repo and vm
+    verify_link response_items, 'arvados#user', true, 'permission', 'can_login',
+        created['uuid'], created['email'], 'arvados#user', false, 'arvados#user'
+
+    verify_link response_items, 'arvados#group', true, 'permission', 'can_read',
+        'All users', created['uuid'], 'arvados#group', true, 'Group'
+
+    verify_link response_items, 'arvados#repository', true, 'permission', 'can_manage',
+        'test_repo', created['uuid'], 'arvados#repository', true, 'Repository'
+
+    verify_link response_items, 'arvados#virtualMachine', true, 'permission', 'can_login',
+        virtual_machines(:testvm).uuid, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'
+
+    verify_link_existence created['uuid'], created['email'], true, true, true, true, false
+
+    post "/arvados/v1/users/#{created['uuid']}/unsetup", {}, auth(:admin)
+
+    assert_response :success
+
+    created2 = json_response
+    assert_not_nil created2['uuid'], 'expected uuid for the newly created user'
+    assert_equal created['uuid'], created2['uuid'], 'expected uuid not found'
+
+    verify_link_existence created['uuid'], created['email'], false, false, false, false, false
+  end
+
+  def find_obj_in_resp (response_items, kind, head_kind=nil)
+    response_items.each do |x|
+      if x && x['kind']
+        return x if (x['kind'] == kind && x['head_kind'] == head_kind)
+      end
+    end
+    nil
+  end
+
+end
diff --git a/services/api/test/integration/valid_links_test.rb b/services/api/test/integration/valid_links_test.rb
new file mode 100644 (file)
index 0000000..63c8e88
--- /dev/null
@@ -0,0 +1,39 @@
+require 'test_helper'
+
+class ValidLinksTest < ActionDispatch::IntegrationTest
+  fixtures :all
+
+  test "tail must exist on update" do
+    admin_auth = {'HTTP_AUTHORIZATION' => "OAuth2 #{api_client_authorizations(:admin).api_token}"}
+
+    post "/arvados/v1/links", {
+      :format => :json,
+      :link => {
+        link_class: 'test',
+        name: 'stuff',
+        head_uuid: users(:active).uuid,
+        tail_uuid: virtual_machines(:testvm).uuid
+      }
+    }, admin_auth
+    assert_response :success
+    u = json_response['uuid']
+
+    put "/arvados/v1/links/#{u}", {
+      :format => :json,
+      :link => {
+        tail_uuid: virtual_machines(:testvm2).uuid
+      }
+    }, admin_auth
+    assert_response :success
+    assert_equal virtual_machines(:testvm2).uuid, (ActiveSupport::JSON.decode @response.body)['tail_uuid']
+
+    put "/arvados/v1/links/#{u}", {
+      :format => :json,
+      :link => {
+        tail_uuid: 'zzzzz-tpzed-xyzxyzxerrrorxx'
+      }
+    }, admin_auth
+    assert_response 422
+  end
+
+end
diff --git a/services/api/test/integration/websocket_test.rb b/services/api/test/integration/websocket_test.rb
new file mode 100644 (file)
index 0000000..d5808d8
--- /dev/null
@@ -0,0 +1,649 @@
+require 'test_helper'
+require 'websocket_runner'
+require 'oj'
+require 'database_cleaner'
+
+DatabaseCleaner.strategy = :deletion
+
+class WebsocketTest < ActionDispatch::IntegrationTest
+  self.use_transactional_fixtures = false
+
+  setup do
+    DatabaseCleaner.start
+  end
+
+  teardown do
+    DatabaseCleaner.clean
+  end
+
+  def ws_helper (token = nil, timeout = true)
+    opened = false
+    close_status = nil
+    too_long = false
+
+    EM.run {
+      if token
+        ws = Faye::WebSocket::Client.new("ws://localhost:3002/websocket?api_token=#{api_client_authorizations(token).api_token}")
+      else
+        ws = Faye::WebSocket::Client.new("ws://localhost:3002/websocket")
+      end
+
+      ws.on :open do |event|
+        opened = true
+        if timeout
+          EM::Timer.new 4 do
+            too_long = true if close_status.nil?
+            EM.stop_event_loop
+          end
+        end
+      end
+
+      ws.on :close do |event|
+        close_status = [:close, event.code, event.reason]
+        EM.stop_event_loop
+      end
+
+      yield ws
+    }
+
+    assert opened, "Should have opened web socket"
+    assert (not too_long), "Test took too long"
+    assert_equal 1000, close_status[1], "Connection closed unexpectedly (check log for errors)"
+  end
+
+  test "connect with no token" do
+    status = nil
+
+    ws_helper do |ws|
+      ws.on :message do |event|
+        d = Oj.load event.data
+        status = d["status"]
+        ws.close
+      end
+    end
+
+    assert_equal 401, status
+  end
+
+
+  test "connect, subscribe and get response" do
+    status = nil
+
+    ws_helper :admin do |ws|
+      ws.on :open do |event|
+        ws.send ({method: 'subscribe'}.to_json)
+      end
+
+      ws.on :message do |event|
+        d = Oj.load event.data
+        status = d["status"]
+        ws.close
+      end
+    end
+
+    assert_equal 200, status
+  end
+
+  test "connect, subscribe, get event" do
+    state = 1
+    spec = nil
+    ev_uuid = nil
+
+    authorize_with :admin
+
+    ws_helper :admin do |ws|
+      ws.on :open do |event|
+        ws.send ({method: 'subscribe'}.to_json)
+      end
+
+      ws.on :message do |event|
+        d = Oj.load event.data
+        case state
+        when 1
+          assert_equal 200, d["status"]
+          spec = Specimen.create
+          state = 2
+        when 2
+          ev_uuid = d["object_uuid"]
+          ws.close
+        end
+      end
+
+    end
+
+    assert_not_nil spec
+    assert_equal spec.uuid, ev_uuid
+  end
+
+  test "connect, subscribe, get two events" do
+    state = 1
+    spec = nil
+    human = nil
+    spec_ev_uuid = nil
+    human_ev_uuid = nil
+
+    authorize_with :admin
+
+    ws_helper :admin do |ws|
+      ws.on :open do |event|
+        ws.send ({method: 'subscribe'}.to_json)
+      end
+
+      ws.on :message do |event|
+        d = Oj.load event.data
+        case state
+        when 1
+          assert_equal 200, d["status"]
+          spec = Specimen.create
+          human = Human.create
+          state = 2
+        when 2
+          spec_ev_uuid = d["object_uuid"]
+          state = 3
+        when 3
+          human_ev_uuid = d["object_uuid"]
+          state = 4
+          ws.close
+        when 4
+          assert false, "Should not get any more events"
+        end
+      end
+
+    end
+
+    assert_not_nil spec
+    assert_not_nil human
+    assert_equal spec.uuid, spec_ev_uuid
+    assert_equal human.uuid, human_ev_uuid
+  end
+
+  test "connect, subscribe, filter events" do
+    state = 1
+    human = nil
+    human_ev_uuid = nil
+
+    authorize_with :admin
+
+    ws_helper :admin do |ws|
+      ws.on :open do |event|
+        ws.send ({method: 'subscribe', filters: [['object_uuid', 'is_a', 'arvados#human']]}.to_json)
+      end
+
+      ws.on :message do |event|
+        d = Oj.load event.data
+        case state
+        when 1
+          assert_equal 200, d["status"]
+          Specimen.create
+          human = Human.create
+          state = 2
+        when 2
+          human_ev_uuid = d["object_uuid"]
+          state = 3
+          ws.close
+        when 3
+          assert false, "Should not get any more events"
+        end
+      end
+
+    end
+
+    assert_not_nil human
+    assert_equal human.uuid, human_ev_uuid
+  end
+
+
+  test "connect, subscribe, multiple filters" do
+    state = 1
+    spec = nil
+    human = nil
+    spec_ev_uuid = nil
+    human_ev_uuid = nil
+
+    authorize_with :admin
+
+    ws_helper :admin do |ws|
+      ws.on :open do |event|
+        ws.send ({method: 'subscribe', filters: [['object_uuid', 'is_a', 'arvados#human']]}.to_json)
+        ws.send ({method: 'subscribe', filters: [['object_uuid', 'is_a', 'arvados#specimen']]}.to_json)
+      end
+
+      ws.on :message do |event|
+        d = Oj.load event.data
+        case state
+        when 1
+          assert_equal 200, d["status"]
+          state = 2
+        when 2
+          assert_equal 200, d["status"]
+          spec = Specimen.create
+          Trait.create # not part of filters, should not be received
+          human = Human.create
+          state = 3
+        when 3
+          spec_ev_uuid = d["object_uuid"]
+          state = 4
+        when 4
+          human_ev_uuid = d["object_uuid"]
+          state = 5
+          ws.close
+        when 5
+          assert false, "Should not get any more events"
+        end
+      end
+
+    end
+
+    assert_not_nil spec
+    assert_not_nil human
+    assert_equal spec.uuid, spec_ev_uuid
+    assert_equal human.uuid, human_ev_uuid
+  end
+
+
+  test "connect, subscribe, compound filter" do
+    state = 1
+    t1 = nil
+
+    authorize_with :admin
+
+    ws_helper :admin do |ws|
+      ws.on :open do |event|
+        ws.send ({method: 'subscribe', filters: [['object_uuid', 'is_a', 'arvados#trait'], ['event_type', '=', 'update']]}.to_json)
+      end
+
+      ws.on :message do |event|
+        d = Oj.load event.data
+        case state
+        when 1
+          assert_equal 200, d["status"]
+          t1 = Trait.create("name" => "foo")
+          t1.name = "bar"
+          t1.save!
+          state = 2
+         when 2
+          assert_equal 'update', d['event_type']
+          state = 3
+          ws.close
+        when 3
+          assert false, "Should not get any more events"
+        end
+      end
+
+    end
+
+    assert_equal 3, state
+    assert_not_nil t1
+  end
+
+  test "connect, subscribe, ask events starting at seq num" do
+    state = 1
+    human = nil
+    human_ev_uuid = nil
+
+    authorize_with :admin
+
+    lastid = logs(:log3).id
+    l1 = nil
+    l2 = nil
+
+    ws_helper :admin do |ws|
+      ws.on :open do |event|
+        ws.send ({method: 'subscribe', last_log_id: lastid}.to_json)
+      end
+
+      ws.on :message do |event|
+        d = Oj.load event.data
+        case state
+        when 1
+          assert_equal 200, d["status"]
+          state = 2
+        when 2
+          l1 = d["object_uuid"]
+          assert_not_nil l1, "Unexpected message: #{d}"
+          state = 3
+        when 3
+          l2 = d["object_uuid"]
+          assert_not_nil l2, "Unexpected message: #{d}"
+          state = 4
+          ws.close
+        when 4
+          assert false, "Should not get any more events"
+        end
+      end
+
+    end
+
+    assert_equal logs(:log4).object_uuid, l1
+    assert_equal logs(:log5).object_uuid, l2
+  end
+
+  test "connect, subscribe, get event, unsubscribe" do
+    state = 1
+    spec = nil
+    spec_ev_uuid = nil
+    filter_id = nil
+
+    authorize_with :admin
+
+    ws_helper :admin, false do |ws|
+      ws.on :open do |event|
+        ws.send ({method: 'subscribe'}.to_json)
+        EM::Timer.new 3 do
+          # Set a time limit on the test because after unsubscribing the server
+          # still has to process the next event (and then hopefully correctly
+          # decides not to send it because we unsubscribed.)
+          ws.close
+        end
+      end
+
+      ws.on :message do |event|
+        d = Oj.load event.data
+        case state
+        when 1
+          assert_equal 200, d["status"]
+          spec = Specimen.create
+          state = 2
+        when 2
+          spec_ev_uuid = d["object_uuid"]
+          ws.send ({method: 'unsubscribe'}.to_json)
+
+          EM::Timer.new 1 do
+            Specimen.create
+          end
+
+          state = 3
+        when 3
+          assert_equal 200, d["status"]
+          state = 4
+        when 4
+          assert false, "Should not get any more events"
+        end
+      end
+
+    end
+
+    assert_not_nil spec
+    assert_equal spec.uuid, spec_ev_uuid
+  end
+
+  test "connect, subscribe, get event, unsubscribe with filter" do
+    state = 1
+    spec = nil
+    spec_ev_uuid = nil
+
+    authorize_with :admin
+
+    ws_helper :admin, false do |ws|
+      ws.on :open do |event|
+        ws.send ({method: 'subscribe', filters: [['object_uuid', 'is_a', 'arvados#human']]}.to_json)
+        EM::Timer.new 3 do
+          # Set a time limit on the test because after unsubscribing the server
+          # still has to process the next event (and then hopefully correctly
+          # decides not to send it because we unsubscribed.)
+          ws.close
+        end
+      end
+
+      ws.on :message do |event|
+        d = Oj.load event.data
+        case state
+        when 1
+          assert_equal 200, d["status"]
+          spec = Human.create
+          state = 2
+        when 2
+          spec_ev_uuid = d["object_uuid"]
+          ws.send ({method: 'unsubscribe', filters: [['object_uuid', 'is_a', 'arvados#human']]}.to_json)
+
+          EM::Timer.new 1 do
+            Human.create
+          end
+
+          state = 3
+        when 3
+          assert_equal 200, d["status"]
+          state = 4
+        when 4
+          assert false, "Should not get any more events"
+        end
+      end
+
+    end
+
+    assert_not_nil spec
+    assert_equal spec.uuid, spec_ev_uuid
+  end
+
+
+  test "connect, subscribe, get event, try to unsubscribe with bogus filter" do
+    state = 1
+    spec = nil
+    spec_ev_uuid = nil
+    human = nil
+    human_ev_uuid = nil
+
+    authorize_with :admin
+
+    ws_helper :admin do |ws|
+      ws.on :open do |event|
+        ws.send ({method: 'subscribe'}.to_json)
+      end
+
+      ws.on :message do |event|
+        d = Oj.load event.data
+        case state
+        when 1
+          assert_equal 200, d["status"]
+          spec = Specimen.create
+          state = 2
+        when 2
+          spec_ev_uuid = d["object_uuid"]
+          ws.send ({method: 'unsubscribe', filters: [['foo', 'bar', 'baz']]}.to_json)
+
+          EM::Timer.new 1 do
+            human = Human.create
+          end
+
+          state = 3
+        when 3
+          assert_equal 404, d["status"]
+          state = 4
+        when 4
+          human_ev_uuid = d["object_uuid"]
+          state = 5
+          ws.close
+        when 5
+          assert false, "Should not get any more events"
+        end
+      end
+
+    end
+
+    assert_not_nil spec
+    assert_not_nil human
+    assert_equal spec.uuid, spec_ev_uuid
+    assert_equal human.uuid, human_ev_uuid
+  end
+
+
+
+  test "connected, not subscribed, no event" do
+    authorize_with :admin
+
+    ws_helper :admin, false do |ws|
+      ws.on :open do |event|
+        EM::Timer.new 1 do
+          Specimen.create
+        end
+
+        EM::Timer.new 3 do
+          ws.close
+        end
+      end
+
+      ws.on :message do |event|
+        assert false, "Should not get any messages, message was #{event.data}"
+      end
+    end
+  end
+
+  test "connected, not authorized to see event" do
+    state = 1
+
+    authorize_with :admin
+
+    ws_helper :active, false do |ws|
+      ws.on :open do |event|
+        ws.send ({method: 'subscribe'}.to_json)
+
+        EM::Timer.new 3 do
+          ws.close
+        end
+      end
+
+      ws.on :message do |event|
+        d = Oj.load event.data
+        case state
+        when 1
+          assert_equal 200, d["status"]
+          Specimen.create
+          state = 2
+        when 2
+          assert false, "Should not get any messages, message was #{event.data}"
+        end
+      end
+
+    end
+
+  end
+
+  test "connect, try bogus method" do
+    status = nil
+
+    ws_helper :admin do |ws|
+      ws.on :open do |event|
+        ws.send ({method: 'frobnabble'}.to_json)
+      end
+
+      ws.on :message do |event|
+        d = Oj.load event.data
+        status = d["status"]
+        ws.close
+      end
+    end
+
+    assert_equal 400, status
+  end
+
+  test "connect, missing method" do
+    status = nil
+
+    ws_helper :admin do |ws|
+      ws.on :open do |event|
+        ws.send ({fizzbuzz: 'frobnabble'}.to_json)
+      end
+
+      ws.on :message do |event|
+        d = Oj.load event.data
+        status = d["status"]
+        ws.close
+      end
+    end
+
+    assert_equal 400, status
+  end
+
+  test "connect, send malformed request" do
+    status = nil
+
+    ws_helper :admin do |ws|
+      ws.on :open do |event|
+        ws.send '<XML4EVER></XML4EVER>'
+      end
+
+      ws.on :message do |event|
+        d = Oj.load event.data
+        status = d["status"]
+        ws.close
+      end
+    end
+
+    assert_equal 400, status
+  end
+
+
+  test "connect, try subscribe too many filters" do
+    state = 1
+
+    authorize_with :admin
+
+    ws_helper :admin do |ws|
+      ws.on :open do |event|
+        (1..17).each do |i|
+          ws.send ({method: 'subscribe', filters: [['object_uuid', '=', i]]}.to_json)
+        end
+      end
+
+      ws.on :message do |event|
+        d = Oj.load event.data
+        case state
+        when (1..EventBus::MAX_FILTERS)
+          assert_equal 200, d["status"]
+          state += 1
+        when (EventBus::MAX_FILTERS+1)
+          assert_equal 403, d["status"]
+          ws.close
+        end
+      end
+
+    end
+
+    assert_equal 17, state
+
+  end
+
+  test "connect, subscribe, lots of events" do
+    state = 1
+    event_count = 0
+    log_start = Log.order(:id).last.id
+
+    authorize_with :admin
+
+    ws_helper :admin, false do |ws|
+      EM::Timer.new 45 do
+        # Needs a longer timeout than the default
+        ws.close
+      end
+
+      ws.on :open do |event|
+        ws.send ({method: 'subscribe'}.to_json)
+      end
+
+      ws.on :message do |event|
+        d = Oj.load event.data
+        case state
+        when 1
+          assert_equal 200, d["status"]
+          ActiveRecord::Base.transaction do
+            (1..202).each do
+              spec = Specimen.create
+            end
+          end
+          state = 2
+        when 2
+          event_count += 1
+          assert_equal d['id'], event_count+log_start
+          if event_count == 202
+            ws.close
+          end
+        end
+      end
+
+    end
+
+    assert_equal 202, event_count
+  end
+
+
+end
diff --git a/services/api/test/job_logs/crunchstatshort.log b/services/api/test/job_logs/crunchstatshort.log
new file mode 100644 (file)
index 0000000..7b39318
--- /dev/null
@@ -0,0 +1 @@
+2014-11-07_23:33:51 qr1hi-8i9sb-nf3qk0xzwwz3lre 31708 1 stderr crunchstat: cpu 1970.8200 user 60.2700 sys 8 cpus -- interval 10.0002 seconds 35.3900 user 0.8600 sys
diff --git a/services/api/test/performance/browsing_test.rb b/services/api/test/performance/browsing_test.rb
new file mode 100644 (file)
index 0000000..3fea27b
--- /dev/null
@@ -0,0 +1,12 @@
+require 'test_helper'
+require 'rails/performance_test_help'
+
+class BrowsingTest < ActionDispatch::PerformanceTest
+  # Refer to the documentation for all available options
+  # self.profile_options = { :runs => 5, :metrics => [:wall_time, :memory]
+  #                          :output => 'tmp/performance', :formats => [:flat] }
+
+  def test_homepage
+    get '/'
+  end
+end
diff --git a/services/api/test/test.git.tar b/services/api/test/test.git.tar
new file mode 100644 (file)
index 0000000..ae46601
Binary files /dev/null and b/services/api/test/test.git.tar differ
diff --git a/services/api/test/test_helper.rb b/services/api/test/test_helper.rb
new file mode 100644 (file)
index 0000000..216dd2d
--- /dev/null
@@ -0,0 +1,118 @@
+ENV["RAILS_ENV"] = "test"
+unless ENV["NO_COVERAGE_TEST"]
+  begin
+    require 'simplecov'
+    require 'simplecov-rcov'
+    class SimpleCov::Formatter::MergedFormatter
+      def format(result)
+        SimpleCov::Formatter::HTMLFormatter.new.format(result)
+        SimpleCov::Formatter::RcovFormatter.new.format(result)
+      end
+    end
+    SimpleCov.formatter = SimpleCov::Formatter::MergedFormatter
+    SimpleCov.start do
+      add_filter '/test/'
+      add_filter 'initializers/secret_token'
+      add_filter 'initializers/omniauth'
+    end
+  rescue Exception => e
+    $stderr.puts "SimpleCov unavailable (#{e}). Proceeding without."
+  end
+end
+
+require File.expand_path('../../config/environment', __FILE__)
+require 'rails/test_help'
+
+module ArvadosTestSupport
+  def json_response
+    ActiveSupport::JSON.decode @response.body
+  end
+
+  def api_token(api_client_auth_name)
+    api_client_authorizations(api_client_auth_name).api_token
+  end
+
+  def auth(api_client_auth_name)
+    {'HTTP_AUTHORIZATION' => "OAuth2 #{api_token(api_client_auth_name)}"}
+  end
+end
+
+class ActiveSupport::TestCase
+  include FactoryGirl::Syntax::Methods
+  fixtures :all
+
+  include ArvadosTestSupport
+
+  teardown do
+    Thread.current[:api_client_ip_address] = nil
+    Thread.current[:api_client_authorization] = nil
+    Thread.current[:api_client_uuid] = nil
+    Thread.current[:api_client] = nil
+    Thread.current[:user] = nil
+    restore_configuration
+  end
+
+  def restore_configuration
+    # Restore configuration settings changed during tests
+    $application_config.each do |k,v|
+      if k.match /^[^.]*$/
+        Rails.configuration.send (k + '='), v
+      end
+    end
+  end
+
+  def set_user_from_auth(auth_name)
+    client_auth = api_client_authorizations(auth_name)
+    Thread.current[:api_client_authorization] = client_auth
+    Thread.current[:api_client] = client_auth.api_client
+    Thread.current[:user] = client_auth.user
+  end
+
+  def expect_json
+    self.request.headers["Accept"] = "text/json"
+  end
+
+  def authorize_with api_client_auth_name
+    authorize_with_token api_client_authorizations(api_client_auth_name).api_token
+  end
+
+  def authorize_with_token token
+    t = token
+    t = t.api_token if t.respond_to? :api_token
+    ArvadosApiToken.new.call("rack.input" => "",
+                             "HTTP_AUTHORIZATION" => "OAuth2 #{t}")
+  end
+end
+
+class ActionController::TestCase
+  setup do
+    @counter = 0
+  end
+
+  def check_counter action
+    @counter += 1
+    if @counter == 2
+      assert_equal 1, 2, "Multiple actions in functional test"
+    end
+  end
+
+  [:get, :post, :put, :patch, :delete].each do |method|
+    define_method method do |action, *args|
+      check_counter action
+      super action, *args
+    end
+  end
+end
+
+class ActionDispatch::IntegrationTest
+  teardown do
+    Thread.current[:api_client_ip_address] = nil
+    Thread.current[:api_client_authorization] = nil
+    Thread.current[:api_client_uuid] = nil
+    Thread.current[:api_client] = nil
+    Thread.current[:user] = nil
+  end
+end
+
+# Ensure permissions are computed from the test fixtures.
+User.invalidate_permissions_cache
diff --git a/services/api/test/unit/.gitkeep b/services/api/test/unit/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/api/test/unit/api_client_authorization_test.rb b/services/api/test/unit/api_client_authorization_test.rb
new file mode 100644 (file)
index 0000000..dd255fb
--- /dev/null
@@ -0,0 +1,17 @@
+require 'test_helper'
+
+class ApiClientAuthorizationTest < ActiveSupport::TestCase
+  include CurrentApiClient
+
+  [:admin_trustedclient, :active_trustedclient].each do |token|
+    test "ApiClientAuthorization can be created then deleted by #{token}" do
+      set_user_from_auth token
+      x = ApiClientAuthorization.create!(user_id: current_user.id,
+                                         api_client_id: 0,
+                                         scopes: [])
+      newtoken = x.api_token
+      assert x.destroy, "Failed to destroy new ApiClientAuth"
+      assert_empty ApiClientAuthorization.where(api_token: newtoken), "Destroyed ApiClientAuth is still in database"
+    end
+  end
+end
diff --git a/services/api/test/unit/api_client_test.rb b/services/api/test/unit/api_client_test.rb
new file mode 100644 (file)
index 0000000..7f16490
--- /dev/null
@@ -0,0 +1,7 @@
+require 'test_helper'
+
+class ApiClientTest < ActiveSupport::TestCase
+  # test "the truth" do
+  #   assert true
+  # end
+end
diff --git a/services/api/test/unit/application_test.rb b/services/api/test/unit/application_test.rb
new file mode 100644 (file)
index 0000000..ca80319
--- /dev/null
@@ -0,0 +1,32 @@
+require 'test_helper'
+
+class ApplicationTest < ActiveSupport::TestCase
+  include CurrentApiClient
+
+  test "test act_as_system_user" do
+    Thread.current[:user] = users(:active)
+    assert_equal users(:active), Thread.current[:user]
+    act_as_system_user do
+      assert_not_equal users(:active), Thread.current[:user]
+      assert_equal system_user, Thread.current[:user]
+    end
+    assert_equal users(:active), Thread.current[:user]
+  end
+
+  test "test act_as_system_user is exception safe" do
+    Thread.current[:user] = users(:active)
+    assert_equal users(:active), Thread.current[:user]
+    caught = false
+    begin
+      act_as_system_user do
+        assert_not_equal users(:active), Thread.current[:user]
+        assert_equal system_user, Thread.current[:user]
+        raise "Fail"
+      end
+    rescue
+      caught = true
+    end
+    assert caught
+    assert_equal users(:active), Thread.current[:user]
+  end
+end
diff --git a/services/api/test/unit/arvados_model_test.rb b/services/api/test/unit/arvados_model_test.rb
new file mode 100644 (file)
index 0000000..8c75764
--- /dev/null
@@ -0,0 +1,112 @@
+require 'test_helper'
+
+class ArvadosModelTest < ActiveSupport::TestCase
+  fixtures :all
+
+  def create_with_attrs attrs
+    a = Specimen.create({material: 'caloric'}.merge(attrs))
+    a if a.valid?
+  end
+
+  test 'non-admin cannot assign uuid' do
+    set_user_from_auth :active_trustedclient
+    want_uuid = Specimen.generate_uuid
+    a = create_with_attrs(uuid: want_uuid)
+    assert_nil a, "Non-admin should not assign uuid."
+  end
+
+  test 'admin can assign valid uuid' do
+    set_user_from_auth :admin_trustedclient
+    want_uuid = Specimen.generate_uuid
+    a = create_with_attrs(uuid: want_uuid)
+    assert_equal want_uuid, a.uuid, "Admin should assign valid uuid."
+    assert a.uuid.length==27, "Auto assigned uuid length is wrong."
+  end
+
+  test 'admin cannot assign uuid with wrong object type' do
+    set_user_from_auth :admin_trustedclient
+    want_uuid = Human.generate_uuid
+    a = create_with_attrs(uuid: want_uuid)
+    assert_nil a, "Admin should not be able to assign invalid uuid."
+  end
+
+  test 'admin cannot assign badly formed uuid' do
+    set_user_from_auth :admin_trustedclient
+    a = create_with_attrs(uuid: "ntoheunthaoesunhasoeuhtnsaoeunhtsth")
+    assert_nil a, "Admin should not be able to assign invalid uuid."
+  end
+
+  test 'admin cannot assign empty uuid' do
+    set_user_from_auth :admin_trustedclient
+    a = create_with_attrs(uuid: "")
+    assert_nil a, "Admin cannot assign empty uuid."
+  end
+
+  [ {:a => 'foo'},
+    {'a' => :foo},
+    {:a => ['foo', 'bar']},
+    {'a' => [:foo, 'bar']},
+    {'a' => ['foo', :bar]},
+    {:a => [:foo, :bar]},
+    {:a => {'foo' => {'bar' => 'baz'}}},
+    {'a' => {:foo => {'bar' => 'baz'}}},
+    {'a' => {'foo' => {:bar => 'baz'}}},
+    {'a' => {'foo' => {'bar' => :baz}}},
+    {'a' => {'foo' => ['bar', :baz]}},
+    {'a' => {['foo', :foo] => ['bar', 'baz']}},
+  ].each do |x|
+    test "refuse symbol keys in serialized attribute: #{x.inspect}" do
+      set_user_from_auth :admin_trustedclient
+      assert_nothing_raised do
+        Link.create!(link_class: 'test',
+                     properties: {})
+      end
+      assert_raises ActiveRecord::RecordInvalid do
+        Link.create!(link_class: 'test',
+                     properties: x)
+      end
+    end
+  end
+
+  test "Stringify symbols coming from serialized attribute in database" do
+    set_user_from_auth :admin_trustedclient
+    fixed = Link.find_by_uuid(links(:has_symbol_keys_in_database_somehow).uuid)
+    assert_equal(["baz", "foo"], fixed.properties.keys.sort,
+                 "Hash symbol keys from DB did not get stringified.")
+    assert_equal(['waz', 'waz', 'waz', 1, nil, false, true],
+                 fixed.properties['baz'],
+                 "Array symbol values from DB did not get stringified.")
+    assert_equal true, fixed.save, "Failed to save fixed model back to db."
+  end
+
+  test "No HashWithIndifferentAccess in database" do
+    set_user_from_auth :admin_trustedclient
+    assert_raises ActiveRecord::RecordInvalid do
+      Link.create!(link_class: 'test',
+                   properties: {'foo' => 'bar'}.with_indifferent_access)
+    end
+  end
+
+  [['uuid', {unique: true}],
+   ['owner_uuid', {}]].each do |the_column, requires|
+    test "unique index on all models with #{the_column}" do
+      checked = 0
+      ActiveRecord::Base.connection.tables.each do |table|
+        columns = ActiveRecord::Base.connection.columns(table)
+
+        next unless columns.collect(&:name).include? the_column
+
+        indexes = ActiveRecord::Base.connection.indexes(table).reject do |index|
+          requires.map do |key, val|
+            index.send(key) == val
+          end.include? false
+        end
+        assert_includes indexes.collect(&:columns), [the_column], 'no index'
+        checked += 1
+      end
+      # Sanity check: make sure we didn't just systematically miss everything.
+      assert_operator(10, :<, checked,
+                      "Only #{checked} tables have a #{the_column}?!")
+    end
+  end
+end
diff --git a/services/api/test/unit/authorized_key_test.rb b/services/api/test/unit/authorized_key_test.rb
new file mode 100644 (file)
index 0000000..b8d9b67
--- /dev/null
@@ -0,0 +1,7 @@
+require 'test_helper'
+
+class AuthorizedKeyTest < ActiveSupport::TestCase
+  # test "the truth" do
+  #   assert true
+  # end
+end
diff --git a/services/api/test/unit/blob_test.rb b/services/api/test/unit/blob_test.rb
new file mode 100644 (file)
index 0000000..0794a75
--- /dev/null
@@ -0,0 +1,121 @@
+require 'test_helper'
+
+class BlobTest < ActiveSupport::TestCase
+  @@api_token = rand(2**512).to_s(36)[0..49]
+  @@key = rand(2**2048).to_s(36)
+  @@blob_data = 'foo'
+  @@blob_locator = Digest::MD5.hexdigest(@@blob_data) +
+    '+' + @@blob_data.size.to_s
+
+  @@known_locator = 'acbd18db4cc2f85cedef654fccc4a4d8+3'
+  @@known_token = 'hocfupkn2pjhrpgp2vxv8rsku7tvtx49arbc9s4bvu7p7wxqvk'
+  @@known_key = '13u9fkuccnboeewr0ne3mvapk28epf68a3bhj9q8sb4l6e4e5mkk' +
+    'p6nhj2mmpscgu1zze5h5enydxfe3j215024u16ij4hjaiqs5u4pzsl3nczmaoxnc' +
+    'ljkm4875xqn4xv058koz3vkptmzhyheiy6wzevzjmdvxhvcqsvr5abhl15c2d4o4' +
+    'jhl0s91lojy1mtrzqqvprqcverls0xvy9vai9t1l1lvvazpuadafm71jl4mrwq2y' +
+    'gokee3eamvjy8qq1fvy238838enjmy5wzy2md7yvsitp5vztft6j4q866efym7e6' +
+    'vu5wm9fpnwjyxfldw3vbo01mgjs75rgo7qioh8z8ij7jpyp8508okhgbbex3ceei' +
+    '786u5rw2a9gx743dj3fgq2irk'
+  @@known_signed_locator = 'acbd18db4cc2f85cedef654fccc4a4d8+3' +
+    '+A257f3f5f5f0a4e4626a18fc74bd42ec34dcb228a@7fffffff'
+
+  test 'generate predictable invincible signature' do
+    signed = Blob.sign_locator @@known_locator, {
+      api_token: @@known_token,
+      key: @@known_key,
+      expire: 0x7fffffff,
+    }
+    assert_equal @@known_signed_locator, signed
+  end
+
+  test 'verify predictable invincible signature' do
+    assert_equal true, Blob.verify_signature!(@@known_signed_locator,
+                                              api_token: @@known_token,
+                                              key: @@known_key)
+  end
+
+  test 'correct' do
+    signed = Blob.sign_locator @@blob_locator, api_token: @@api_token, key: @@key
+    assert_equal true, Blob.verify_signature!(signed, api_token: @@api_token, key: @@key)
+  end
+
+  test 'expired' do
+    signed = Blob.sign_locator @@blob_locator, api_token: @@api_token, key: @@key, ttl: -1
+    assert_raise Blob::InvalidSignatureError do
+      Blob.verify_signature!(signed, api_token: @@api_token, key: @@key)
+    end
+  end
+
+  test 'expired, but no raise' do
+    signed = Blob.sign_locator @@blob_locator, api_token: @@api_token, key: @@key, ttl: -1
+    assert_equal false, Blob.verify_signature(signed,
+                                              api_token: @@api_token,
+                                              key: @@key)
+  end
+
+  test 'bogus, wrong block hash' do
+    signed = Blob.sign_locator @@blob_locator, api_token: @@api_token, key: @@key
+    assert_raise Blob::InvalidSignatureError do
+      Blob.verify_signature!(signed.sub('acbd','abcd'), api_token: @@api_token, key: @@key)
+    end
+  end
+
+  test 'bogus, expired' do
+    signed = 'acbd18db4cc2f85cedef654fccc4a4d8+3+Aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa@531641bf'
+    assert_raises Blob::InvalidSignatureError do
+      Blob.verify_signature!(signed, api_token: @@api_token, key: @@key)
+    end
+  end
+
+  test 'bogus, wrong key' do
+    signed = Blob.sign_locator(@@blob_locator,
+                               api_token: @@api_token,
+                               key: (@@key+'x'))
+    assert_raise Blob::InvalidSignatureError do
+      Blob.verify_signature!(signed, api_token: @@api_token, key: @@key)
+    end
+  end
+
+  test 'bogus, wrong api token' do
+    signed = Blob.sign_locator(@@blob_locator,
+                               api_token: @@api_token.reverse,
+                               key: @@key)
+    assert_raise Blob::InvalidSignatureError do
+      Blob.verify_signature!(signed, api_token: @@api_token, key: @@key)
+    end
+  end
+
+  test 'bogus, signature format 1' do
+    signed = 'acbd18db4cc2f85cedef654fccc4a4d8+3+Aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa@'
+    assert_raise Blob::InvalidSignatureError do
+      Blob.verify_signature!(signed, api_token: @@api_token, key: @@key)
+    end
+  end
+
+  test 'bogus, signature format 2' do
+    signed = 'acbd18db4cc2f85cedef654fccc4a4d8+3+A@531641bf'
+    assert_raise Blob::InvalidSignatureError do
+      Blob.verify_signature!(signed, api_token: @@api_token, key: @@key)
+    end
+  end
+
+  test 'bogus, signature format 3' do
+    signed = 'acbd18db4cc2f85cedef654fccc4a4d8+3+Axyzzy@531641bf'
+    assert_raise Blob::InvalidSignatureError do
+      Blob.verify_signature!(signed, api_token: @@api_token, key: @@key)
+    end
+  end
+
+  test 'bogus, timestamp format' do
+    signed = 'acbd18db4cc2f85cedef654fccc4a4d8+3+Aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa@xyzzy'
+    assert_raise Blob::InvalidSignatureError do
+      Blob.verify_signature!(signed, api_token: @@api_token, key: @@key)
+    end
+  end
+
+  test 'no signature at all' do
+    assert_raise Blob::InvalidSignatureError do
+      Blob.verify_signature!(@@blob_locator, api_token: @@api_token, key: @@key)
+    end
+  end
+end
diff --git a/services/api/test/unit/collection_test.rb b/services/api/test/unit/collection_test.rb
new file mode 100644 (file)
index 0000000..d9c2203
--- /dev/null
@@ -0,0 +1,41 @@
+require 'test_helper'
+
+class CollectionTest < ActiveSupport::TestCase
+  def create_collection name, enc=nil
+    txt = ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:#{name}.txt\n"
+    txt.force_encoding(enc) if enc
+    return Collection.create(manifest_text: txt)
+  end
+
+  test 'accept ASCII manifest_text' do
+    act_as_system_user do
+      c = create_collection 'foo', Encoding::US_ASCII
+      assert c.valid?
+    end
+  end
+
+  test 'accept UTF-8 manifest_text' do
+    act_as_system_user do
+      c = create_collection "f\xc3\x98\xc3\x98", Encoding::UTF_8
+      assert c.valid?
+    end
+  end
+
+  test 'refuse manifest_text with invalid UTF-8 byte sequence' do
+    act_as_system_user do
+      c = create_collection "f\xc8o", Encoding::UTF_8
+      assert !c.valid?
+      assert_equal [:manifest_text], c.errors.messages.keys
+      assert_match /UTF-8/, c.errors.messages[:manifest_text].first
+    end
+  end
+
+  test 'refuse manifest_text with non-UTF-8 encoding' do
+    act_as_system_user do
+      c = create_collection "f\xc8o", Encoding::ASCII_8BIT
+      assert !c.valid?
+      assert_equal [:manifest_text], c.errors.messages.keys
+      assert_match /UTF-8/, c.errors.messages[:manifest_text].first
+    end
+  end
+end
diff --git a/services/api/test/unit/commit_ancestor_test.rb b/services/api/test/unit/commit_ancestor_test.rb
new file mode 100644 (file)
index 0000000..664dca5
--- /dev/null
@@ -0,0 +1,7 @@
+require 'test_helper'
+
+class CommitAncestorTest < ActiveSupport::TestCase
+  # test "the truth" do
+  #   assert true
+  # end
+end
diff --git a/services/api/test/unit/commit_test.rb b/services/api/test/unit/commit_test.rb
new file mode 100644 (file)
index 0000000..2424af3
--- /dev/null
@@ -0,0 +1,7 @@
+require 'test_helper'
+
+class CommitTest < ActiveSupport::TestCase
+  # test "the truth" do
+  #   assert true
+  # end
+end
diff --git a/services/api/test/unit/group_test.rb b/services/api/test/unit/group_test.rb
new file mode 100644 (file)
index 0000000..9e0e4fc
--- /dev/null
@@ -0,0 +1,59 @@
+require 'test_helper'
+
+class GroupTest < ActiveSupport::TestCase
+
+  test "cannot set owner_uuid to object with existing ownership cycle" do
+    set_user_from_auth :active_trustedclient
+
+    # First make sure we have lots of permission on the bad group by
+    # renaming it to "{current name} is mine all mine"
+    g = groups(:bad_group_has_ownership_cycle_b)
+    g.name += " is mine all mine"
+    assert g.save, "active user should be able to modify group #{g.uuid}"
+
+    # Use the group as the owner of a new object
+    s = Specimen.
+      create(owner_uuid: groups(:bad_group_has_ownership_cycle_b).uuid)
+    assert s.valid?, "ownership should pass validation #{s.errors.messages}"
+    assert_equal false, s.save, "should not save object with #{g.uuid} as owner"
+
+    # Use the group as the new owner of an existing object
+    s = specimens(:in_aproject)
+    s.owner_uuid = groups(:bad_group_has_ownership_cycle_b).uuid
+    assert s.valid?, "ownership should pass validation"
+    assert_equal false, s.save, "should not save object with #{g.uuid} as owner"
+  end
+
+  test "cannot create a new ownership cycle" do
+    set_user_from_auth :active_trustedclient
+
+    g_foo = Group.create!(name: "foo")
+    g_bar = Group.create!(name: "bar")
+
+    g_foo.owner_uuid = g_bar.uuid
+    assert g_foo.save, lambda { g_foo.errors.messages }
+    g_bar.owner_uuid = g_foo.uuid
+    assert g_bar.valid?, "ownership cycle should not prevent validation"
+    assert_equal false, g_bar.save, "should not create an ownership loop"
+    assert g_bar.errors.messages[:owner_uuid].join(" ").match(/ownership cycle/)
+  end
+
+  test "cannot create a single-object ownership cycle" do
+    set_user_from_auth :active_trustedclient
+
+    g_foo = Group.create!(name: "foo")
+    assert g_foo.save
+
+    # Ensure I have permission to manage this group even when its owner changes
+    perm_link = Link.create!(tail_uuid: users(:active).uuid,
+                            head_uuid: g_foo.uuid,
+                            link_class: 'permission',
+                            name: 'can_manage')
+    assert perm_link.save
+
+    g_foo.owner_uuid = g_foo.uuid
+    assert_equal false, g_foo.save, "should not create an ownership loop"
+    assert g_foo.errors.messages[:owner_uuid].join(" ").match(/ownership cycle/)
+  end
+
+end
diff --git a/services/api/test/unit/helpers/api_client_authorizations_helper_test.rb b/services/api/test/unit/helpers/api_client_authorizations_helper_test.rb
new file mode 100644 (file)
index 0000000..4225e04
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class ApiClientAuthorizationsHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/api_clients_helper_test.rb b/services/api/test/unit/helpers/api_clients_helper_test.rb
new file mode 100644 (file)
index 0000000..3e58181
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class ApiClientsHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/authorized_keys_helper_test.rb b/services/api/test/unit/helpers/authorized_keys_helper_test.rb
new file mode 100644 (file)
index 0000000..ced3b29
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class AuthorizedKeysHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/collections_helper_test.rb b/services/api/test/unit/helpers/collections_helper_test.rb
new file mode 100644 (file)
index 0000000..16a85d9
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class CollectionsHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/commit_ancestors_helper_test.rb b/services/api/test/unit/helpers/commit_ancestors_helper_test.rb
new file mode 100644 (file)
index 0000000..e277323
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class CommitAncestorsHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/commits_helper_test.rb b/services/api/test/unit/helpers/commits_helper_test.rb
new file mode 100644 (file)
index 0000000..c586153
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class CommitsHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/groups_helper_test.rb b/services/api/test/unit/helpers/groups_helper_test.rb
new file mode 100644 (file)
index 0000000..cd06333
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class GroupsHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/humans_helper_test.rb b/services/api/test/unit/helpers/humans_helper_test.rb
new file mode 100644 (file)
index 0000000..8c515d6
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class HumansHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/job_tasks_helper_test.rb b/services/api/test/unit/helpers/job_tasks_helper_test.rb
new file mode 100644 (file)
index 0000000..f53621c
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class JobTasksHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/jobs_helper_test.rb b/services/api/test/unit/helpers/jobs_helper_test.rb
new file mode 100644 (file)
index 0000000..7c4a3fd
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class JobsHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/keep_disks_helper_test.rb b/services/api/test/unit/helpers/keep_disks_helper_test.rb
new file mode 100644 (file)
index 0000000..a3b064e
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class KeepDisksHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/links_helper_test.rb b/services/api/test/unit/helpers/links_helper_test.rb
new file mode 100644 (file)
index 0000000..3ff1dea
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class LinksHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/logs_helper_test.rb b/services/api/test/unit/helpers/logs_helper_test.rb
new file mode 100644 (file)
index 0000000..c165554
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class LogsHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/nodes_helper_test.rb b/services/api/test/unit/helpers/nodes_helper_test.rb
new file mode 100644 (file)
index 0000000..13011de
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class NodesHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/pipeline_instances_helper_test.rb b/services/api/test/unit/helpers/pipeline_instances_helper_test.rb
new file mode 100644 (file)
index 0000000..45749cb
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class PipelineInstancesHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/pipeline_templates_helper_test.rb b/services/api/test/unit/helpers/pipeline_templates_helper_test.rb
new file mode 100644 (file)
index 0000000..2c4030a
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class PipelinesHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/repositories_helper_test.rb b/services/api/test/unit/helpers/repositories_helper_test.rb
new file mode 100644 (file)
index 0000000..51b6177
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class RepositoriesHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/specimens_helper_test.rb b/services/api/test/unit/helpers/specimens_helper_test.rb
new file mode 100644 (file)
index 0000000..825af25
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class SpecimensHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/traits_helper_test.rb b/services/api/test/unit/helpers/traits_helper_test.rb
new file mode 100644 (file)
index 0000000..da69c06
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class TraitsHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/helpers/virtual_machines_helper_test.rb b/services/api/test/unit/helpers/virtual_machines_helper_test.rb
new file mode 100644 (file)
index 0000000..03ded1c
--- /dev/null
@@ -0,0 +1,4 @@
+require 'test_helper'
+
+class VirtualMachinesHelperTest < ActionView::TestCase
+end
diff --git a/services/api/test/unit/human_test.rb b/services/api/test/unit/human_test.rb
new file mode 100644 (file)
index 0000000..2863cbf
--- /dev/null
@@ -0,0 +1,7 @@
+require 'test_helper'
+
+class HumanTest < ActiveSupport::TestCase
+  # test "the truth" do
+  #   assert true
+  # end
+end
diff --git a/services/api/test/unit/job_task_test.rb b/services/api/test/unit/job_task_test.rb
new file mode 100644 (file)
index 0000000..6582bee
--- /dev/null
@@ -0,0 +1,16 @@
+require 'test_helper'
+
+class JobTaskTest < ActiveSupport::TestCase
+  test "new tasks get an assigned qsequence" do
+    set_user_from_auth :active
+    task = JobTask.create
+    assert_not_nil task.qsequence
+    assert_operator(task.qsequence, :>=, 0)
+  end
+
+  test "assigned qsequence is not overwritten" do
+    set_user_from_auth :active
+    task = JobTask.create!(qsequence: 99)
+    assert_equal(99, task.qsequence)
+  end
+end
diff --git a/services/api/test/unit/job_test.rb b/services/api/test/unit/job_test.rb
new file mode 100644 (file)
index 0000000..24bc260
--- /dev/null
@@ -0,0 +1,403 @@
+require 'test_helper'
+require 'helpers/git_test_helper'
+
+class JobTest < ActiveSupport::TestCase
+  include GitTestHelper
+
+  BAD_COLLECTION = "#{'f' * 32}+0"
+
+  setup do
+    set_user_from_auth :active
+  end
+
+  def job_attrs merge_me={}
+    # Default (valid) set of attributes, with given overrides
+    {
+      script: "hash",
+      script_version: "master",
+      repository: "foo",
+    }.merge(merge_me)
+  end
+
+  test "Job without Docker image doesn't get locator" do
+    job = Job.new job_attrs
+    assert job.valid?, job.errors.full_messages.to_s
+    assert_nil job.docker_image_locator
+  end
+
+  { 'name' => [:links, :docker_image_collection_tag, :name],
+    'hash' => [:links, :docker_image_collection_hash, :name],
+    'locator' => [:collections, :docker_image, :portable_data_hash],
+  }.each_pair do |spec_type, (fixture_type, fixture_name, fixture_attr)|
+    test "Job initialized with Docker image #{spec_type} gets locator" do
+      image_spec = send(fixture_type, fixture_name).send(fixture_attr)
+      job = Job.new job_attrs(runtime_constraints:
+                              {'docker_image' => image_spec})
+      assert job.valid?, job.errors.full_messages.to_s
+      assert_equal(collections(:docker_image).portable_data_hash, job.docker_image_locator)
+    end
+
+    test "Job modified with Docker image #{spec_type} gets locator" do
+      job = Job.new job_attrs
+      assert job.valid?, job.errors.full_messages.to_s
+      assert_nil job.docker_image_locator
+      image_spec = send(fixture_type, fixture_name).send(fixture_attr)
+      job.runtime_constraints['docker_image'] = image_spec
+      assert job.valid?, job.errors.full_messages.to_s
+      assert_equal(collections(:docker_image).portable_data_hash, job.docker_image_locator)
+    end
+  end
+
+  test "removing a Docker runtime constraint removes the locator" do
+    image_locator = collections(:docker_image).portable_data_hash
+    job = Job.new job_attrs(runtime_constraints:
+                            {'docker_image' => image_locator})
+    assert job.valid?, job.errors.full_messages.to_s
+    assert_equal(image_locator, job.docker_image_locator)
+    job.runtime_constraints = {}
+    assert job.valid?, job.errors.full_messages.to_s + "after clearing runtime constraints"
+    assert_nil job.docker_image_locator
+  end
+
+  test "locate a Docker image with a repository + tag" do
+    image_repo, image_tag =
+      links(:docker_image_collection_tag2).name.split(':', 2)
+    job = Job.new job_attrs(runtime_constraints:
+                            {'docker_image' => image_repo,
+                              'docker_image_tag' => image_tag})
+    assert job.valid?, job.errors.full_messages.to_s
+    assert_equal(collections(:docker_image).portable_data_hash, job.docker_image_locator)
+  end
+
+  test "can't locate a Docker image with a nonexistent tag" do
+    image_repo = links(:docker_image_collection_tag).name
+    image_tag = '__nonexistent tag__'
+    job = Job.new job_attrs(runtime_constraints:
+                            {'docker_image' => image_repo,
+                              'docker_image_tag' => image_tag})
+    assert(job.invalid?, "Job with bad Docker tag valid")
+  end
+
+  test "locate a Docker image with a partial hash" do
+    image_hash = links(:docker_image_collection_hash).name[0..24]
+    job = Job.new job_attrs(runtime_constraints:
+                            {'docker_image' => image_hash})
+    assert job.valid?, job.errors.full_messages.to_s + " with partial hash #{image_hash}"
+    assert_equal(collections(:docker_image).portable_data_hash, job.docker_image_locator)
+  end
+
+  { 'name' => 'arvados_test_nonexistent',
+    'hash' => 'f' * 64,
+    'locator' => BAD_COLLECTION,
+  }.each_pair do |spec_type, image_spec|
+    test "Job validation fails with nonexistent Docker image #{spec_type}" do
+      job = Job.new job_attrs(runtime_constraints:
+                              {'docker_image' => image_spec})
+      assert(job.invalid?, "nonexistent Docker image #{spec_type} was valid")
+    end
+  end
+
+  test "Job validation fails with non-Docker Collection constraint" do
+    job = Job.new job_attrs(runtime_constraints:
+                            {'docker_image' => collections(:foo_file).uuid})
+    assert(job.invalid?, "non-Docker Collection constraint was valid")
+  end
+
+  test "can create Job with Docker image Collection without Docker links" do
+    image_uuid = collections(:unlinked_docker_image).portable_data_hash
+    job = Job.new job_attrs(runtime_constraints: {"docker_image" => image_uuid})
+    assert(job.valid?, "Job created with unlinked Docker image was invalid")
+    assert_equal(image_uuid, job.docker_image_locator)
+  end
+
+  def check_attrs_unset(job, attrs)
+    assert_empty(attrs.each_key.map { |key| job.send(key) }.compact,
+                 "job has values for #{attrs.keys}")
+  end
+
+  def check_creation_prohibited(attrs)
+    begin
+      job = Job.new(job_attrs(attrs))
+    rescue ActiveModel::MassAssignmentSecurity::Error
+      # Test passes - expected attribute protection
+    else
+      check_attrs_unset(job, attrs)
+    end
+  end
+
+  def check_modification_prohibited(attrs)
+    job = Job.new(job_attrs)
+    attrs.each_pair do |key, value|
+      assert_raises(NoMethodError) { job.send("{key}=".to_sym, value) }
+    end
+    check_attrs_unset(job, attrs)
+  end
+
+  test "can't create Job with Docker image locator" do
+    check_creation_prohibited(docker_image_locator: BAD_COLLECTION)
+  end
+
+  test "can't assign Docker image locator to Job" do
+    check_modification_prohibited(docker_image_locator: BAD_COLLECTION)
+  end
+
+  [
+   {script_parameters: ""},
+   {script_parameters: []},
+   {script_parameters: {symbols: :are_not_allowed_here}},
+   {runtime_constraints: ""},
+   {runtime_constraints: []},
+   {tasks_summary: ""},
+   {tasks_summary: []},
+   {script_version: "no/branch/could/ever/possibly/have/this/name"},
+  ].each do |invalid_attrs|
+    test "validation failures set error messages: #{invalid_attrs.to_json}" do
+      # Ensure valid_attrs doesn't produce errors -- otherwise we will
+      # not know whether errors reported below are actually caused by
+      # invalid_attrs.
+      dummy = Job.create! job_attrs
+
+      job = Job.create job_attrs(invalid_attrs)
+      assert_raises(ActiveRecord::RecordInvalid, ArgumentError,
+                    "save! did not raise the expected exception") do
+        job.save!
+      end
+      assert_not_empty job.errors, "validation failure did not provide errors"
+    end
+  end
+
+  [
+    # Each test case is of the following format
+    # Array of parameters where each parameter is of the format:
+    #  attr name to be changed, attr value, and array of expectations (where each expectation is an array)
+    [['running', false, [['state', 'Queued']]]],
+    [['state', 'Running', [['started_at', 'not_nil']]]],
+    [['is_locked_by_uuid', 'use_current_user_uuid', [['state', 'Queued']]], ['state', 'Running', [['running', true], ['started_at', 'not_nil'], ['success', 'nil']]]],
+    [['running', false, [['state', 'Queued']]], ['state', 'Complete', [['success', true]]]],
+    [['running', true, [['state', 'Running']]], ['cancelled_at', Time.now, [['state', 'Cancelled']]]],
+    [['running', true, [['state', 'Running']]], ['state', 'Cancelled', [['cancelled_at', 'not_nil']]]],
+    [['running', true, [['state', 'Running']]], ['success', true, [['state', 'Complete']]]],
+    [['running', true, [['state', 'Running']]], ['success', false, [['state', 'Failed']]]],
+    [['running', true, [['state', 'Running']]], ['state', 'Complete', [['success', true],['finished_at', 'not_nil']]]],
+    [['running', true, [['state', 'Running']]], ['state', 'Failed', [['success', false],['finished_at', 'not_nil']]]],
+    [['cancelled_at', Time.now, [['state', 'Cancelled']]], ['success', false, [['state', 'Cancelled'],['finished_at', 'nil'], ['cancelled_at', 'not_nil']]]],
+    [['cancelled_at', Time.now, [['state', 'Cancelled'],['running', false]]], ['success', true, [['state', 'Cancelled'],['running', false],['finished_at', 'nil'],['cancelled_at', 'not_nil']]]],
+    # potential migration cases
+    [['state', nil, [['state', 'Queued']]]],
+    [['state', nil, [['state', 'Queued']]], ['cancelled_at', Time.now, [['state', 'Cancelled']]]],
+    [['running', true, [['state', 'Running']]], ['state', nil, [['state', 'Running']]]],
+  ].each do |parameters|
+    test "verify job status #{parameters}" do
+      job = Job.create! job_attrs
+      assert_equal 'Queued', job.state, "job.state"
+
+      parameters.each do |parameter|
+        expectations = parameter[2]
+        if parameter[1] == 'use_current_user_uuid'
+          parameter[1] = Thread.current[:user].uuid
+        end
+
+        if expectations.instance_of? Array
+          job[parameter[0]] = parameter[1]
+          assert_equal true, job.save, job.errors.full_messages.to_s
+          expectations.each do |expectation|
+            if expectation[1] == 'not_nil'
+              assert_not_nil job[expectation[0]], expectation[0]
+            elsif expectation[1] == 'nil'
+              assert_nil job[expectation[0]], expectation[0]
+            else
+              assert_equal expectation[1], job[expectation[0]], expectation[0]
+            end
+          end
+        else
+          raise 'I do not know how to handle this expectation'
+        end
+      end
+    end
+  end
+
+  test "Test job state changes" do
+    all = ["Queued", "Running", "Complete", "Failed", "Cancelled"]
+    valid = {"Queued" => all, "Running" => ["Complete", "Failed", "Cancelled"]}
+    all.each do |start|
+      all.each do |finish|
+        if start != finish
+          job = Job.create! job_attrs(state: start)
+          assert_equal start, job.state
+          job.state = finish
+          job.save
+          job.reload
+          if valid[start] and valid[start].include? finish
+            assert_equal finish, job.state
+          else
+            assert_equal start, job.state
+          end
+        end
+      end
+    end
+  end
+
+  test "Test job locking" do
+    set_user_from_auth :active_trustedclient
+    job = Job.create! job_attrs
+
+    assert_equal "Queued", job.state
+
+    # Should be able to lock successfully
+    job.lock current_user.uuid
+    assert_equal "Running", job.state
+
+    assert_raises ArvadosModel::AlreadyLockedError do
+      # Can't lock it again
+      job.lock current_user.uuid
+    end
+    job.reload
+    assert_equal "Running", job.state
+
+    set_user_from_auth :project_viewer
+    assert_raises ArvadosModel::AlreadyLockedError do
+      # Can't lock it as a different user either
+      job.lock current_user.uuid
+    end
+    job.reload
+    assert_equal "Running", job.state
+
+    assert_raises ArvadosModel::PermissionDeniedError do
+      # Can't update fields as a different user
+      job.update_attributes(state: "Failed")
+    end
+    job.reload
+    assert_equal "Running", job.state
+
+
+    set_user_from_auth :active_trustedclient
+
+    # Can update fields as the locked_by user
+    job.update_attributes(state: "Failed")
+    assert_equal "Failed", job.state
+  end
+
+  test "verify job queue position" do
+    job1 = Job.create! job_attrs
+    assert_equal 'Queued', job1.state, "Incorrect job state for newly created job1"
+
+    job2 = Job.create! job_attrs
+    assert_equal 'Queued', job2.state, "Incorrect job state for newly created job2"
+
+    assert_not_nil job1.queue_position, "Expected non-nil queue position for job1"
+    assert_not_nil job2.queue_position, "Expected non-nil queue position for job2"
+    assert_not_equal job1.queue_position, job2.queue_position
+  end
+
+  SDK_MASTER = "ca68b24e51992e790f29df5cc4bc54ce1da4a1c2"
+  SDK_TAGGED = "00634b2b8a492d6f121e3cf1d6587b821136a9a7"
+
+  def sdk_constraint(version)
+    {runtime_constraints: {
+        "arvados_sdk_version" => version,
+        "docker_image" => links(:docker_image_collection_tag).name,
+      }}
+  end
+
+  def check_job_sdk_version(expected)
+    job = yield
+    if expected.nil?
+      refute(job.valid?, "job valid with bad Arvados SDK version")
+    else
+      assert(job.valid?, "job not valid with good Arvados SDK version")
+      assert_equal(expected, job.arvados_sdk_version)
+    end
+  end
+
+  { "master" => SDK_MASTER,
+    "commit2" => SDK_TAGGED,
+    SDK_TAGGED[0, 8] => SDK_TAGGED,
+    "__nonexistent__" => nil,
+  }.each_pair do |search, commit_hash|
+    test "creating job with SDK version '#{search}'" do
+      check_job_sdk_version(commit_hash) do
+        Job.new(job_attrs(sdk_constraint(search)))
+      end
+    end
+
+    test "updating job from no SDK to version '#{search}'" do
+      job = Job.create!(job_attrs)
+      assert_nil job.arvados_sdk_version
+      check_job_sdk_version(commit_hash) do
+        job.runtime_constraints = sdk_constraint(search)[:runtime_constraints]
+        job
+      end
+    end
+
+    test "updating job from SDK version 'master' to '#{search}'" do
+      job = Job.create!(job_attrs(sdk_constraint("master")))
+      assert_equal(SDK_MASTER, job.arvados_sdk_version)
+      check_job_sdk_version(commit_hash) do
+        job.runtime_constraints = sdk_constraint(search)[:runtime_constraints]
+        job
+      end
+    end
+  end
+
+  test "clear the SDK version" do
+    job = Job.create!(job_attrs(sdk_constraint("master")))
+    assert_equal(SDK_MASTER, job.arvados_sdk_version)
+    job.runtime_constraints = {}
+    assert(job.valid?, "job invalid after clearing SDK version")
+    assert_nil(job.arvados_sdk_version)
+  end
+
+  test "job with SDK constraint, without Docker image is invalid" do
+    sdk_attrs = sdk_constraint("master")
+    sdk_attrs[:runtime_constraints].delete("docker_image")
+    job = Job.create(job_attrs(sdk_attrs))
+    refute(job.valid?, "Job valid with SDK version, without Docker image")
+    sdk_errors = job.errors.messages[:arvados_sdk_version] || []
+    refute_empty(sdk_errors.grep(/\bDocker\b/),
+                 "no Job SDK errors mention that Docker is required")
+  end
+
+  test "invalid to clear Docker image constraint when SDK constraint exists" do
+    job = Job.create!(job_attrs(sdk_constraint("master")))
+    job.runtime_constraints.delete("docker_image")
+    refute(job.valid?,
+           "Job with SDK constraint valid after clearing Docker image")
+  end
+
+  test "can't create job with SDK version assigned directly" do
+    check_creation_prohibited(arvados_sdk_version: SDK_MASTER)
+  end
+
+  test "can't modify job to assign SDK version directly" do
+    check_modification_prohibited(arvados_sdk_version: SDK_MASTER)
+  end
+
+  test "job validation fails when collection uuid found in script_parameters" do
+    bad_params = {
+      script_parameters: {
+        'input' => {
+          'param1' => 'the collection uuid zzzzz-4zz18-012345678901234'
+        }
+      }
+    }
+    assert_raises(ActiveRecord::RecordInvalid,
+                  "created job with a collection uuid in script_parameters") do
+      job = Job.create!(job_attrs(bad_params))
+    end
+  end
+
+  test "job validation succeeds when no collection uuid in script_parameters" do
+    good_params = {
+      script_parameters: {
+        'arg1' => 'foo',
+        'arg2' => [ 'bar', 'baz' ],
+        'arg3' => {
+          'a' => 1,
+          'b' => [2, 3, 4],
+        }
+      }
+    }
+    job = Job.create!(job_attrs(good_params))
+    assert job.valid?
+  end
+end
diff --git a/services/api/test/unit/keep_disk_test.rb b/services/api/test/unit/keep_disk_test.rb
new file mode 100644 (file)
index 0000000..424c72b
--- /dev/null
@@ -0,0 +1,7 @@
+require 'test_helper'
+
+class KeepDiskTest < ActiveSupport::TestCase
+  # test "the truth" do
+  #   assert true
+  # end
+end
diff --git a/services/api/test/unit/keep_service_test.rb b/services/api/test/unit/keep_service_test.rb
new file mode 100644 (file)
index 0000000..72c4f8e
--- /dev/null
@@ -0,0 +1,7 @@
+require 'test_helper'
+
+class KeepServiceTest < ActiveSupport::TestCase
+  # test "the truth" do
+  #   assert true
+  # end
+end
diff --git a/services/api/test/unit/link_test.rb b/services/api/test/unit/link_test.rb
new file mode 100644 (file)
index 0000000..028f403
--- /dev/null
@@ -0,0 +1,64 @@
+require 'test_helper'
+
+class LinkTest < ActiveSupport::TestCase
+  fixtures :all
+
+  setup do
+    set_user_from_auth :admin_trustedclient
+  end
+
+  test "cannot delete an object referenced by links" do
+    ob = Specimen.create
+    link = Link.create(tail_uuid: users(:active).uuid,
+                       head_uuid: ob.uuid,
+                       link_class: 'test',
+                       name: 'test')
+    assert_equal users(:admin).uuid, link.owner_uuid
+    assert_raises(ActiveRecord::DeleteRestrictionError,
+                  "should not delete #{ob.uuid} with link #{link.uuid}") do
+      ob.destroy
+    end
+  end
+
+  def new_active_link_valid?(link_attrs)
+    set_user_from_auth :active
+    begin
+      Link.
+        create({link_class: "permission",
+                 name: "can_read",
+                 head_uuid: groups(:aproject).uuid,
+               }.merge(link_attrs)).
+        valid?
+    rescue ArvadosModel::PermissionDeniedError
+      false
+    end
+  end
+
+  test "link granting permission to nonexistent user is invalid" do
+    refute new_active_link_valid?(tail_uuid:
+                                  users(:active).uuid.sub(/-\w+$/, "-#{'z' * 15}"))
+  end
+
+  test "link granting non-project permission to unreadable user is invalid" do
+    refute new_active_link_valid?(tail_uuid: users(:admin).uuid,
+                                  head_uuid: collections(:bar_file).uuid)
+  end
+
+  test "user can't add a Collection to a Project without permission" do
+    refute new_active_link_valid?(link_class: "name",
+                                  name: "Permission denied test name",
+                                  tail_uuid: collections(:bar_file).uuid)
+  end
+
+  test "user can't add a User to a Project" do
+    # Users *can* give other users permissions to projects.
+    # This test helps ensure that that exception is specific to permissions.
+    refute new_active_link_valid?(link_class: "name",
+                                  name: "Permission denied test name",
+                                  tail_uuid: users(:admin).uuid)
+  end
+
+  test "link granting project permissions to unreadable user is invalid" do
+    refute new_active_link_valid?(tail_uuid: users(:admin).uuid)
+  end
+end
diff --git a/services/api/test/unit/log_test.rb b/services/api/test/unit/log_test.rb
new file mode 100644 (file)
index 0000000..0c85d4c
--- /dev/null
@@ -0,0 +1,243 @@
+require 'test_helper'
+
+class LogTest < ActiveSupport::TestCase
+  include CurrentApiClient
+
+  EVENT_TEST_METHODS = {
+    :create => [:created_at, :assert_nil, :assert_not_nil],
+    :update => [:modified_at, :assert_not_nil, :assert_not_nil],
+    :destroy => [nil, :assert_not_nil, :assert_nil],
+  }
+
+  def setup
+    @start_time = Time.now
+    @log_count = 1
+  end
+
+  def assert_properties(test_method, event, props, *keys)
+    verb = (test_method == :assert_nil) ? 'have nil' : 'define'
+    keys.each do |prop_name|
+      assert_includes(props, prop_name, "log properties missing #{prop_name}")
+      self.send(test_method, props[prop_name],
+                "#{event.to_s} log should #{verb} #{prop_name}")
+    end
+  end
+
+  def get_logs_about(thing)
+    Log.where(object_uuid: thing.uuid).order("created_at ASC").all
+  end
+
+  def assert_logged(thing, event_type)
+    logs = get_logs_about(thing)
+    assert_equal(@log_count, logs.size, "log count mismatch")
+    @log_count += 1
+    log = logs.last
+    props = log.properties
+    assert_equal(current_user.andand.uuid, log.owner_uuid,
+                 "log is not owned by current user")
+    assert_equal(current_user.andand.uuid, log.modified_by_user_uuid,
+                 "log is not 'modified by' current user")
+    assert_equal(current_api_client.andand.uuid, log.modified_by_client_uuid,
+                 "log is not 'modified by' current client")
+    assert_equal(thing.uuid, log.object_uuid, "log UUID mismatch")
+    assert_equal(event_type.to_s, log.event_type, "log event type mismatch")
+    time_method, old_props_test, new_props_test = EVENT_TEST_METHODS[event_type]
+    if time_method.nil? or (timestamp = thing.send(time_method)).nil?
+      assert(log.event_at >= @start_time, "log timestamp too old")
+    else
+      assert_in_delta(timestamp, log.event_at, 1, "log timestamp mismatch")
+    end
+    assert_properties(old_props_test, event_type, props,
+                      'old_etag', 'old_attributes')
+    assert_properties(new_props_test, event_type, props,
+                      'new_etag', 'new_attributes')
+    yield props if block_given?
+  end
+
+  def assert_auth_logged_with_clean_properties(auth, event_type)
+    assert_logged(auth, event_type) do |props|
+      ['old_attributes', 'new_attributes'].map { |k| props[k] }.compact
+        .each do |attributes|
+        refute_includes(attributes, 'api_token',
+                        "auth log properties include sensitive API token")
+      end
+      yield props if block_given?
+    end
+  end
+
+  test "creating a user makes a log" do
+    set_user_from_auth :admin_trustedclient
+    u = User.new(first_name: "Log", last_name: "Test")
+    u.save!
+    assert_logged(u, :create) do |props|
+      assert_equal(u.etag, props['new_etag'], "new user etag mismatch")
+      assert_equal(u.first_name, props['new_attributes']['first_name'],
+                   "new user first name mismatch")
+      assert_equal(u.last_name, props['new_attributes']['last_name'],
+                   "new user first name mismatch")
+    end
+  end
+
+  test "updating a virtual machine makes a log" do
+    set_user_from_auth :admin_trustedclient
+    vm = virtual_machines(:testvm)
+    orig_etag = vm.etag
+    vm.hostname = 'testvm.testshell'
+    vm.save!
+    assert_logged(vm, :update) do |props|
+      assert_equal(orig_etag, props['old_etag'], "updated VM old etag mismatch")
+      assert_equal(vm.etag, props['new_etag'], "updated VM new etag mismatch")
+      assert_equal('testvm.shell', props['old_attributes']['hostname'],
+                   "updated VM old name mismatch")
+      assert_equal('testvm.testshell', props['new_attributes']['hostname'],
+                   "updated VM new name mismatch")
+    end
+  end
+
+  test "destroying an authorization makes a log" do
+    set_user_from_auth :admin_trustedclient
+    auth = api_client_authorizations(:spectator)
+    orig_etag = auth.etag
+    orig_attrs = auth.attributes
+    orig_attrs.delete 'api_token'
+    auth.destroy
+    assert_logged(auth, :destroy) do |props|
+      assert_equal(orig_etag, props['old_etag'], "destroyed auth etag mismatch")
+      assert_equal(orig_attrs, props['old_attributes'],
+                   "destroyed auth attributes mismatch")
+    end
+  end
+
+  test "saving an unchanged client still makes a log" do
+    set_user_from_auth :admin_trustedclient
+    client = api_clients(:untrusted)
+    client.is_trusted = client.is_trusted
+    client.save!
+    assert_logged(client, :update) do |props|
+      ['old', 'new'].each do |age|
+        assert_equal(client.etag, props["#{age}_etag"],
+                     "unchanged client #{age} etag mismatch")
+        assert_equal(client.attributes, props["#{age}_attributes"],
+                     "unchanged client #{age} attributes mismatch")
+      end
+    end
+  end
+
+  test "updating a group twice makes two logs" do
+    set_user_from_auth :admin_trustedclient
+    group = groups(:empty_lonely_group)
+    name1 = group.name
+    name2 = "#{name1} under test"
+    group.name = name2
+    group.save!
+    assert_logged(group, :update) do |props|
+      assert_equal(name1, props['old_attributes']['name'],
+                   "group start name mismatch")
+      assert_equal(name2, props['new_attributes']['name'],
+                   "group updated name mismatch")
+    end
+    group.name = name1
+    group.save!
+    assert_logged(group, :update) do |props|
+      assert_equal(name2, props['old_attributes']['name'],
+                   "group pre-revert name mismatch")
+      assert_equal(name1, props['new_attributes']['name'],
+                   "group final name mismatch")
+    end
+  end
+
+  test "making a log doesn't get logged" do
+    set_user_from_auth :active_trustedclient
+    log = Log.new
+    log.save!
+    assert_equal(0, get_logs_about(log).size, "made a Log about a Log")
+  end
+
+  test "non-admins can't modify or delete logs" do
+    set_user_from_auth :active_trustedclient
+    log = Log.new(summary: "immutable log test")
+    assert_nothing_raised { log.save! }
+    log.summary = "log mutation test should fail"
+    assert_raise(ArvadosModel::PermissionDeniedError) { log.save! }
+    assert_raise(ArvadosModel::PermissionDeniedError) { log.destroy }
+  end
+
+  test "admins can modify and delete logs" do
+    set_user_from_auth :admin_trustedclient
+    log = Log.new(summary: "admin log mutation test")
+    assert_nothing_raised { log.save! }
+    log.summary = "admin mutated log test"
+    assert_nothing_raised { log.save! }
+    assert_nothing_raised { log.destroy }
+  end
+
+  test "failure saving log causes failure saving object" do
+    Log.class_eval do
+      alias_method :_orig_validations, :perform_validations
+      def perform_validations(options)
+        false
+      end
+    end
+    begin
+      set_user_from_auth :active_trustedclient
+      user = users(:active)
+      user.first_name = 'Test'
+      assert_raise(ActiveRecord::RecordInvalid) { user.save! }
+    ensure
+      Log.class_eval do
+        alias_method :perform_validations, :_orig_validations
+      end
+    end
+  end
+
+  test "don't log changes only to ApiClientAuthorization.last_used_*" do
+    set_user_from_auth :admin_trustedclient
+    auth = api_client_authorizations(:spectator)
+    start_log_count = get_logs_about(auth).size
+    auth.last_used_at = Time.now
+    auth.last_used_by_ip_address = '::1'
+    auth.save!
+    assert_equal(start_log_count, get_logs_about(auth).size,
+                 "log count changed after 'using' ApiClientAuthorization")
+    auth.created_by_ip_address = '::1'
+    auth.save!
+    assert_logged(auth, :update)
+  end
+
+  test "token isn't included in ApiClientAuthorization logs" do
+    set_user_from_auth :admin_trustedclient
+    auth = ApiClientAuthorization.new
+    auth.user = users(:spectator)
+    auth.api_client = api_clients(:untrusted)
+    auth.save!
+    assert_auth_logged_with_clean_properties(auth, :create)
+    auth.expires_at = Time.now
+    auth.save!
+    assert_auth_logged_with_clean_properties(auth, :update)
+    auth.destroy
+    assert_auth_logged_with_clean_properties(auth, :destroy)
+  end
+
+  test "use ownership and permission links to determine which logs a user can see" do
+    c = Log.readable_by(users(:admin)).order("id asc").each.to_a
+    assert_equal 6, c.size
+    assert_equal 1, c[0].id # no-op
+    assert_equal 2, c[1].id # admin changes repository foo, which is owned by active user
+    assert_equal 3, c[2].id # admin changes specimen owned_by_spectator
+    assert_equal 4, c[3].id # foo collection added, readable by active through link
+    assert_equal 5, c[4].id # baz collection added, readable by active and spectator through group 'all users' group membership
+    assert_equal 6, c[5].id # log_owned_by_active
+
+    c = Log.readable_by(users(:active)).order("id asc").each.to_a
+    assert_equal 4, c.size
+    assert_equal 2, c[0].id # admin changes repository foo, which is owned by active user
+    assert_equal 4, c[1].id # foo collection added, readable by active through link
+    assert_equal 5, c[2].id # baz collection added, readable by active and spectator through group 'all users' group membership
+    assert_equal 6, c[3].id # log_owned_by_active
+
+    c = Log.readable_by(users(:spectator)).order("id asc").each.to_a
+    assert_equal 2, c.size
+    assert_equal 3, c[0].id # admin changes specimen owned_by_spectator
+    assert_equal 5, c[1].id # baz collection added, readable by active and spectator through group 'all users' group membership
+  end
+end
diff --git a/services/api/test/unit/node_test.rb b/services/api/test/unit/node_test.rb
new file mode 100644 (file)
index 0000000..37e95db
--- /dev/null
@@ -0,0 +1,29 @@
+require 'test_helper'
+
+class NodeTest < ActiveSupport::TestCase
+  def ping_node(node_name, ping_data)
+    set_user_from_auth :admin
+    node = nodes(node_name)
+    node.ping({ping_secret: node.info['ping_secret'],
+                ip: node.ip_address}.merge(ping_data))
+    node
+  end
+
+  test "pinging a node can add and update stats" do
+    node = ping_node(:idle, {total_cpu_cores: '12', total_ram_mb: '512'})
+    assert_equal(12, node.properties['total_cpu_cores'])
+    assert_equal(512, node.properties['total_ram_mb'])
+  end
+
+  test "stats disappear if not in a ping" do
+    node = ping_node(:idle, {total_ram_mb: '256'})
+    refute_includes(node.properties, 'total_cpu_cores')
+    assert_equal(256, node.properties['total_ram_mb'])
+  end
+
+  test "worker state is down for node with no slot" do
+    node = nodes(:was_idle_now_down)
+    assert_nil node.slot_number, "fixture is not what I expected"
+    assert_equal 'down', node.crunch_worker_state, "wrong worker state"
+  end
+end
diff --git a/services/api/test/unit/owner_test.rb b/services/api/test/unit/owner_test.rb
new file mode 100644 (file)
index 0000000..c7f9776
--- /dev/null
@@ -0,0 +1,126 @@
+require 'test_helper'
+
+# Test referential integrity: ensure we cannot leave any object
+# without owners by deleting a user or group.
+#
+# "o" is an owner.
+# "i" is an item.
+
+class OwnerTest < ActiveSupport::TestCase
+  fixtures :users, :groups, :specimens
+
+  setup do
+    set_user_from_auth :admin_trustedclient
+  end
+
+  User.all
+  Group.all
+  [User, Group].each do |o_class|
+    test "create object with legit #{o_class} owner" do
+      o = o_class.create!
+      i = Specimen.create(owner_uuid: o.uuid)
+      assert i.valid?, "new item should pass validation"
+      assert i.uuid, "new item should have an ID"
+      assert Specimen.where(uuid: i.uuid).any?, "new item should really be in DB"
+    end
+
+    test "create object with non-existent #{o_class} owner" do
+      assert_raises(ActiveRecord::RecordInvalid,
+                    "create should fail with random owner_uuid") do
+        i = Specimen.create!(owner_uuid: o_class.generate_uuid)
+      end
+
+      i = Specimen.create(owner_uuid: o_class.generate_uuid)
+      assert !i.valid?, "object with random owner_uuid should not be valid?"
+
+      i = Specimen.new(owner_uuid: o_class.generate_uuid)
+      assert !i.valid?, "new item should not pass validation"
+      assert !i.uuid, "new item should not have an ID"
+    end
+
+    [User, Group].each do |new_o_class|
+      test "change owner from legit #{o_class} to legit #{new_o_class} owner" do
+        o = o_class.create!
+        i = Specimen.create!(owner_uuid: o.uuid)
+        new_o = new_o_class.create!
+        assert(Specimen.where(uuid: i.uuid).any?,
+               "new item should really be in DB")
+        assert(i.update_attributes(owner_uuid: new_o.uuid),
+               "should change owner_uuid from #{o.uuid} to #{new_o.uuid}")
+      end
+    end
+
+    test "delete #{o_class} that owns nothing" do
+      o = o_class.create!
+      assert(o_class.where(uuid: o.uuid).any?,
+             "new #{o_class} should really be in DB")
+      assert(o.destroy, "should delete #{o_class} that owns nothing")
+      assert_equal(false, o_class.where(uuid: o.uuid).any?,
+                   "#{o.uuid} should not be in DB after deleting")
+    end
+
+    test "change uuid of #{o_class} that owns nothing" do
+      # (we're relying on our admin credentials here)
+      o = o_class.create!
+      assert(o_class.where(uuid: o.uuid).any?,
+             "new #{o_class} should really be in DB")
+      old_uuid = o.uuid
+      new_uuid = o.uuid.sub(/..........$/, rand(2**256).to_s(36)[0..9])
+      assert(o.update_attributes(uuid: new_uuid),
+             "should change #{o_class} uuid from #{old_uuid} to #{new_uuid}")
+      assert_equal(false, o_class.where(uuid: old_uuid).any?,
+                   "#{old_uuid} should disappear when renamed to #{new_uuid}")
+    end
+  end
+
+  ['users(:active)', 'groups(:aproject)'].each do |ofixt|
+    test "delete #{ofixt} that owns other objects" do
+      o = eval ofixt
+      assert_equal(true, Specimen.where(owner_uuid: o.uuid).any?,
+                   "need something to be owned by #{o.uuid} for this test")
+
+      assert_raises(ActiveRecord::DeleteRestrictionError,
+                    "should not delete #{ofixt} that owns objects") do
+        o.destroy
+      end
+    end
+
+    test "change uuid of #{ofixt} that owns other objects" do
+      o = eval ofixt
+      assert_equal(true, Specimen.where(owner_uuid: o.uuid).any?,
+                   "need something to be owned by #{o.uuid} for this test")
+      old_uuid = o.uuid
+      new_uuid = o.uuid.sub(/..........$/, rand(2**256).to_s(36)[0..9])
+      assert(!o.update_attributes(uuid: new_uuid),
+             "should not change uuid of #{ofixt} that owns objects")
+    end
+  end
+
+  test "delete User that owns self" do
+    o = User.create!
+    assert User.where(uuid: o.uuid).any?, "new User should really be in DB"
+    assert_equal(true, o.update_attributes(owner_uuid: o.uuid),
+                 "setting owner to self should work")
+    assert(o.destroy, "should delete User that owns self")
+    assert_equal(false, User.where(uuid: o.uuid).any?,
+                 "#{o.uuid} should not be in DB after deleting")
+  end
+
+  test "change uuid of User that owns self" do
+    o = User.create!
+    assert User.where(uuid: o.uuid).any?, "new User should really be in DB"
+    assert_equal(true, o.update_attributes(owner_uuid: o.uuid),
+                 "setting owner to self should work")
+    old_uuid = o.uuid
+    new_uuid = o.uuid.sub(/..........$/, rand(2**256).to_s(36)[0..9])
+    assert(o.update_attributes(uuid: new_uuid),
+           "should change uuid of User that owns self")
+    assert_equal(false, User.where(uuid: old_uuid).any?,
+                 "#{old_uuid} should not be in DB after deleting")
+    assert_equal(true, User.where(uuid: new_uuid).any?,
+                 "#{new_uuid} should be in DB after renaming")
+    assert_equal(new_uuid, User.where(uuid: new_uuid).first.owner_uuid,
+                 "#{new_uuid} should be its own owner in DB after renaming")
+  end
+
+end
diff --git a/services/api/test/unit/permission_test.rb b/services/api/test/unit/permission_test.rb
new file mode 100644 (file)
index 0000000..20cffda
--- /dev/null
@@ -0,0 +1,370 @@
+require 'test_helper'
+
+class PermissionTest < ActiveSupport::TestCase
+  include CurrentApiClient
+
+  test "Grant permissions on an object I own" do
+    set_user_from_auth :active_trustedclient
+
+    ob = Specimen.create
+    assert ob.save
+
+    # Ensure I have permission to manage this object even when its owner changes
+    perm_link = Link.create(tail_uuid: users(:active).uuid,
+                            head_uuid: ob.uuid,
+                            link_class: 'permission',
+                            name: 'can_manage')
+    assert perm_link.save, "should give myself permission on my own object"
+  end
+
+  test "Delete permission links when deleting an object" do
+    set_user_from_auth :active_trustedclient
+
+    ob = Specimen.create!
+    Link.create!(tail_uuid: users(:active).uuid,
+                 head_uuid: ob.uuid,
+                 link_class: 'permission',
+                 name: 'can_manage')
+    ob_uuid = ob.uuid
+    assert ob.destroy, "Could not destroy object with 1 permission link"
+    assert_empty(Link.where(head_uuid: ob_uuid),
+                 "Permission link was not deleted when object was deleted")
+  end
+
+  test "permission links owned by root" do
+    set_user_from_auth :active_trustedclient
+    ob = Specimen.create!
+    perm_link = Link.create!(tail_uuid: users(:active).uuid,
+                             head_uuid: ob.uuid,
+                             link_class: 'permission',
+                             name: 'can_read')
+    assert_equal system_user_uuid, perm_link.owner_uuid
+  end
+
+  test "readable_by" do
+    set_user_from_auth :active_trustedclient
+
+    ob = Specimen.create!
+    Link.create!(tail_uuid: users(:active).uuid,
+                 head_uuid: ob.uuid,
+                 link_class: 'permission',
+                 name: 'can_read')
+    assert Specimen.readable_by(users(:active)).where(uuid: ob.uuid).any?, "user does not have read permission"
+  end
+
+  test "writable_by" do
+    set_user_from_auth :active_trustedclient
+
+    ob = Specimen.create!
+    Link.create!(tail_uuid: users(:active).uuid,
+                 head_uuid: ob.uuid,
+                 link_class: 'permission',
+                 name: 'can_write')
+    assert ob.writable_by.include?(users(:active).uuid), "user does not have write permission"
+  end
+
+  test "writable_by reports requesting user's own uuid for a writable project" do
+    invited_to_write = users(:project_viewer)
+    group = groups(:asubproject)
+
+    # project_viewer can read, but cannot write or see the writers list
+    set_user_from_auth :project_viewer
+    assert_equal([group.owner_uuid],
+                 group.writable_by,
+                 "writers list should just have owner_uuid")
+
+    # allow project_viewer to write for the remainder of the test
+    set_user_from_auth :admin
+    Link.create!(tail_uuid: invited_to_write.uuid,
+                 head_uuid: group.uuid,
+                 link_class: 'permission',
+                 name: 'can_write')
+    group.permissions.reload
+
+    # project_viewer should see self in writers list (but not all writers)
+    set_user_from_auth :project_viewer
+    assert_not_nil(group.writable_by,
+                    "can write but cannot see writers list")
+    assert_includes(group.writable_by, invited_to_write.uuid,
+                    "self missing from writers list")
+    assert_includes(group.writable_by, group.owner_uuid,
+                    "project owner missing from writers list")
+    refute_includes(group.writable_by, users(:active).uuid,
+                    "saw :active user in writers list")
+
+    # active user should see full writers list
+    set_user_from_auth :active
+    assert_includes(group.writable_by, invited_to_write.uuid,
+                    "permission just added, but missing from writers list")
+
+    # allow project_viewer to manage for the remainder of the test
+    set_user_from_auth :admin
+    Link.create!(tail_uuid: invited_to_write.uuid,
+                 head_uuid: group.uuid,
+                 link_class: 'permission',
+                 name: 'can_manage')
+    # invite another writer we can test for
+    Link.create!(tail_uuid: users(:spectator).uuid,
+                 head_uuid: group.uuid,
+                 link_class: 'permission',
+                 name: 'can_write')
+    group.permissions.reload
+
+    set_user_from_auth :project_viewer
+    assert_not_nil(group.writable_by,
+                    "can manage but cannot see writers list")
+    assert_includes(group.writable_by, users(:spectator).uuid,
+                    ":spectator missing from writers list")
+  end
+
+  test "user owns group, group can_manage object's group, user can add permissions" do
+    set_user_from_auth :admin
+
+    owner_grp = Group.create!(owner_uuid: users(:active).uuid)
+
+    sp_grp = Group.create!
+    sp = Specimen.create!(owner_uuid: sp_grp.uuid)
+
+    manage_perm = Link.create!(link_class: 'permission',
+                               name: 'can_manage',
+                               tail_uuid: owner_grp.uuid,
+                               head_uuid: sp_grp.uuid)
+
+    # active user owns owner_grp, which has can_manage permission on sp_grp
+    # user should be able to add permissions on sp.
+    set_user_from_auth :active_trustedclient
+    test_perm = Link.create(tail_uuid: users(:active).uuid,
+                            head_uuid: sp.uuid,
+                            link_class: 'permission',
+                            name: 'can_write')
+    test_uuid = test_perm.uuid
+    assert test_perm.save, "could not save new permission on target object"
+    assert test_perm.destroy, "could not delete new permission on target object"
+  end
+
+  # TODO(twp): fix bug #3091, which should fix this test.
+  test "can_manage permission on a non-group object" do
+    skip
+    set_user_from_auth :admin
+
+    ob = Specimen.create!
+    # grant can_manage permission to active
+    perm_link = Link.create!(tail_uuid: users(:active).uuid,
+                             head_uuid: ob.uuid,
+                             link_class: 'permission',
+                             name: 'can_manage')
+    # ob is owned by :admin, the link is owned by root
+    assert_equal users(:admin).uuid, ob.owner_uuid
+    assert_equal system_user_uuid, perm_link.owner_uuid
+
+    # user "active" can modify the permission link
+    set_user_from_auth :active_trustedclient
+    perm_link.properties["foo"] = 'bar'
+    assert perm_link.save, "could not save modified link"
+
+    assert_equal 'bar', perm_link.properties['foo'], "link properties do not include foo = bar"
+  end
+
+  test "user without can_manage permission may not modify permission link" do
+    set_user_from_auth :admin
+
+    ob = Specimen.create!
+    # grant can_manage permission to active
+    perm_link = Link.create!(tail_uuid: users(:active).uuid,
+                             head_uuid: ob.uuid,
+                             link_class: 'permission',
+                             name: 'can_read')
+    # ob is owned by :admin, the link is owned by root
+    assert_equal ob.owner_uuid, users(:admin).uuid
+    assert_equal perm_link.owner_uuid, system_user_uuid
+
+    # user "active" may not modify the permission link
+    set_user_from_auth :active_trustedclient
+    perm_link.name = 'can_manage'
+    assert_raises ArvadosModel::PermissionDeniedError do
+      perm_link.save
+    end
+  end
+
+  test "manager user gets permission to minions' articles via can_manage link" do
+    manager = create :active_user, first_name: "Manage", last_name: "Er"
+    minion = create :active_user, first_name: "Min", last_name: "Ion"
+    minions_specimen = act_as_user minion do
+      Specimen.create!
+    end
+    # Manager creates a group. (Make sure it doesn't magically give
+    # anyone any additional permissions.)
+    g = nil
+    act_as_user manager do
+      g = create :group, name: "NoBigSecret Lab"
+      assert_empty(User.readable_by(manager).where(uuid: minion.uuid),
+                   "saw a user I shouldn't see")
+      assert_raises(ArvadosModel::PermissionDeniedError,
+                    ActiveRecord::RecordInvalid,
+                    "gave can_read permission to a user I shouldn't see") do
+        create(:permission_link,
+               name: 'can_read', tail_uuid: minion.uuid, head_uuid: g.uuid)
+      end
+      %w(can_manage can_write can_read).each do |perm_type|
+        assert_raises(ArvadosModel::PermissionDeniedError,
+                      ActiveRecord::RecordInvalid,
+                      "escalated privileges") do
+          create(:permission_link,
+                 name: perm_type, tail_uuid: g.uuid, head_uuid: minion.uuid)
+        end
+      end
+      assert_empty(User.readable_by(manager).where(uuid: minion.uuid),
+                   "manager saw minion too soon")
+      assert_empty(User.readable_by(minion).where(uuid: manager.uuid),
+                   "minion saw manager too soon")
+      assert_empty(Group.readable_by(minion).where(uuid: g.uuid),
+                   "minion saw manager's new NoBigSecret Lab group too soon")
+
+      # Manager declares everybody on the system should be able to see
+      # the NoBigSecret Lab group.
+      create(:permission_link,
+             name: 'can_read',
+             tail_uuid: 'zzzzz-j7d0g-fffffffffffffff',
+             head_uuid: g.uuid)
+      # ...but nobody has joined the group yet. Manager still can't see
+      # minion.
+      assert_empty(User.readable_by(manager).where(uuid: minion.uuid),
+                   "manager saw minion too soon")
+    end
+
+    act_as_user minion do
+      # Minion can see the group.
+      assert_not_empty(Group.readable_by(minion).where(uuid: g.uuid),
+                       "minion could not see the NoBigSecret Lab group")
+      # Minion joins the group.
+      create(:permission_link,
+             name: 'can_read',
+             tail_uuid: g.uuid,
+             head_uuid: minion.uuid)
+    end
+
+    act_as_user manager do
+      # Now, manager can see minion.
+      assert_not_empty(User.readable_by(manager).where(uuid: minion.uuid),
+                       "manager could not see minion")
+      # But cannot obtain further privileges this way.
+      assert_raises(ArvadosModel::PermissionDeniedError,
+                    "escalated privileges") do
+        create(:permission_link,
+               name: 'can_manage', tail_uuid: manager.uuid, head_uuid: minion.uuid)
+      end
+      assert_empty(Specimen
+                     .readable_by(manager)
+                     .where(uuid: minions_specimen.uuid),
+                   "manager saw the minion's private stuff")
+      assert_raises(ArvadosModel::PermissionDeniedError,
+                   "manager could update minion's private stuff") do
+        minions_specimen.update_attributes(properties: {'x' => 'y'})
+      end
+    end
+
+    act_as_system_user do
+      # Root can give Manager more privileges over Minion.
+      create(:permission_link,
+             name: 'can_manage', tail_uuid: g.uuid, head_uuid: minion.uuid)
+    end
+
+    act_as_user manager do
+      # Now, manager can read and write Minion's stuff.
+      assert_not_empty(Specimen
+                         .readable_by(manager)
+                         .where(uuid: minions_specimen.uuid),
+                       "manager could not find minion's specimen by uuid")
+      assert_equal(true,
+                   minions_specimen.update_attributes(properties: {'x' => 'y'}),
+                   "manager could not update minion's specimen object")
+    end
+  end
+
+  test "users with bidirectional read permission in group can see each other, but cannot see each other's private articles" do
+    a = create :active_user, first_name: "A"
+    b = create :active_user, first_name: "B"
+    other = create :active_user, first_name: "OTHER"
+    act_as_system_user do
+      g = create :group
+      [a,b].each do |u|
+        create(:permission_link,
+               name: 'can_read', tail_uuid: u.uuid, head_uuid: g.uuid)
+        create(:permission_link,
+               name: 'can_read', head_uuid: u.uuid, tail_uuid: g.uuid)
+      end
+    end
+    a_specimen = act_as_user a do
+      Specimen.create!
+    end
+    assert_not_empty(Specimen.readable_by(a).where(uuid: a_specimen.uuid),
+                     "A cannot read own Specimen, following test probably useless.")
+    assert_empty(Specimen.readable_by(b).where(uuid: a_specimen.uuid),
+                 "B can read A's Specimen")
+    [a,b].each do |u|
+      assert_empty(User.readable_by(u).where(uuid: other.uuid),
+                   "#{u.first_name} can see OTHER in the user list")
+      assert_empty(User.readable_by(other).where(uuid: u.uuid),
+                   "OTHER can see #{u.first_name} in the user list")
+      act_as_user u do
+        assert_raises ArvadosModel::PermissionDeniedError, "wrote without perm" do
+          other.update_attributes!(prefs: {'pwned' => true})
+        end
+        assert_equal(true, u.update_attributes!(prefs: {'thisisme' => true}),
+                     "#{u.first_name} can't update its own prefs")
+      end
+      act_as_user other do
+        assert_raises(ArvadosModel::PermissionDeniedError,
+                        "OTHER wrote #{u.first_name} without perm") do
+          u.update_attributes!(prefs: {'pwned' => true})
+        end
+        assert_equal(true, other.update_attributes!(prefs: {'thisisme' => true}),
+                     "OTHER can't update its own prefs")
+      end
+    end
+  end
+
+  test "cannot create with owner = unwritable user" do
+    set_user_from_auth :rominiadmin
+    assert_raises ArvadosModel::PermissionDeniedError, "created with owner = unwritable user" do
+      Specimen.create!(owner_uuid: users(:active).uuid)
+    end
+  end
+
+  test "cannot change owner to unwritable user" do
+    set_user_from_auth :rominiadmin
+    ob = Specimen.create!
+    assert_raises ArvadosModel::PermissionDeniedError, "changed owner to unwritable user" do
+      ob.update_attributes!(owner_uuid: users(:active).uuid)
+    end
+  end
+
+  test "cannot create with owner = unwritable group" do
+    set_user_from_auth :rominiadmin
+    assert_raises ArvadosModel::PermissionDeniedError, "created with owner = unwritable group" do
+      Specimen.create!(owner_uuid: groups(:aproject).uuid)
+    end
+  end
+
+  test "cannot change owner to unwritable group" do
+    set_user_from_auth :rominiadmin
+    ob = Specimen.create!
+    assert_raises ArvadosModel::PermissionDeniedError, "changed owner to unwritable group" do
+      ob.update_attributes!(owner_uuid: groups(:aproject).uuid)
+    end
+  end
+
+  test "active user cannot write admin's repo" do
+    set_user_from_auth :active
+    assert_raises ArvadosModel::PermissionDeniedError, "pwned" do
+      repositories(:repository3).update_attributes(name: "kilroy")
+    end
+  end
+
+  test "active user cannot change repo name via can_manage permission" do
+    set_user_from_auth :active
+    assert_raises ArvadosModel::PermissionDeniedError, "pwned" do
+      repositories(:foo).update_attributes(name: "arvados")
+    end
+  end
+end
diff --git a/services/api/test/unit/pipeline_instance_test.rb b/services/api/test/unit/pipeline_instance_test.rb
new file mode 100644 (file)
index 0000000..93354f8
--- /dev/null
@@ -0,0 +1,113 @@
+require 'test_helper'
+
+class PipelineInstanceTest < ActiveSupport::TestCase
+
+  test "check active and success for a pipeline in new state" do
+    pi = pipeline_instances :new_pipeline
+
+    assert_equal 'New', pi.state, 'expected state to be New for :new_pipeline'
+
+    # save the pipeline and expect state to be New
+    Thread.current[:user] = users(:admin)
+
+    pi.save
+    pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+    assert_equal PipelineInstance::New, pi.state, 'expected state to be New for new pipeline'
+  end
+
+  test "check active and success for a newly created pipeline" do
+    set_user_from_auth :active
+
+    pi = PipelineInstance.create(state: 'Ready')
+    pi.save
+
+    assert pi.valid?, 'expected newly created empty pipeline to be valid ' + pi.errors.messages.to_s
+    assert_equal 'Ready', pi.state, 'expected state to be Ready for a new empty pipeline'
+  end
+
+  test "update attributes for pipeline" do
+    Thread.current[:user] = users(:admin)
+
+    pi = pipeline_instances :new_pipeline
+
+    # add a component with no input and expect state to be New
+    component = {'script_parameters' => {"input_not_provided" => {"required" => true}}}
+    pi.components['first'] = component
+    components = pi.components
+    pi.update_attribute 'components', pi.components
+    pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+    assert_equal PipelineInstance::New, pi.state, 'expected state to be New after adding component with input'
+    assert_equal pi.components.size, 1, 'expected one component'
+
+    # add a component with no input not required
+    component = {'script_parameters' => {"input_not_provided" => {"required" => false}}}
+    pi.components['first'] = component
+    components = pi.components
+    pi.update_attribute 'components', pi.components
+    pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+    assert_equal PipelineInstance::Ready, pi.state, 'expected state to be Ready after adding component with input'
+    assert_equal pi.components.size, 1, 'expected one component'
+
+    # add a component with input and expect state to become Ready
+    component = {'script_parameters' => {"input" => "yyyad4b39ca5a924e481008009d94e32+210"}}
+    pi.components['first'] = component
+    components = pi.components
+    pi.update_attribute 'components', pi.components
+    pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+    assert_equal PipelineInstance::Ready, pi.state, 'expected state to be Ready after adding component with input'
+    assert_equal pi.components.size, 1, 'expected one component'
+
+    pi.state = PipelineInstance::RunningOnServer
+    pi.save
+    pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+    assert_equal PipelineInstance::RunningOnServer, pi.state, 'expected state to be RunningOnServer after updating state to RunningOnServer'
+
+    pi.state = PipelineInstance::Paused
+    pi.save
+    pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+    assert_equal PipelineInstance::Paused, pi.state, 'expected state to be Paused after updating state to Paused'
+
+    pi.state = PipelineInstance::Complete
+    pi.save
+    pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+    assert_equal PipelineInstance::Complete, pi.state, 'expected state to be Complete after updating state to Complete'
+
+    pi.state = 'bogus'
+    pi.save
+    pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+    assert_equal PipelineInstance::Complete, pi.state, 'expected state to be unchanged with set to a bogus value'
+
+    pi.state = PipelineInstance::Failed
+    pi.save
+    pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+    assert_equal PipelineInstance::Failed, pi.state, 'expected state to be Failed after updating state to Failed'
+  end
+
+  test "update attributes for pipeline with two components" do
+    pi = pipeline_instances :new_pipeline
+
+    # add two components, one with input and one with no input and expect state to be New
+    component1 = {'script_parameters' => {"something" => "xxxad4b39ca5a924e481008009d94e32+210", "input" => "c1bad4b39ca5a924e481008009d94e32+210"}}
+    component2 = {'script_parameters' => {"something_else" => "xxxad4b39ca5a924e481008009d94e32+210", "input_missing" => {"required" => true}}}
+    pi.components['first'] = component1
+    pi.components['second'] = component2
+    components = pi.components
+
+    Thread.current[:user] = users(:admin)
+    pi.update_attribute 'components', pi.components
+
+    pi = PipelineInstance.find_by_uuid 'zzzzz-d1hrv-f4gneyn6br1xize'
+    assert_equal PipelineInstance::New, pi.state, 'expected state to be New after adding component with input'
+    assert_equal pi.components.size, 2, 'expected two components'
+  end
+
+  [:has_component_with_no_script_parameters,
+   :has_component_with_empty_script_parameters].each do |pi_name|
+    test "update pipeline that #{pi_name}" do
+      pi = pipeline_instances pi_name
+
+      Thread.current[:user] = users(:active)
+      assert_equal PipelineInstance::Ready, pi.state
+    end
+  end
+end
diff --git a/services/api/test/unit/pipeline_template_test.rb b/services/api/test/unit/pipeline_template_test.rb
new file mode 100644 (file)
index 0000000..ab21010
--- /dev/null
@@ -0,0 +1,7 @@
+require 'test_helper'
+
+class PipelineTest < ActiveSupport::TestCase
+  # test "the truth" do
+  #   assert true
+  # end
+end
diff --git a/services/api/test/unit/repository_test.rb b/services/api/test/unit/repository_test.rb
new file mode 100644 (file)
index 0000000..327170c
--- /dev/null
@@ -0,0 +1,7 @@
+require 'test_helper'
+
+class RepositoryTest < ActiveSupport::TestCase
+  # test "the truth" do
+  #   assert true
+  # end
+end
diff --git a/services/api/test/unit/specimen_test.rb b/services/api/test/unit/specimen_test.rb
new file mode 100644 (file)
index 0000000..a9abc8c
--- /dev/null
@@ -0,0 +1,7 @@
+require 'test_helper'
+
+class SpecimenTest < ActiveSupport::TestCase
+  # test "the truth" do
+  #   assert true
+  # end
+end
diff --git a/services/api/test/unit/trait_test.rb b/services/api/test/unit/trait_test.rb
new file mode 100644 (file)
index 0000000..45df2ed
--- /dev/null
@@ -0,0 +1,7 @@
+require 'test_helper'
+
+class TraitTest < ActiveSupport::TestCase
+  # test "the truth" do
+  #   assert true
+  # end
+end
diff --git a/services/api/test/unit/user_notifier_test.rb b/services/api/test/unit/user_notifier_test.rb
new file mode 100644 (file)
index 0000000..b280ae7
--- /dev/null
@@ -0,0 +1,24 @@
+require 'test_helper'
+
+class UserNotifierTest < ActionMailer::TestCase
+
+  # Send the email, then test that it got queued
+  test "account is setup" do
+    user = users :active
+    email = UserNotifier.account_is_setup user
+
+    assert_not_nil email
+
+    # Test the body of the sent email contains what we expect it to
+    assert_equal Rails.configuration.user_notifier_email_from, email.from.first
+    assert_equal user.email, email.to.first
+    assert_equal 'Welcome to Curoverse', email.subject
+    assert (email.body.to_s.include? 'Your Arvados account has been set up'),
+        'Expected Your Arvados account has been set up in email body'
+    assert (email.body.to_s.include? user.email),
+        'Expected user email in email body'
+    assert (email.body.to_s.include? Rails.configuration.workbench_address),
+        'Expected workbench url in email body'
+  end
+
+end
diff --git a/services/api/test/unit/user_test.rb b/services/api/test/unit/user_test.rb
new file mode 100644 (file)
index 0000000..9bcb011
--- /dev/null
@@ -0,0 +1,657 @@
+require 'test_helper'
+
+class UserTest < ActiveSupport::TestCase
+  include CurrentApiClient
+
+  # The fixture services/api/test/fixtures/users.yml serves as the input for this test case
+  setup do
+    # Make sure system_user exists before making "pre-test users" list
+    system_user
+  end
+
+  [[false, 'foo@example.com', true, nil],
+   [false, 'bar@example.com', nil, true],
+   [true, 'foo@example.com', true, nil],
+   [true, 'bar@example.com', true, true],
+   [false, false, nil, nil],
+   [true, false, true, nil]
+  ].each do |auto_admin_first_user_config, auto_admin_user_config, foo_should_be_admin, bar_should_be_admin|
+    # In each case, 'foo' is created first, then 'bar', then 'bar2', then 'baz'.
+    test "auto admin with auto_admin_first=#{auto_admin_first_user_config} auto_admin=#{auto_admin_user_config}" do
+
+      if auto_admin_first_user_config
+        # This test requires no admin users exist (except for the system user)
+        users(:admin).delete
+        @all_users = User.where("uuid not like '%-000000000000000'").where(:is_admin => true).find(:all)
+        assert_equal 0, @all_users.size, "No admin users should exist (except for the system user)"
+      end
+
+      Rails.configuration.auto_admin_first_user = auto_admin_first_user_config
+      Rails.configuration.auto_admin_user = auto_admin_user_config
+
+      # See if the foo user has is_admin
+      foo = User.new
+      foo.first_name = 'foo'
+      foo.email = 'foo@example.com'
+
+      act_as_system_user do
+        foo.save!
+      end
+
+      foo = User.find(foo.id)   # get the user back
+      assert_equal foo_should_be_admin, foo.is_admin, "is_admin is wrong for user foo"
+      assert_equal 'foo', foo.first_name
+
+      # See if the bar user has is_admin
+      bar = User.new
+      bar.first_name = 'bar'
+      bar.email = 'bar@example.com'
+
+      act_as_system_user do
+        bar.save!
+      end
+
+      bar = User.find(bar.id)   # get the user back
+      assert_equal bar_should_be_admin, bar.is_admin, "is_admin is wrong for user bar"
+      assert_equal 'bar', bar.first_name
+
+      # A subsequent user with the bar@example.com address should never be
+      # elevated to admin
+      bar2 = User.new
+      bar2.first_name = 'bar2'
+      bar2.email = 'bar@example.com'
+
+      act_as_system_user do
+        bar2.save!
+      end
+
+      bar2 = User.find(bar2.id)   # get the user back
+      assert !bar2.is_admin, "is_admin is wrong for user bar2"
+      assert_equal 'bar2', bar2.first_name
+
+      # An ordinary new user should not be elevated to admin
+      baz = User.new
+      baz.first_name = 'baz'
+      baz.email = 'baz@example.com'
+
+      act_as_system_user do
+        baz.save!
+      end
+
+      baz = User.find(baz.id)   # get the user back
+      assert !baz.is_admin
+      assert_equal 'baz', baz.first_name
+
+    end
+  end
+
+  test "check non-admin active user properties" do
+    @active_user = users(:active)     # get the active user
+    assert !@active_user.is_admin, 'is_admin should not be set for a non-admin user'
+    assert @active_user.is_active, 'user should be active'
+    assert @active_user.is_invited, 'is_invited should be set'
+    assert_not_nil @active_user.prefs, "user's preferences should be non-null, but may be size zero"
+    assert (@active_user.can? :read=>"#{@active_user.uuid}"), "user should be able to read own object"
+    assert (@active_user.can? :write=>"#{@active_user.uuid}"), "user should be able to write own object"
+    assert (@active_user.can? :manage=>"#{@active_user.uuid}"), "user should be able to manage own object"
+
+    assert @active_user.groups_i_can(:read).size > 0, "active user should be able read at least one group"
+
+    # non-admin user cannot manage or write other user objects
+    @uninvited_user = users(:inactive_uninvited)     # get the uninvited user
+    assert !(@active_user.can? :read=>"#{@uninvited_user.uuid}")
+    assert !(@active_user.can? :write=>"#{@uninvited_user.uuid}")
+    assert !(@active_user.can? :manage=>"#{@uninvited_user.uuid}")
+  end
+
+  test "check admin user properties" do
+    @admin_user = users(:admin)     # get the admin user
+    assert @admin_user.is_admin, 'is_admin should be set for admin user'
+    assert @admin_user.is_active, 'admin user cannot be inactive'
+    assert @admin_user.is_invited, 'is_invited should be set'
+    assert_not_nil @admin_user.uuid.size, "user's uuid should be non-null"
+    assert_not_nil @admin_user.prefs, "user's preferences should be non-null, but may be size zero"
+    assert @admin_user.identity_url.size > 0, "user's identity url is expected"
+    assert @admin_user.can? :read=>"#{@admin_user.uuid}"
+    assert @admin_user.can? :write=>"#{@admin_user.uuid}"
+    assert @admin_user.can? :manage=>"#{@admin_user.uuid}"
+
+    assert @admin_user.groups_i_can(:read).size > 0, "admin active user should be able read at least one group"
+    assert @admin_user.groups_i_can(:write).size > 0, "admin active user should be able write to at least one group"
+    assert @admin_user.groups_i_can(:manage).size > 0, "admin active user should be able manage at least one group"
+
+    # admin user can also write or manage other users
+    @uninvited_user = users(:inactive_uninvited)     # get the uninvited user
+    assert @admin_user.can? :read=>"#{@uninvited_user.uuid}"
+    assert @admin_user.can? :write=>"#{@uninvited_user.uuid}"
+    assert @admin_user.can? :manage=>"#{@uninvited_user.uuid}"
+  end
+
+  test "check inactive and uninvited user properties" do
+    @uninvited_user = users(:inactive_uninvited)     # get the uninvited user
+    assert !@uninvited_user.is_admin, 'is_admin should not be set for a non-admin user'
+    assert !@uninvited_user.is_active, 'user should be inactive'
+    assert !@uninvited_user.is_invited, 'is_invited should not be set'
+    assert @uninvited_user.can? :read=>"#{@uninvited_user.uuid}"
+    assert @uninvited_user.can? :write=>"#{@uninvited_user.uuid}"
+    assert @uninvited_user.can? :manage=>"#{@uninvited_user.uuid}"
+
+    assert @uninvited_user.groups_i_can(:read).size == 1, "inactive and uninvited user can only read anonymous user group"
+    assert @uninvited_user.groups_i_can(:read).first.ends_with? 'anonymouspublic' , "inactive and uninvited user can only read anonymous user group"
+    assert @uninvited_user.groups_i_can(:write).size == 0, "inactive and uninvited user should not be able write to any groups"
+    assert @uninvited_user.groups_i_can(:manage).size == 0, "inactive and uninvited user should not be able manage any groups"
+  end
+
+  test "find user method checks" do
+    User.find(:all).each do |user|
+      assert_not_nil user.uuid, "non-null uuid expected for " + user.full_name
+    end
+
+    user = users(:active)     # get the active user
+
+    found_user = User.find(user.id)   # find a user by the row id
+
+    assert_equal found_user.full_name, user.first_name + ' ' + user.last_name
+    assert_equal found_user.identity_url, user.identity_url
+  end
+
+  test "full name should not contain spurious whitespace" do
+    set_user_from_auth :admin
+
+    user = User.create ({uuid: 'zzzzz-tpzed-abcdefghijklmno', email: 'foo@example.com' })
+
+    assert_equal '', user.full_name
+
+    user.first_name = 'John'
+    user.last_name = 'Smith'
+
+    assert_equal user.first_name + ' ' + user.last_name, user.full_name
+  end
+
+  test "create new user" do
+    set_user_from_auth :admin
+
+    @all_users = User.find(:all)
+
+    user = User.new
+    user.first_name = "first_name_for_newly_created_user"
+    user.save
+
+    # verify there is one extra user in the db now
+    assert_equal @all_users.size+1, User.find(:all).size
+
+    user = User.find(user.id)   # get the user back
+    assert_equal(user.first_name, 'first_name_for_newly_created_user')
+    assert_not_nil user.uuid, 'uuid should be set for newly created user'
+    assert_nil user.email, 'email should be null for newly created user, because it was not passed in'
+    assert_nil user.identity_url, 'identity_url should be null for newly created user, because it was not passed in'
+
+    user.first_name = 'first_name_for_newly_created_user_updated'
+    user.save
+    user = User.find(user.id)   # get the user back
+    assert_equal(user.first_name, 'first_name_for_newly_created_user_updated')
+  end
+
+  test "create new user with notifications" do
+    set_user_from_auth :admin
+
+    create_user_and_verify_setup_and_notifications true, 'active-notify-address@example.com', 'inactive-notify-address@example.com', nil, false
+    create_user_and_verify_setup_and_notifications true, 'active-notify-address@example.com', [], nil, false
+    create_user_and_verify_setup_and_notifications true, [], [], nil, false
+    create_user_and_verify_setup_and_notifications false, 'active-notify-address@example.com', 'inactive-notify-address@example.com', nil, false
+    create_user_and_verify_setup_and_notifications false, [], 'inactive-notify-address@example.com', nil, false
+    create_user_and_verify_setup_and_notifications false, [], [], nil, false
+  end
+
+  [
+    [false, [], [], 'inactive-none@example.com', false, false, true],
+    [false, [], [], 'inactive-vm@example.com', true, false, true],
+    [false, [], [], 'inactive-repo@example.com', false, true, true],
+    [false, [], [], 'inactive-both@example.com', true, true, true],
+
+    [true, 'active-notify@example.com', 'inactive-notify@example.com', 'active-none@example.com', false, false, true],
+    [true, 'active-notify@example.com', 'inactive-notify@example.com', 'active-vm@example.com', true, false, true],
+    [true, 'active-notify@example.com', 'inactive-notify@example.com', 'active-repo@example.com', false, true, true],
+    [true, 'active-notify@example.com', 'inactive-notify@example.com', 'active-both@example.com', true, true, true],
+
+    [false, [], [], nil, true, true, false],
+
+    [false, [], [], 'arvados', true, true, false],
+    [false, [], [], 'arvados', true, false, false],   # blacklisted username
+    [false, [], [], 'arvados', false, false, true],   # since we are not creating repo and vm login, this blacklisted name is not a problem
+
+    [false, [], [], 'arvados@example.com', false, false, true],   # since we are not creating repo and vm login, this blacklisted name is not a problem
+    [true, 'active-notify@example.com', 'inactive-notify@example.com', 'arvados@example.com', false, false, true],   # since we are not creating repo and vm login, this blacklisted name is not a problem
+    [true, 'active-notify@example.com', 'inactive-notify@example.com', 'root@example.com', true, false, false], # blacklisted name
+    [false, 'active-notify@example.com', 'inactive-notify@example.com', 'root@example.com', true, false, false], # blacklisted name
+    [true, 'active-notify@example.com', 'inactive-notify@example.com', 'roo_t@example.com', false, true, true], # not blacklisted name
+
+    [false, [], [], '@example.com', true, false, false],  # incorrect format
+    [false, [], [], '@example.com', false, true, false],
+    [false, [], [], '@example.com', false, false, true],  # no repo and vm login, so no issue with email format
+
+    [false, [], [], '^^incorrect_format@example.com', true, true, false],
+
+    [false, 'active-notify@example.com', 'inactive-notify@example.com', 'auto_setup_repo@example.com', true, true, true],  # existing repository name 'auto_setup_repo'
+    [true, 'active-notify@example.com', 'inactive-notify@example.com', 'auto_setup_repo@example.com', true, false, true],  # existing repository name 'auto_setup_repo'
+    [false, 'active-notify@example.com', 'inactive-notify@example.com', 'auto_setup_repo@example.com', false, true, true],  # existing repository name 'auto_setup_repo'
+    [false, 'active-notify@example.com', 'inactive-notify@example.com', 'auto_setup_repo@example.com', false, false, true],  # existing repository name 'auto_setup_repo', but we are not creating repo or login link
+
+    [false, 'active-notify@example.com', 'inactive-notify@example.com', 'auto_setup_vm_login@example.com', true, true, true], # existing vm login name
+    [true, 'active-notify@example.com', 'inactive-notify@example.com', 'auto_setup_vm_login@example.com', true, false, true], # existing vm login name
+    [false, 'active-notify@example.com', 'inactive-notify@example.com', 'auto_setup_vm_login@example.com', false, true, true], # existing vm login name
+    [false, 'active-notify@example.com', 'inactive-notify@example.com', 'auto_setup_vm_login@example.com', false, false, true], # existing vm login name, but we are not creating repo or login link
+
+    [true, 'active-notify@example.com', 'inactive-notify@example.com', '*!*@example.com', true, false, false], # username is invalid format
+    [false, 'active-notify@example.com', 'inactive-notify@example.com', '*!*@example.com', false, false, true], # since no repo and vm login, username is ok (not validated)
+    [true, 'active-notify@example.com', 'inactive-notify@example.com', '*!*@example.com', false, false, true], # since no repo and vm login, username is ok (not validated)
+
+    [true, 'active-notify@example.com', 'inactive-notify@example.com', '&4ad@example.com', true, true, false], # username is invalid format
+    [true, 'active-notify@example.com', 'inactive-notify@example.com', '&4ad@example.com', false, false, true], # no repo or vm login, so format not checked
+    [false, 'active-notify@example.com', 'inactive-notify@example.com', '&4ad@example.com', true, true, false], # username is invalid format
+    [false, 'active-notify@example.com', 'inactive-notify@example.com', '&4ad@example.com', false, false, true], # no repo or vm login, so format not checked
+
+    [true, 'active-notify@example.com', 'inactive-notify@example.com', '4ad@example.com', true, true, false], # username is invalid format
+    [true, 'active-notify@example.com', 'inactive-notify@example.com', '4ad@example.com', false, false, true], # no repo or vm login, so format not checked
+    [false, 'active-notify@example.com', 'inactive-notify@example.com', '4ad@example.com', false, false, true], # no repo or vm login, so format not checked
+
+    [true, 'active-notify@example.com', 'inactive-notify@example.com', '.foo@example.com', false, false, true], # no repo or vm login, so format not checked
+    [true, 'active-notify@example.com', 'inactive-notify@example.com', '.foo@example.com', true, false, false], # invalid format
+
+    [true, 'active-notify@example.com', 'inactive-notify@example.com', 'bar.@example.com', false, false, true], # no repo or vm login, so format not checked
+    [true, 'active-notify@example.com', 'inactive-notify@example.com', 'bar.@example.com', true, false, false], # valid format
+
+    [true, 'active-notify@example.com', 'inactive-notify@example.com', 'ice9@example.com', false, false, true], # no repo or vm login, so format not checked
+    [true, 'active-notify@example.com', 'inactive-notify@example.com', 'ice9@example.com', true, false, true], # valid format
+
+    [true, 'active-notify@example.com', 'inactive-notify@example.com', 'o_o@example.com', false, false, true], # no repo or vm login, so format not checked
+    [true, 'active-notify@example.com', 'inactive-notify@example.com', 'o_o@example.com', true, false, true], # valid format
+
+    [true, 'active-notify@example.com', 'inactive-notify@example.com', 'r00t@example.com', false, false, true], # no repo or vm login, so format not checked
+    [true, 'active-notify@example.com', 'inactive-notify@example.com', 'r00t@example.com', true, false, true], # valid format
+
+  ].each do |active, new_user_recipients, inactive_recipients, email, auto_setup_vm, auto_setup_repo, ok_to_auto_setup|
+    test "create new user with auto setup #{active} #{email} #{auto_setup_vm} #{auto_setup_repo}" do
+      auto_setup_new_users = Rails.configuration.auto_setup_new_users
+      auto_setup_new_users_with_vm_uuid = Rails.configuration.auto_setup_new_users_with_vm_uuid
+      auto_setup_new_users_with_repository = Rails.configuration.auto_setup_new_users_with_repository
+
+      begin
+        set_user_from_auth :admin
+
+        Rails.configuration.auto_setup_new_users = true
+
+        if auto_setup_vm
+          Rails.configuration.auto_setup_new_users_with_vm_uuid = virtual_machines(:testvm)['uuid']
+        else
+          Rails.configuration.auto_setup_new_users_with_vm_uuid = false
+        end
+
+        Rails.configuration.auto_setup_new_users_with_repository = auto_setup_repo
+
+        create_user_and_verify_setup_and_notifications active, new_user_recipients, inactive_recipients, email, ok_to_auto_setup
+      ensure
+        Rails.configuration.auto_setup_new_users = auto_setup_new_users
+        Rails.configuration.auto_setup_new_users_with_vm_uuid = auto_setup_new_users_with_vm_uuid
+        Rails.configuration.auto_setup_new_users_with_repository = auto_setup_new_users_with_repository
+      end
+    end
+  end
+
+  test "update existing user" do
+    set_user_from_auth :active    # set active user as current user
+
+    @active_user = users(:active)     # get the active user
+
+    @active_user.first_name = "first_name_changed"
+    @active_user.save
+
+    @active_user = User.find(@active_user.id)   # get the user back
+    assert_equal(@active_user.first_name, 'first_name_changed')
+
+    # admin user also should be able to update the "active" user info
+    set_user_from_auth :admin # set admin user as current user
+    @active_user.first_name = "first_name_changed_by_admin_for_active_user"
+    @active_user.save
+
+    @active_user = User.find(@active_user.id)   # get the user back
+    assert_equal(@active_user.first_name, 'first_name_changed_by_admin_for_active_user')
+  end
+
+  test "delete a user and verify" do
+    @active_user = users(:active)     # get the active user
+    active_user_uuid = @active_user.uuid
+
+    set_user_from_auth :admin
+    @active_user.delete
+
+    found_deleted_user = false
+    User.find(:all).each do |user|
+      if user.uuid == active_user_uuid
+        found_deleted_user = true
+        break
+      end
+    end
+    assert !found_deleted_user, "found deleted user: "+active_user_uuid
+
+  end
+
+  test "create new user as non-admin user" do
+    set_user_from_auth :active
+
+    begin
+      user = User.new
+      user.save
+    rescue ArvadosModel::PermissionDeniedError => e
+    end
+    assert (e.message.include? 'PermissionDeniedError'),
+        'Expected PermissionDeniedError'
+  end
+
+  test "setup new user" do
+    set_user_from_auth :admin
+
+    email = 'foo@example.com'
+    openid_prefix = 'http://openid/prefix'
+
+    user = User.create ({uuid: 'zzzzz-tpzed-abcdefghijklmno', email: email})
+
+    vm = VirtualMachine.create
+
+    response = User.setup user, openid_prefix, 'test_repo', vm.uuid
+
+    resp_user = find_obj_in_resp response, 'User'
+    verify_user resp_user, email
+
+    oid_login_perm = find_obj_in_resp response, 'Link', 'arvados#user'
+
+    verify_link oid_login_perm, 'permission', 'can_login', resp_user[:email],
+        resp_user[:uuid]
+
+    assert_equal openid_prefix, oid_login_perm[:properties]['identity_url_prefix'],
+        'expected identity_url_prefix not found for oid_login_perm'
+
+    group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
+    verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
+
+    repo_perm = find_obj_in_resp response, 'Link', 'arvados#repository'
+    verify_link repo_perm, 'permission', 'can_manage', resp_user[:uuid], nil
+
+    vm_perm = find_obj_in_resp response, 'Link', 'arvados#virtualMachine'
+    verify_link vm_perm, 'permission', 'can_login', resp_user[:uuid], vm.uuid
+  end
+
+  test "setup new user with junk in database" do
+    set_user_from_auth :admin
+
+    email = 'foo@example.com'
+    openid_prefix = 'http://openid/prefix'
+
+    user = User.create ({uuid: 'zzzzz-tpzed-abcdefghijklmno', email: email})
+
+    vm = VirtualMachine.create
+
+    # Set up the bogus Link
+    bad_uuid = 'zzzzz-tpzed-xyzxyzxyzxyzxyz'
+
+    resp_link = Link.create ({tail_uuid: email, link_class: 'permission',
+        name: 'can_login', head_uuid: bad_uuid})
+    resp_link.save(validate: false)
+
+    verify_link resp_link, 'permission', 'can_login', email, bad_uuid
+
+    response = User.setup user, openid_prefix, 'test_repo', vm.uuid
+
+    resp_user = find_obj_in_resp response, 'User'
+    verify_user resp_user, email
+
+    oid_login_perm = find_obj_in_resp response, 'Link', 'arvados#user'
+
+    verify_link oid_login_perm, 'permission', 'can_login', resp_user[:email],
+        resp_user[:uuid]
+
+    assert_equal openid_prefix, oid_login_perm[:properties]['identity_url_prefix'],
+        'expected identity_url_prefix not found for oid_login_perm'
+
+    group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
+    verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
+
+    repo_perm = find_obj_in_resp response, 'Link', 'arvados#repository'
+    verify_link repo_perm, 'permission', 'can_manage', resp_user[:uuid], nil
+
+    vm_perm = find_obj_in_resp response, 'Link', 'arvados#virtualMachine'
+    verify_link vm_perm, 'permission', 'can_login', resp_user[:uuid], vm.uuid
+  end
+
+  test "setup new user in multiple steps" do
+    set_user_from_auth :admin
+
+    email = 'foo@example.com'
+    openid_prefix = 'http://openid/prefix'
+
+    user = User.create ({uuid: 'zzzzz-tpzed-abcdefghijklmno', email: email})
+
+    response = User.setup user, openid_prefix
+
+    resp_user = find_obj_in_resp response, 'User'
+    verify_user resp_user, email
+
+    oid_login_perm = find_obj_in_resp response, 'Link', 'arvados#user'
+    verify_link oid_login_perm, 'permission', 'can_login', resp_user[:email],
+        resp_user[:uuid]
+    assert_equal openid_prefix, oid_login_perm[:properties]['identity_url_prefix'],
+        'expected identity_url_prefix not found for oid_login_perm'
+
+    group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
+    verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
+
+    # invoke setup again with repo_name
+    response = User.setup user, openid_prefix, 'test_repo'
+    resp_user = find_obj_in_resp response, 'User', nil
+    verify_user resp_user, email
+    assert_equal user.uuid, resp_user[:uuid], 'expected uuid not found'
+
+    group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
+    verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
+
+    repo_perm = find_obj_in_resp response, 'Link', 'arvados#repository'
+    verify_link repo_perm, 'permission', 'can_manage', resp_user[:uuid], nil
+
+    # invoke setup again with a vm_uuid
+    vm = VirtualMachine.create
+
+    response = User.setup user, openid_prefix, 'test_repo', vm.uuid
+
+    resp_user = find_obj_in_resp response, 'User', nil
+    verify_user resp_user, email
+    assert_equal user.uuid, resp_user[:uuid], 'expected uuid not found'
+
+    group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
+    verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
+
+    repo_perm = find_obj_in_resp response, 'Link', 'arvados#repository'
+    verify_link repo_perm, 'permission', 'can_manage', resp_user[:uuid], nil
+
+    vm_perm = find_obj_in_resp response, 'Link', 'arvados#virtualMachine'
+    verify_link vm_perm, 'permission', 'can_login', resp_user[:uuid], vm.uuid
+  end
+
+  def find_obj_in_resp (response_items, object_type, head_kind=nil)
+    return_obj = nil
+    response_items.each { |x|
+      if !x
+        next
+      end
+
+      if object_type == 'User'
+        if ArvadosModel::resource_class_for_uuid(x['uuid']) == User
+          return_obj = x
+          break
+        end
+      else  # looking for a link
+        if ArvadosModel::resource_class_for_uuid(x['head_uuid']).kind == head_kind
+          return_obj = x
+          break
+        end
+      end
+    }
+    return return_obj
+  end
+
+  def verify_user (resp_user, email)
+    assert_not_nil resp_user, 'expected user object'
+    assert_not_nil resp_user['uuid'], 'expected user object'
+    assert_equal email, resp_user['email'], 'expected email not found'
+
+  end
+
+  def verify_link (link_object, link_class, link_name, tail_uuid, head_uuid)
+    assert_not_nil link_object, "expected link for #{link_class} #{link_name}"
+    assert_not_nil link_object[:uuid],
+        "expected non-nil uuid for link for #{link_class} #{link_name}"
+    assert_equal link_class, link_object[:link_class],
+        "expected link_class not found for #{link_class} #{link_name}"
+    assert_equal link_name, link_object[:name],
+        "expected link_name not found for #{link_class} #{link_name}"
+    assert_equal tail_uuid, link_object[:tail_uuid],
+        "expected tail_uuid not found for #{link_class} #{link_name}"
+    if head_uuid
+      assert_equal head_uuid, link_object[:head_uuid],
+          "expected head_uuid not found for #{link_class} #{link_name}"
+    end
+  end
+
+  def create_user_and_verify_setup_and_notifications (active, new_user_recipients, inactive_recipients, email, ok_to_auto_setup)
+    Rails.configuration.new_user_notification_recipients = new_user_recipients
+    Rails.configuration.new_inactive_user_notification_recipients = inactive_recipients
+
+    assert_equal new_user_recipients, Rails.configuration.new_user_notification_recipients
+    assert_equal inactive_recipients, Rails.configuration.new_inactive_user_notification_recipients
+
+    ActionMailer::Base.deliveries = []
+
+    user = User.new
+    user.first_name = "first_name_for_newly_created_user"
+    user.email = email
+    user.is_active = active
+    user.save!
+
+    # check user setup
+    group = Group.where(name: 'All users').select do |g|
+      g[:uuid].match /-f+$/
+    end.first
+
+    if !Rails.configuration.auto_setup_new_users || !ok_to_auto_setup
+      # verify that the user is not added to "All groups" by auto_setup
+      verify_link_exists false, group[:uuid], user.uuid, 'permission', 'can_read', nil, nil
+
+      # check oid login link not created by auto_setup
+      verify_link_exists false, user.uuid, user.email, 'permission', 'can_login', nil, nil
+    else
+      # verify that auto_setup took place
+      # verify that the user is added to "All groups"
+      verify_link_exists true, group[:uuid], user.uuid, 'permission', 'can_read', nil, nil
+
+      # check oid login link
+      verify_link_exists true, user.uuid, user.email, 'permission', 'can_login', nil, nil
+
+      username = user.email.partition('@')[0] if email
+
+      # check repo
+      repo_names = []
+      if Rails.configuration.auto_setup_new_users_with_repository
+        repos = Repository.where('name like ?', "%#{username}%")
+        assert_not_nil repos, 'repository not found'
+        assert_equal true, repos.any?, 'repository not found'
+        repo_uuids = []
+        repos.each do |repo|
+          repo_uuids << repo[:uuid]
+          repo_names << repo[:name]
+        end
+        if username == 'auto_setup_repo'
+          begin
+            repo_names.delete('auto_setup_repo')
+          ensure
+            assert_equal true, repo_names.any?, 'Repository name for username foo is not unique'
+          end
+        end
+        verify_link_exists true, repo_uuids, user.uuid, 'permission', 'can_manage', nil, nil
+      end
+
+      # if username is existing vm login name, make sure the username used to generate any repo is unique
+      if username == 'auto_setup_vm_login' || username == 'auto_setup_repo'
+        if repo_names.any?
+          assert repo_names.first.start_with? username
+          assert_not_nil /\d$/.match(repo_names.first)
+        end
+      end
+
+      # check vm uuid
+      vm_uuid = Rails.configuration.auto_setup_new_users_with_vm_uuid
+      if vm_uuid
+        verify_link_exists true, vm_uuid, user.uuid, 'permission', 'can_login', 'username', (username == 'auto_setup_repo' ? repo_names.first : username)
+      else
+        verify_link_exists false, vm_uuid, user.uuid, 'permission', 'can_login', 'username', (username == 'auto_setup_repo' ? repo_names.first : username)
+      end
+    end
+
+    # check email notifications
+    new_user_email = nil
+    new_inactive_user_email = nil
+
+    new_user_email_subject = "#{Rails.configuration.email_subject_prefix}New user created notification"
+    if Rails.configuration.auto_setup_new_users
+      new_user_email_subject = (ok_to_auto_setup || active) ?
+                                 "#{Rails.configuration.email_subject_prefix}New user created and setup notification" :
+                                 "#{Rails.configuration.email_subject_prefix}New user created, but not setup notification"
+    end
+
+    ActionMailer::Base.deliveries.each do |d|
+      if d.subject == new_user_email_subject then
+        new_user_email = d
+      elsif d.subject == "#{Rails.configuration.email_subject_prefix}New inactive user notification" then
+        new_inactive_user_email = d
+      end
+    end
+
+    # both active and inactive user creations should result in new user creation notification mails,
+    # if the new user email recipients config parameter is set
+    if not new_user_recipients.empty? then
+      assert_not_nil new_user_email, 'Expected new user email after setup'
+      assert_equal Rails.configuration.user_notifier_email_from, new_user_email.from[0]
+      assert_equal new_user_recipients, new_user_email.to[0]
+      assert_equal new_user_email_subject, new_user_email.subject
+    else
+      assert_nil new_user_email, 'Did not expect new user email after setup'
+    end
+
+    if not active
+      if not inactive_recipients.empty? then
+        assert_not_nil new_inactive_user_email, 'Expected new inactive user email after setup'
+        assert_equal Rails.configuration.user_notifier_email_from, new_inactive_user_email.from[0]
+        assert_equal inactive_recipients, new_inactive_user_email.to[0]
+        assert_equal "#{Rails.configuration.email_subject_prefix}New inactive user notification", new_inactive_user_email.subject
+      else
+        assert_nil new_inactive_user_email, 'Did not expect new inactive user email after setup'
+      end
+    else
+      assert_nil new_inactive_user_email, 'Expected no inactive user email after setting up active user'
+    end
+    ActionMailer::Base.deliveries = []
+
+  end
+
+  def verify_link_exists link_exists, head_uuid, tail_uuid, link_class, link_name, property_name, property_value
+    all_links = Link.where(head_uuid: head_uuid,
+                           tail_uuid: tail_uuid,
+                           link_class: link_class,
+                           name: link_name)
+    assert_equal link_exists, all_links.any?, "Link #{'not' if link_exists} found for #{link_name} #{link_class} #{property_value}"
+    if link_exists && property_name && property_value
+      all_links.each do |link|
+        assert_equal true, all_links.first.properties[property_name].start_with?(property_value), 'Property not found in link'
+      end
+    end
+  end
+
+end
diff --git a/services/api/test/unit/virtual_machine_test.rb b/services/api/test/unit/virtual_machine_test.rb
new file mode 100644 (file)
index 0000000..69258b5
--- /dev/null
@@ -0,0 +1,7 @@
+require 'test_helper'
+
+class VirtualMachineTest < ActiveSupport::TestCase
+  # test "the truth" do
+  #   assert true
+  # end
+end
diff --git a/services/api/test/websocket_runner.rb b/services/api/test/websocket_runner.rb
new file mode 100644 (file)
index 0000000..df72e24
--- /dev/null
@@ -0,0 +1,42 @@
+require 'bundler'
+
+$ARV_API_SERVER_DIR = File.expand_path('../..', __FILE__)
+SERVER_PID_PATH = 'tmp/pids/passenger.3002.pid'
+
+class WebsocketTestRunner < MiniTest::Unit
+  def _system(*cmd)
+    Bundler.with_clean_env do
+      if not system({'ARVADOS_WEBSOCKETS' => 'ws-only', 'RAILS_ENV' => 'test'}, *cmd)
+        raise RuntimeError, "#{cmd[0]} returned exit code #{$?.exitstatus}"
+      end
+    end
+  end
+
+  def _run(args=[])
+    server_pid = Dir.chdir($ARV_API_SERVER_DIR) do |apidir|
+      # Only passenger seems to be able to run the websockets server successfully.
+      _system('passenger', 'start', '-d', '-p3002')
+      timeout = Time.now.tv_sec + 10
+      begin
+        sleep 0.2
+        begin
+          server_pid = IO.read(SERVER_PID_PATH).to_i
+          good_pid = (server_pid > 0) and (Process.kill(0, pid) rescue false)
+        rescue Errno::ENOENT
+          good_pid = false
+        end
+      end while (not good_pid) and (Time.now.tv_sec < timeout)
+      if not good_pid
+        raise RuntimeError, "could not find API server Rails pid"
+      end
+      server_pid
+    end
+    begin
+      super(args)
+    ensure
+      Process.kill('TERM', server_pid)
+    end
+  end
+end
+
+MiniTest::Unit.runner = WebsocketTestRunner.new
diff --git a/services/api/vendor/assets/stylesheets/.gitkeep b/services/api/vendor/assets/stylesheets/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/api/vendor/plugins/.gitkeep b/services/api/vendor/plugins/.gitkeep
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/crunch/crunch-job b/services/crunch/crunch-job
new file mode 120000 (symlink)
index 0000000..ff0e702
--- /dev/null
@@ -0,0 +1 @@
+../../sdk/cli/bin/arv-crunch-job
\ No newline at end of file
diff --git a/services/crunchstat/crunchstat.go b/services/crunchstat/crunchstat.go
new file mode 100644 (file)
index 0000000..0cfdfa8
--- /dev/null
@@ -0,0 +1,469 @@
+package main
+
+import (
+       "bufio"
+       "bytes"
+       "errors"
+       "flag"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "log"
+       "os"
+       "os/exec"
+       "os/signal"
+       "strconv"
+       "strings"
+       "syscall"
+       "time"
+)
+
+/*
+#include <unistd.h>
+#include <sys/types.h>
+#include <pwd.h>
+#include <stdlib.h>
+*/
+import "C"
+
+// The above block of magic allows us to look up user_hz via _SC_CLK_TCK.
+
+// Cgroup identifies the target container's cgroup: the cgroup
+// filesystem root, the parent cgroup path, and the container id.
+type Cgroup struct {
+       root   string
+       parent string
+       cid    string
+}
+
+// CopyPipeToChan forwards each line read from in to out, then signals
+// done once in reaches EOF (or a read error ends the scan).
+func CopyPipeToChan(in io.Reader, out chan string, done chan<- bool) {
+       s := bufio.NewScanner(in)
+       for s.Scan() {
+               out <- s.Text()
+       }
+       done <- true
+}
+
+// CopyChanToPipe writes each string received on in as a line on out,
+// returning when in is closed.
+func CopyChanToPipe(in <-chan string, out io.Writer) {
+       for s := range in {
+               fmt.Fprintln(out, s)
+       }
+}
+
+// logChan, when non-nil, receives every crunchstat log line.
+var logChan chan string
+// LogPrintf formats a message with a "crunchstat: " prefix and sends it
+// to logChan. It is a no-op until logChan has been initialized.
+func LogPrintf(format string, args ...interface{}) {
+       if logChan == nil {
+               return
+       }
+       logChan <- fmt.Sprintf("crunchstat: " + format, args...)
+}
+
+// ReadAllOrWarn reads all of in, logging a warning (via LogPrintf) on
+// failure. It returns whatever was read along with the error, if any.
+func ReadAllOrWarn(in *os.File) ([]byte, error) {
+       content, err := ioutil.ReadAll(in)
+       if err != nil {
+               LogPrintf("read %s: %s", in.Name(), err)
+       }
+       return content, err
+}
+
+// reportedStatFile remembers which path was last used for each stat, so
+// OpenStatFile only logs when the source file changes.
+var reportedStatFile = map[string]string{}
+
+// Open the cgroup stats file in /sys/fs corresponding to the target
+// cgroup, and return an *os.File. If no stats file is available,
+// return nil.
+//
+// TODO: Instead of trying all options, choose a process in the
+// container, and read /proc/PID/cgroup to determine the appropriate
+// cgroup root for the given statgroup. (This will avoid falling back
+// to host-level stats during container setup and teardown.)
+func OpenStatFile(cgroup Cgroup, statgroup string, stat string) (*os.File, error) {
+       // Candidate locations, most specific (per-container) first,
+       // falling back to host-level files.
+       var paths = []string{
+               fmt.Sprintf("%s/%s/%s/%s/%s", cgroup.root, statgroup, cgroup.parent, cgroup.cid, stat),
+               fmt.Sprintf("%s/%s/%s/%s", cgroup.root, cgroup.parent, cgroup.cid, stat),
+               fmt.Sprintf("%s/%s/%s", cgroup.root, statgroup, stat),
+               fmt.Sprintf("%s/%s", cgroup.root, stat),
+       }
+       var path string
+       var file *os.File
+       var err error
+       for _, path = range paths {
+               file, err = os.Open(path)
+               if err == nil {
+                       break
+               } else {
+                       // Empty path marks "nothing found" for the log below.
+                       path = ""
+               }
+       }
+       if pathWas, ok := reportedStatFile[stat]; !ok || pathWas != path {
+               // Log whenever we start using a new/different cgroup
+               // stat file for a given statistic. This typically
+               // happens 1 to 3 times per statistic, depending on
+               // whether we happen to collect stats [a] before any
+               // processes have been created in the container and
+               // [b] after all contained processes have exited.
+               reportedStatFile[stat] = path
+               if path == "" {
+                       LogPrintf("did not find stats file: stat %s, statgroup %s, cid %s, parent %s, root %s", stat, statgroup, cgroup.cid, cgroup.parent, cgroup.root)
+               } else {
+                       LogPrintf("reading stats from %s", path)
+               }
+       }
+       return file, err
+}
+
+// GetContainerNetStats returns the contents of /proc/PID/net/dev for
+// the first process found in the container's cpuacct cgroup. Network
+// counters are per-namespace, so any one process in the container
+// suffices.
+func GetContainerNetStats(cgroup Cgroup) (io.Reader, error) {
+       procsFile, err := OpenStatFile(cgroup, "cpuacct", "cgroup.procs")
+       if err != nil {
+               return nil, err
+       }
+       defer procsFile.Close()
+       reader := bufio.NewScanner(procsFile)
+       for reader.Scan() {
+               taskPid := reader.Text()
+               statsFilename := fmt.Sprintf("/proc/%s/net/dev", taskPid)
+               stats, err := ioutil.ReadFile(statsFilename)
+               if err != nil {
+                       // Process may have exited; try the next one.
+                       LogPrintf("read %s: %s", statsFilename, err)
+                       continue
+               }
+               return strings.NewReader(string(stats)), nil
+       }
+       return nil, errors.New("Could not read stats for any proc in container")
+}
+
+// IoSample is one timestamped reading of transmitted/received (or
+// written/read) byte counters for a single device or interface.
+type IoSample struct {
+       sampleTime time.Time
+       txBytes    int64
+       rxBytes    int64
+}
+
+// DoBlkIoStats logs per-device block I/O byte counters from
+// blkio.io_service_bytes, plus deltas against lastSample, which it
+// updates in place for the next poll.
+func DoBlkIoStats(cgroup Cgroup, lastSample map[string]IoSample) {
+       c, err := OpenStatFile(cgroup, "blkio", "blkio.io_service_bytes")
+       if err != nil {
+               return
+       }
+       defer c.Close()
+       b := bufio.NewScanner(c)
+       var sampleTime = time.Now()
+       newSamples := make(map[string]IoSample)
+       for b.Scan() {
+               var device, op string
+               var val int64
+               // Lines look like "<major:minor> <op> <bytes>"; skip others.
+               if _, err := fmt.Sscanf(string(b.Text()), "%s %s %d", &device, &op, &val); err != nil {
+                       continue
+               }
+               var thisSample IoSample
+               var ok bool
+               if thisSample, ok = newSamples[device]; !ok {
+                       // -1 marks "counter not seen yet" for this device.
+                       thisSample = IoSample{sampleTime, -1, -1}
+               }
+               switch op {
+               case "Read":
+                       thisSample.rxBytes = val
+               case "Write":
+                       thisSample.txBytes = val
+               }
+               newSamples[device] = thisSample
+       }
+       for dev, sample := range newSamples {
+               // Only report devices with both counters present.
+               if sample.txBytes < 0 || sample.rxBytes < 0 {
+                       continue
+               }
+               delta := ""
+               if prev, ok := lastSample[dev]; ok {
+                       delta = fmt.Sprintf(" -- interval %.4f seconds %d write %d read",
+                               sample.sampleTime.Sub(prev.sampleTime).Seconds(),
+                               sample.txBytes-prev.txBytes,
+                               sample.rxBytes-prev.rxBytes)
+               }
+               LogPrintf("blkio:%s %d write %d read%s", dev, sample.txBytes, sample.rxBytes, delta)
+               lastSample[dev] = sample
+       }
+}
+
+// MemSample is one timestamped snapshot of memory.stat counters.
+type MemSample struct {
+       sampleTime time.Time
+       memStat    map[string]int64
+}
+
+// DoMemoryStats logs selected fields (cache, swap, pgmajfault, rss)
+// from the container's memory.stat file.
+func DoMemoryStats(cgroup Cgroup) {
+       c, err := OpenStatFile(cgroup, "memory", "memory.stat")
+       if err != nil {
+               return
+       }
+       defer c.Close()
+       b := bufio.NewScanner(c)
+       thisSample := MemSample{time.Now(), make(map[string]int64)}
+       wantStats := [...]string{"cache", "swap", "pgmajfault", "rss"}
+       for b.Scan() {
+               var stat string
+               var val int64
+               if _, err := fmt.Sscanf(string(b.Text()), "%s %d", &stat, &val); err != nil {
+                       continue
+               }
+               thisSample.memStat[stat] = val
+       }
+       var outstat bytes.Buffer
+       for _, key := range wantStats {
+               if val, ok := thisSample.memStat[key]; ok {
+                       outstat.WriteString(fmt.Sprintf(" %d %s", val, key))
+               }
+       }
+       LogPrintf("mem%s", outstat.String())
+}
+
+// DoNetworkStats logs per-interface tx/rx byte counters parsed from the
+// container's /proc/PID/net/dev, plus deltas against lastSample, which
+// it updates in place for the next poll. The loopback interface is
+// skipped.
+func DoNetworkStats(cgroup Cgroup, lastSample map[string]IoSample) {
+       sampleTime := time.Now()
+       stats, err := GetContainerNetStats(cgroup)
+       if err != nil {
+               return
+       }
+
+       scanner := bufio.NewScanner(stats)
+       for scanner.Scan() {
+               var ifName string
+               var rx, tx int64
+               words := strings.Fields(scanner.Text())
+               // /proc/net/dev data rows have 17 whitespace-separated fields.
+               if len(words) != 17 {
+                       // Skip lines with wrong format
+                       continue
+               }
+               ifName = strings.TrimRight(words[0], ":")
+               if ifName == "lo" || ifName == "" {
+                       // Skip loopback interface and lines with wrong format
+                       continue
+               }
+               // Field 1 is rx bytes, field 9 is tx bytes.
+               if tx, err = strconv.ParseInt(words[9], 10, 64); err != nil {
+                       continue
+               }
+               if rx, err = strconv.ParseInt(words[1], 10, 64); err != nil {
+                       continue
+               }
+               nextSample := IoSample{}
+               nextSample.sampleTime = sampleTime
+               nextSample.txBytes = tx
+               nextSample.rxBytes = rx
+               var delta string
+               if prev, ok := lastSample[ifName]; ok {
+                       interval := nextSample.sampleTime.Sub(prev.sampleTime).Seconds()
+                       delta = fmt.Sprintf(" -- interval %.4f seconds %d tx %d rx",
+                               interval,
+                               tx-prev.txBytes,
+                               rx-prev.rxBytes)
+               }
+               LogPrintf("net:%s %d tx %d rx%s", ifName, tx, rx, delta)
+               lastSample[ifName] = nextSample
+       }
+}
+
+// CpuSample is one timestamped reading of cumulative user/system CPU
+// seconds, plus the CPU count available to the container.
+type CpuSample struct {
+       hasData    bool // to distinguish the zero value from real data
+       sampleTime time.Time
+       user       float64
+       sys        float64
+       cpus       int64
+}
+
+// Return the number of CPUs available in the container. Return 0 if
+// we can't figure out the real number of CPUs.
+func GetCpuCount(cgroup Cgroup) int64 {
+       cpusetFile, err := OpenStatFile(cgroup, "cpuset", "cpuset.cpus")
+       if err != nil {
+               return 0
+       }
+       defer cpusetFile.Close()
+       b, err := ReadAllOrWarn(cpusetFile)
+       if err != nil {
+               // The original ignored this error and parsed a possibly
+               // partial buffer; the warning is already logged by
+               // ReadAllOrWarn, so just report "unknown".
+               return 0
+       }
+       // cpuset.cpus looks like "0-3,8,10-11": comma-separated single
+       // CPUs or inclusive ranges.
+       sp := strings.Split(string(b), ",")
+       cpus := int64(0)
+       for _, v := range sp {
+               var min, max int64
+               n, _ := fmt.Sscanf(v, "%d-%d", &min, &max)
+               if n == 2 {
+                       cpus += (max - min) + 1
+               } else {
+                       cpus += 1
+               }
+       }
+       return cpus
+}
+
+// DoCpuStats logs cumulative user/system CPU seconds from
+// cpuacct.stat (ticks converted via _SC_CLK_TCK), plus deltas against
+// *lastSample, which it overwrites for the next poll.
+func DoCpuStats(cgroup Cgroup, lastSample *CpuSample) {
+       statFile, err := OpenStatFile(cgroup, "cpuacct", "cpuacct.stat")
+       if err != nil {
+               return
+       }
+       defer statFile.Close()
+       b, err := ReadAllOrWarn(statFile)
+       if err != nil {
+               return
+       }
+
+       nextSample := CpuSample{true, time.Now(), 0, 0, GetCpuCount(cgroup)}
+       var userTicks, sysTicks int64
+       fmt.Sscanf(string(b), "user %d\nsystem %d", &userTicks, &sysTicks)
+       // Ticks-per-second comes from sysconf via the cgo block above.
+       user_hz := float64(C.sysconf(C._SC_CLK_TCK))
+       nextSample.user = float64(userTicks) / user_hz
+       nextSample.sys = float64(sysTicks) / user_hz
+
+       delta := ""
+       if lastSample.hasData {
+               delta = fmt.Sprintf(" -- interval %.4f seconds %.4f user %.4f sys",
+                       nextSample.sampleTime.Sub(lastSample.sampleTime).Seconds(),
+                       nextSample.user-lastSample.user,
+                       nextSample.sys-lastSample.sys)
+       }
+       LogPrintf("cpu %.4f user %.4f sys %d cpus%s",
+               nextSample.user, nextSample.sys, nextSample.cpus, delta)
+       *lastSample = nextSample
+}
+
+// PollCgroupStats emits memory, CPU, block-I/O and network stats every
+// `poll` milliseconds (and once immediately) until a value arrives on
+// stop_poll_chan.
+func PollCgroupStats(cgroup Cgroup, poll int64, stop_poll_chan <-chan bool) {
+       var lastNetSample = map[string]IoSample{}
+       var lastDiskSample = map[string]IoSample{}
+       var lastCpuSample = CpuSample{}
+
+       poll_chan := make(chan bool, 1)
+       go func() {
+               // Send periodic poll events.
+               poll_chan <- true
+               for {
+                       time.Sleep(time.Duration(poll) * time.Millisecond)
+                       poll_chan <- true
+               }
+       }()
+       for {
+               select {
+               case <-stop_poll_chan:
+                       return
+               case <-poll_chan:
+                       // Emit stats, then select again.
+               }
+               DoMemoryStats(cgroup)
+               DoCpuStats(cgroup, &lastCpuSample)
+               DoBlkIoStats(cgroup, lastDiskSample)
+               DoNetworkStats(cgroup, lastNetSample)
+       }
+}
+
+// run parses flags, optionally starts the given child command with its
+// stderr funneled through logChan, waits for the container cid file to
+// appear, starts cgroup stat polling, and finally waits for the child
+// to exit, returning its error (if any).
+func run(logger *log.Logger) error {
+
+       var (
+               cgroup_root    string
+               cgroup_parent  string
+               cgroup_cidfile string
+               wait           int64
+               poll           int64
+       )
+
+       flag.StringVar(&cgroup_root, "cgroup-root", "", "Root of cgroup tree")
+       flag.StringVar(&cgroup_parent, "cgroup-parent", "", "Name of container parent under cgroup")
+       flag.StringVar(&cgroup_cidfile, "cgroup-cid", "", "Path to container id file")
+       flag.Int64Var(&wait, "wait", 5, "Maximum time (in seconds) to wait for cid file to show up")
+       flag.Int64Var(&poll, "poll", 1000, "Polling frequency, in milliseconds")
+
+       flag.Parse()
+
+       if cgroup_root == "" {
+               logger.Fatal("Must provide -cgroup-root")
+       }
+
+       logChan = make(chan string, 1)
+       defer close(logChan)
+       finish_chan := make(chan bool)
+       defer close(finish_chan)
+
+       go CopyChanToPipe(logChan, os.Stderr)
+
+       var cmd *exec.Cmd
+
+       if len(flag.Args()) > 0 {
+               // Set up subprocess
+               cmd = exec.Command(flag.Args()[0], flag.Args()[1:]...)
+
+               logger.Print("Running ", flag.Args())
+
+               // Child process will use our stdin and stdout pipes
+               // (we close our copies below)
+               cmd.Stdin = os.Stdin
+               cmd.Stdout = os.Stdout
+
+               // Forward SIGINT and SIGTERM to inner process
+               term := make(chan os.Signal, 1)
+               go func(sig <-chan os.Signal) {
+                       catch := <-sig
+                       if cmd.Process != nil {
+                               cmd.Process.Signal(catch)
+                       }
+                       logger.Print("caught signal: ", catch)
+               }(term)
+               signal.Notify(term, syscall.SIGTERM)
+               signal.Notify(term, syscall.SIGINT)
+
+               // Funnel stderr through our channel
+               stderr_pipe, err := cmd.StderrPipe()
+               if err != nil {
+                       logger.Fatal(err)
+               }
+               go CopyPipeToChan(stderr_pipe, logChan, finish_chan)
+
+               // Run subprocess
+               if err := cmd.Start(); err != nil {
+                       logger.Fatal(err)
+               }
+
+               // Close stdin/stdout in this (parent) process
+               os.Stdin.Close()
+               os.Stdout.Close()
+       }
+
+       // Read the cid file
+       var container_id string
+       if cgroup_cidfile != "" {
+               // wait up to 'wait' seconds for the cid file to appear
+               ok := false
+               var i time.Duration
+               for i = 0; i < time.Duration(wait)*time.Second; i += (100 * time.Millisecond) {
+                       cid, err := ioutil.ReadFile(cgroup_cidfile)
+                       if err == nil && len(cid) > 0 {
+                               ok = true
+                               container_id = string(cid)
+                               break
+                       }
+                       time.Sleep(100 * time.Millisecond)
+               }
+               if !ok {
+                       logger.Printf("Could not read cid file %s", cgroup_cidfile)
+               }
+       }
+
+       stop_poll_chan := make(chan bool, 1)
+       cgroup := Cgroup{cgroup_root, cgroup_parent, container_id}
+       go PollCgroupStats(cgroup, poll, stop_poll_chan)
+
+       // When the child exits, tell the polling goroutine to stop.
+       defer func() { stop_poll_chan <- true }()
+
+       // NOTE(review): if no command arguments were given, cmd is nil
+       // here, so <-finish_chan blocks forever (CopyPipeToChan was never
+       // started) and cmd.Wait() would panic — confirm callers always
+       // pass a command.
+       // Wait for CopyPipeToChan to consume child's stderr pipe
+       <-finish_chan
+
+       return cmd.Wait()
+}
+
+// main runs the tool and propagates the child command's exit status so
+// crunchstat is transparent to whoever invoked it.
+func main() {
+       logger := log.New(os.Stderr, "crunchstat: ", 0)
+       if err := run(logger); err != nil {
+               if exiterr, ok := err.(*exec.ExitError); ok {
+                       // The program has exited with an exit code != 0
+
+                       // This works on both Unix and
+                       // Windows. Although package syscall is
+                       // generally platform dependent, WaitStatus is
+                       // defined for both Unix and Windows and in
+                       // both cases has an ExitStatus() method with
+                       // the same signature.
+                       if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
+                               os.Exit(status.ExitStatus())
+                       }
+               } else {
+                       logger.Fatalf("cmd.Wait: %v", err)
+               }
+       }
+}
diff --git a/services/crunchstat/crunchstat_test.go b/services/crunchstat/crunchstat_test.go
new file mode 100644 (file)
index 0000000..48988a1
--- /dev/null
@@ -0,0 +1,50 @@
+package main
+
+import (
+       "os"
+       "regexp"
+       "testing"
+)
+
+func TestReadAllOrWarnFail(t *testing.T) {
+       logChan = make(chan string)
+       go func() {
+               defer close(logChan)
+               // The special file /proc/self/mem can be opened for
+               // reading, but reading from byte 0 returns an error.
+               f, err := os.Open("/proc/self/mem")
+               if err != nil {
+                       t.Fatalf("Opening /proc/self/mem: %s", err)
+               }
+               if x, err := ReadAllOrWarn(f); err == nil {
+                       t.Fatalf("Expected error, got %v", x)
+               }
+       }()
+       if _, ok := <-logChan; !ok {
+               t.Fatalf("Expected error message about nonexistent file")
+       }
+       if msg, ok := <-logChan; ok {
+               t.Fatalf("Expected channel to close, got %s", msg)
+       }
+}
+
+func TestReadAllOrWarnSuccess(t *testing.T) {
+       logChan = make(chan string)
+       go func() {
+               defer close(logChan)
+               f, err := os.Open("./crunchstat_test.go")
+               if err != nil {
+                       t.Fatalf("Opening ./crunchstat_test.go: %s", err)
+               }
+               data, err := ReadAllOrWarn(f)
+               if err != nil {
+                       t.Fatalf("got error %s", err)
+               }
+               if matched, err := regexp.MatchString("^package main\n", string(data)); err != nil || !matched {
+                       t.Fatalf("data failed regexp: %s", err)
+               }
+       }()
+       if msg, ok := <-logChan; ok {
+               t.Fatalf("Expected channel to close, got %s", msg)
+       }
+}
diff --git a/services/datamanager/experimental/datamanager.py b/services/datamanager/experimental/datamanager.py
new file mode 100755 (executable)
index 0000000..8207bdc
--- /dev/null
@@ -0,0 +1,887 @@
+#! /usr/bin/env python
+
+import arvados
+
+import argparse
+import cgi
+import csv
+import json
+import logging
+import math
+import pprint
+import re
+import threading
+import urllib2
+
+from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
+from collections import defaultdict, Counter
+from functools import partial
+from operator import itemgetter
+from SocketServer import ThreadingMixIn
+
+arv = arvados.api('v1')
+
+# Adapted from http://stackoverflow.com/questions/4180980/formatting-data-quantity-capacity-as-string
+byteunits = ('B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB')
+def fileSizeFormat(value):
+  exponent = 0 if value == 0 else int(math.log(value, 1024))
+  return "%7.2f %-3s" % (float(value) / pow(1024, exponent),
+                         byteunits[exponent])
+
+def percentageFloor(x):
+  """ Returns a float which is the input rounded down to the nearest 0.01.
+
+e.g. percentageFloor(0.941354) = 0.94
+"""
+  return math.floor(x*100) / 100.0
+
+
+def byteSizeFromValidUuid(valid_uuid):
+  # A valid block uuid is "<hash>+<size>[+hints...]"; field 1 is the size.
+  return int(valid_uuid.split('+')[1])
+
+class maxdict(dict):
+  """A dictionary that holds the largest value entered for each key."""
+  def addValue(self, key, value):
+    # dict.get returns None for a missing key; max(None, value) keeps
+    # value under Python 2 comparison rules.
+    dict.__setitem__(self, key, max(dict.get(self, key), value))
+  def addValues(self, kv_pairs):
+    # Fold in an iterable of (key, value) pairs.
+    for key,value in kv_pairs:
+      self.addValue(key, value)
+  def addDict(self, d):
+    # Fold in every entry of another mapping.
+    self.addValues(d.items())
+
+class CollectionInfo:
+  DEFAULT_PERSISTER_REPLICATION_LEVEL=2
+  all_by_uuid = {}
+
+  def __init__(self, uuid):
+    if CollectionInfo.all_by_uuid.has_key(uuid):
+      raise ValueError('Collection for uuid "%s" already exists.' % uuid)
+    self.uuid = uuid
+    self.block_uuids = set()  # uuids of keep blocks in this collection
+    self.reader_uuids = set()  # uuids of users who can read this collection
+    self.persister_uuids = set()  # uuids of users who want this collection saved
+    # map from user uuid to replication level they desire
+    self.persister_replication = maxdict()
+
+    # The whole api response in case we need anything else later.
+    self.api_response = []
+    CollectionInfo.all_by_uuid[uuid] = self
+
+  def byteSize(self):
+    return sum(map(byteSizeFromValidUuid, self.block_uuids))
+
+  def __str__(self):
+    return ('CollectionInfo uuid: %s\n'
+            '               %d block(s) containing %s\n'
+            '               reader_uuids: %s\n'
+            '               persister_replication: %s' %
+            (self.uuid,
+             len(self.block_uuids),
+             fileSizeFormat(self.byteSize()),
+             pprint.pformat(self.reader_uuids, indent = 15),
+             pprint.pformat(self.persister_replication, indent = 15)))
+
+  @staticmethod
+  def get(uuid):
+    if not CollectionInfo.all_by_uuid.has_key(uuid):
+      CollectionInfo(uuid)
+    return CollectionInfo.all_by_uuid[uuid]
+
+
+def extractUuid(candidate):
+  """ Returns a canonical (hash+size) uuid from a valid uuid, or None if candidate is not a valid uuid."""
+  match = re.match('([0-9a-fA-F]{32}\+[0-9]+)(\+[^+]+)*$', candidate)
+  return match and match.group(1)
+
+def checkUserIsAdmin():
+  """Warn (or exit, per --require-admin-user) if the current API user
+  is not an admin.
+
+  NOTE(review): relies on module globals `arv`, `log` and `args` being
+  initialized before this is called — `log` and `args` are defined
+  outside this chunk; confirm initialization order.
+  """
+  current_user = arv.users().current().execute()
+
+  if not current_user['is_admin']:
+    log.warning('Current user %s (%s - %s) does not have '
+                'admin access and will not see much of the data.',
+                current_user['full_name'],
+                current_user['email'],
+                current_user['uuid'])
+    if args.require_admin_user:
+      log.critical('Exiting, rerun with --no-require-admin-user '
+                   'if you wish to continue.')
+      exit(1)
+
+def buildCollectionsList():
+  """Return the list of collection uuids to examine: either the single
+  --uuid argument, or up to --max-api-results collections from the API.
+  """
+  if args.uuid:
+    return [args.uuid,]
+  else:
+    collections_list_response = arv.collections().list(limit=args.max_api_results).execute()
+
+    print ('Returned %d of %d collections.' %
+           (len(collections_list_response['items']),
+            collections_list_response['items_available']))
+
+    return [item['uuid'] for item in collections_list_response['items']]
+
+
+def readCollections(collection_uuids):
+  for collection_uuid in collection_uuids:
+    collection_block_uuids = set()
+    collection_response = arv.collections().get(uuid=collection_uuid).execute()
+    collection_info = CollectionInfo.get(collection_uuid)
+    collection_info.api_response = collection_response
+    manifest_lines = collection_response['manifest_text'].split('\n')
+
+    if args.verbose:
+      print 'Manifest text for %s:' % collection_uuid
+      pprint.pprint(manifest_lines)
+
+    for manifest_line in manifest_lines:
+      if manifest_line:
+        manifest_tokens = manifest_line.split(' ')
+        if args.verbose:
+          print 'manifest tokens: ' + pprint.pformat(manifest_tokens)
+        stream_name = manifest_tokens[0]
+
+        line_block_uuids = set(filter(None,
+                                      [extractUuid(candidate)
+                                       for candidate in manifest_tokens[1:]]))
+        collection_info.block_uuids.update(line_block_uuids)
+
+        # file_tokens = [token
+        #                for token in manifest_tokens[1:]
+        #                if extractUuid(token) is None]
+
+        # # Sort file tokens by start position in case they aren't already
+        # file_tokens.sort(key=lambda file_token: int(file_token.split(':')[0]))
+
+        # if args.verbose:
+        #   print 'line_block_uuids: ' + pprint.pformat(line_block_uuids)
+        #   print 'file_tokens: ' + pprint.pformat(file_tokens)
+
+
+def readLinks():
+  """Fetch the links pointing at each known collection and record
+  reader uuids (permission links) and persister uuids/replication
+  levels (resources links) on each CollectionInfo.
+  """
+  link_classes = set()
+
+  for collection_uuid,collection_info in CollectionInfo.all_by_uuid.items():
+    # TODO(misha): We may not be seeing all the links, but since items
+    # available does not return an accurate number, I don't know how
+    # to confirm that we saw all of them.
+    collection_links_response = arv.links().list(where={'head_uuid':collection_uuid}).execute()
+    link_classes.update([link['link_class'] for link in collection_links_response['items']])
+    for link in collection_links_response['items']:
+      if link['link_class'] == 'permission':
+        collection_info.reader_uuids.add(link['tail_uuid'])
+      elif link['link_class'] == 'resources':
+        # Desired replication level, defaulting when unspecified.
+        replication_level = link['properties'].get(
+          'replication',
+          CollectionInfo.DEFAULT_PERSISTER_REPLICATION_LEVEL)
+        collection_info.persister_replication.addValue(
+          link['tail_uuid'],
+          replication_level)
+        collection_info.persister_uuids.add(link['tail_uuid'])
+
+  print 'Found the following link classes:'
+  pprint.pprint(link_classes)
+
+def reportMostPopularCollections():
+  """Print the 10 collections ranked highest by a popularity score of
+  reader count plus 10x persister count.
+  """
+  most_popular_collections = sorted(
+    CollectionInfo.all_by_uuid.values(),
+    key=lambda info: len(info.reader_uuids) + 10 * len(info.persister_replication),
+    reverse=True)[:10]
+
+  print 'Most popular Collections:'
+  for collection_info in most_popular_collections:
+    print collection_info
+
+
+def buildMaps():
+  """Populate the module-level block<->user cross-reference maps from
+  the CollectionInfo registry.
+
+  NOTE(review): writes to block_to_collections, block_to_readers, etc.,
+  which are defined outside this chunk — presumably defaultdict(set)
+  and maxdict-valued; confirm at their definition site.
+  """
+  for collection_uuid,collection_info in CollectionInfo.all_by_uuid.items():
+    # Add the block holding the manifest itself for all calculations
+    block_uuids = collection_info.block_uuids.union([collection_uuid,])
+    for block_uuid in block_uuids:
+      block_to_collections[block_uuid].add(collection_uuid)
+      block_to_readers[block_uuid].update(collection_info.reader_uuids)
+      block_to_persisters[block_uuid].update(collection_info.persister_uuids)
+      block_to_persister_replication[block_uuid].addDict(
+        collection_info.persister_replication)
+    for reader_uuid in collection_info.reader_uuids:
+      reader_to_collections[reader_uuid].add(collection_uuid)
+      reader_to_blocks[reader_uuid].update(block_uuids)
+    for persister_uuid in collection_info.persister_uuids:
+      persister_to_collections[persister_uuid].add(collection_uuid)
+      persister_to_blocks[persister_uuid].update(block_uuids)
+
+
+def itemsByValueLength(original):
+  return sorted(original.items(),
+                key=lambda item:len(item[1]),
+                reverse=True)
+
+
+def reportBusiestUsers():
+  busiest_readers = itemsByValueLength(reader_to_collections)
+  print 'The busiest readers are:'
+  for reader,collections in busiest_readers:
+    print '%s reading %d collections.' % (reader, len(collections))
+  busiest_persisters = itemsByValueLength(persister_to_collections)
+  print 'The busiest persisters are:'
+  for persister,collections in busiest_persisters:
+    print '%s reading %d collections.' % (persister, len(collections))
+
+
+def blockDiskUsage(block_uuid):
+  """Returns the disk usage of a block given its uuid.
+
+  Will return 0 before reading the contents of the keep servers.
+  """
+  return byteSizeFromValidUuid(block_uuid) * block_to_replication[block_uuid]
+
+def blockPersistedUsage(user_uuid, block_uuid):
+  # Bytes this user is responsible for: block size times the replication
+  # level that user requested for the block (0 if none requested).
+  return (byteSizeFromValidUuid(block_uuid) *
+          block_to_persister_replication[block_uuid].get(user_uuid, 0))
+
+# Memo table keyed on the stringified (level, count) histogram.
+memo_computeWeightedReplicationCosts = {}
+def computeWeightedReplicationCosts(replication_levels):
+  """Computes the relative cost of varied replication levels.
+
+  replication_levels: a tuple of integers representing the desired
+  replication level. If n users want a replication level of x then x
+  should appear n times in replication_levels.
+
+  Returns a dictionary from replication level to cost.
+
+  The basic thinking is that the cost of replicating at level x should
+  be shared by everyone who wants replication of level x or higher.
+
+  For example, if we have two users who want 1 copy, one user who
+  wants 3 copies and two users who want 6 copies:
+  the input would be [1, 1, 3, 6, 6] (or any permutation)
+
+  The cost of the first copy is shared by all 5 users, so they each
+  pay 1 copy / 5 users = 0.2.
+  The cost of the second and third copies shared by 3 users, so they
+  each pay 2 copies / 3 users = 0.67 (plus the above costs)
+  The cost of the fourth, fifth and sixth copies is shared by two
+  users, so they each pay 3 copies / 2 users = 1.5 (plus the above costs)
+
+  Here are some other examples:
+  computeWeightedReplicationCosts([1,]) -> {1:1.0}
+  computeWeightedReplicationCosts([2,]) -> {2:2.0}
+  computeWeightedReplicationCosts([1,1]) -> {1:0.5}
+  computeWeightedReplicationCosts([2,2]) -> {2:1.0}
+  computeWeightedReplicationCosts([1,2]) -> {1:0.5,2:1.5}
+  computeWeightedReplicationCosts([1,3]) -> {1:0.5,3:2.5}
+  computeWeightedReplicationCosts([1,3,6,6,10]) -> {1:0.2,3:0.7,6:1.7,10:5.7}
+  """
+  replication_level_counts = sorted(Counter(replication_levels).items())
+
+  memo_key = str(replication_level_counts)
+
+  if not memo_key in memo_computeWeightedReplicationCosts:
+    last_level = 0
+    current_cost = 0
+    total_interested = float(sum(map(itemgetter(1), replication_level_counts)))
+    cost_for_level = {}
+    for replication_level, count in replication_level_counts:
+      copies_added = replication_level - last_level
+      # compute marginal cost from last level and add it to the last cost
+      current_cost += copies_added / total_interested
+      cost_for_level[replication_level] = current_cost
+      # update invariants
+      last_level = replication_level
+      total_interested -= count
+    memo_computeWeightedReplicationCosts[memo_key] = cost_for_level
+
+  return memo_computeWeightedReplicationCosts[memo_key]
+
def blockPersistedWeightedUsage(user_uuid, block_uuid):
  """Return user_uuid's weighted share of the bytes persisted for block_uuid.

  The block's size is scaled by the cost this user bears at their own
  requested replication level, where costs are shared among persisters
  as computed by computeWeightedReplicationCosts().
  """
  replication_by_persister = block_to_persister_replication[block_uuid]
  costs = computeWeightedReplicationCosts(replication_by_persister.values())
  user_cost = costs[replication_by_persister[user_uuid]]
  return byteSizeFromValidUuid(block_uuid) * user_cost
+
+
def computeUserStorageUsage():
  """Fill in user_to_usage from the reader/persister block maps.

  For each user this records: total readable bytes, readable bytes
  weighted by how many readers share each block, total persisted bytes,
  and persisted bytes weighted by replication cost sharing.
  """
  for user, blocks in reader_to_blocks.items():
    usage = user_to_usage[user]
    usage[UNWEIGHTED_READ_SIZE_COL] = sum(
        byteSizeFromValidUuid(block) for block in blocks)
    # Each readable block's size is split evenly among all its readers.
    usage[WEIGHTED_READ_SIZE_COL] = sum(
        float(byteSizeFromValidUuid(block)) / len(block_to_readers[block])
        for block in blocks)
  for user, blocks in persister_to_blocks.items():
    usage = user_to_usage[user]
    usage[UNWEIGHTED_PERSIST_SIZE_COL] = sum(
        blockPersistedUsage(user, block) for block in blocks)
    usage[WEIGHTED_PERSIST_SIZE_COL] = sum(
        blockPersistedWeightedUsage(user, block) for block in blocks)
+
def printUserStorageUsage():
  """Print a one-line usage summary for every user seen in the data."""
  print ('user: unweighted readable block size, weighted readable block size, '
         'unweighted persisted block size, weighted persisted block size:')
  columns = (UNWEIGHTED_READ_SIZE_COL, WEIGHTED_READ_SIZE_COL,
             UNWEIGHTED_PERSIST_SIZE_COL, WEIGHTED_PERSIST_SIZE_COL)
  for user, usage in user_to_usage.items():
    formatted = tuple(fileSizeFormat(usage[column]) for column in columns)
    print ('%s: %s %s %s %s' % ((user,) + formatted))
+
def logUserStorageUsage():
  """Create one workbench log entry per user recording their usage."""
  for user, usage in user_to_usage.items():
    # user could actually represent a user or a group. We don't set
    # the object_type field since we don't know which we have.
    body = {
      'object_uuid': user,
      'event_type': args.user_storage_log_event_type,
      'properties': {
        'read_collections_total_bytes': usage[UNWEIGHTED_READ_SIZE_COL],
        'read_collections_weighted_bytes': usage[WEIGHTED_READ_SIZE_COL],
        'persisted_collections_total_bytes': usage[UNWEIGHTED_PERSIST_SIZE_COL],
        'persisted_collections_weighted_bytes': usage[WEIGHTED_PERSIST_SIZE_COL],
      },
    }
    # TODO(misha): Confirm that this will throw an exception if it
    # fails to create the log entry.
    arv.logs().create(body=body).execute()
+
def getKeepServers():
  """Return [host, port] pairs for every keep disk the API reports."""
  keep_disks = arv.keep_disks().list().execute()['items']
  return [[disk['service_host'], disk['service_port']]
          for disk in keep_disks]
+
+
def getKeepBlocks(keep_servers):
  """Fetch the block index from each keep server.

  Returns a list parallel to keep_servers; each element is a list of
  (block_id, mtime) tuples for the blocks on that server.
  """
  blocks = []
  for host, port in keep_servers:
    index = urllib2.urlopen('http://%s:%d/index' % (host, port)).read()
    server_blocks = []
    for line in index.split('\n'):
      if not line:
        continue
      block_id, mtime = line.split(' ')
      server_blocks.append((block_id, int(mtime)))
    blocks.append(server_blocks)
  return blocks
+
def getKeepStats(keep_servers):
  """Ask each keep server for its disk usage.

  Returns a list parallel to keep_servers of [total_bytes, free_bytes],
  summed over that server's keep-mounted volumes as reported by the
  'df' section of its /status.json.
  """
  MOUNT_COLUMN = 5
  TOTAL_COLUMN = 1
  FREE_COLUMN = 3
  DISK_BLOCK_SIZE = 1024  # df reports sizes in 1K blocks
  stats = []
  for host, port in keep_servers:
    status = json.load(
      urllib2.urlopen('http://%s:%d/status.json' % (host, port)))
    keep_volumes = []
    for df_line in status['df'].split('\n'):
      if not df_line:
        continue
      columns = df_line.split()
      # Only count volumes mounted somewhere under a "keep" path.
      if 'keep' in columns[MOUNT_COLUMN]:
        keep_volumes.append(columns)
    total_space = DISK_BLOCK_SIZE * sum(
      int(volume[TOTAL_COLUMN]) for volume in keep_volumes)
    free_space = DISK_BLOCK_SIZE * sum(
      int(volume[FREE_COLUMN]) for volume in keep_volumes)
    stats.append([total_space, free_space])
  return stats
+
+
def computeReplication(keep_blocks):
  """Count, for every block, how many keep servers hold a copy."""
  for server_blocks in keep_blocks:
    for block_uuid, _unused_mtime in server_blocks:
      block_to_replication[block_uuid] += 1
  log.debug('Seeing the following replication levels among blocks: %s',
            str(set(block_to_replication.values())))
+
+
def computeGarbageCollectionCandidates():
  """Build garbage_collection_report: non-persisted blocks, oldest first.

  Candidates are blocks with no persisters, ordered by their latest
  mtime across all keep servers, annotated with the cumulative disk
  space (and resulting free-space proportion) that deleting them in
  that order would reclaim.
  """
  # Record the newest mtime seen for each block across all servers.
  for server_blocks in keep_blocks:
    block_to_latest_mtime.addValues(server_blocks)
  empty_set = set()
  # Blocks nobody persisted, oldest mtime first (best deletion candidates).
  garbage_collection_priority = sorted(
    [(block,mtime)
     for block,mtime in block_to_latest_mtime.items()
     if len(block_to_persisters.get(block,empty_set)) == 0],
    key = itemgetter(1))
  global garbage_collection_report
  garbage_collection_report = []
  cumulative_disk_size = 0
  for block,mtime in garbage_collection_priority:
    disk_size = blockDiskUsage(block)
    cumulative_disk_size += disk_size
    garbage_collection_report.append(
      (block,
       mtime,
       disk_size,
       cumulative_disk_size,
       # Proportion of total disk that would be free after deleting
       # this block and every block listed before it.
       float(free_keep_space + cumulative_disk_size)/total_keep_space))

  print 'The oldest Garbage Collection Candidates: '
  pprint.pprint(garbage_collection_report[:20])
+
+
def outputGarbageCollectionReport(filename):
  """Write garbage_collection_report to filename as a CSV file."""
  with open(filename, 'wb') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(['block uuid', 'latest mtime', 'disk size',
                     'cumulative size', 'disk free'])
    writer.writerows(garbage_collection_report)
+
def computeGarbageCollectionHistogram():
  """Return [(mtime, pct)] steps of free space gained by deleting old blocks.

  Walks garbage_collection_report in deletion-priority order and records
  an entry each time the floored free-space percentage reaches a new high.
  """
  # TODO(misha): Modify this to allow users to specify the number of
  # histogram buckets through a flag.
  histogram = []
  previous_percentage = -1
  for entry in garbage_collection_report:
    mtime, disk_free = entry[1], entry[4]
    current_percentage = percentageFloor(disk_free)
    if current_percentage > previous_percentage:
      histogram.append( (mtime, current_percentage) )
    previous_percentage = current_percentage

  log.info('Garbage collection histogram is: %s', histogram)

  return histogram
+
+
def logGarbageCollectionHistogram():
  """Record garbage_collection_histogram as a workbench log entry."""
  # TODO(misha): Decide whether we should specify an object_uuid in
  # the body and if so, which uuid to use.
  body = {
    'event_type': args.block_age_free_space_histogram_log_event_type,
    'properties': {'histogram': garbage_collection_histogram},
  }
  # TODO(misha): Confirm that this will throw an exception if it
  # fails to create the log entry.
  arv.logs().create(body=body).execute()
+
+
def detectReplicationProblems():
  """Populate the module-level problem-report sets.

  A block is flagged when its observed replication (number of keep
  servers holding a copy) disagrees with the highest replication level
  any persister requested for it, or when it is on keep servers but in
  no collection at all.
  """
  blocks_not_in_any_collections.update(
    set(block_to_replication.keys()).difference(block_to_collections.keys()))
  for uuid, persister_replication in block_to_persister_replication.items():
    if not persister_replication:
      continue
    requested = max(persister_replication.values())
    observed = block_to_replication[uuid]
    if observed < requested:
      underreplicated_persisted_blocks.add(uuid)
    elif observed > requested:
      overreplicated_persisted_blocks.add(uuid)

  log.info('Found %d blocks not in any collections, e.g. %s...',
           len(blocks_not_in_any_collections),
           ','.join(list(blocks_not_in_any_collections)[:5]))
  log.info('Found %d underreplicated blocks, e.g. %s...',
           len(underreplicated_persisted_blocks),
           ','.join(list(underreplicated_persisted_blocks)[:5]))
  log.info('Found %d overreplicated blocks, e.g. %s...',
           len(overreplicated_persisted_blocks),
           ','.join(list(overreplicated_persisted_blocks)[:5]))

  # TODO:
  #  Read blocks sorted by mtime
  #  Cache window vs % free space
  #  Collections which candidates will appear in
  #  Youngest underreplicated read blocks that appear in collections.
  #  Report Collections that have blocks which are missing from (or
  #   underreplicated in) keep.
+
+
# This is the main flow here

parser = argparse.ArgumentParser(description='Report on keep disks.')
"""The command line argument parser we use.

We only use it in the __main__ block, but leave it outside the block
in case another package wants to use it or customize it by specifying
it as a parent to their commandline parser.
"""
parser.add_argument('-m',
                    '--max-api-results',
                    type=int,
                    default=5000,
                    help=('The max results to get at once.'))
parser.add_argument('-p',
                    '--port',
                    type=int,
                    default=9090,
                    help=('The port number to serve on. 0 means no server.'))
parser.add_argument('-v',
                    '--verbose',
                    help='increase output verbosity',
                    action='store_true')
parser.add_argument('-u',
                    '--uuid',
                    help='uuid of specific collection to process')
parser.add_argument('--require-admin-user',
                    action='store_true',
                    default=True,
                    help='Fail if the user is not an admin [default]')
parser.add_argument('--no-require-admin-user',
                    dest='require_admin_user',
                    action='store_false',
                    help=('Allow users without admin permissions with '
                          'only a warning.'))
parser.add_argument('--log-to-workbench',
                    action='store_true',
                    default=False,
                    help='Log findings to workbench')
parser.add_argument('--no-log-to-workbench',
                    dest='log_to_workbench',
                    action='store_false',
                    help='Don\'t log findings to workbench [default]')
parser.add_argument('--user-storage-log-event-type',
                    default='user-storage-report',
                    help=('The event type to set when logging user '
                          'storage usage to workbench.'))
parser.add_argument('--block-age-free-space-histogram-log-event-type',
                    default='block-age-free-space-histogram',
                    # Fixed help text: it was copy-pasted from the flag
                    # above and wrongly described user storage usage.
                    help=('The event type to set when logging the block '
                          'age vs free space histogram to workbench.'))
parser.add_argument('--garbage-collection-file',
                    default='',
                    help=('The file to write a garbage collection report, or '
                          'leave empty for no report.'))
+
# Parsed command-line flags; populated from parser in the __main__ block.
args = None

# TODO(misha): Think about moving some of this to the __main__ block.
log = logging.getLogger('arvados.services.datamanager')
stderr_handler = logging.StreamHandler()
log.setLevel(logging.INFO)
stderr_handler.setFormatter(
  logging.Formatter('%(asctime)-15s %(levelname)-8s %(message)s'))
log.addHandler(stderr_handler)
+
# Global Data - don't try this at home
collection_uuids = []

# These maps all map from uuids to a set of uuids
block_to_collections = defaultdict(set)  # keep blocks
reader_to_collections = defaultdict(set)  # collection(s) for which the user has read access
persister_to_collections = defaultdict(set)  # collection(s) which the user has persisted
block_to_readers = defaultdict(set)
block_to_persisters = defaultdict(set)
# Maps block uuid -> {persister uuid: replication level requested}.
block_to_persister_replication = defaultdict(maxdict)
reader_to_blocks = defaultdict(set)
persister_to_blocks = defaultdict(set)

# Column indices into the per-user usage rows stored in user_to_usage.
UNWEIGHTED_READ_SIZE_COL = 0
WEIGHTED_READ_SIZE_COL = 1
UNWEIGHTED_PERSIST_SIZE_COL = 2
WEIGHTED_PERSIST_SIZE_COL = 3
NUM_COLS = 4
user_to_usage = defaultdict(lambda : [0,]*NUM_COLS)

keep_servers = []
keep_blocks = []
keep_stats = []
total_keep_space = 0
free_keep_space =  0

# Number of copies of each block observed across all keep servers.
block_to_replication = defaultdict(lambda: 0)
block_to_latest_mtime = maxdict()

garbage_collection_report = []
"""A list of non-persisted blocks, sorted by increasing mtime

Each entry is of the form (block uuid, latest mtime, disk size,
cumulative disk size, disk free)

* block uuid: The id of the block we want to delete
* latest mtime: The latest mtime of the block across all keep servers.
* disk size: The total disk space used by this block (block size
multiplied by current replication level)
* cumulative disk size: The sum of this block's disk size and all the
blocks listed above it
* disk free: The proportion of our disk space that would be free if we
deleted this block and all the above. So this is (free disk space +
cumulative disk size) / total disk capacity
"""

garbage_collection_histogram = []
""" Shows the tradeoff of keep block age vs keep disk free space.

Each entry is of the form (mtime, Disk Proportion).

An entry of the form (1388747781, 0.52) means that if we deleted the
oldest non-persisted blocks until we had 52% of the disk free, then
all blocks with an mtime greater than 1388747781 would be preserved.
"""

# Stuff to report on
blocks_not_in_any_collections = set()
underreplicated_persisted_blocks = set()
overreplicated_persisted_blocks = set()

# Set to True once loadAllData() finishes; the web handler answers 503
# until then.
all_data_loaded = False
+
def loadAllData():
  """Run the full reporting pipeline and populate the global maps.

  Loads collections, links, and keep server state; computes
  replication, garbage collection candidates, and per-user usage;
  optionally logs results to workbench. Sets all_data_loaded when done.
  """
  checkUserIsAdmin()

  log.info('Building Collection List')
  global collection_uuids
  # Drop candidates whose uuid could not be extracted.
  collection_uuids = filter(None, [extractUuid(candidate)
                                   for candidate in buildCollectionsList()])

  log.info('Reading Collections')
  readCollections(collection_uuids)

  if args.verbose:
    pprint.pprint(CollectionInfo.all_by_uuid)

  log.info('Reading Links')
  readLinks()

  reportMostPopularCollections()

  log.info('Building Maps')
  buildMaps()

  reportBusiestUsers()

  log.info('Getting Keep Servers')
  global keep_servers
  keep_servers = getKeepServers()

  print keep_servers

  log.info('Getting Blocks from each Keep Server.')
  global keep_blocks
  keep_blocks = getKeepBlocks(keep_servers)

  log.info('Getting Stats from each Keep Server.')
  global keep_stats, total_keep_space, free_keep_space
  keep_stats = getKeepStats(keep_servers)

  total_keep_space = sum(map(itemgetter(0), keep_stats))
  free_keep_space = sum(map(itemgetter(1), keep_stats))

  # TODO(misha): Delete this hack when the keep servers are fixed!
  # This hack deals with the fact that keep servers report each other's disks.
  total_keep_space /= len(keep_stats)
  free_keep_space /= len(keep_stats)

  log.info('Total disk space: %s, Free disk space: %s (%d%%).' %
           (fileSizeFormat(total_keep_space),
            fileSizeFormat(free_keep_space),
            100*free_keep_space/total_keep_space))

  computeReplication(keep_blocks)

  log.info('average replication level is %f',
           (float(sum(block_to_replication.values())) /
            len(block_to_replication)))

  computeGarbageCollectionCandidates()

  if args.garbage_collection_file:
    log.info('Writing garbage Collection report to %s',
             args.garbage_collection_file)
    outputGarbageCollectionReport(args.garbage_collection_file)

  global garbage_collection_histogram
  garbage_collection_histogram = computeGarbageCollectionHistogram()

  if args.log_to_workbench:
    logGarbageCollectionHistogram()

  detectReplicationProblems()

  computeUserStorageUsage()
  printUserStorageUsage()
  if args.log_to_workbench:
    logUserStorageUsage()

  # Signal the web handler that pages can now be served.
  global all_data_loaded
  all_data_loaded = True
+
+
class DataManagerHandler(BaseHTTPRequestHandler):
  """Serves a small HTML report of the loaded usage data.

  URLs: "/" is the home page (per-user usage table), "/user/<uuid>"
  a per-user report, and "/collection/<uuid>" a per-collection report.
  All pages are rendered from the module-level maps that loadAllData()
  populates; requests get a 503 until that finishes.
  """
  USER_PATH = 'user'
  COLLECTION_PATH = 'collection'
  BLOCK_PATH = 'block'

  def userLink(self, uuid):
    # Render an HTML link to the user page for uuid.
    return ('<A HREF="/%(path)s/%(uuid)s">%(uuid)s</A>' %
            {'uuid': uuid,
             'path': DataManagerHandler.USER_PATH})

  def collectionLink(self, uuid):
    # Render an HTML link to the collection page for uuid.
    return ('<A HREF="/%(path)s/%(uuid)s">%(uuid)s</A>' %
            {'uuid': uuid,
             'path': DataManagerHandler.COLLECTION_PATH})

  def blockLink(self, uuid):
    # Render an HTML link to the block page for uuid.
    # NOTE(review): do_GET has no handler for BLOCK_PATH, so these
    # links currently lead to a 404 — confirm whether intentional.
    return ('<A HREF="/%(path)s/%(uuid)s">%(uuid)s</A>' %
            {'uuid': uuid,
             'path': DataManagerHandler.BLOCK_PATH})

  def writeTop(self, title):
    # Start an HTML document with the given title.
    self.wfile.write('<HTML><HEAD><TITLE>%s</TITLE></HEAD>\n<BODY>' % title)

  def writeBottom(self):
    # Close the HTML document started by writeTop().
    self.wfile.write('</BODY></HTML>\n')

  def writeHomePage(self):
    """Write a table of every user's usage (one row per user)."""
    self.send_response(200)
    self.end_headers()
    self.writeTop('Home')
    self.wfile.write('<TABLE>')
    self.wfile.write('<TR><TH>user'
                     '<TH>unweighted readable block size'
                     '<TH>weighted readable block size'
                     '<TH>unweighted persisted block size'
                     '<TH>weighted persisted block size</TR>\n')
    for user, usage in user_to_usage.items():
      self.wfile.write('<TR><TD>%s<TD>%s<TD>%s<TD>%s<TD>%s</TR>\n' %
                       (self.userLink(user),
                        fileSizeFormat(usage[UNWEIGHTED_READ_SIZE_COL]),
                        fileSizeFormat(usage[WEIGHTED_READ_SIZE_COL]),
                        fileSizeFormat(usage[UNWEIGHTED_PERSIST_SIZE_COL]),
                        fileSizeFormat(usage[WEIGHTED_PERSIST_SIZE_COL])))
    self.wfile.write('</TABLE>\n')
    self.writeBottom()

  def userExists(self, uuid):
    # Currently this will return false for a user who exists but
    # doesn't appear on any manifests.
    # TODO(misha): Figure out if we need to fix this.
    # (dict.has_key is Python 2-only.)
    return user_to_usage.has_key(uuid)

  def writeUserPage(self, uuid):
    """Write the usage table and collection lists for one user."""
    if not self.userExists(uuid):
      self.send_error(404,
                      'User (%s) Not Found.' % cgi.escape(uuid, quote=False))
    else:
      # Here we assume that since a user exists, they don't need to be
      # html escaped.
      self.send_response(200)
      self.end_headers()
      self.writeTop('User %s' % uuid)
      self.wfile.write('<TABLE>')
      self.wfile.write('<TR><TH>user'
                       '<TH>unweighted readable block size'
                       '<TH>weighted readable block size'
                       '<TH>unweighted persisted block size'
                       '<TH>weighted persisted block size</TR>\n')
      usage = user_to_usage[uuid]
      self.wfile.write('<TR><TD>%s<TD>%s<TD>%s<TD>%s<TD>%s</TR>\n' %
                       (self.userLink(uuid),
                        fileSizeFormat(usage[UNWEIGHTED_READ_SIZE_COL]),
                        fileSizeFormat(usage[WEIGHTED_READ_SIZE_COL]),
                        fileSizeFormat(usage[UNWEIGHTED_PERSIST_SIZE_COL]),
                        fileSizeFormat(usage[WEIGHTED_PERSIST_SIZE_COL])))
      self.wfile.write('</TABLE>\n')
      self.wfile.write('<P>Persisting Collections: %s\n' %
                       ', '.join(map(self.collectionLink,
                                     persister_to_collections[uuid])))
      self.wfile.write('<P>Reading Collections: %s\n' %
                       ', '.join(map(self.collectionLink,
                                     reader_to_collections[uuid])))
      self.writeBottom()

  def collectionExists(self, uuid):
    # (dict.has_key is Python 2-only.)
    return CollectionInfo.all_by_uuid.has_key(uuid)

  def writeCollectionPage(self, uuid):
    """Write size, readers, persisters and block replication for one collection."""
    if not self.collectionExists(uuid):
      self.send_error(404,
                      'Collection (%s) Not Found.' % cgi.escape(uuid, quote=False))
    else:
      collection = CollectionInfo.get(uuid)
      # Here we assume that since a collection exists, its id doesn't
      # need to be html escaped.
      self.send_response(200)
      self.end_headers()
      self.writeTop('Collection %s' % uuid)
      self.wfile.write('<H1>Collection %s</H1>\n' % uuid)
      self.wfile.write('<P>Total size %s (not factoring in replication).\n' %
                       fileSizeFormat(collection.byteSize()))
      self.wfile.write('<P>Readers: %s\n' %
                       ', '.join(map(self.userLink, collection.reader_uuids)))

      if len(collection.persister_replication) == 0:
        self.wfile.write('<P>No persisters\n')
      else:
        # Group persisters by the replication level they requested.
        replication_to_users = defaultdict(set)
        for user,replication in collection.persister_replication.items():
          replication_to_users[replication].add(user)
        replication_levels = sorted(replication_to_users.keys())

        self.wfile.write('<P>%d persisters in %d replication level(s) maxing '
                         'out at %dx replication:\n' %
                         (len(collection.persister_replication),
                          len(replication_levels),
                          replication_levels[-1]))

        # TODO(misha): This code is used twice, let's move it to a method.
        self.wfile.write('<TABLE><TR><TH>%s</TR>\n' %
                         '<TH>'.join(['Replication Level ' + str(x)
                                      for x in replication_levels]))
        self.wfile.write('<TR>\n')
        for replication_level in replication_levels:
          users = replication_to_users[replication_level]
          self.wfile.write('<TD valign="top">%s\n' % '<BR>\n'.join(
              map(self.userLink, users)))
        self.wfile.write('</TR></TABLE>\n')

      # Group the collection's blocks by observed replication level.
      replication_to_blocks = defaultdict(set)
      for block in collection.block_uuids:
        replication_to_blocks[block_to_replication[block]].add(block)
      replication_levels = sorted(replication_to_blocks.keys())
      self.wfile.write('<P>%d blocks in %d replication level(s):\n' %
                       (len(collection.block_uuids), len(replication_levels)))
      self.wfile.write('<TABLE><TR><TH>%s</TR>\n' %
                       '<TH>'.join(['Replication Level ' + str(x)
                                    for x in replication_levels]))
      self.wfile.write('<TR>\n')
      for replication_level in replication_levels:
        blocks = replication_to_blocks[replication_level]
        self.wfile.write('<TD valign="top">%s\n' % '<BR>\n'.join(blocks))
      self.wfile.write('</TR></TABLE>\n')
      # NOTE(review): unlike the other pages, this one never calls
      # writeBottom(), so the HTML is left unclosed — confirm intentional.


  def do_GET(self):
    """Dispatch GET requests to the home, user, or collection page."""
    if not all_data_loaded:
      self.send_error(503,
                      'Sorry, but I am still loading all the data I need.')
    else:
      # Removing leading '/' and process request path
      split_path = self.path[1:].split('/')
      request_type = split_path[0]
      log.debug('path (%s) split as %s with request_type %s' % (self.path,
                                                                split_path,
                                                                request_type))
      if request_type == '':
        self.writeHomePage()
      elif request_type == DataManagerHandler.USER_PATH:
        self.writeUserPage(split_path[1])
      elif request_type == DataManagerHandler.COLLECTION_PATH:
        self.writeCollectionPage(split_path[1])
      else:
        self.send_error(404, 'Unrecognized request path.')
    return
+
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
  """HTTPServer that handles each request in a separate thread."""
+
+
if __name__ == '__main__':
  args = parser.parse_args()

  if args.port == 0:
    # No web server requested: load and report synchronously.
    loadAllData()
  else:
    # Load the data in the background so the server can start right
    # away; it answers 503 until all_data_loaded becomes True.
    loader = threading.Thread(target = loadAllData, name = 'loader')
    loader.start()

    server = ThreadedHTTPServer(('localhost', args.port), DataManagerHandler)
    server.serve_forever()
diff --git a/services/datamanager/experimental/datamanager_test.py b/services/datamanager/experimental/datamanager_test.py
new file mode 100755 (executable)
index 0000000..0842c16
--- /dev/null
@@ -0,0 +1,41 @@
+#! /usr/bin/env python
+
+import datamanager
+import unittest
+
class TestComputeWeightedReplicationCosts(unittest.TestCase):
  """Unit tests for datamanager.computeWeightedReplicationCosts.

  Input is a list of requested replication levels (one per persister);
  expected output maps each level to the per-user cost at that level.
  """
  def test_obvious(self):
    self.assertEqual(datamanager.computeWeightedReplicationCosts([1,]),
                     {1:1.0})

  def test_simple(self):
    self.assertEqual(datamanager.computeWeightedReplicationCosts([2,]),
                     {2:2.0})

  def test_even_split(self):
    self.assertEqual(datamanager.computeWeightedReplicationCosts([1,1]),
                     {1:0.5})

  def test_even_split_bigger(self):
    self.assertEqual(datamanager.computeWeightedReplicationCosts([2,2]),
                     {2:1.0})

  def test_uneven_split(self):
    self.assertEqual(datamanager.computeWeightedReplicationCosts([1,2]),
                     {1:0.5, 2:1.5})

  def test_uneven_split_bigger(self):
    self.assertEqual(datamanager.computeWeightedReplicationCosts([1,3]),
                     {1:0.5, 3:2.5})

  def test_uneven_split_jumble(self):
    self.assertEqual(datamanager.computeWeightedReplicationCosts([1,3,6,6,10]),
                     {1:0.2, 3:0.7, 6:1.7, 10:5.7})

  def test_documentation_example(self):
    # Matches the worked example in the function's docstring.
    self.assertEqual(datamanager.computeWeightedReplicationCosts([1,1,3,6,6]),
                     {1:0.2, 3: 0.2 + 2.0 / 3, 6: 0.2 + 2.0 / 3 + 1.5})
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/services/fuse/.gitignore b/services/fuse/.gitignore
new file mode 120000 (symlink)
index 0000000..ed3b362
--- /dev/null
@@ -0,0 +1 @@
+../../sdk/python/.gitignore
\ No newline at end of file
diff --git a/services/fuse/MANIFEST.in b/services/fuse/MANIFEST.in
new file mode 100644 (file)
index 0000000..9561fb1
--- /dev/null
@@ -0,0 +1 @@
+include README.rst
diff --git a/services/fuse/README.rst b/services/fuse/README.rst
new file mode 100644 (file)
index 0000000..d9a9a07
--- /dev/null
@@ -0,0 +1,62 @@
+========================
+Arvados Keep FUSE Driver
+========================
+
+Overview
+--------
+
+This package provides a FUSE driver for Keep, the Arvados_ storage
+system.  It allows you to read data from your collections as if they
+were on the local filesystem.
+
+.. _Arvados: https://arvados.org/
+
+Installation
+------------
+
+Installing under your user account
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This method lets you install the package without root access.
+However, other users on the same system won't be able to use it.
+
+1. Run ``pip install --user arvados_fuse``.
+
+2. In your shell configuration, make sure you add ``$HOME/.local/bin``
+   to your PATH environment variable.  For example, you could add the
+   command ``PATH=$PATH:$HOME/.local/bin`` to your ``.bashrc`` file.
+
+3. Reload your shell configuration.  For example, bash users could run
+   ``source ~/.bashrc``.
+
+Installing on Debian systems
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+1. Add this Arvados repository to your sources list::
+
+     deb http://apt.arvados.org/ wheezy main
+
+2. Update your package list.
+
+3. Install the ``python-arvados-fuse`` package.
+
+Configuration
+-------------
+
+This driver needs two pieces of information to connect to
+Arvados: the DNS name of the API server, and an API authorization
+token.  You can set these in environment variables, or the file
+``$HOME/.config/arvados/settings.conf``.  `The Arvados user
+documentation
+<http://doc.arvados.org/user/reference/api-tokens.html>`_ describes
+how to find this information in the Arvados Workbench, and install it
+on your system.
+
+Testing and Development
+-----------------------
+
+This package is one part of the Arvados source package, and it has
+integration tests to check interoperability with other Arvados
+components.  Our `hacking guide
+<https://arvados.org/projects/arvados/wiki/Hacking_Python_SDK>`_
+describes how to set up a development environment and run tests.
diff --git a/services/fuse/arvados_fuse/__init__.py b/services/fuse/arvados_fuse/__init__.py
new file mode 100644 (file)
index 0000000..dfdd312
--- /dev/null
@@ -0,0 +1,923 @@
+#
+# FUSE driver for Arvados Keep
+#
+
+import os
+import sys
+import llfuse
+import errno
+import stat
+import threading
+import arvados
+import pprint
+import arvados.events
+import re
+import apiclient
+import json
+import logging
+import time
+import _strptime
+import calendar
+import threading
+import itertools
+
+from arvados.util import portable_data_hash_pattern, uuid_pattern, collection_uuid_pattern, group_uuid_pattern, user_uuid_pattern, link_uuid_pattern
+
# Module-wide logger for the FUSE driver.
_logger = logging.getLogger('arvados.arvados_fuse')

# Match any character which FUSE or Linux cannot accommodate as part
# of a filename. (If present in a collection filename, they will
# appear as underscores in the fuse mount.)
_disallowed_filename_characters = re.compile('[\x00/]')
+
class SafeApi(object):
    '''Threadsafe wrapper for API object.  This stores and returns a different api
    object per thread, because httplib2 which underlies apiclient is not
    threadsafe.
    '''

    def __init__(self, config):
        self.host = config.get('ARVADOS_API_HOST')
        self.api_token = config.get('ARVADOS_API_TOKEN')
        self.insecure = config.flag_is_true('ARVADOS_API_HOST_INSECURE')
        # Per-thread storage for the API and Keep client instances.
        self.local = threading.local()
        # One shared block cache; handed to every thread's KeepClient.
        self.block_cache = arvados.KeepBlockCache()

    def localapi(self):
        '''Return this thread's API client, creating it on first use.'''
        if 'api' not in self.local.__dict__:
            self.local.api = arvados.api('v1', False, self.host,
                                         self.api_token, self.insecure)
        return self.local.api

    def localkeep(self):
        '''Return this thread's KeepClient, creating it on first use.'''
        if 'keep' not in self.local.__dict__:
            self.local.keep = arvados.KeepClient(api_client=self.localapi(), block_cache=self.block_cache)
        return self.local.keep

    def __getattr__(self, name):
        # Proxy nonexistent attributes to the local API client.
        try:
            return getattr(self.localapi(), name)
        except AttributeError:
            # Bug fix: object defines no __getattr__, so the previous
            # fallback (super(SafeApi, self).__getattr__(name)) always
            # raised a misleading AttributeError about '__getattr__'
            # itself. Re-raise the original error, which names the
            # attribute the caller actually asked for.
            raise
+
+
def convertTime(t):
    '''Parse an Arvados timestamp string into unix time.

    Returns 0 for anything that is not a well-formed
    "%Y-%m-%dT%H:%M:%SZ" string (including None), so callers can use
    the result as an mtime without error checking.
    '''
    try:
        parsed = time.strptime(t, "%Y-%m-%dT%H:%M:%SZ")
    except (TypeError, ValueError):
        return 0
    return calendar.timegm(parsed)
+
def sanitize_filename(dirty):
    '''Replace disallowed filename characters with harmless "_".

    None passes through unchanged.  The empty string and the special
    names "." and ".." are rewritten so they cannot collide with real
    directory entries.
    '''
    if dirty is None:
        return None
    special = {'': '_', '.': '_', '..': '__'}
    if dirty in special:
        return special[dirty]
    return _disallowed_filename_characters.sub('_', dirty)
+
+
class FreshBase(object):
    '''Base class for maintaining fresh/stale state to determine when to update.'''
    def __init__(self):
        self._stale = True            # starts stale so the first access triggers an update
        self._poll = False            # when True, contents also expire after _poll_time
        self._last_update = time.time()
        self._atime = time.time()     # last access time; compared against the poll deadline
        self._poll_time = 60          # seconds a polled entry stays fresh

    # Mark the value as stale
    def invalidate(self):
        self._stale = True

    # Test if the entries dict is stale.
    def stale(self):
        if self._stale:
            return True
        if self._poll:
            # Stale if the entry was accessed after the poll interval expired.
            return (self._last_update + self._poll_time) < self._atime
        return False

    def fresh(self):
        # Mark as up to date and restart the poll clock.
        self._stale = False
        self._last_update = time.time()

    def atime(self):
        return self._atime
+
class File(FreshBase):
    '''Base for file objects.'''

    def __init__(self, parent_inode, _mtime=0):
        super(File, self).__init__()
        # inode is set externally (presumably by the inode table when
        # the file is registered) — TODO confirm.
        self.inode = None
        self.parent_inode = parent_inode
        self._mtime = _mtime

    def size(self):
        # Stub; subclasses report their actual size.
        return 0

    def readfrom(self, off, size):
        # Stub; subclasses return up to `size` bytes starting at `off`.
        return ''

    def mtime(self):
        return self._mtime
+
+
class StreamReaderFile(File):
    '''Wraps a StreamFileReader as a file.'''

    def __init__(self, parent_inode, reader, _mtime):
        super(StreamReaderFile, self).__init__(parent_inode, _mtime)
        self.reader = reader

    def size(self):
        return self.reader.size()

    def readfrom(self, off, size):
        return self.reader.readfrom(off, size)

    def stale(self):
        # Never stale: the underlying reader's contents do not change.
        return False
+
+
class StringFile(File):
    '''File entry whose entire content is a simple in-memory string.'''

    def __init__(self, parent_inode, contents, _mtime):
        super(StringFile, self).__init__(parent_inode, _mtime)
        self.contents = contents

    def size(self):
        '''The file size is just the length of the stored string.'''
        return len(self.contents)

    def readfrom(self, off, size):
        '''Serve reads by slicing the stored string.'''
        return self.contents[off:(off + size)]
+
+
class ObjectFile(StringFile):
    '''File entry presenting an API record (a dict) as pretty-printed JSON.'''

    def __init__(self, parent_inode, obj):
        # Start with empty contents; update() fills them in immediately.
        super(ObjectFile, self).__init__(parent_inode, "", 0)
        self.uuid = obj['uuid']
        self.update(obj)

    def update(self, obj):
        '''Re-serialize obj and refresh the mtime from its modified_at.'''
        if 'modified_at' in obj:
            self._mtime = convertTime(obj['modified_at'])
        else:
            self._mtime = 0
        self.contents = json.dumps(obj, indent=4, sort_keys=True) + "\n"
+
+
class Directory(FreshBase):
    '''Generic directory object, backed by a dict.

    Consists of a set of entries with the key representing the filename
    and the value referencing a File or Directory object.
    '''

    def __init__(self, parent_inode):
        '''parent_inode is the integer inode number of the parent directory.'''
        super(Directory, self).__init__()

        self.inode = None  # assigned later by Inodes.add_entry()
        if not isinstance(parent_inode, int):
            raise Exception("parent_inode should be an int")
        self.parent_inode = parent_inode
        self._entries = {}
        self._mtime = time.time()

    #  Overriden by subclasses to implement logic to update the entries dict
    #  when the directory is stale
    def update(self):
        pass

    # Only used when computing the size of the disk footprint of the directory
    # (stub)
    def size(self):
        return 0

    def checkupdate(self):
        '''Run update() if this directory is stale; API errors are logged
        and otherwise ignored so a transient failure does not break reads.'''
        if self.stale():
            try:
                self.update()
            except apiclient.errors.HttpError as e:
                _logger.debug(e)

    def __getitem__(self, item):
        self.checkupdate()
        return self._entries[item]

    def items(self):
        self.checkupdate()
        return self._entries.items()

    def __iter__(self):
        self.checkupdate()
        return self._entries.iterkeys()

    def __contains__(self, k):
        self.checkupdate()
        return k in self._entries

    def merge(self, items, fn, same, new_entry):
        '''Helper method for updating the contents of the directory.  Takes a list
        describing the new contents of the directory, reuses entries that are
        the same in both the old and new lists, creates new entries, and deletes
        old entries missing from the new list.

        items: iterable with new directory contents

        fn: function to take an entry in 'items' and return the desired file or
        directory name, or None if this entry should be skipped

        same: function to compare an existing entry (a File or Directory
        object) with an entry in the items list to determine whether to keep
        the existing entry.

        new_entry: function to create a new directory entry (File or Directory
        object) from an entry in the items list.

        '''

        oldentries = self._entries
        self._entries = {}
        changed = False
        for i in items:
            name = sanitize_filename(fn(i))
            if name:
                if name in oldentries and same(oldentries[name], i):
                    # move existing directory entry over
                    self._entries[name] = oldentries[name]
                    del oldentries[name]
                else:
                    # create new directory entry
                    ent = new_entry(i)
                    if ent is not None:
                        self._entries[name] = self.inodes.add_entry(ent)
                        changed = True

        # delete any other directory entries that were not found in 'items'
        for i in oldentries:
            llfuse.invalidate_entry(self.inode, str(i))
            self.inodes.del_entry(oldentries[i])
            changed = True

        if changed:
            self._mtime = time.time()

        self.fresh()

    def clear(self):
        '''Delete all entries, recursively clearing subdirectories first.'''
        oldentries = self._entries
        self._entries = {}
        for n in oldentries:
            # BUG FIX: 'n' is the entry *name* (a string key), so the original
            # test isinstance(n, Directory) was never true and subdirectories
            # were never recursively cleared.  Test the entry object instead.
            if isinstance(oldentries[n], Directory):
                oldentries[n].clear()
            llfuse.invalidate_entry(self.inode, str(n))
            self.inodes.del_entry(oldentries[n])
        self.invalidate()

    def mtime(self):
        '''Return the time of the last content change.'''
        return self._mtime
+
+
class CollectionDirectory(Directory):
    '''Represents the root of a directory tree holding a collection.'''

    def __init__(self, parent_inode, inodes, api, num_retries, collection):
        '''collection may be a collection record (dict) or a locator string
        (uuid or portable data hash); only the locator is kept here.'''
        super(CollectionDirectory, self).__init__(parent_inode)
        self.inodes = inodes
        self.api = api
        self.num_retries = num_retries
        # Lazily-created ObjectFile exposing the API record (see __getitem__).
        self.collection_object_file = None
        # The collection record last fetched by update(); None until then.
        self.collection_object = None
        if isinstance(collection, dict):
            self.collection_locator = collection['uuid']
        else:
            self.collection_locator = collection

    def same(self, i):
        # True if record 'i' refers to the same collection as this directory.
        return i['uuid'] == self.collection_locator or i['portable_data_hash'] == self.collection_locator

    def new_collection(self, new_collection_object, coll_reader):
        '''Replace the directory tree with the contents of coll_reader.

        Builds intermediate Directory entries for each path component of
        each stream, then adds a StreamReaderFile per file.
        '''
        self.collection_object = new_collection_object

        if self.collection_object_file is not None:
            self.collection_object_file.update(self.collection_object)

        self.clear()
        for s in coll_reader.all_streams():
            cwd = self
            # Walk/create the subdirectory chain named by the stream path.
            for part in s.name().split('/'):
                if part != '' and part != '.':
                    partname = sanitize_filename(part)
                    if partname not in cwd._entries:
                        cwd._entries[partname] = self.inodes.add_entry(Directory(cwd.inode))
                    cwd = cwd._entries[partname]
            for k, v in s.files().items():
                cwd._entries[sanitize_filename(k)] = self.inodes.add_entry(StreamReaderFile(cwd.inode, v, self.mtime()))

    def update(self):
        '''Fetch the collection record and rebuild the tree if its content
        changed.  Returns True on success, False on error.'''
        try:
            # A collection named by portable data hash is immutable; once
            # loaded there is nothing to refresh.
            if self.collection_object is not None and portable_data_hash_pattern.match(self.collection_locator):
                return True

            # Release the global FUSE lock during network I/O so other
            # requests can be serviced meanwhile.
            with llfuse.lock_released:
                coll_reader = arvados.CollectionReader(
                    self.collection_locator, self.api, self.api.localkeep(),
                    num_retries=self.num_retries)
                new_collection_object = coll_reader.api_response() or {}
                # If the Collection only exists in Keep, there will be no API
                # response.  Fill in the fields we need.
                if 'uuid' not in new_collection_object:
                    new_collection_object['uuid'] = self.collection_locator
                if "portable_data_hash" not in new_collection_object:
                    new_collection_object["portable_data_hash"] = new_collection_object["uuid"]
                if 'manifest_text' not in new_collection_object:
                    new_collection_object['manifest_text'] = coll_reader.manifest_text()
                coll_reader.normalize()
            # end with llfuse.lock_released, re-acquire lock

            # Only rebuild the tree when the content hash actually changed.
            if self.collection_object is None or self.collection_object["portable_data_hash"] != new_collection_object["portable_data_hash"]:
                self.new_collection(new_collection_object, coll_reader)

            self.fresh()
            return True
        except apiclient.errors.NotFoundError:
            _logger.exception("arv-mount %s: error", self.collection_locator)
        except arvados.errors.ArgumentError as detail:
            _logger.warning("arv-mount %s: error %s", self.collection_locator, detail)
            if self.collection_object is not None and "manifest_text" in self.collection_object:
                _logger.warning("arv-mount manifest_text is: %s", self.collection_object["manifest_text"])
        except Exception:
            _logger.exception("arv-mount %s: error", self.collection_locator)
            if self.collection_object is not None and "manifest_text" in self.collection_object:
                _logger.error("arv-mount manifest_text is: %s", self.collection_object["manifest_text"])
        return False

    def __getitem__(self, item):
        # The magic name '.arvados#collection' exposes the raw API record
        # as a JSON file; everything else is a normal directory lookup.
        self.checkupdate()
        if item == '.arvados#collection':
            if self.collection_object_file is None:
                self.collection_object_file = ObjectFile(self.inode, self.collection_object)
                self.inodes.add_entry(self.collection_object_file)
            return self.collection_object_file
        else:
            return super(CollectionDirectory, self).__getitem__(item)

    def __contains__(self, k):
        if k == '.arvados#collection':
            return True
        else:
            return super(CollectionDirectory, self).__contains__(k)

    def mtime(self):
        # Report the collection's modified_at when known; 0 otherwise.
        self.checkupdate()
        return convertTime(self.collection_object["modified_at"]) if self.collection_object is not None and 'modified_at' in self.collection_object else 0
+
+
class MagicDirectory(Directory):
    '''A special directory that logically contains the set of all extant keep
    locators.  When a file is referenced by lookup(), it is tested to see if it
    is a valid keep locator to a manifest, and if so, loads the manifest
    contents as a subdirectory of this directory with the locator as the
    directory name.  Since querying a list of all extant keep locators is
    impractical, only collections that have already been accessed are visible
    to readdir().
    '''

    README_TEXT = '''
This directory provides access to Arvados collections as subdirectories listed
by uuid (in the form 'zzzzz-4zz18-1234567890abcde') or portable data hash (in
the form '1234567890abcdefghijklmnopqrstuv+123').

Note that this directory will appear empty until you attempt to access a
specific collection subdirectory (such as trying to 'cd' into it), at which
point the collection will actually be looked up on the server and the directory
will appear if it exists.
'''.lstrip()

    def __init__(self, parent_inode, inodes, api, num_retries):
        super(MagicDirectory, self).__init__(parent_inode)
        self.inodes = inodes
        self.api = api
        self.num_retries = num_retries

    def __setattr__(self, name, value):
        # The README (and, at the root, the by_id alias) can only be created
        # once this directory has an inode number, which is assigned *after*
        # construction by Inodes.add_entry().  Hook attribute assignment so
        # the entries are created exactly when 'inode' is first set.
        super(MagicDirectory, self).__setattr__(name, value)
        # When we're assigned an inode, add a README.
        if ((name == 'inode') and (self.inode is not None) and
              (not self._entries)):
            self._entries['README'] = self.inodes.add_entry(
                StringFile(self.inode, self.README_TEXT, time.time()))
            # If we're the root directory, add an identical by_id subdirectory.
            if self.inode == llfuse.ROOT_INODE:
                self._entries['by_id'] = self.inodes.add_entry(MagicDirectory(
                        self.inode, self.inodes, self.api, self.num_retries))

    def __contains__(self, k):
        '''Membership test doubles as on-demand loading: a lookup for a
        plausible locator tries to mount that collection as a new entry.'''
        if k in self._entries:
            return True

        # Only strings shaped like a portable data hash or uuid are even
        # worth a server round-trip.
        if not portable_data_hash_pattern.match(k) and not uuid_pattern.match(k):
            return False

        try:
            e = self.inodes.add_entry(CollectionDirectory(
                    self.inode, self.inodes, self.api, self.num_retries, k))
            if e.update():
                self._entries[k] = e
                return True
            else:
                return False
        except Exception as e:
            _logger.debug('arv-mount exception keep %s', e)
            return False

    def __getitem__(self, item):
        # Note: 'item in self' may side-effect by loading the collection.
        if item in self:
            return self._entries[item]
        else:
            raise KeyError("No collection with id " + item)
+
+
class RecursiveInvalidateDirectory(Directory):
    '''Directory whose invalidate() also invalidates every child entry.'''

    def invalidate(self):
        # The root directory's invalidate() may run outside a FUSE request
        # handler, so the global llfuse lock must be taken explicitly there.
        if self.inode == llfuse.ROOT_INODE:
            llfuse.lock.acquire()
        try:
            super(RecursiveInvalidateDirectory, self).invalidate()
            for a in self._entries:
                self._entries[a].invalidate()
        except Exception:
            # BUG FIX: Logger.exception() requires a message argument; the
            # original bare call raised TypeError and masked the real error.
            _logger.exception("arv-mount: error during recursive invalidate")
        finally:
            if self.inode == llfuse.ROOT_INODE:
                llfuse.lock.release()
+
+
class TagsDirectory(RecursiveInvalidateDirectory):
    '''A special directory that contains as subdirectories all tags visible to the user.'''

    def __init__(self, parent_inode, inodes, api, num_retries, poll_time=60):
        super(TagsDirectory, self).__init__(parent_inode)
        self.inodes = inodes
        self.api = api
        self.num_retries = num_retries
        # Tag lists change server-side, so always poll for updates.
        self._poll = True
        self._poll_time = poll_time

    def update(self):
        '''Fetch the distinct tag names and merge them into the entries.'''
        # Release the global FUSE lock while talking to the API server.
        with llfuse.lock_released:
            tags = self.api.links().list(
                filters=[['link_class', '=', 'tag']],
                select=['name'], distinct=True
                ).execute(num_retries=self.num_retries)
        if "items" not in tags:
            return

        def name_of(link):
            return link['name'] if 'name' in link else link['uuid']

        def is_same(entry, link):
            return entry.tag == link

        def make_entry(link):
            return TagDirectory(self.inode, self.inodes, self.api,
                                self.num_retries, link['name'],
                                poll=self._poll, poll_time=self._poll_time)

        self.merge(tags['items'], name_of, is_same, make_entry)
+
+
class TagDirectory(Directory):
    '''A special directory that contains as subdirectories all collections visible
    to the user that are tagged with a particular tag.
    '''

    def __init__(self, parent_inode, inodes, api, num_retries, tag,
                 poll=False, poll_time=60):
        super(TagDirectory, self).__init__(parent_inode)
        self.inodes = inodes
        self.api = api
        self.num_retries = num_retries
        self.tag = tag
        self._poll = poll
        self._poll_time = poll_time

    def update(self):
        '''Fetch collections carrying this tag and merge them into entries.'''
        # Query the API server with the global FUSE lock released.
        with llfuse.lock_released:
            taggedcollections = self.api.links().list(
                filters=[['link_class', '=', 'tag'],
                         ['name', '=', self.tag],
                         ['head_uuid', 'is_a', 'arvados#collection']],
                select=['head_uuid']
                ).execute(num_retries=self.num_retries)

        def name_of(link):
            return link['head_uuid']

        def is_same(entry, link):
            return entry.collection_locator == link['head_uuid']

        def make_entry(link):
            return CollectionDirectory(self.inode, self.inodes, self.api,
                                       self.num_retries, link['head_uuid'])

        self.merge(taggedcollections['items'], name_of, is_same, make_entry)
+
+
class ProjectDirectory(Directory):
    '''A special directory that contains the contents of a project.'''

    def __init__(self, parent_inode, inodes, api, num_retries, project_object,
                 poll=False, poll_time=60):
        '''project_object is the group or user record for this project.'''
        super(ProjectDirectory, self).__init__(parent_inode)
        self.inodes = inodes
        self.api = api
        self.num_retries = num_retries
        self.project_object = project_object
        # Lazily created in update(); exposes the record at '.arvados#project'.
        self.project_object_file = None
        self.uuid = project_object['uuid']
        self._poll = poll
        self._poll_time = poll_time

    def createDirectory(self, i):
        '''Map a contents record to a new directory entry, or None to skip it.'''
        if collection_uuid_pattern.match(i['uuid']):
            return CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, i)
        elif group_uuid_pattern.match(i['uuid']):
            # Subproject: recurse with the same polling configuration.
            return ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, i, self._poll, self._poll_time)
        elif link_uuid_pattern.match(i['uuid']):
            if i['head_kind'] == 'arvados#collection' or portable_data_hash_pattern.match(i['head_uuid']):
                return CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, i['head_uuid'])
            else:
                return None
        elif uuid_pattern.match(i['uuid']):
            # Any other Arvados object is shown as a JSON file.
            return ObjectFile(self.parent_inode, i)
        else:
            return None

    def update(self):
        '''Refresh the project record and merge its contents into entries.'''
        # FIX: compare to None with 'is', not '=='.
        if self.project_object_file is None:
            self.project_object_file = ObjectFile(self.inode, self.project_object)
            self.inodes.add_entry(self.project_object_file)

        def namefn(i):
            '''Choose the directory-entry name for record i, or None to skip.'''
            if 'name' in i:
                if i['name'] is None or len(i['name']) == 0:
                    return None
                elif collection_uuid_pattern.match(i['uuid']) or group_uuid_pattern.match(i['uuid']):
                    # collection or subproject
                    return i['name']
                elif link_uuid_pattern.match(i['uuid']) and i['head_kind'] == 'arvados#collection':
                    # name link
                    return i['name']
                elif 'kind' in i and i['kind'].startswith('arvados#'):
                    # something else: append the object kind to the name
                    return "{}.{}".format(i['name'], i['kind'][8:])
                else:
                    # named record of an unrecognized kind
                    return None
            else:
                return None

        def samefn(a, i):
            '''True if existing entry a still represents record i.'''
            if isinstance(a, CollectionDirectory):
                return a.collection_locator == i['uuid']
            elif isinstance(a, ProjectDirectory):
                return a.uuid == i['uuid']
            elif isinstance(a, ObjectFile):
                return a.uuid == i['uuid'] and not a.stale()
            return False

        # Talk to the API server with the global FUSE lock released.
        with llfuse.lock_released:
            if group_uuid_pattern.match(self.uuid):
                self.project_object = self.api.groups().get(
                    uuid=self.uuid).execute(num_retries=self.num_retries)
            elif user_uuid_pattern.match(self.uuid):
                self.project_object = self.api.users().get(
                    uuid=self.uuid).execute(num_retries=self.num_retries)

            contents = arvados.util.list_all(self.api.groups().contents,
                                             self.num_retries, uuid=self.uuid)
            # Name links will be obsolete soon, take this out when there are no more pre-#3036 in use.
            contents += arvados.util.list_all(
                self.api.links().list, self.num_retries,
                filters=[['tail_uuid', '=', self.uuid],
                         ['link_class', '=', 'name']])

        # end with llfuse.lock_released, re-acquire lock

        self.merge(contents,
                   namefn,
                   samefn,
                   self.createDirectory)

    def __getitem__(self, item):
        # The magic name '.arvados#project' exposes the raw project record.
        self.checkupdate()
        if item == '.arvados#project':
            return self.project_object_file
        else:
            return super(ProjectDirectory, self).__getitem__(item)

    def __contains__(self, k):
        if k == '.arvados#project':
            return True
        else:
            return super(ProjectDirectory, self).__contains__(k)
+
+
class SharedDirectory(Directory):
    '''A special directory that represents users or groups who have shared projects with me.'''

    def __init__(self, parent_inode, inodes, api, num_retries, exclude,
                 poll=False, poll_time=60):
        super(SharedDirectory, self).__init__(parent_inode)
        self.inodes = inodes
        self.api = api
        self.num_retries = num_retries
        self.current_user = api.users().current().execute(num_retries=num_retries)
        # NOTE(review): polling is unconditionally enabled; the 'poll'
        # parameter is accepted but ignored -- confirm this is intentional.
        self._poll = True
        self._poll_time = poll_time

    def update(self):
        '''Rebuild entries: one ProjectDirectory per top-level project shared
        with the user, keyed by the owner's name when the owner is visible.'''
        with llfuse.lock_released:
            all_projects = arvados.util.list_all(
                self.api.groups().list, self.num_retries,
                filters=[['group_class','=','project']])
            objects = {}
            for ob in all_projects:
                objects[ob['uuid']] = ob

            # "Root" projects are those owned neither by the current user nor
            # by another visible project.
            roots = []
            root_owners = {}
            for ob in all_projects:
                if ob['owner_uuid'] != self.current_user['uuid'] and ob['owner_uuid'] not in objects:
                    roots.append(ob)
                    root_owners[ob['owner_uuid']] = True

            # Fetch the owning users and groups so top-level entries can be
            # named after them.
            lusers = arvados.util.list_all(
                self.api.users().list, self.num_retries,
                filters=[['uuid','in', list(root_owners)]])
            lgroups = arvados.util.list_all(
                self.api.groups().list, self.num_retries,
                filters=[['uuid','in', list(root_owners)]])

            for l in lusers:
                objects[l["uuid"]] = l
            for l in lgroups:
                objects[l["uuid"]] = l

            contents = {}
            for r in root_owners:
                if r in objects:
                    obr = objects[r]
                    if "name" in obr:
                        contents[obr["name"]] = obr
                    if "first_name" in obr:
                        contents[u"{} {}".format(obr["first_name"], obr["last_name"])] = obr

            # Projects whose owner could not be resolved appear directly.
            for r in roots:
                if r['owner_uuid'] not in objects:
                    contents[r['name']] = r

        # end with llfuse.lock_released, re-acquire lock

        try:
            self.merge(contents.items(),
                       lambda i: i[0],
                       lambda a, i: a.uuid == i[1]['uuid'],
                       lambda i: ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, i[1], poll=self._poll, poll_time=self._poll_time))
        except Exception:
            # BUG FIX: Logger.exception() requires a message argument; the
            # original bare call raised TypeError and masked the real error.
            _logger.exception("arv-mount shared: error during merge")
+
+
class FileHandle(object):
    '''Pairs a numeric file handle with the File or Directory object the
    client has opened.'''

    def __init__(self, fh, entry):
        self.fh = fh        # numeric handle returned to the FUSE client
        self.entry = entry  # the File/Directory (or readdir snapshot) in use
+
+
class Inodes(object):
    '''Manage the set of inodes.  This is the mapping from a numeric id
    to a concrete File or Directory object'''

    def __init__(self):
        self._entries = {}
        # Inode numbers are handed out sequentially starting at the root.
        self._counter = itertools.count(llfuse.ROOT_INODE)

    def __getitem__(self, item):
        return self._entries[item]

    def __setitem__(self, key, item):
        self._entries[key] = item

    def __iter__(self):
        return self._entries.iterkeys()

    def items(self):
        return self._entries.items()

    def __contains__(self, k):
        return k in self._entries

    def add_entry(self, entry):
        '''Assign the next inode number to entry and register it.'''
        entry.inode = next(self._counter)
        self[entry.inode] = entry
        return entry

    def del_entry(self, entry):
        '''Unregister entry and tell the kernel to drop its cached inode.'''
        llfuse.invalidate_inode(entry.inode)
        del self._entries[entry.inode]
+
class Operations(llfuse.Operations):
    '''This is the main interface with llfuse.  The methods on this object are
    called by llfuse threads to service FUSE events to query and read from
    the file system.

    llfuse has its own global lock which is acquired before calling a request handler,
    so request handlers do not run concurrently unless the lock is explicitly released
    using "with llfuse.lock_released:"'''

    def __init__(self, uid, gid, encoding="utf-8", set_executable_bit=False):
        '''uid/gid: ownership reported for every entry.
        encoding: encoding used for filenames on the wire.
        set_executable_bit: report StreamReaderFiles as executable if True.'''
        super(Operations, self).__init__()

        self.inodes = Inodes()
        self.uid = uid
        self.gid = gid
        self.encoding = encoding
        self.set_executable_bit = set_executable_bit

        # dict of numeric file handle to FileHandle
        self._filehandles = {}
        self._filehandles_counter = 1

        # Other threads that need to wait until the fuse driver
        # is fully initialized should wait() on this event object.
        self.initlock = threading.Event()

    def init(self):
        # Allow threads that are waiting for the driver to be finished
        # initializing to continue
        self.initlock.set()

    def access(self, inode, mode, ctx):
        # Read-only filesystem with world-readable entries: always allow.
        return True

    def getattr(self, inode):
        '''Build EntryAttributes for inode; raise ENOENT if unknown.'''
        if inode not in self.inodes:
            raise llfuse.FUSEError(errno.ENOENT)

        e = self.inodes[inode]

        entry = llfuse.EntryAttributes()
        entry.st_ino = inode
        entry.generation = 0
        # Let the kernel cache lookups/attributes for 5 minutes.
        entry.entry_timeout = 300
        entry.attr_timeout = 300

        # Everything is world-readable; directories (and optionally stream
        # files) also get the execute bits.
        entry.st_mode = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
        if isinstance(e, Directory):
            entry.st_mode |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH | stat.S_IFDIR
        elif isinstance(e, StreamReaderFile) and self.set_executable_bit:
            entry.st_mode |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH | stat.S_IFREG
        else:
            entry.st_mode |= stat.S_IFREG

        entry.st_nlink = 1
        entry.st_uid = self.uid
        entry.st_gid = self.gid
        entry.st_rdev = 0

        entry.st_size = e.size()

        entry.st_blksize = 512
        entry.st_blocks = (e.size()/512)+1
        entry.st_atime = int(e.atime())
        entry.st_mtime = int(e.mtime())
        entry.st_ctime = int(e.mtime())

        return entry

    def lookup(self, parent_inode, name):
        '''Resolve name within parent_inode and return its attributes.'''
        name = unicode(name, self.encoding)
        _logger.debug("arv-mount lookup: parent_inode %i name %s",
                      parent_inode, name)
        inode = None

        if name == '.':
            inode = parent_inode
        else:
            if parent_inode in self.inodes:
                p = self.inodes[parent_inode]
                if name == '..':
                    inode = p.parent_inode
                elif isinstance(p, Directory) and name in p:
                    inode = p[name].inode

        # FIX: compare to None with 'is not', not '!='.
        if inode is not None:
            return self.getattr(inode)
        else:
            raise llfuse.FUSEError(errno.ENOENT)

    def open(self, inode, flags):
        '''Open a file for reading; returns a new numeric file handle.'''
        if inode in self.inodes:
            p = self.inodes[inode]
        else:
            raise llfuse.FUSEError(errno.ENOENT)

        # Read-only filesystem: refuse any write access.
        if (flags & os.O_WRONLY) or (flags & os.O_RDWR):
            raise llfuse.FUSEError(errno.EROFS)

        if isinstance(p, Directory):
            raise llfuse.FUSEError(errno.EISDIR)

        fh = self._filehandles_counter
        self._filehandles_counter += 1
        self._filehandles[fh] = FileHandle(fh, p)
        return fh

    def read(self, fh, off, size):
        '''Read size bytes at offset off from the open handle fh.'''
        _logger.debug("arv-mount read %i %i %i", fh, off, size)
        if fh in self._filehandles:
            handle = self._filehandles[fh]
        else:
            raise llfuse.FUSEError(errno.EBADF)

        # update atime
        handle.entry._atime = time.time()

        try:
            # Release the global lock during (possibly slow) data fetch.
            with llfuse.lock_released:
                return handle.entry.readfrom(off, size)
        except arvados.errors.NotFoundError as e:
            _logger.warning("Block not found: " + str(e))
            raise llfuse.FUSEError(errno.EIO)
        except Exception:
            # BUG FIX: Logger.exception() requires a message argument; the
            # original bare call raised TypeError and masked the real error.
            _logger.exception("arv-mount read: unhandled error")
            raise llfuse.FUSEError(errno.EIO)

    def release(self, fh):
        # Forget the handle; unknown handles are silently ignored.
        if fh in self._filehandles:
            del self._filehandles[fh]

    def opendir(self, inode):
        '''Open a directory, snapshotting its listing into a file handle.'''
        _logger.debug("arv-mount opendir: inode %i", inode)

        if inode in self.inodes:
            p = self.inodes[inode]
        else:
            raise llfuse.FUSEError(errno.ENOENT)

        if not isinstance(p, Directory):
            raise llfuse.FUSEError(errno.ENOTDIR)

        fh = self._filehandles_counter
        self._filehandles_counter += 1
        if p.parent_inode in self.inodes:
            parent = self.inodes[p.parent_inode]
        else:
            raise llfuse.FUSEError(errno.EIO)

        # update atime
        p._atime = time.time()

        # Snapshot '.', '..' and the current entries so readdir() sees a
        # stable listing even if the directory is updated concurrently.
        self._filehandles[fh] = FileHandle(fh, [('.', p), ('..', parent)] + list(p.items()))
        return fh

    def readdir(self, fh, off):
        '''Yield (name, attrs, next_offset) for entries starting at off.'''
        _logger.debug("arv-mount readdir: fh %i off %i", fh, off)

        if fh in self._filehandles:
            handle = self._filehandles[fh]
        else:
            raise llfuse.FUSEError(errno.EBADF)

        _logger.debug("arv-mount handle.entry %s", handle.entry)

        e = off
        while e < len(handle.entry):
            if handle.entry[e][1].inode in self.inodes:
                try:
                    yield (handle.entry[e][0].encode(self.encoding), self.getattr(handle.entry[e][1].inode), e+1)
                except UnicodeEncodeError:
                    # Skip names that cannot be represented in the target
                    # filename encoding.
                    pass
            e += 1

    def releasedir(self, fh):
        del self._filehandles[fh]

    def statfs(self):
        # Report a zero-sized read-only filesystem.
        st = llfuse.StatvfsData()
        st.f_bsize = 64 * 1024
        st.f_blocks = 0
        st.f_files = 0

        st.f_bfree = 0
        st.f_bavail = 0

        st.f_ffree = 0
        st.f_favail = 0

        st.f_frsize = 0
        return st

    # The llfuse documentation recommends only overloading functions that
    # are actually implemented, as the default implementation will raise ENOSYS.
    # However, there is a bug in the llfuse default implementation of create()
    # "create() takes exactly 5 positional arguments (6 given)" which will crash
    # arv-mount.
    # The workaround is to implement it with the proper number of parameters,
    # and then everything works out.
    def create(self, p1, p2, p3, p4, p5):
        raise llfuse.FUSEError(errno.EROFS)
diff --git a/services/fuse/bin/arv-mount b/services/fuse/bin/arv-mount
new file mode 100755 (executable)
index 0000000..c047205
--- /dev/null
@@ -0,0 +1,193 @@
+#!/usr/bin/env python
+
+import argparse
+import arvados
+import daemon
+import logging
+import os
+import signal
+import subprocess
+import time
+
+import arvados.commands._util as arv_cmd
+from arvados_fuse import *
+
+logger = logging.getLogger('arvados.arv-mount')
+
+if __name__ == '__main__':
+    # Handle command line parameters
+    parser = argparse.ArgumentParser(
+        parents=[arv_cmd.retry_opt],
+        description='''Mount Keep data under the local filesystem.  Default mode is --home''',
+        epilog="""
+Note: When using the --exec feature, you must either specify the
+mountpoint before --exec, or mark the end of your --exec arguments
+with "--".
+""")
+    parser.add_argument('mountpoint', type=str, help="""Mount point.""")
+    parser.add_argument('--allow-other', action='store_true',
+                        help="""Let other users read the mount""")
+
+    mount_mode = parser.add_mutually_exclusive_group()
+
+    mount_mode.add_argument('--all', action='store_true', help="""Mount a subdirectory for each mode: home, shared, by_tag, by_id (default).""")
+    mount_mode.add_argument('--home', action='store_true', help="""Mount only the user's home project.""")
+    mount_mode.add_argument('--shared', action='store_true', help="""Mount only list of projects shared with the user.""")
+    mount_mode.add_argument('--by-tag', action='store_true',
+                            help="""Mount subdirectories listed by tag.""")
+    mount_mode.add_argument('--by-id', action='store_true',
+                            help="""Mount subdirectories listed by portable data hash or uuid.""")
+    mount_mode.add_argument('--project', type=str, help="""Mount a specific project.""")
+    mount_mode.add_argument('--collection', type=str, help="""Mount only the specified collection.""")
+
+    parser.add_argument('--debug', action='store_true', help="""Debug mode""")
+    parser.add_argument('--logfile', help="""Write debug logs and errors to the specified file (default stderr).""")
+    parser.add_argument('--foreground', action='store_true', help="""Run in foreground (default is to daemonize unless --exec specified)""", default=False)
+    parser.add_argument('--encoding', type=str, help="Character encoding to use for filesystem, default is utf-8 (see Python codec registry for list of available encodings)", default="utf-8")
+    parser.add_argument('--exec', type=str, nargs=argparse.REMAINDER,
+                        dest="exec_args", metavar=('command', 'args', '...', '--'),
+                        help="""Mount, run a command, then unmount and exit""")
+    parser.add_argument('--set-executable-bit', action='store_true', help="""Set executable bit on collection files""")
+
+    args = parser.parse_args()
+    args.mountpoint = os.path.realpath(args.mountpoint)
+    if args.logfile:
+        args.logfile = os.path.realpath(args.logfile)
+
+    # Daemonize as early as possible, so we don't accidentally close
+    # file descriptors we're using.
+    if not (args.exec_args or args.foreground):
+        os.chdir(args.mountpoint)
+        daemon_ctx = daemon.DaemonContext(working_directory='.')
+        daemon_ctx.open()
+    else:
+        daemon_ctx = None
+
+    # Configure a log handler based on command-line switches.
+    if args.logfile:
+        log_handler = logging.FileHandler(args.logfile)
+    elif daemon_ctx:
+        log_handler = logging.NullHandler()
+    else:
+        log_handler = None
+
+    if log_handler is not None:
+        arvados.logger.removeHandler(arvados.log_handler)
+        arvados.logger.addHandler(log_handler)
+
+    if args.debug:
+        arvados.logger.setLevel(logging.DEBUG)
+        logger.debug("arv-mount debugging enabled")
+
+    try:
+        # Create the request handler
+        operations = Operations(os.getuid(), os.getgid(), args.encoding, args.set_executable_bit)
+        api = SafeApi(arvados.config)
+
+        usr = api.users().current().execute(num_retries=args.retries)
+        now = time.time()
+        dir_class = None
+        dir_args = [llfuse.ROOT_INODE, operations.inodes, api, args.retries]
+        if args.by_id:
+            # Set up the request handler with the 'magic directory' at the root
+            dir_class = MagicDirectory
+        elif args.by_tag:
+            dir_class = TagsDirectory
+        elif args.shared:
+            dir_class = SharedDirectory
+            dir_args.append(usr)
+        elif args.home:
+            dir_class = ProjectDirectory
+            dir_args.append(usr)
+            dir_args.append(True)
+        elif args.collection is not None:
+            # Set up the request handler with the collection at the root
+            dir_class = CollectionDirectory
+            dir_args.append(args.collection)
+        elif args.project is not None:
+            dir_class = ProjectDirectory
+            dir_args.append(api.groups().get(uuid=args.project).execute(
+                    num_retries=args.retries))
+
+        if dir_class is not None:
+            operations.inodes.add_entry(dir_class(*dir_args))
+        else:
+            e = operations.inodes.add_entry(Directory(llfuse.ROOT_INODE))
+            dir_args[0] = e.inode
+
+            e._entries['by_id'] = operations.inodes.add_entry(MagicDirectory(*dir_args))
+            e._entries['by_tag'] = operations.inodes.add_entry(TagsDirectory(*dir_args))
+
+            dir_args.append(usr)
+            dir_args.append(True)
+            e._entries['home'] = operations.inodes.add_entry(ProjectDirectory(*dir_args))
+            e._entries['shared'] = operations.inodes.add_entry(SharedDirectory(*dir_args))
+
+            text = '''
+Welcome to Arvados!  This directory provides file system access to files and objects
+available on the Arvados installation located at '{}'
+using credentials for user '{}'.
+
+From here, the following directories are available:
+
+  by_id/     Access to Keep collections by uuid or portable data hash (see by_id/README for details).
+  by_tag/    Access to Keep collections organized by tag.
+  home/      The contents of your home project.
+  shared/    Projects shared with you.
+'''.format(arvados.config.get('ARVADOS_API_HOST'), usr['email'])
+
+            e._entries["README"] = operations.inodes.add_entry(StringFile(e.inode, text, now))
+
+
+    except Exception:
+        logger.exception("arv-mount: exception during API setup")
+        exit(1)
+
+    # FUSE options, see mount.fuse(8)
+    opts = [optname for optname in ['allow_other', 'debug']
+            if getattr(args, optname)]
+
+    if args.exec_args:
+        # Initialize the fuse connection
+        llfuse.init(operations, args.mountpoint, opts)
+
+        t = threading.Thread(None, lambda: llfuse.main())
+        t.start()
+
+        # wait until the driver is finished initializing
+        operations.initlock.wait()
+
+        rc = 255
+        try:
+            sp = subprocess.Popen(args.exec_args, shell=False)
+
+            # forward signals to the process.
+            signal.signal(signal.SIGINT, lambda signum, frame: sp.send_signal(signum))
+            signal.signal(signal.SIGTERM, lambda signum, frame: sp.send_signal(signum))
+            signal.signal(signal.SIGQUIT, lambda signum, frame: sp.send_signal(signum))
+
+            # wait for process to complete.
+            rc = sp.wait()
+
+            # restore default signal handlers.
+            signal.signal(signal.SIGINT, signal.SIG_DFL)
+            signal.signal(signal.SIGTERM, signal.SIG_DFL)
+            signal.signal(signal.SIGQUIT, signal.SIG_DFL)
+        except Exception as e:
+            logger.exception('arv-mount: exception during exec %s',
+                             args.exec_args)
+            try:
+                rc = e.errno
+            except AttributeError:
+                pass
+        finally:
+            subprocess.call(["fusermount", "-u", "-z", args.mountpoint])
+
+        exit(rc)
+    else:
+        try:
+            llfuse.init(operations, args.mountpoint, opts)
+            llfuse.main()
+        except Exception as e:
+            logger.exception('arv-mount: exception during mount')
+            exit(getattr(e, 'errno', 1))
diff --git a/services/fuse/setup.py b/services/fuse/setup.py
new file mode 100644 (file)
index 0000000..43b563a
--- /dev/null
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+import os
+import subprocess
+import time
+
+from setuptools import setup, find_packages
+from setuptools.command.egg_info import egg_info
+
+SETUP_DIR = os.path.dirname(__file__)
+README = os.path.join(SETUP_DIR, 'README.rst')
+
+class TagBuildWithCommit(egg_info):
+    """Tag the build with the sha1 and date of the last git commit.
+
+    If a build tag has already been set (e.g., "egg_info -b", building
+    from source package), leave it alone.
+    """
+    def tags(self):
+        if self.tag_build is None:
+            git_tags = subprocess.check_output(
+                ['git', 'log', '--first-parent', '--max-count=1',
+                 '--format=format:%ct %h', SETUP_DIR]).split()
+            assert len(git_tags) == 2
+            git_tags[0] = time.strftime(
+                '%Y%m%d%H%M%S', time.gmtime(int(git_tags[0])))
+            self.tag_build = '.{}+{}'.format(*git_tags)
+        return egg_info.tags(self)
+
+
+setup(name='arvados_fuse',
+      version='0.1',
+      description='Arvados FUSE driver',
+      long_description=open(README).read(),
+      author='Arvados',
+      author_email='info@arvados.org',
+      url="https://arvados.org",
+      download_url="https://github.com/curoverse/arvados.git",
+      license='GNU Affero General Public License, version 3.0',
+      packages=['arvados_fuse'],
+      scripts=[
+        'bin/arv-mount'
+        ],
+      install_requires=[
+        'arvados-python-client>=0.1.20141203150737.277b3c7',
+        'llfuse',
+        'python-daemon'
+        ],
+      test_suite='tests',
+      tests_require=['PyYAML'],
+      zip_safe=False,
+      cmdclass={'egg_info': TagBuildWithCommit},
+      )
diff --git a/services/fuse/tests/__init__.py b/services/fuse/tests/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/services/fuse/tests/run_test_server.py b/services/fuse/tests/run_test_server.py
new file mode 120000 (symlink)
index 0000000..76bcc16
--- /dev/null
@@ -0,0 +1 @@
+../../../sdk/python/tests/run_test_server.py
\ No newline at end of file
diff --git a/services/fuse/tests/test_mount.py b/services/fuse/tests/test_mount.py
new file mode 100644 (file)
index 0000000..84dceee
--- /dev/null
@@ -0,0 +1,321 @@
+import unittest
+import arvados
+import arvados_fuse as fuse
+import threading
+import time
+import os
+import llfuse
+import tempfile
+import shutil
+import subprocess
+import glob
+import run_test_server
+import json
+
+class MountTestBase(unittest.TestCase):
+    def setUp(self):
+        self.keeptmp = tempfile.mkdtemp()
+        os.environ['KEEP_LOCAL_STORE'] = self.keeptmp
+        self.mounttmp = tempfile.mkdtemp()
+        run_test_server.run(False)
+        run_test_server.authorize_with("admin")
+        self.api = api = fuse.SafeApi(arvados.config)
+
+    def make_mount(self, root_class, *root_args):
+        operations = fuse.Operations(os.getuid(), os.getgid())
+        operations.inodes.add_entry(root_class(
+                llfuse.ROOT_INODE, operations.inodes, self.api, 0, *root_args))
+        llfuse.init(operations, self.mounttmp, [])
+        threading.Thread(None, llfuse.main).start()
+        # wait until the driver is finished initializing
+        operations.initlock.wait()
+
+    def tearDown(self):
+        run_test_server.stop()
+
+        # llfuse.close is buggy, so use fusermount instead.
+        #llfuse.close(unmount=True)
+        count = 0
+        success = 1
+        while (count < 9 and success != 0):
+          success = subprocess.call(["fusermount", "-u", self.mounttmp])
+          time.sleep(0.5)
+          count += 1
+
+        os.rmdir(self.mounttmp)
+        shutil.rmtree(self.keeptmp)
+
+    def assertDirContents(self, subdir, expect_content):
+        path = self.mounttmp
+        if subdir:
+            path = os.path.join(path, subdir)
+        self.assertEqual(sorted(expect_content), sorted(os.listdir(path)))
+
+
+class FuseMountTest(MountTestBase):
+    def setUp(self):
+        super(FuseMountTest, self).setUp()
+
+        cw = arvados.CollectionWriter()
+
+        cw.start_new_file('thing1.txt')
+        cw.write("data 1")
+        cw.start_new_file('thing2.txt')
+        cw.write("data 2")
+        cw.start_new_stream('dir1')
+
+        cw.start_new_file('thing3.txt')
+        cw.write("data 3")
+        cw.start_new_file('thing4.txt')
+        cw.write("data 4")
+
+        cw.start_new_stream('dir2')
+        cw.start_new_file('thing5.txt')
+        cw.write("data 5")
+        cw.start_new_file('thing6.txt')
+        cw.write("data 6")
+
+        cw.start_new_stream('dir2/dir3')
+        cw.start_new_file('thing7.txt')
+        cw.write("data 7")
+
+        cw.start_new_file('thing8.txt')
+        cw.write("data 8")
+
+        cw.start_new_stream('edgecases')
+        for f in ":/./../.../-/*/\x01\\/ ".split("/"):
+            cw.start_new_file(f)
+            cw.write('x')
+
+        for f in ":/../.../-/*/\x01\\/ ".split("/"):
+            cw.start_new_stream('edgecases/dirs/' + f)
+            cw.start_new_file('x/x')
+            cw.write('x')
+
+        self.testcollection = cw.finish()
+        self.api.collections().create(body={"manifest_text":cw.manifest_text()}).execute()
+
+    def runTest(self):
+        self.make_mount(fuse.CollectionDirectory, self.testcollection)
+
+        self.assertDirContents(None, ['thing1.txt', 'thing2.txt',
+                                      'edgecases', 'dir1', 'dir2'])
+        self.assertDirContents('dir1', ['thing3.txt', 'thing4.txt'])
+        self.assertDirContents('dir2', ['thing5.txt', 'thing6.txt', 'dir3'])
+        self.assertDirContents('dir2/dir3', ['thing7.txt', 'thing8.txt'])
+        self.assertDirContents('edgecases',
+                               "dirs/:/_/__/.../-/*/\x01\\/ ".split("/"))
+        self.assertDirContents('edgecases/dirs',
+                               ":/__/.../-/*/\x01\\/ ".split("/"))
+
+        files = {'thing1.txt': 'data 1',
+                 'thing2.txt': 'data 2',
+                 'dir1/thing3.txt': 'data 3',
+                 'dir1/thing4.txt': 'data 4',
+                 'dir2/thing5.txt': 'data 5',
+                 'dir2/thing6.txt': 'data 6',
+                 'dir2/dir3/thing7.txt': 'data 7',
+                 'dir2/dir3/thing8.txt': 'data 8'}
+
+        for k, v in files.items():
+            with open(os.path.join(self.mounttmp, k)) as f:
+                self.assertEqual(v, f.read())
+
+
+class FuseNoAPITest(MountTestBase):
+    def setUp(self):
+        super(FuseNoAPITest, self).setUp()
+        keep = arvados.keep.KeepClient(local_store=self.keeptmp)
+        self.file_data = "API-free text\n"
+        self.file_loc = keep.put(self.file_data)
+        self.coll_loc = keep.put(". {} 0:{}:api-free.txt\n".format(
+                self.file_loc, len(self.file_data)))
+
+    def runTest(self):
+        self.make_mount(fuse.MagicDirectory)
+        self.assertDirContents(self.coll_loc, ['api-free.txt'])
+        with open(os.path.join(
+                self.mounttmp, self.coll_loc, 'api-free.txt')) as keep_file:
+            actual = keep_file.read(-1)
+        self.assertEqual(self.file_data, actual)
+
+
+class FuseMagicTest(MountTestBase):
+    def setUp(self):
+        super(FuseMagicTest, self).setUp()
+
+        cw = arvados.CollectionWriter()
+
+        cw.start_new_file('thing1.txt')
+        cw.write("data 1")
+
+        self.testcollection = cw.finish()
+        self.api.collections().create(body={"manifest_text":cw.manifest_text()}).execute()
+
+    def runTest(self):
+        self.make_mount(fuse.MagicDirectory)
+
+        mount_ls = os.listdir(self.mounttmp)
+        self.assertIn('README', mount_ls)
+        self.assertFalse(any(arvados.util.keep_locator_pattern.match(fn) or
+                             arvados.util.uuid_pattern.match(fn)
+                             for fn in mount_ls),
+                         "new FUSE MagicDirectory lists Collection")
+        self.assertDirContents(self.testcollection, ['thing1.txt'])
+        self.assertDirContents(os.path.join('by_id', self.testcollection),
+                               ['thing1.txt'])
+        mount_ls = os.listdir(self.mounttmp)
+        self.assertIn('README', mount_ls)
+        self.assertIn(self.testcollection, mount_ls)
+        self.assertIn(self.testcollection,
+                      os.listdir(os.path.join(self.mounttmp, 'by_id')))
+
+        files = {}
+        files[os.path.join(self.mounttmp, self.testcollection, 'thing1.txt')] = 'data 1'
+
+        for k, v in files.items():
+            with open(os.path.join(self.mounttmp, k)) as f:
+                self.assertEqual(v, f.read())
+
+
+class FuseTagsTest(MountTestBase):
+    def runTest(self):
+        self.make_mount(fuse.TagsDirectory)
+
+        d1 = os.listdir(self.mounttmp)
+        d1.sort()
+        self.assertEqual(['foo_tag'], d1)
+
+        d2 = os.listdir(os.path.join(self.mounttmp, 'foo_tag'))
+        d2.sort()
+        self.assertEqual(['zzzzz-4zz18-fy296fx3hot09f7'], d2)
+
+        d3 = os.listdir(os.path.join(self.mounttmp, 'foo_tag', 'zzzzz-4zz18-fy296fx3hot09f7'))
+        d3.sort()
+        self.assertEqual(['foo'], d3)
+
+
+class FuseTagsUpdateTest(MountTestBase):
+    def tag_collection(self, coll_uuid, tag_name):
+        return self.api.links().create(
+            body={'link': {'head_uuid': coll_uuid,
+                           'link_class': 'tag',
+                           'name': tag_name,
+        }}).execute()
+
+    def runTest(self):
+        operations = fuse.Operations(os.getuid(), os.getgid())
+        e = operations.inodes.add_entry(fuse.TagsDirectory(llfuse.ROOT_INODE, operations.inodes, self.api, 0, poll_time=1))
+
+        llfuse.init(operations, self.mounttmp, [])
+        t = threading.Thread(None, lambda: llfuse.main())
+        t.start()
+
+        # wait until the driver is finished initializing
+        operations.initlock.wait()
+        self.assertIn('foo_tag', os.listdir(self.mounttmp))
+
+        bar_uuid = run_test_server.fixture('collections')['bar_file']['uuid']
+        self.tag_collection(bar_uuid, 'fuse_test_tag')
+        time.sleep(1)
+        self.assertIn('fuse_test_tag', os.listdir(self.mounttmp))
+        self.assertDirContents('fuse_test_tag', [bar_uuid])
+
+        baz_uuid = run_test_server.fixture('collections')['baz_file']['uuid']
+        l = self.tag_collection(baz_uuid, 'fuse_test_tag')
+        time.sleep(1)
+        self.assertDirContents('fuse_test_tag', [bar_uuid, baz_uuid])
+
+        self.api.links().delete(uuid=l['uuid']).execute()
+        time.sleep(1)
+        self.assertDirContents('fuse_test_tag', [bar_uuid])
+
+
+class FuseSharedTest(MountTestBase):
+    def runTest(self):
+        self.make_mount(fuse.SharedDirectory,
+                        self.api.users().current().execute()['uuid'])
+
+        # shared_dirs is a list of the directories exposed
+        # by fuse.SharedDirectory (i.e. any object visible
+        # to the current user)
+        shared_dirs = os.listdir(self.mounttmp)
+        shared_dirs.sort()
+        self.assertIn('FUSE User', shared_dirs)
+
+        # fuse_user_objs is a list of the objects owned by the FUSE
+        # test user (which present as files in the 'FUSE User'
+        # directory)
+        fuse_user_objs = os.listdir(os.path.join(self.mounttmp, 'FUSE User'))
+        fuse_user_objs.sort()
+        self.assertEqual(['Empty collection.link',                # permission link on collection
+                          'FUSE Test Project',                    # project owned by user
+                          'collection #1 owned by FUSE',          # collection owned by user
+                          'collection #2 owned by FUSE',          # collection owned by user
+                          'pipeline instance owned by FUSE.pipelineInstance',  # pipeline instance owned by user
+                      ], fuse_user_objs)
+
+        # test_proj_files is a list of the files in the FUSE Test Project.
+        test_proj_files = os.listdir(os.path.join(self.mounttmp, 'FUSE User', 'FUSE Test Project'))
+        test_proj_files.sort()
+        self.assertEqual(['collection in FUSE project',
+                          'pipeline instance in FUSE project.pipelineInstance',
+                          'pipeline template in FUSE project.pipelineTemplate'
+                      ], test_proj_files)
+
+        # Double check that we can open and read objects in this folder as a file,
+        # and that its contents are what we expect.
+        with open(os.path.join(
+                self.mounttmp,
+                'FUSE User',
+                'FUSE Test Project',
+                'pipeline template in FUSE project.pipelineTemplate')) as f:
+            j = json.load(f)
+            self.assertEqual("pipeline template in FUSE project", j['name'])
+
+
+class FuseHomeTest(MountTestBase):
+    def runTest(self):
+        self.make_mount(fuse.ProjectDirectory,
+                        self.api.users().current().execute())
+
+        d1 = os.listdir(self.mounttmp)
+        self.assertIn('Unrestricted public data', d1)
+
+        d2 = os.listdir(os.path.join(self.mounttmp, 'Unrestricted public data'))
+        self.assertEqual(['GNU General Public License, version 3'], d2)
+
+        d3 = os.listdir(os.path.join(self.mounttmp, 'Unrestricted public data', 'GNU General Public License, version 3'))
+        self.assertEqual(["GNU_General_Public_License,_version_3.pdf"], d3)
+
+
+class FuseUnitTest(unittest.TestCase):
+    def test_sanitize_filename(self):
+        acceptable = [
+            "foo.txt",
+            ".foo",
+            "..foo",
+            "...",
+            "foo...",
+            "foo..",
+            "foo.",
+            "-",
+            "\x01\x02\x03",
+            ]
+        unacceptable = [
+            "f\00",
+            "\00\00",
+            "/foo",
+            "foo/",
+            "//",
+            ]
+        for f in acceptable:
+            self.assertEqual(f, fuse.sanitize_filename(f))
+        for f in unacceptable:
+            self.assertNotEqual(f, fuse.sanitize_filename(f))
+            # The sanitized filename should be the same length, though.
+            self.assertEqual(len(f), len(fuse.sanitize_filename(f)))
+        # Special cases
+        self.assertEqual("_", fuse.sanitize_filename(""))
+        self.assertEqual("_", fuse.sanitize_filename("."))
+        self.assertEqual("__", fuse.sanitize_filename(".."))
diff --git a/services/keep/tools/traffic_test.py b/services/keep/tools/traffic_test.py
new file mode 100755 (executable)
index 0000000..26285c1
--- /dev/null
@@ -0,0 +1,126 @@
+#! /usr/bin/env python
+
+# traffic_test.py
+#
+# Launch a test Keep and API server and PUT and GET a bunch of blocks.
+# Can be used to simulate client traffic in Keep to evaluate memory usage,
+# error logging, performance, etc.
+#
+# This script is warty and is relatively environment-specific, but the
+# example run described below should execute cleanly.
+#
+# Usage:
+#   traffic_test.py start
+#       Starts the test servers.
+#   traffic_test.py put file1 file2 file3 ....
+#       Runs arv-put on each file.
+#   traffic_test.py get hash1 hash2 hash3 ....
+#       Loops forever issuing GET requests for specified blocks.
+#   traffic_test.py stop
+#       Stops the test servers.
+#
+# Example:
+#
+#   $ ./traffic_test.py start
+#   $ ./traffic_test.py put GS00253-DNA_A02_200_37.tsv.bz2 \
+#         GS00253-DNA_B01_200_37.tsv.bz2 \
+#         GS00253-DNA_B02_200_37.tsv.bz2
+#   $ ./traffic_test.py get $(find /tmp/tmp* -type f -printf "%f ")
+#     [loops forever]
+#     ^C
+#   $ ./traffic_test.py stop
+#
+# Multiple "get" runs may be run concurrently to evaluate Keep's handling
+# of additional concurrent clients.
+
+PYSDK_DIR    = "../../../sdk/python"
+PYTEST_DIR   = PYSDK_DIR + "/tests"
+ARV_PUT_PATH = PYSDK_DIR + "/bin/arv-put"
+ARV_GET_PATH = PYSDK_DIR + "/bin/arv-get"
+SECONDS_BETWEEN_GETS = 1
+
+import argparse
+import httplib2
+import os
+import random
+import subprocess
+import sys
+import time
+
+# for run_test_server.py
+sys.path.insert(0, PYSDK_DIR)
+sys.path.insert(0, PYTEST_DIR)
+import arvados
+import run_test_server
+
+def arv_cmd(*args):
+    p = subprocess.Popen([sys.executable] + list(args),
+                         stdout=subprocess.PIPE)
+    (arvout, arverr) = p.communicate()
+    if p.returncode != 0:
+        print "error {} from {} {}: {}".format(
+            p.returncode, sys.executable, args, arverr)
+        sys.exit(p.returncode)
+    return arvout
+
+def start():
+    run_test_server.run()
+    run_test_server.run_keep()
+
+def put(files):
+    os.environ["ARVADOS_API_HOST"] = "127.0.0.1:3000"
+    run_test_server.authorize_with('active')
+    for v in ["ARVADOS_API_HOST",
+              "ARVADOS_API_HOST_INSECURE",
+              "ARVADOS_API_TOKEN"]:
+        os.environ[v] = arvados.config.settings()[v]
+
+    if not os.environ.has_key('PYTHONPATH'):
+        os.environ['PYTHONPATH'] = ''
+    os.environ['PYTHONPATH'] = "{}:{}:{}".format(
+        PYSDK_DIR, PYTEST_DIR, os.environ['PYTHONPATH'])
+
+    for c in files:
+        manifest_uuid = arv_cmd(ARV_PUT_PATH, c)
+
+def get(blocks):
+    os.environ["ARVADOS_API_HOST"] = "127.0.0.1:3000"
+
+    run_test_server.authorize_with('active')
+    for v in ["ARVADOS_API_HOST",
+              "ARVADOS_API_HOST_INSECURE",
+              "ARVADOS_API_TOKEN"]:
+        os.environ[v] = arvados.config.settings()[v]
+
+    nqueries = 0
+    while True:
+        b = random.choice(blocks)
+        print "GET /" + b
+        body = arv_cmd(ARV_GET_PATH, b)
+        print "got {} bytes".format(len(body))
+        time.sleep(SECONDS_BETWEEN_GETS)
+        nqueries = nqueries + 1
+
+def stop():
+    run_test_server.stop_keep()
+    run_test_server.stop()
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument('action',
+                        type=str,
+                        nargs='+',
+                        help='''"start", "put", "get", "stop"''')
+    args = parser.parse_args()
+
+    if args.action[0] == 'start':
+        start()
+    elif args.action[0] == 'put':
+        put(args.action[1:])
+    elif args.action[0] == 'get':
+        get(args.action[1:])
+    elif args.action[0] == 'stop':
+        stop()
+    else:
+        print('Unrecognized action "{}"'.format(args.action))
+        print('actions are "start", "put", "get", "stop"')
diff --git a/services/keepproxy/keepproxy.go b/services/keepproxy/keepproxy.go
new file mode 100644 (file)
index 0000000..ea14c6c
--- /dev/null
@@ -0,0 +1,466 @@
+package main
+
+import (
+       "flag"
+       "fmt"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+       "github.com/gorilla/mux"
+       "io"
+       "io/ioutil"
+       "log"
+       "net"
+       "net/http"
+       "os"
+       "os/signal"
+       "sync"
+       "syscall"
+       "time"
+)
+
+// Default TCP address on which to listen for requests.
+// Initialized by the -listen flag.
+const DEFAULT_ADDR = ":25107"
+
+var listener net.Listener
+
+func main() {
+       var (
+               listen           string
+               no_get           bool
+               no_put           bool
+               default_replicas int
+               timeout          int64
+               pidfile          string
+       )
+
+       flagset := flag.NewFlagSet("default", flag.ExitOnError)
+
+       flagset.StringVar(
+               &listen,
+               "listen",
+               DEFAULT_ADDR,
+               "Interface on which to listen for requests, in the format "+
+                       "ipaddr:port. e.g. -listen=10.0.1.24:8000. Use -listen=:port "+
+                       "to listen on all network interfaces.")
+
+       flagset.BoolVar(
+               &no_get,
+               "no-get",
+               false,
+               "If set, disable GET operations")
+
+       flagset.BoolVar(
+               &no_put,
+               "no-put",
+               false,
+               "If set, disable PUT operations")
+
+       flagset.IntVar(
+               &default_replicas,
+               "default-replicas",
+               2,
+               "Default number of replicas to write if not specified by the client.")
+
+       flagset.Int64Var(
+               &timeout,
+               "timeout",
+               15,
+               "Timeout on requests to internal Keep services (default 15 seconds)")
+
+       flagset.StringVar(
+               &pidfile,
+               "pid",
+               "",
+               "Path to write pid file")
+
+       flagset.Parse(os.Args[1:])
+
+       arv, err := arvadosclient.MakeArvadosClient()
+       if err != nil {
+               log.Fatalf("Error setting up arvados client %s", err.Error())
+       }
+
+       kc, err := keepclient.MakeKeepClient(&arv)
+       if err != nil {
+               log.Fatalf("Error setting up keep client %s", err.Error())
+       }
+
+       if pidfile != "" {
+               f, err := os.Create(pidfile)
+               if err != nil {
+                       log.Fatalf("Error writing pid file (%s): %s", pidfile, err.Error())
+               }
+               fmt.Fprint(f, os.Getpid())
+               f.Close()
+               defer os.Remove(pidfile)
+       }
+
+       kc.Want_replicas = default_replicas
+
+       kc.Client.Timeout = time.Duration(timeout) * time.Second
+
+       listener, err = net.Listen("tcp", listen)
+       if err != nil {
+               log.Fatalf("Could not listen on %v", listen)
+       }
+
+       go RefreshServicesList(&kc)
+
+       // Shut down the server gracefully (by closing the listener)
+       // if SIGTERM is received.
+       term := make(chan os.Signal, 1)
+       go func(sig <-chan os.Signal) {
+               s := <-sig
+               log.Println("caught signal:", s)
+               listener.Close()
+       }(term)
+       signal.Notify(term, syscall.SIGTERM)
+       signal.Notify(term, syscall.SIGINT)
+
+       log.Printf("Arvados Keep proxy started listening on %v with server list %v", listener.Addr(), kc.ServiceRoots())
+
+       // Start listening for requests.
+       http.Serve(listener, MakeRESTRouter(!no_get, !no_put, &kc))
+
+       log.Println("shutting down")
+}
+
+type ApiTokenCache struct {
+       tokens     map[string]int64
+       lock       sync.Mutex
+       expireTime int64
+}
+
+// Refresh the keep service list every five minutes.
+func RefreshServicesList(kc *keepclient.KeepClient) {
+       for {
+               time.Sleep(300 * time.Second)
+               oldservices := kc.ServiceRoots()
+               kc.DiscoverKeepServers()
+               newservices := kc.ServiceRoots()
+               s1 := fmt.Sprint(oldservices)
+               s2 := fmt.Sprint(newservices)
+               if s1 != s2 {
+                       log.Printf("Updated server list to %v", s2)
+               }
+       }
+}
+
+// Cache the token and set an expire time.  If we already have an expire time
+// on the token, it is not updated.
+func (this *ApiTokenCache) RememberToken(token string) {
+       this.lock.Lock()
+       defer this.lock.Unlock()
+
+       now := time.Now().Unix()
+       if this.tokens[token] == 0 {
+               this.tokens[token] = now + this.expireTime
+       }
+}
+
+// Check if the cached token is known and still believed to be valid.
+// Returns true only when the token is present and not yet expired;
+// an expired entry is reset to 0 ("unknown") as a side effect.
+func (this *ApiTokenCache) RecallToken(token string) bool {
+	this.lock.Lock()
+	defer this.lock.Unlock()
+
+	now := time.Now().Unix()
+	if this.tokens[token] == 0 {
+		// Unknown token
+		return false
+	} else if now < this.tokens[token] {
+		// Token is known and still valid
+		return true
+	} else {
+		// Token is expired
+		this.tokens[token] = 0
+		return false
+	}
+}
+
+// GetRemoteAddress returns a loggable description of the client's
+// address, preferring the X-Real-IP header (and including
+// X-Forwarded-For when it differs) over the socket's remote address.
+// NOTE(review): this trusts proxy-supplied headers without
+// verification, so it is for logging only, not access control.
+func GetRemoteAddress(req *http.Request) string {
+	if realip := req.Header.Get("X-Real-IP"); realip != "" {
+		if forwarded := req.Header.Get("X-Forwarded-For"); forwarded != realip {
+			return fmt.Sprintf("%s (X-Forwarded-For %s)", realip, forwarded)
+		} else {
+			return realip
+		}
+	}
+	return req.RemoteAddr
+}
+
+// CheckAuthorizationHeader extracts the "OAuth2 <token>" Authorization
+// header from req and reports whether the token is valid, returning
+// the token itself on success.  Validity is checked first against the
+// cache, then (on a miss) by making a HEAD request for the current
+// user with the client's token; a successful call updates the cache.
+func CheckAuthorizationHeader(kc keepclient.KeepClient, cache *ApiTokenCache, req *http.Request) (pass bool, tok string) {
+	var auth string
+	if auth = req.Header.Get("Authorization"); auth == "" {
+		return false, ""
+	}
+
+	_, err := fmt.Sscanf(auth, "OAuth2 %s", &tok)
+	if err != nil {
+		// Scanning error
+		return false, ""
+	}
+
+	if cache.RecallToken(tok) {
+		// Valid in the cache, short circut
+		return true, tok
+	}
+
+	// Validate the token against the API server using a copy of the
+	// client so the proxy's own credentials are not modified.
+	arv := *kc.Arvados
+	arv.ApiToken = tok
+	if err := arv.Call("HEAD", "users", "", "current", nil, nil); err != nil {
+		log.Printf("%s: CheckAuthorizationHeader error: %v", GetRemoteAddress(req), err)
+		return false, ""
+	}
+
+	// Success!  Update cache
+	cache.RememberToken(tok)
+
+	return true, tok
+}
+
+// GetBlockHandler serves GET and HEAD requests for individual blocks.
+type GetBlockHandler struct {
+	*keepclient.KeepClient
+	*ApiTokenCache
+}
+
+// PutBlockHandler serves PUT and POST requests that store blocks.
+type PutBlockHandler struct {
+	*keepclient.KeepClient
+	*ApiTokenCache
+}
+
+// InvalidPathHandler rejects requests that match no route.
+type InvalidPathHandler struct{}
+
+// OptionsHandler answers CORS preflight (OPTIONS) requests.
+type OptionsHandler struct{}
+
+// MakeRESTRouter
+//     Returns a mux.Router that passes GET and PUT requests to the
+//     appropriate handlers.
+//
+func MakeRESTRouter(
+	enable_get bool,
+	enable_put bool,
+	kc *keepclient.KeepClient) *mux.Router {
+
+	// Shared token cache; entries live for 300 seconds.
+	t := &ApiTokenCache{tokens: make(map[string]int64), expireTime: 300}
+
+	rest := mux.NewRouter()
+
+	if enable_get {
+		rest.Handle(`/{hash:[0-9a-f]{32}}+{hints}`,
+			GetBlockHandler{kc, t}).Methods("GET", "HEAD")
+		rest.Handle(`/{hash:[0-9a-f]{32}}`, GetBlockHandler{kc, t}).Methods("GET", "HEAD")
+	}
+
+	if enable_put {
+		rest.Handle(`/{hash:[0-9a-f]{32}}+{hints}`, PutBlockHandler{kc, t}).Methods("PUT")
+		rest.Handle(`/{hash:[0-9a-f]{32}}`, PutBlockHandler{kc, t}).Methods("PUT")
+		rest.Handle(`/`, PutBlockHandler{kc, t}).Methods("POST")
+		// NOTE(review): OPTIONS (CORS preflight) routes are only
+		// registered when PUT is enabled, so a GET-only proxy will
+		// not answer preflights — confirm this is intentional.
+		rest.Handle(`/{any}`, OptionsHandler{}).Methods("OPTIONS")
+		rest.Handle(`/`, OptionsHandler{}).Methods("OPTIONS")
+	}
+
+	rest.NotFoundHandler = InvalidPathHandler{}
+
+	return rest
+}
+
+// SetCorsHeaders adds permissive CORS headers to every proxy response.
+func SetCorsHeaders(resp http.ResponseWriter) {
+	resp.Header().Set("Access-Control-Allow-Methods", "GET, HEAD, POST, PUT, OPTIONS")
+	resp.Header().Set("Access-Control-Allow-Origin", "*")
+	resp.Header().Set("Access-Control-Allow-Headers", "Authorization, Content-Length, Content-Type, X-Keep-Desired-Replicas")
+	// NOTE(review): 86486400 seconds is over 1000 days — this looks
+	// like a typo for 86400 (one day); confirm the intended value.
+	resp.Header().Set("Access-Control-Max-Age", "86486400")
+}
+
+// ServeHTTP logs and rejects any request that matched no route.
+func (this InvalidPathHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+	log.Printf("%s: %s %s unroutable", GetRemoteAddress(req), req.Method, req.URL.Path)
+	http.Error(resp, "Bad request", http.StatusBadRequest)
+}
+
+// ServeHTTP answers a CORS preflight with the standard headers and an
+// empty 200 body.
+func (this OptionsHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+	log.Printf("%s: %s %s", GetRemoteAddress(req), req.Method, req.URL.Path)
+	SetCorsHeaders(resp)
+}
+
+// ServeHTTP handles GET and HEAD requests for a block: it checks the
+// Authorization header, forwards the request to the Keep servers with
+// the client's own token, and streams the block (or its length) back.
+func (this GetBlockHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+	SetCorsHeaders(resp)
+
+	kc := *this.KeepClient
+
+	hash := mux.Vars(req)["hash"]
+	hints := mux.Vars(req)["hints"]
+
+	locator := keepclient.MakeLocator2(hash, hints)
+
+	log.Printf("%s: %s %s begin", GetRemoteAddress(req), req.Method, hash)
+
+	var pass bool
+	var tok string
+	if pass, tok = CheckAuthorizationHeader(kc, this.ApiTokenCache, req); !pass {
+		http.Error(resp, "Missing or invalid Authorization header", http.StatusForbidden)
+		return
+	}
+
+	// Copy ArvadosClient struct and use the client's API token
+	arvclient := *kc.Arvados
+	arvclient.ApiToken = tok
+	kc.Arvados = &arvclient
+
+	var reader io.ReadCloser
+	var err error
+	var blocklen int64
+
+	if req.Method == "GET" {
+		reader, blocklen, _, err = kc.AuthorizedGet(hash, locator.Signature, locator.Timestamp)
+		// BUGFIX: only defer Close when a reader was actually
+		// returned.  The original deferred reader.Close()
+		// unconditionally, which panics with a nil pointer
+		// dereference whenever AuthorizedGet fails and returns a
+		// nil reader.
+		if reader != nil {
+			defer reader.Close()
+		}
+	} else if req.Method == "HEAD" {
+		blocklen, _, err = kc.AuthorizedAsk(hash, locator.Signature, locator.Timestamp)
+	}
+
+	if blocklen > -1 {
+		resp.Header().Set("Content-Length", fmt.Sprint(blocklen))
+	} else {
+		log.Printf("%s: %s %s Keep server did not return Content-Length",
+			GetRemoteAddress(req), req.Method, hash)
+	}
+
+	var status = 0
+	switch err {
+	case nil:
+		status = http.StatusOK
+		if reader != nil {
+			// Stream the block body and log whether the copied size
+			// matched the advertised Content-Length.
+			n, err2 := io.Copy(resp, reader)
+			if blocklen > -1 && n != blocklen {
+				log.Printf("%s: %s %s %v %v mismatched copy size expected Content-Length: %v",
+					GetRemoteAddress(req), req.Method, hash, status, n, blocklen)
+			} else if err2 == nil {
+				log.Printf("%s: %s %s %v %v",
+					GetRemoteAddress(req), req.Method, hash, status, n)
+			} else {
+				log.Printf("%s: %s %s %v %v copy error: %v",
+					GetRemoteAddress(req), req.Method, hash, status, n, err2.Error())
+			}
+		} else {
+			log.Printf("%s: %s %s %v 0", GetRemoteAddress(req), req.Method, hash, status)
+		}
+	case keepclient.BlockNotFound:
+		status = http.StatusNotFound
+		http.Error(resp, "Not found", http.StatusNotFound)
+	default:
+		status = http.StatusBadGateway
+		http.Error(resp, err.Error(), http.StatusBadGateway)
+	}
+
+	if err != nil {
+		log.Printf("%s: %s %s %v error: %v",
+			GetRemoteAddress(req), req.Method, hash, status, err.Error())
+	}
+}
+
+// ServeHTTP handles PUT (hash in the URL) and POST (hash computed from
+// the body) requests: it validates Content-Length and the client's
+// token, writes the block through to the Keep servers, and reports the
+// number of replicas stored via X-Keep-Replicas-Stored.
+func (this PutBlockHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+	SetCorsHeaders(resp)
+
+	kc := *this.KeepClient
+
+	hash := mux.Vars(req)["hash"]
+	hints := mux.Vars(req)["hints"]
+
+	locator := keepclient.MakeLocator2(hash, hints)
+
+	var contentLength int64 = -1
+	if req.Header.Get("Content-Length") != "" {
+		_, err := fmt.Sscanf(req.Header.Get("Content-Length"), "%d", &contentLength)
+		if err != nil {
+			// BUGFIX: a malformed Content-Length must leave
+			// contentLength at -1 so the request is rejected with
+			// 411 below.  The original instead set a meaningless
+			// Content-Length header on the *response* here.
+			contentLength = -1
+		}
+
+	}
+
+	log.Printf("%s: %s %s Content-Length %v", GetRemoteAddress(req), req.Method, hash, contentLength)
+
+	if contentLength < 0 {
+		http.Error(resp, "Must include Content-Length header", http.StatusLengthRequired)
+		return
+	}
+
+	if locator.Size > 0 && int64(locator.Size) != contentLength {
+		http.Error(resp, "Locator size hint does not match Content-Length header", http.StatusBadRequest)
+		return
+	}
+
+	var pass bool
+	var tok string
+	if pass, tok = CheckAuthorizationHeader(kc, this.ApiTokenCache, req); !pass {
+		http.Error(resp, "Missing or invalid Authorization header", http.StatusForbidden)
+		return
+	}
+
+	// Copy ArvadosClient struct and use the client's API token
+	arvclient := *kc.Arvados
+	arvclient.ApiToken = tok
+	kc.Arvados = &arvclient
+
+	// Check if the client specified the number of replicas
+	if req.Header.Get("X-Keep-Desired-Replicas") != "" {
+		var r int
+		_, err := fmt.Sscanf(req.Header.Get(keepclient.X_Keep_Desired_Replicas), "%d", &r)
+		// BUGFIX: the original condition was inverted (err != nil),
+		// so the requested replica count was applied only when the
+		// header failed to parse — i.e. never honored.
+		if err == nil {
+			kc.Want_replicas = r
+		}
+	}
+
+	// Now try to put the block through
+	var replicas int
+	var put_err error
+	if hash == "" {
+		// POST with no hash in the URL: buffer the body and let the
+		// keep client compute the hash.
+		if bytes, err := ioutil.ReadAll(req.Body); err != nil {
+			msg := fmt.Sprintf("Error reading request body: %s", err)
+			log.Printf(msg)
+			http.Error(resp, msg, http.StatusInternalServerError)
+			return
+		} else {
+			hash, replicas, put_err = kc.PutB(bytes)
+		}
+	} else {
+		hash, replicas, put_err = kc.PutHR(hash, req.Body, contentLength)
+	}
+
+	// Tell the client how many successful PUTs we accomplished
+	resp.Header().Set(keepclient.X_Keep_Replicas_Stored, fmt.Sprintf("%d", replicas))
+
+	switch put_err {
+	case nil:
+		// Default will return http.StatusOK
+		log.Printf("%s: %s %s finished, stored %v replicas (desired %v)", GetRemoteAddress(req), req.Method, hash, replicas, kc.Want_replicas)
+		n, err2 := io.WriteString(resp, hash)
+		if err2 != nil {
+			// BUGFIX: the format string has three verbs but the
+			// original passed only two arguments; the remote
+			// address was missing.
+			log.Printf("%s: wrote %v bytes to response body and got error %v", GetRemoteAddress(req), n, err2.Error())
+		}
+
+	case keepclient.OversizeBlockError:
+		// Too much data
+		http.Error(resp, fmt.Sprintf("Exceeded maximum blocksize %d", keepclient.BLOCKSIZE), http.StatusRequestEntityTooLarge)
+
+	case keepclient.InsufficientReplicasError:
+		if replicas > 0 {
+			// At least one write is considered success.  The
+			// client can decide if getting less than the number of
+			// replications it asked for is a fatal error.
+			// Default will return http.StatusOK
+			n, err2 := io.WriteString(resp, hash)
+			if err2 != nil {
+				// BUGFIX: same missing first argument as above.
+				log.Printf("%s: wrote %v bytes to response body and got error %v", GetRemoteAddress(req), n, err2.Error())
+			}
+		} else {
+			http.Error(resp, put_err.Error(), http.StatusServiceUnavailable)
+		}
+
+	default:
+		http.Error(resp, put_err.Error(), http.StatusBadGateway)
+	}
+
+	if put_err != nil {
+		log.Printf("%s: %s %s stored %v replicas (desired %v) got error %v", GetRemoteAddress(req), req.Method, hash, replicas, kc.Want_replicas, put_err.Error())
+	}
+
+}
diff --git a/services/keepproxy/keepproxy_test.go b/services/keepproxy/keepproxy_test.go
new file mode 100644 (file)
index 0000000..8acf43a
--- /dev/null
@@ -0,0 +1,399 @@
+package main
+
+import (
+       "crypto/md5"
+       "crypto/tls"
+       "fmt"
+       "git.curoverse.com/arvados.git/sdk/go/arvadosclient"
+       "git.curoverse.com/arvados.git/sdk/go/keepclient"
+       . "gopkg.in/check.v1"
+       "io"
+       "io/ioutil"
+       "log"
+       "net/http"
+       "net/url"
+       "os"
+       "os/exec"
+       "strings"
+       "testing"
+       "time"
+)
+
+// Gocheck boilerplate: hook gocheck's suites into "go test".
+func Test(t *testing.T) {
+	TestingT(t)
+}
+
+// Gocheck boilerplate
+var _ = Suite(&ServerRequiredSuite{})
+
+// Tests that require the Keep server running
+type ServerRequiredSuite struct{}
+
+// pythonDir returns the directory containing run_test_server.py,
+// relative to this package's working directory.
+func pythonDir() string {
+	cwd, _ := os.Getwd()
+	return fmt.Sprintf("%s/../../sdk/python/tests", cwd)
+}
+
+// Wait (up to 1 second) for keepproxy to listen on a port. This
+// avoids a race condition where we hit a "connection refused" error
+// because we start testing the proxy too soon.
+func waitForListener() {
+	const (
+		ms = 5 // poll interval in milliseconds
+	)
+	// listener is the package-level variable set by main(); poll it
+	// until it is non-nil or the 1000 ms budget is exhausted.
+	for i := 0; listener == nil && i < 1000; i += ms {
+		time.Sleep(ms * time.Millisecond)
+	}
+	if listener == nil {
+		log.Fatalf("Timed out waiting for listener to start")
+	}
+}
+
+// closeListener shuts down the proxy under test (if it started) by
+// closing its listener, which makes http.Serve in main() return.
+func closeListener() {
+	if listener != nil {
+		listener.Close()
+	}
+}
+
+// SetUpSuite starts the Python test API server and Keep servers, then
+// points the ARVADOS_* environment at them for the whole suite.
+func (s *ServerRequiredSuite) SetUpSuite(c *C) {
+	cwd, _ := os.Getwd()
+	defer os.Chdir(cwd)
+
+	os.Chdir(pythonDir())
+	{
+		cmd := exec.Command("python", "run_test_server.py", "start")
+		stderr, err := cmd.StderrPipe()
+		if err != nil {
+			log.Fatalf("Setting up stderr pipe: %s", err)
+		}
+		// Mirror the server's stderr so test failures are debuggable.
+		go io.Copy(os.Stderr, stderr)
+		if err := cmd.Run(); err != nil {
+			panic(fmt.Sprintf("'python run_test_server.py start' returned error %s", err))
+		}
+	}
+	{
+		cmd := exec.Command("python", "run_test_server.py", "start_keep")
+		stderr, err := cmd.StderrPipe()
+		if err != nil {
+			log.Fatalf("Setting up stderr pipe: %s", err)
+		}
+		go io.Copy(os.Stderr, stderr)
+		if err := cmd.Run(); err != nil {
+			panic(fmt.Sprintf("'python run_test_server.py start_keep' returned error %s", err))
+		}
+	}
+
+	os.Setenv("ARVADOS_API_HOST", "localhost:3000")
+	os.Setenv("ARVADOS_API_TOKEN", "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
+	os.Setenv("ARVADOS_API_HOST_INSECURE", "true")
+}
+
+// TearDownSuite stops the Keep servers and the test API server
+// started by SetUpSuite; errors are deliberately ignored.
+func (s *ServerRequiredSuite) TearDownSuite(c *C) {
+	cwd, _ := os.Getwd()
+	defer os.Chdir(cwd)
+
+	os.Chdir(pythonDir())
+	exec.Command("python", "run_test_server.py", "stop_keep").Run()
+	exec.Command("python", "run_test_server.py", "stop").Run()
+}
+
+// setupProxyService registers this proxy (localhost:29950) as a
+// keep_service of type "proxy" with the test API server, so that
+// external clients discover it.  Panics on any failure.
+func setupProxyService() {
+
+	// The test API server uses a self-signed certificate.
+	client := &http.Client{Transport: &http.Transport{
+		TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}}
+
+	var req *http.Request
+	var err error
+	if req, err = http.NewRequest("POST", fmt.Sprintf("https://%s/arvados/v1/keep_services", os.Getenv("ARVADOS_API_HOST")), nil); err != nil {
+		panic(err.Error())
+	}
+	req.Header.Add("Authorization", fmt.Sprintf("OAuth2 %s", os.Getenv("ARVADOS_API_TOKEN")))
+
+	// Stream the form-encoded body through a pipe from a goroutine.
+	reader, writer := io.Pipe()
+
+	req.Body = reader
+
+	go func() {
+		data := url.Values{}
+		data.Set("keep_service", `{
+  "service_host": "localhost",
+  "service_port": 29950,
+  "service_ssl_flag": false,
+  "service_type": "proxy"
+}`)
+
+		writer.Write([]byte(data.Encode()))
+		writer.Close()
+	}()
+
+	var resp *http.Response
+	if resp, err = client.Do(req); err != nil {
+		panic(err.Error())
+	}
+	if resp.StatusCode != 200 {
+		panic(resp.Status)
+	}
+}
+
+// runProxy starts the keepproxy main() in a goroutine on the given
+// port, then builds and sanity-checks a KeepClient configured (via
+// ARVADOS_KEEP_PROXY) to talk only to that proxy with the given token.
+func runProxy(c *C, args []string, token string, port int) keepclient.KeepClient {
+	os.Args = append(args, fmt.Sprintf("-listen=:%v", port))
+	os.Setenv("ARVADOS_API_TOKEN", "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
+
+	listener = nil
+	go main()
+	time.Sleep(100 * time.Millisecond)
+
+	os.Setenv("ARVADOS_KEEP_PROXY", fmt.Sprintf("http://localhost:%v", port))
+	os.Setenv("ARVADOS_API_TOKEN", token)
+	arv, err := arvadosclient.MakeArvadosClient()
+	c.Assert(err, Equals, nil)
+	kc, err := keepclient.MakeKeepClient(&arv)
+	c.Assert(err, Equals, nil)
+	c.Check(kc.Using_proxy, Equals, true)
+	// The proxy must be the one and only service root.
+	c.Check(len(kc.ServiceRoots()), Equals, 1)
+	for _, root := range kc.ServiceRoots() {
+		c.Check(root, Equals, fmt.Sprintf("http://localhost:%v", port))
+	}
+	os.Setenv("ARVADOS_KEEP_PROXY", "")
+	log.Print("keepclient created")
+	return kc
+}
+
+// TestPutAskGet exercises the happy path through the proxy as an
+// "external" client: Ask for a missing block, Put it, Ask and Get it
+// back, then the same for the empty block.
+func (s *ServerRequiredSuite) TestPutAskGet(c *C) {
+	log.Print("TestPutAndGet start")
+
+	// This test starts the proxy by hand (instead of via runProxy)
+	// because it also registers the proxy as a keep service and
+	// verifies external-client discovery.
+	os.Args = []string{"keepproxy", "-listen=:29950"}
+	os.Setenv("ARVADOS_API_TOKEN", "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
+	listener = nil
+	go main()
+	time.Sleep(100 * time.Millisecond)
+
+	setupProxyService()
+
+	os.Setenv("ARVADOS_EXTERNAL_CLIENT", "true")
+	arv, err := arvadosclient.MakeArvadosClient()
+	c.Assert(err, Equals, nil)
+	kc, err := keepclient.MakeKeepClient(&arv)
+	c.Assert(err, Equals, nil)
+	c.Check(kc.Arvados.External, Equals, true)
+	c.Check(kc.Using_proxy, Equals, true)
+	c.Check(len(kc.ServiceRoots()), Equals, 1)
+	for _, root := range kc.ServiceRoots() {
+		c.Check(root, Equals, "http://localhost:29950")
+	}
+	os.Setenv("ARVADOS_EXTERNAL_CLIENT", "")
+	log.Print("keepclient created")
+
+	waitForListener()
+	defer closeListener()
+
+	hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+	var hash2 string
+
+	{
+		// Block is not stored yet.
+		_, _, err := kc.Ask(hash)
+		c.Check(err, Equals, keepclient.BlockNotFound)
+		log.Print("Ask 1")
+	}
+
+	{
+		var rep int
+		var err error
+		hash2, rep, err = kc.PutB([]byte("foo"))
+		c.Check(hash2, Matches, fmt.Sprintf(`^%s\+3(\+.+)?$`, hash))
+		c.Check(rep, Equals, 2)
+		c.Check(err, Equals, nil)
+		log.Print("PutB")
+	}
+
+	{
+		blocklen, _, err := kc.Ask(hash2)
+		c.Assert(err, Equals, nil)
+		c.Check(blocklen, Equals, int64(3))
+		log.Print("Ask 2")
+	}
+
+	{
+		reader, blocklen, _, err := kc.Get(hash2)
+		c.Assert(err, Equals, nil)
+		all, err := ioutil.ReadAll(reader)
+		c.Check(all, DeepEquals, []byte("foo"))
+		c.Check(blocklen, Equals, int64(3))
+		log.Print("Get")
+	}
+
+	{
+		// Edge case: the zero-length block.
+		var rep int
+		var err error
+		hash2, rep, err = kc.PutB([]byte(""))
+		c.Check(hash2, Matches, `^d41d8cd98f00b204e9800998ecf8427e\+0(\+.+)?$`)
+		c.Check(rep, Equals, 2)
+		c.Check(err, Equals, nil)
+		log.Print("PutB zero block")
+	}
+
+	{
+		reader, blocklen, _, err := kc.Get("d41d8cd98f00b204e9800998ecf8427e")
+		c.Assert(err, Equals, nil)
+		all, err := ioutil.ReadAll(reader)
+		c.Check(all, DeepEquals, []byte(""))
+		c.Check(blocklen, Equals, int64(0))
+		log.Print("Get zero block")
+	}
+
+	log.Print("TestPutAndGet done")
+}
+
+// TestPutAskGetForbidden verifies that an invalid API token ("123abc")
+// is rejected: every Ask/Put/Get through the proxy must fail.
+func (s *ServerRequiredSuite) TestPutAskGetForbidden(c *C) {
+	log.Print("TestPutAskGetForbidden start")
+
+	kc := runProxy(c, []string{"keepproxy"}, "123abc", 29951)
+	waitForListener()
+	defer closeListener()
+
+	log.Print("keepclient created")
+
+	hash := fmt.Sprintf("%x", md5.Sum([]byte("bar")))
+
+	{
+		_, _, err := kc.Ask(hash)
+		c.Check(err, Equals, keepclient.BlockNotFound)
+		log.Print("Ask 1")
+	}
+
+	{
+		hash2, rep, err := kc.PutB([]byte("bar"))
+		c.Check(hash2, Equals, "")
+		c.Check(rep, Equals, 0)
+		c.Check(err, Equals, keepclient.InsufficientReplicasError)
+		log.Print("PutB")
+	}
+
+	{
+		blocklen, _, err := kc.Ask(hash)
+		c.Assert(err, Equals, keepclient.BlockNotFound)
+		c.Check(blocklen, Equals, int64(0))
+		log.Print("Ask 2")
+	}
+
+	{
+		_, blocklen, _, err := kc.Get(hash)
+		c.Assert(err, Equals, keepclient.BlockNotFound)
+		c.Check(blocklen, Equals, int64(0))
+		log.Print("Get")
+	}
+
+	log.Print("TestPutAskGetForbidden done")
+}
+
+// TestGetDisabled verifies that with -no-get the proxy still accepts
+// PUTs but refuses GET/HEAD (Ask/Get report BlockNotFound).
+func (s *ServerRequiredSuite) TestGetDisabled(c *C) {
+	log.Print("TestGetDisabled start")
+
+	kc := runProxy(c, []string{"keepproxy", "-no-get"}, "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h", 29952)
+	waitForListener()
+	defer closeListener()
+
+	hash := fmt.Sprintf("%x", md5.Sum([]byte("baz")))
+
+	{
+		_, _, err := kc.Ask(hash)
+		c.Check(err, Equals, keepclient.BlockNotFound)
+		log.Print("Ask 1")
+	}
+
+	{
+		// PUT is still enabled and must succeed.
+		hash2, rep, err := kc.PutB([]byte("baz"))
+		c.Check(hash2, Matches, fmt.Sprintf(`^%s\+3(\+.+)?$`, hash))
+		c.Check(rep, Equals, 2)
+		c.Check(err, Equals, nil)
+		log.Print("PutB")
+	}
+
+	{
+		// Even though the block now exists, HEAD is disabled.
+		blocklen, _, err := kc.Ask(hash)
+		c.Assert(err, Equals, keepclient.BlockNotFound)
+		c.Check(blocklen, Equals, int64(0))
+		log.Print("Ask 2")
+	}
+
+	{
+		_, blocklen, _, err := kc.Get(hash)
+		c.Assert(err, Equals, keepclient.BlockNotFound)
+		c.Check(blocklen, Equals, int64(0))
+		log.Print("Get")
+	}
+
+	log.Print("TestGetDisabled done")
+}
+
+// TestPutDisabled verifies that with -no-put the proxy refuses PUTs
+// (zero replicas stored, InsufficientReplicasError).
+func (s *ServerRequiredSuite) TestPutDisabled(c *C) {
+	log.Print("TestPutDisabled start")
+
+	kc := runProxy(c, []string{"keepproxy", "-no-put"}, "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h", 29953)
+	waitForListener()
+	defer closeListener()
+
+	{
+		hash2, rep, err := kc.PutB([]byte("quux"))
+		c.Check(hash2, Equals, "")
+		c.Check(rep, Equals, 0)
+		c.Check(err, Equals, keepclient.InsufficientReplicasError)
+		log.Print("PutB")
+	}
+
+	log.Print("TestPutDisabled done")
+}
+
+// TestCorsHeaders checks that both an OPTIONS preflight and a plain
+// GET carry the expected Access-Control-* response headers.
+func (s *ServerRequiredSuite) TestCorsHeaders(c *C) {
+	runProxy(c, []string{"keepproxy"}, "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h", 29954)
+	waitForListener()
+	defer closeListener()
+
+	{
+		client := http.Client{}
+		// NOTE(review): the err returned by http.NewRequest is
+		// shadowed by client.Do's and never checked.
+		req, err := http.NewRequest("OPTIONS",
+			fmt.Sprintf("http://localhost:29954/%x+3",
+				md5.Sum([]byte("foo"))),
+			nil)
+		req.Header.Add("Access-Control-Request-Method", "PUT")
+		req.Header.Add("Access-Control-Request-Headers", "Authorization, X-Keep-Desired-Replicas")
+		resp, err := client.Do(req)
+		c.Check(err, Equals, nil)
+		c.Check(resp.StatusCode, Equals, 200)
+		body, err := ioutil.ReadAll(resp.Body)
+		c.Check(string(body), Equals, "")
+		c.Check(resp.Header.Get("Access-Control-Allow-Methods"), Equals, "GET, HEAD, POST, PUT, OPTIONS")
+		c.Check(resp.Header.Get("Access-Control-Allow-Origin"), Equals, "*")
+	}
+
+	{
+		resp, err := http.Get(
+			fmt.Sprintf("http://localhost:29954/%x+3",
+				md5.Sum([]byte("foo"))))
+		c.Check(err, Equals, nil)
+		c.Check(resp.Header.Get("Access-Control-Allow-Headers"), Equals, "Authorization, Content-Length, Content-Type, X-Keep-Desired-Replicas")
+		c.Check(resp.Header.Get("Access-Control-Allow-Origin"), Equals, "*")
+	}
+}
+
+// TestPostWithoutHash verifies that a POST to "/" (no hash in the
+// URL) stores the body and returns the computed locator "<md5>+<len>".
+func (s *ServerRequiredSuite) TestPostWithoutHash(c *C) {
+	runProxy(c, []string{"keepproxy"}, "4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h", 29955)
+	waitForListener()
+	defer closeListener()
+
+	{
+		client := http.Client{}
+		req, err := http.NewRequest("POST",
+			"http://localhost:29955/",
+			strings.NewReader("qux"))
+		req.Header.Add("Authorization", "OAuth2 4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h")
+		req.Header.Add("Content-Type", "application/octet-stream")
+		resp, err := client.Do(req)
+		c.Check(err, Equals, nil)
+		body, err := ioutil.ReadAll(resp.Body)
+		c.Check(err, Equals, nil)
+		c.Check(string(body), Equals,
+			fmt.Sprintf("%x+%d", md5.Sum([]byte("qux")), 3))
+	}
+}
diff --git a/services/keepstore/handler_test.go b/services/keepstore/handler_test.go
new file mode 100644 (file)
index 0000000..05b410c
--- /dev/null
@@ -0,0 +1,761 @@
+// Tests for Keep HTTP handlers:
+//
+//     GetBlockHandler
+//     PutBlockHandler
+//     IndexHandler
+//
+// The HTTP handlers are responsible for enforcing permission policy,
+// so these tests must exercise all possible permission permutations.
+
+package main
+
+import (
+       "bytes"
+       "encoding/json"
+       "fmt"
+       "net/http"
+       "net/http/httptest"
+       "os"
+       "regexp"
+       "strings"
+       "testing"
+       "time"
+)
+
+// A RequestTester represents the parameters for an HTTP request to
+// be issued on behalf of a unit test.
+type RequestTester struct {
+	uri          string // request path, e.g. "/<hash>"
+	api_token    string // sent as "OAuth2 <token>" when non-empty (presumably — confirm against IssueRequest)
+	method       string // HTTP method: GET, PUT, ...
+	request_body []byte // body for PUT/POST requests
+}
+
+// Test GetBlockHandler on the following situations:
+//   - permissions off, unauthenticated request, unsigned locator
+//   - permissions on, authenticated request, signed locator
+//   - permissions on, authenticated request, unsigned locator
+//   - permissions on, unauthenticated request, signed locator
+//   - permissions on, authenticated request, expired locator
+//
+func TestGetHandler(t *testing.T) {
+	defer teardown()
+
+	// Prepare two test Keep volumes. Our block is stored on the second volume.
+	KeepVM = MakeTestVolumeManager(2)
+	defer KeepVM.Quit()
+
+	vols := KeepVM.Volumes()
+	if err := vols[0].Put(TEST_HASH, TEST_BLOCK); err != nil {
+		t.Error(err)
+	}
+
+	// Create locators for testing.
+	// Turn on permission settings so we can generate signed locators.
+	enforce_permissions = true
+	PermissionSecret = []byte(known_key)
+	permission_ttl = time.Duration(300) * time.Second
+
+	var (
+		unsigned_locator  = "/" + TEST_HASH
+		valid_timestamp   = time.Now().Add(permission_ttl)
+		expired_timestamp = time.Now().Add(-time.Hour)
+		signed_locator    = "/" + SignLocator(TEST_HASH, known_token, valid_timestamp)
+		expired_locator   = "/" + SignLocator(TEST_HASH, known_token, expired_timestamp)
+	)
+
+	// -----------------
+	// Test unauthenticated request with permissions off.
+	enforce_permissions = false
+
+	// Unauthenticated request, unsigned locator
+	// => OK
+	response := IssueRequest(
+		&RequestTester{
+			method: "GET",
+			uri:    unsigned_locator,
+		})
+	ExpectStatusCode(t,
+		"Unauthenticated request, unsigned locator", http.StatusOK, response)
+	ExpectBody(t,
+		"Unauthenticated request, unsigned locator",
+		string(TEST_BLOCK),
+		response)
+
+	// The handler must also report the block size.
+	received_cl := response.Header().Get("Content-Length")
+	expected_cl := fmt.Sprintf("%d", len(TEST_BLOCK))
+	if received_cl != expected_cl {
+		t.Errorf("expected Content-Length %s, got %s", expected_cl, received_cl)
+	}
+
+	// ----------------
+	// Permissions: on.
+	enforce_permissions = true
+
+	// Authenticated request, signed locator
+	// => OK
+	response = IssueRequest(&RequestTester{
+		method:    "GET",
+		uri:       signed_locator,
+		api_token: known_token,
+	})
+	ExpectStatusCode(t,
+		"Authenticated request, signed locator", http.StatusOK, response)
+	ExpectBody(t,
+		"Authenticated request, signed locator", string(TEST_BLOCK), response)
+
+	received_cl = response.Header().Get("Content-Length")
+	expected_cl = fmt.Sprintf("%d", len(TEST_BLOCK))
+	if received_cl != expected_cl {
+		t.Errorf("expected Content-Length %s, got %s", expected_cl, received_cl)
+	}
+
+	// Authenticated request, unsigned locator
+	// => PermissionError
+	response = IssueRequest(&RequestTester{
+		method:    "GET",
+		uri:       unsigned_locator,
+		api_token: known_token,
+	})
+	ExpectStatusCode(t, "unsigned locator", PermissionError.HTTPCode, response)
+
+	// Unauthenticated request, signed locator
+	// => PermissionError
+	response = IssueRequest(&RequestTester{
+		method: "GET",
+		uri:    signed_locator,
+	})
+	ExpectStatusCode(t,
+		"Unauthenticated request, signed locator",
+		PermissionError.HTTPCode, response)
+
+	// Authenticated request, expired locator
+	// => ExpiredError
+	response = IssueRequest(&RequestTester{
+		method:    "GET",
+		uri:       expired_locator,
+		api_token: known_token,
+	})
+	ExpectStatusCode(t,
+		"Authenticated request, expired locator",
+		ExpiredError.HTTPCode, response)
+}
+
+// Test PutBlockHandler on the following situations:
+//   - no server key
+//   - with server key, authenticated request, unsigned locator
+//   - with server key, unauthenticated request, unsigned locator
+//
+func TestPutHandler(t *testing.T) {
+	defer teardown()
+
+	// Prepare two test Keep volumes.
+	KeepVM = MakeTestVolumeManager(2)
+	defer KeepVM.Quit()
+
+	// --------------
+	// No server key.
+
+	// Unauthenticated request, no server key
+	// => OK (unsigned response)
+	unsigned_locator := "/" + TEST_HASH
+	response := IssueRequest(
+		&RequestTester{
+			method:       "PUT",
+			uri:          unsigned_locator,
+			request_body: TEST_BLOCK,
+		})
+
+	ExpectStatusCode(t,
+		"Unauthenticated request, no server key", http.StatusOK, response)
+	ExpectBody(t,
+		"Unauthenticated request, no server key",
+		TEST_HASH_PUT_RESPONSE, response)
+
+	// ------------------
+	// With a server key.
+
+	PermissionSecret = []byte(known_key)
+	permission_ttl = time.Duration(300) * time.Second
+
+	// When a permission key is available, the locator returned
+	// from an authenticated PUT request will be signed.
+
+	// Authenticated PUT, signed locator
+	// => OK (signed response)
+	response = IssueRequest(
+		&RequestTester{
+			method:       "PUT",
+			uri:          unsigned_locator,
+			request_body: TEST_BLOCK,
+			api_token:    known_token,
+		})
+
+	ExpectStatusCode(t,
+		"Authenticated PUT, signed locator, with server key",
+		http.StatusOK, response)
+	response_locator := strings.TrimSpace(response.Body.String())
+	if !VerifySignature(response_locator, known_token) {
+		t.Errorf("Authenticated PUT, signed locator, with server key:\n"+
+			"response '%s' does not contain a valid signature",
+			response_locator)
+	}
+
+	// Unauthenticated PUT, unsigned locator
+	// => OK
+	response = IssueRequest(
+		&RequestTester{
+			method:       "PUT",
+			uri:          unsigned_locator,
+			request_body: TEST_BLOCK,
+		})
+
+	ExpectStatusCode(t,
+		"Unauthenticated PUT, unsigned locator, with server key",
+		http.StatusOK, response)
+	ExpectBody(t,
+		"Unauthenticated PUT, unsigned locator, with server key",
+		TEST_HASH_PUT_RESPONSE, response)
+}
+
+// Test /index requests:
+//   - unauthenticated /index request
+//   - unauthenticated /index/prefix request
+//   - authenticated   /index request        | non-superuser
+//   - authenticated   /index/prefix request | non-superuser
+//   - authenticated   /index request        | superuser
+//   - authenticated   /index/prefix request | superuser
+//
+// The only /index requests that should succeed are those issued by the
+// superuser. They should pass regardless of the value of enforce_permissions.
+//
+func TestIndexHandler(t *testing.T) {
+       defer teardown()
+
+       // Set up Keep volumes and populate them.
+       // Include multiple blocks on different volumes, and
+       // some metadata files (which should be omitted from index listings)
+       KeepVM = MakeTestVolumeManager(2)
+       defer KeepVM.Quit()
+
+       vols := KeepVM.Volumes()
+       vols[0].Put(TEST_HASH, TEST_BLOCK)
+       vols[1].Put(TEST_HASH_2, TEST_BLOCK_2)
+       vols[0].Put(TEST_HASH+".meta", []byte("metadata"))
+       vols[1].Put(TEST_HASH_2+".meta", []byte("metadata"))
+
+       data_manager_token = "DATA MANAGER TOKEN"
+
+       unauthenticated_req := &RequestTester{
+               method: "GET",
+               uri:    "/index",
+       }
+       authenticated_req := &RequestTester{
+               method:    "GET",
+               uri:       "/index",
+               api_token: known_token,
+       }
+       superuser_req := &RequestTester{
+               method:    "GET",
+               uri:       "/index",
+               api_token: data_manager_token,
+       }
+       unauth_prefix_req := &RequestTester{
+               method: "GET",
+               uri:    "/index/" + TEST_HASH[0:3],
+       }
+       auth_prefix_req := &RequestTester{
+               method:    "GET",
+               uri:       "/index/" + TEST_HASH[0:3],
+               api_token: known_token,
+       }
+       superuser_prefix_req := &RequestTester{
+               method:    "GET",
+               uri:       "/index/" + TEST_HASH[0:3],
+               api_token: data_manager_token,
+       }
+
+       // -------------------------------------------------------------
+       // Only the superuser should be allowed to issue /index requests.
+
+       // ---------------------------
+       // enforce_permissions enabled
+       // This setting should not affect tests passing.
+       enforce_permissions = true
+
+       // unauthenticated /index request
+       // => UnauthorizedError
+       response := IssueRequest(unauthenticated_req)
+       ExpectStatusCode(t,
+               "enforce_permissions on, unauthenticated request",
+               UnauthorizedError.HTTPCode,
+               response)
+
+       // unauthenticated /index/prefix request
+       // => UnauthorizedError
+       response = IssueRequest(unauth_prefix_req)
+       ExpectStatusCode(t,
+               "permissions on, unauthenticated /index/prefix request",
+               UnauthorizedError.HTTPCode,
+               response)
+
+       // authenticated /index request, non-superuser
+       // => UnauthorizedError
+       response = IssueRequest(authenticated_req)
+       ExpectStatusCode(t,
+               "permissions on, authenticated request, non-superuser",
+               UnauthorizedError.HTTPCode,
+               response)
+
+       // authenticated /index/prefix request, non-superuser
+       // => UnauthorizedError
+       response = IssueRequest(auth_prefix_req)
+       ExpectStatusCode(t,
+               "permissions on, authenticated /index/prefix request, non-superuser",
+               UnauthorizedError.HTTPCode,
+               response)
+
+       // superuser /index request
+       // => OK
+       response = IssueRequest(superuser_req)
+       ExpectStatusCode(t,
+               "permissions on, superuser request",
+               http.StatusOK,
+               response)
+
+       // ----------------------------
+       // enforce_permissions disabled
+       // Valid Request should still pass.
+       enforce_permissions = false
+
+       // superuser /index request
+       // => OK
+       response = IssueRequest(superuser_req)
+       ExpectStatusCode(t,
+               "permissions on, superuser request",
+               http.StatusOK,
+               response)
+
+       expected := `^` + TEST_HASH + `\+\d+ \d+\n` +
+               TEST_HASH_2 + `\+\d+ \d+\n$`
+       match, _ := regexp.MatchString(expected, response.Body.String())
+       if !match {
+               t.Errorf(
+                       "permissions on, superuser request: expected %s, got:\n%s",
+                       expected, response.Body.String())
+       }
+
+       // superuser /index/prefix request
+       // => OK
+       response = IssueRequest(superuser_prefix_req)
+       ExpectStatusCode(t,
+               "permissions on, superuser request",
+               http.StatusOK,
+               response)
+
+       expected = `^` + TEST_HASH + `\+\d+ \d+\n$`
+       match, _ = regexp.MatchString(expected, response.Body.String())
+       if !match {
+               t.Errorf(
+                       "permissions on, superuser /index/prefix request: expected %s, got:\n%s",
+                       expected, response.Body.String())
+       }
+}
+
+// TestDeleteHandler
+//
+// Cases tested:
+//
+//   With no token and with a non-data-manager token:
+//   * Delete existing block
+//     (test for 403 Forbidden, confirm block not deleted)
+//
+//   With data manager token:
+//
+//   * Delete existing block
+//     (test for 200 OK, response counts, confirm block deleted)
+//
+//   * Delete nonexistent block
+//     (test for 200 OK, response counts)
+//
+//   TODO(twp):
+//
+//   * Delete block on read-only and read-write volume
+//     (test for 200 OK, response with copies_deleted=1,
+//     copies_failed=1, confirm block deleted only on r/w volume)
+//
+//   * Delete block on read-only volume only
+//     (test for 200 OK, response with copies_deleted=0, copies_failed=1,
+//     confirm block not deleted)
+//
+func TestDeleteHandler(t *testing.T) {
+	defer teardown()
+
+	// Set up Keep volumes and populate them.
+	// Include multiple blocks on different volumes, and
+	// some metadata files (which should be omitted from index listings)
+	KeepVM = MakeTestVolumeManager(2)
+	defer KeepVM.Quit()
+
+	vols := KeepVM.Volumes()
+	vols[0].Put(TEST_HASH, TEST_BLOCK)
+
+	// Explicitly set the permission_ttl to 0 for these
+	// tests, to ensure the MockVolume deletes the blocks
+	// even though they have just been created.
+	permission_ttl = time.Duration(0)
+
+	var user_token = "NOT DATA MANAGER TOKEN"
+	data_manager_token = "DATA MANAGER TOKEN"
+
+	unauth_req := &RequestTester{
+		method: "DELETE",
+		uri:    "/" + TEST_HASH,
+	}
+
+	user_req := &RequestTester{
+		method:    "DELETE",
+		uri:       "/" + TEST_HASH,
+		api_token: user_token,
+	}
+
+	superuser_existing_block_req := &RequestTester{
+		method:    "DELETE",
+		uri:       "/" + TEST_HASH,
+		api_token: data_manager_token,
+	}
+
+	superuser_nonexistent_block_req := &RequestTester{
+		method:    "DELETE",
+		uri:       "/" + TEST_HASH_2,
+		api_token: data_manager_token,
+	}
+
+	// Unauthenticated request returns PermissionError.
+	var response *httptest.ResponseRecorder
+	response = IssueRequest(unauth_req)
+	ExpectStatusCode(t,
+		"unauthenticated request",
+		PermissionError.HTTPCode,
+		response)
+
+	// Authenticated non-admin request returns PermissionError.
+	response = IssueRequest(user_req)
+	ExpectStatusCode(t,
+		"authenticated non-admin request",
+		PermissionError.HTTPCode,
+		response)
+
+	// Authenticated admin request for nonexistent block.
+	// deletecounter mirrors the JSON body of a successful DELETE
+	// response: {"copies_deleted":d,"copies_failed":f}.
+	type deletecounter struct {
+		Deleted int `json:"copies_deleted"`
+		Failed  int `json:"copies_failed"`
+	}
+	var response_dc, expected_dc deletecounter
+
+	response = IssueRequest(superuser_nonexistent_block_req)
+	ExpectStatusCode(t,
+		"data manager request, nonexistent block",
+		http.StatusNotFound,
+		response)
+
+	// Authenticated admin request for existing block while never_delete is set.
+	never_delete = true
+	response = IssueRequest(superuser_existing_block_req)
+	ExpectStatusCode(t,
+		"authenticated request, existing block, method disabled",
+		MethodDisabledError.HTTPCode,
+		response)
+	never_delete = false
+
+	// Authenticated admin request for existing block.
+	response = IssueRequest(superuser_existing_block_req)
+	ExpectStatusCode(t,
+		"data manager request, existing block",
+		http.StatusOK,
+		response)
+	// Expect response {"copies_deleted":1,"copies_failed":0}
+	expected_dc = deletecounter{1, 0}
+	json.NewDecoder(response.Body).Decode(&response_dc)
+	if response_dc != expected_dc {
+		t.Errorf("superuser_existing_block_req\nexpected: %+v\nreceived: %+v",
+			expected_dc, response_dc)
+	}
+	// Confirm the block has been deleted
+	_, err := vols[0].Get(TEST_HASH)
+	var block_deleted = os.IsNotExist(err)
+	if !block_deleted {
+		t.Error("superuser_existing_block_req: block not deleted")
+	}
+
+	// A DELETE request on a block newer than permission_ttl should return
+	// success but leave the block on the volume.
+	vols[0].Put(TEST_HASH, TEST_BLOCK)
+	permission_ttl = time.Duration(1) * time.Hour
+
+	response = IssueRequest(superuser_existing_block_req)
+	ExpectStatusCode(t,
+		"data manager request, existing block",
+		http.StatusOK,
+		response)
+	// Expect response {"copies_deleted":1,"copies_failed":0}
+	// NOTE(review): the Decode error is ignored here and above; a
+	// malformed body would surface only as a zero-valued counter.
+	expected_dc = deletecounter{1, 0}
+	json.NewDecoder(response.Body).Decode(&response_dc)
+	if response_dc != expected_dc {
+		t.Errorf("superuser_existing_block_req\nexpected: %+v\nreceived: %+v",
+			expected_dc, response_dc)
+	}
+	// Confirm the block has NOT been deleted.
+	_, err = vols[0].Get(TEST_HASH)
+	if err != nil {
+		t.Errorf("testing delete on new block: %s\n", err)
+	}
+}
+
+// TestPullHandler
+//
+// Test handling of the PUT /pull statement.
+//
+// Cases tested: syntactically valid and invalid pull lists, from the
+// data manager and from unprivileged users:
+//
+//   1. Valid pull list from an ordinary user
+//      (expected result: 401 Unauthorized)
+//
+//   2. Invalid pull request from an ordinary user
+//      (expected result: 401 Unauthorized)
+//
+//   3. Valid pull request from the data manager
+//      (expected result: 200 OK with request body "Received 3 pull
+//      requests"
+//
+//   4. Invalid pull request from the data manager
+//      (expected result: 400 Bad Request)
+//
+// Test that in the end, the pull manager received a good pull list with
+// the expected number of requests.
+//
+// TODO(twp): test concurrency: launch 100 goroutines to update the
+// pull list simultaneously.  Make sure that none of them return 400
+// Bad Request and that pullq.GetList() returns a valid list.
+//
+func TestPullHandler(t *testing.T) {
+	defer teardown()
+
+	var user_token = "USER TOKEN"
+	data_manager_token = "DATA MANAGER TOKEN"
+
+	// Three pull requests exercising the corner cases: multiple
+	// servers, no servers, and an empty locator.
+	good_json := []byte(`[
+		{
+			"locator":"locator_with_two_servers",
+			"servers":[
+				"server1",
+				"server2"
+			]
+		},
+		{
+			"locator":"locator_with_no_servers",
+			"servers":[]
+		},
+		{
+			"locator":"",
+			"servers":["empty_locator"]
+		}
+	]`)
+
+	bad_json := []byte(`{ "key":"I'm a little teapot" }`)
+
+	// pullTest pairs a request with the status and body it should
+	// produce.
+	type pullTest struct {
+		name          string
+		req           RequestTester
+		response_code int
+		response_body string
+	}
+	var testcases = []pullTest{
+		{
+			"Valid pull list from an ordinary user",
+			RequestTester{"/pull", user_token, "PUT", good_json},
+			http.StatusUnauthorized,
+			"Unauthorized\n",
+		},
+		{
+			"Invalid pull request from an ordinary user",
+			RequestTester{"/pull", user_token, "PUT", bad_json},
+			http.StatusUnauthorized,
+			"Unauthorized\n",
+		},
+		{
+			"Valid pull request from the data manager",
+			RequestTester{"/pull", data_manager_token, "PUT", good_json},
+			http.StatusOK,
+			"Received 3 pull requests\n",
+		},
+		{
+			"Invalid pull request from the data manager",
+			RequestTester{"/pull", data_manager_token, "PUT", bad_json},
+			http.StatusBadRequest,
+			"Bad Request\n",
+		},
+	}
+
+	for _, tst := range testcases {
+		response := IssueRequest(&tst.req)
+		ExpectStatusCode(t, tst.name, tst.response_code, response)
+		ExpectBody(t, tst.name, tst.response_body, response)
+	}
+
+	// The Keep pull manager should have received one good list with 3
+	// requests on it.
+	for i := 0; i < 3; i++ {
+		item := <-pullq.NextItem
+		if _, ok := item.(PullRequest); !ok {
+			t.Errorf("item %v could not be parsed as a PullRequest", item)
+		}
+	}
+
+	expectChannelEmpty(t, pullq.NextItem)
+}
+
+// TestTrashHandler
+//
+// Test cases:
+//
+// Cases tested: syntactically valid and invalid trash lists, from the
+// data manager and from unprivileged users:
+//
+//   1. Valid trash list from an ordinary user
+//      (expected result: 401 Unauthorized)
+//
+//   2. Invalid trash list from an ordinary user
+//      (expected result: 401 Unauthorized)
+//
+//   3. Valid trash list from the data manager
+//      (expected result: 200 OK with request body "Received 3 trash
+//      requests"
+//
+//   4. Invalid trash list from the data manager
+//      (expected result: 400 Bad Request)
+//
+// Test that in the end, the trash collector received a good list
+// trash list with the expected number of requests.
+//
+// TODO(twp): test concurrency: launch 100 goroutines to update the
+// pull list simultaneously.  Make sure that none of them return 400
+// Bad Request and that replica.Dump() returns a valid list.
+//
+func TestTrashHandler(t *testing.T) {
+	defer teardown()
+
+	var user_token = "USER TOKEN"
+	data_manager_token = "DATA MANAGER TOKEN"
+
+	good_json := []byte(`[
+		{
+			"locator":"block1",
+			"block_mtime":1409082153
+		},
+		{
+			"locator":"block2",
+			"block_mtime":1409082153
+		},
+		{
+			"locator":"block3",
+			"block_mtime":1409082153
+		}
+	]`)
+
+	bad_json := []byte(`I am not a valid JSON string`)
+
+	// trashTest pairs a request with the status and body it should
+	// produce.
+	type trashTest struct {
+		name          string
+		req           RequestTester
+		response_code int
+		response_body string
+	}
+
+	var testcases = []trashTest{
+		{
+			"Valid trash list from an ordinary user",
+			RequestTester{"/trash", user_token, "PUT", good_json},
+			http.StatusUnauthorized,
+			"Unauthorized\n",
+		},
+		{
+			"Invalid trash list from an ordinary user",
+			RequestTester{"/trash", user_token, "PUT", bad_json},
+			http.StatusUnauthorized,
+			"Unauthorized\n",
+		},
+		{
+			"Valid trash list from the data manager",
+			RequestTester{"/trash", data_manager_token, "PUT", good_json},
+			http.StatusOK,
+			"Received 3 trash requests\n",
+		},
+		{
+			"Invalid trash list from the data manager",
+			RequestTester{"/trash", data_manager_token, "PUT", bad_json},
+			http.StatusBadRequest,
+			"Bad Request\n",
+		},
+	}
+
+	for _, tst := range testcases {
+		response := IssueRequest(&tst.req)
+		ExpectStatusCode(t, tst.name, tst.response_code, response)
+		ExpectBody(t, tst.name, tst.response_body, response)
+	}
+
+	// The trash collector should have received one good list with 3
+	// requests on it.
+	for i := 0; i < 3; i++ {
+		item := <-trashq.NextItem
+		if _, ok := item.(TrashRequest); !ok {
+			t.Errorf("item %v could not be parsed as a TrashRequest", item)
+		}
+	}
+
+	expectChannelEmpty(t, trashq.NextItem)
+}
+
+// ====================
+// Helper functions
+// ====================
+
+// IssueRequest executes an HTTP request described by rt, to a
+// REST router.  It returns the HTTP response to the request.
+// (The comment previously called this "IssueTestRequest", which did
+// not match the function name.)
+func IssueRequest(rt *RequestTester) *httptest.ResponseRecorder {
+	response := httptest.NewRecorder()
+	body := bytes.NewReader(rt.request_body)
+	// Error from NewRequest is ignored: test inputs are assumed to
+	// contain valid methods and URIs.
+	req, _ := http.NewRequest(rt.method, rt.uri, body)
+	if rt.api_token != "" {
+		req.Header.Set("Authorization", "OAuth2 "+rt.api_token)
+	}
+	loggingRouter := MakeLoggingRESTRouter()
+	loggingRouter.ServeHTTP(response, req)
+	return response
+}
+
+// ExpectStatusCode checks whether a response has the specified status code,
+// and reports a test failure if not.
+func ExpectStatusCode(
+       t *testing.T,
+       testname string,
+       expected_status int,
+       response *httptest.ResponseRecorder) {
+       if response.Code != expected_status {
+               t.Errorf("%s: expected status %s, got %+v",
+                       testname, expected_status, response)
+       }
+}
+
+func ExpectBody(
+       t *testing.T,
+       testname string,
+       expected_body string,
+       response *httptest.ResponseRecorder) {
+       if response.Body.String() != expected_body {
+               t.Errorf("%s: expected response body '%s', got %+v",
+                       testname, expected_body, response)
+       }
+}
diff --git a/services/keepstore/handlers.go b/services/keepstore/handlers.go
new file mode 100644 (file)
index 0000000..c7559a1
--- /dev/null
@@ -0,0 +1,730 @@
+package main
+
+// REST handlers for Keep are implemented here.
+//
+// GetBlockHandler (GET /locator)
+// PutBlockHandler (PUT /locator)
+// IndexHandler    (GET /index, GET /index/prefix)
+// StatusHandler   (GET /status.json)
+
+import (
+       "bufio"
+       "bytes"
+       "container/list"
+       "crypto/md5"
+       "encoding/json"
+       "fmt"
+       "github.com/gorilla/mux"
+       "io"
+       "log"
+       "net/http"
+       "os"
+       "regexp"
+       "runtime"
+       "strconv"
+       "strings"
+       "syscall"
+       "time"
+)
+
+// MakeRESTRouter returns a new mux.Router that forwards all Keep
+// requests to the appropriate handlers.
+//
+func MakeRESTRouter() *mux.Router {
+	rest := mux.NewRouter()
+
+	// GET/HEAD of a bare 32-hex-digit hash, with or without "+hints".
+	rest.HandleFunc(
+		`/{hash:[0-9a-f]{32}}`, GetBlockHandler).Methods("GET", "HEAD")
+	rest.HandleFunc(
+		`/{hash:[0-9a-f]{32}}+{hints}`,
+		GetBlockHandler).Methods("GET", "HEAD")
+
+	rest.HandleFunc(`/{hash:[0-9a-f]{32}}`, PutBlockHandler).Methods("PUT")
+	rest.HandleFunc(`/{hash:[0-9a-f]{32}}`, DeleteHandler).Methods("DELETE")
+
+	// For IndexHandler we support:
+	//   /index           - returns all locators
+	//   /index/{prefix}  - returns all locators that begin with {prefix}
+	//      {prefix} is a string of hexadecimal digits between 0 and 32 digits.
+	//      If {prefix} is the empty string, return an index of all locators
+	//      (so /index and /index/ behave identically)
+	//      A client may supply a full 32-digit locator string, in which
+	//      case the server will return an index with either zero or one
+	//      entries. This usage allows a client to check whether a block is
+	//      present, and its size and upload time, without retrieving the
+	//      entire block.
+	//
+	rest.HandleFunc(`/index`, IndexHandler).Methods("GET", "HEAD")
+	rest.HandleFunc(
+		`/index/{prefix:[0-9a-f]{0,32}}`, IndexHandler).Methods("GET", "HEAD")
+	rest.HandleFunc(`/status.json`, StatusHandler).Methods("GET", "HEAD")
+
+	// The PullHandler and TrashHandler process "PUT /pull" and "PUT
+	// /trash" requests from Data Manager.  These requests instruct
+	// Keep to replicate or delete blocks; see
+	// https://arvados.org/projects/arvados/wiki/Keep_Design_Doc
+	// for more details.
+	//
+	// Each handler parses the JSON list of block management requests
+	// in the message body, and replaces any existing pull queue or
+	// trash queue with their contents.
+	//
+	rest.HandleFunc(`/pull`, PullHandler).Methods("PUT")
+	rest.HandleFunc(`/trash`, TrashHandler).Methods("PUT")
+
+	// Any request which does not match any of these routes gets
+	// 400 Bad Request.
+	rest.NotFoundHandler = http.HandlerFunc(BadRequestHandler)
+
+	return rest
+}
+
+// BadRequestHandler is the router's catch-all: any request that does
+// not match a registered route is answered with 400 Bad Request.
+func BadRequestHandler(w http.ResponseWriter, r *http.Request) {
+	http.Error(w, BadRequestError.Error(), BadRequestError.HTTPCode)
+}
+
+// FindKeepVolumes scans all mounted volumes on the system for Keep
+// volumes, and returns a list of matching paths.
+//
+// A device is assumed to be a Keep volume if it is a normal or tmpfs
+// volume and has a "/keep" directory directly underneath the mount
+// point.
+//
+func FindKeepVolumes() []string {
+       vols := make([]string, 0)
+
+       if f, err := os.Open(PROC_MOUNTS); err != nil {
+               log.Fatalf("opening %s: %s\n", PROC_MOUNTS, err)
+       } else {
+               scanner := bufio.NewScanner(f)
+               for scanner.Scan() {
+                       args := strings.Fields(scanner.Text())
+                       dev, mount := args[0], args[1]
+                       if mount != "/" &&
+                               (dev == "tmpfs" || strings.HasPrefix(dev, "/dev/")) {
+                               keep := mount + "/keep"
+                               if st, err := os.Stat(keep); err == nil && st.IsDir() {
+                                       vols = append(vols, keep)
+                               }
+                       }
+               }
+               if err := scanner.Err(); err != nil {
+                       log.Fatal(err)
+               }
+       }
+       return vols
+}
+
+// GetBlockHandler serves GET/HEAD requests for a block locator.  It
+// parses any "+hint" suffixes, enforces permission signatures when
+// enforce_permissions is set, and writes the block data on success.
+func GetBlockHandler(resp http.ResponseWriter, req *http.Request) {
+	hash := mux.Vars(req)["hash"]
+
+	hints := mux.Vars(req)["hints"]
+
+	// Parse the locator string and hints from the request.
+	// TODO(twp): implement a Locator type.
+	var signature, timestamp string
+	if hints != "" {
+		// "A<signature>@<timestamp>" is a permission hint; compile
+		// error is ignored because the pattern is a constant.
+		signature_pat, _ := regexp.Compile("^A([[:xdigit:]]+)@([[:xdigit:]]{8})$")
+		for _, hint := range strings.Split(hints, "+") {
+			if match, _ := regexp.MatchString("^[[:digit:]]+$", hint); match {
+				// Server ignores size hints
+			} else if m := signature_pat.FindStringSubmatch(hint); m != nil {
+				signature = m[1]
+				timestamp = m[2]
+			} else if match, _ := regexp.MatchString("^[[:upper:]]", hint); match {
+				// Any unknown hint that starts with an uppercase letter is
+				// presumed to be valid and ignored, to permit forward compatibility.
+			} else {
+				// Unknown format; not a valid locator.
+				http.Error(resp, BadRequestError.Error(), BadRequestError.HTTPCode)
+				return
+			}
+		}
+	}
+
+	// If permission checking is in effect, verify this
+	// request's permission signature.
+	if enforce_permissions {
+		if signature == "" || timestamp == "" {
+			http.Error(resp, PermissionError.Error(), PermissionError.HTTPCode)
+			return
+		} else if IsExpired(timestamp) {
+			http.Error(resp, ExpiredError.Error(), ExpiredError.HTTPCode)
+			return
+		} else {
+			req_locator := req.URL.Path[1:] // strip leading slash
+			if !VerifySignature(req_locator, GetApiToken(req)) {
+				http.Error(resp, PermissionError.Error(), PermissionError.HTTPCode)
+				return
+			}
+		}
+	}
+
+	block, err := GetBlock(hash, false)
+
+	// Garbage collect after each GET. Fixes #2865.
+	// TODO(twp): review Keep memory usage and see if there's
+	// a better way to do this than blindly garbage collecting
+	// after every block.
+	defer runtime.GC()
+
+	if err != nil {
+		// This type assertion is safe because the only errors
+		// GetBlock can return are DiskHashError or NotFoundError.
+		http.Error(resp, err.Error(), err.(*KeepError).HTTPCode)
+		return
+	}
+
+	resp.Header().Set("Content-Length", fmt.Sprintf("%d", len(block)))
+
+	// NOTE(review): the error from Write is assigned but never
+	// checked; a failed write to the client is silently dropped.
+	_, err = resp.Write(block)
+
+	return
+}
+
+func PutBlockHandler(resp http.ResponseWriter, req *http.Request) {
+       // Garbage collect after each PUT. Fixes #2865.
+       // See also GetBlockHandler.
+       defer runtime.GC()
+
+       hash := mux.Vars(req)["hash"]
+
+       // Read the block data to be stored.
+       // If the request exceeds BLOCKSIZE bytes, issue a HTTP 500 error.
+       //
+       if req.ContentLength > BLOCKSIZE {
+               http.Error(resp, TooLongError.Error(), TooLongError.HTTPCode)
+               return
+       }
+
+       buf := make([]byte, req.ContentLength)
+       nread, err := io.ReadFull(req.Body, buf)
+       if err != nil {
+               http.Error(resp, err.Error(), 500)
+       } else if int64(nread) < req.ContentLength {
+               http.Error(resp, "request truncated", 500)
+       } else {
+               if err := PutBlock(buf, hash); err == nil {
+                       // Success; add a size hint, sign the locator if
+                       // possible, and return it to the client.
+                       return_hash := fmt.Sprintf("%s+%d", hash, len(buf))
+                       api_token := GetApiToken(req)
+                       if PermissionSecret != nil && api_token != "" {
+                               expiry := time.Now().Add(permission_ttl)
+                               return_hash = SignLocator(return_hash, api_token, expiry)
+                       }
+                       resp.Write([]byte(return_hash + "\n"))
+               } else {
+                       ke := err.(*KeepError)
+                       http.Error(resp, ke.Error(), ke.HTTPCode)
+               }
+       }
+       return
+}
+
+// IndexHandler
+//     A HandleFunc to address /index and /index/{prefix} requests.
+//
+func IndexHandler(resp http.ResponseWriter, req *http.Request) {
+	// Reject unauthorized requests.
+	if !IsDataManagerToken(GetApiToken(req)) {
+		http.Error(resp, UnauthorizedError.Error(), UnauthorizedError.HTTPCode)
+		return
+	}
+
+	prefix := mux.Vars(req)["prefix"]
+
+	// Accumulate the per-volume listings in a buffer rather than by
+	// repeated string concatenation, which copies the whole index on
+	// every iteration.
+	var index bytes.Buffer
+	for _, vol := range KeepVM.Volumes() {
+		index.WriteString(vol.Index(prefix))
+	}
+	resp.Write(index.Bytes())
+}
+
+// StatusHandler
+//     Responds to /status.json requests with the current node status,
+//     described in a JSON structure.
+//
+//     The data given in a status.json response includes:
+//        volumes - a list of Keep volumes currently in use by this server
+//          each volume is an object with the following fields:
+//            * mount_point
+//            * device_num (an integer identifying the underlying filesystem)
+//            * bytes_free
+//            * bytes_used
+//
+// VolumeStatus describes one Keep volume in a status.json response.
+type VolumeStatus struct {
+	MountPoint string `json:"mount_point"`
+	DeviceNum  uint64 `json:"device_num"`
+	BytesFree  uint64 `json:"bytes_free"`
+	BytesUsed  uint64 `json:"bytes_used"`
+}
+
+// NodeStatus is the top-level structure of a status.json response.
+type NodeStatus struct {
+	Volumes []*VolumeStatus `json:"volumes"`
+}
+
+func StatusHandler(resp http.ResponseWriter, req *http.Request) {
+       st := GetNodeStatus()
+       if jstat, err := json.Marshal(st); err == nil {
+               resp.Write(jstat)
+       } else {
+               log.Printf("json.Marshal: %s\n", err)
+               log.Printf("NodeStatus = %v\n", st)
+               http.Error(resp, err.Error(), 500)
+       }
+}
+
+// GetNodeStatus
+//     Returns a NodeStatus struct describing this Keep
+//     node's current status.
+//
+func GetNodeStatus() *NodeStatus {
+       st := new(NodeStatus)
+
+       st.Volumes = make([]*VolumeStatus, len(KeepVM.Volumes()))
+       for i, vol := range KeepVM.Volumes() {
+               st.Volumes[i] = vol.Status()
+       }
+       return st
+}
+
+// GetVolumeStatus
+//     Returns a VolumeStatus describing the requested volume.
+//     Returns nil (after logging) if the volume cannot be stat'ed.
+//
+func GetVolumeStatus(volume string) *VolumeStatus {
+	var fs syscall.Statfs_t
+	var devnum uint64
+
+	// Device number identifies the underlying filesystem, so two
+	// mount points on the same device report the same device_num.
+	if fi, err := os.Stat(volume); err == nil {
+		devnum = fi.Sys().(*syscall.Stat_t).Dev
+	} else {
+		log.Printf("GetVolumeStatus: os.Stat: %s\n", err)
+		return nil
+	}
+
+	err := syscall.Statfs(volume, &fs)
+	if err != nil {
+		log.Printf("GetVolumeStatus: statfs: %s\n", err)
+		return nil
+	}
+	// These calculations match the way df calculates disk usage:
+	// "free" space is measured by fs.Bavail, but "used" space
+	// uses fs.Blocks - fs.Bfree.
+	free := fs.Bavail * uint64(fs.Bsize)
+	used := (fs.Blocks - fs.Bfree) * uint64(fs.Bsize)
+	return &VolumeStatus{volume, devnum, free, used}
+}
+
+// DeleteHandler processes DELETE requests.
+//
+// DELETE /{hash:[0-9a-f]{32} will delete the block with the specified hash
+// from all connected volumes.
+//
+// Only the Data Manager, or an Arvados admin with scope "all", are
+// allowed to issue DELETE requests.  If a DELETE request is not
+// authenticated or is issued by a non-admin user, the server returns
+// a PermissionError.
+//
+// Upon receiving a valid request from an authorized user,
+// DeleteHandler deletes all copies of the specified block on local
+// writable volumes.
+//
+// Response format:
+//
+// If the requested blocks was not found on any volume, the response
+// code is HTTP 404 Not Found.
+//
+// Otherwise, the response code is 200 OK, with a response body
+// consisting of the JSON message
+//
+//    {"copies_deleted":d,"copies_failed":f}
+//
+// where d and f are integers representing the number of blocks that
+// were successfully and unsuccessfully deleted.
+//
+func DeleteHandler(resp http.ResponseWriter, req *http.Request) {
+       hash := mux.Vars(req)["hash"]
+
+       // Confirm that this user is an admin and has a token with unlimited scope.
+       var tok = GetApiToken(req)
+       if tok == "" || !CanDelete(tok) {
+               http.Error(resp, PermissionError.Error(), PermissionError.HTTPCode)
+               return
+       }
+
+       if never_delete {
+               http.Error(resp, MethodDisabledError.Error(), MethodDisabledError.HTTPCode)
+               return
+       }
+
+       // Delete copies of this block from all available volumes.  Report
+       // how many blocks were successfully and unsuccessfully
+       // deleted.
+       var result struct {
+               Deleted int `json:"copies_deleted"`
+               Failed  int `json:"copies_failed"`
+       }
+       for _, vol := range KeepVM.Volumes() {
+               if err := vol.Delete(hash); err == nil {
+                       result.Deleted++
+               } else if os.IsNotExist(err) {
+                       continue
+               } else {
+                       result.Failed++
+                       log.Println("DeleteHandler:", err)
+               }
+       }
+
+       var st int
+
+       if result.Deleted == 0 && result.Failed == 0 {
+               st = http.StatusNotFound
+       } else {
+               st = http.StatusOK
+       }
+
+       resp.WriteHeader(st)
+
+       if st == http.StatusOK {
+               if body, err := json.Marshal(result); err == nil {
+                       resp.Write(body)
+               } else {
+                       log.Printf("json.Marshal: %s (result = %v)\n", err, result)
+                       http.Error(resp, err.Error(), 500)
+               }
+       }
+}
+
+/* PullHandler processes "PUT /pull" requests for the data manager.
+   The request body is a JSON message containing a list of pull
+   requests in the following format:
+
+   [
+      {
+         "locator":"e4d909c290d0fb1ca068ffaddf22cbd0+4985",
+         "servers":[
+                       "keep0.qr1hi.arvadosapi.com:25107",
+                       "keep1.qr1hi.arvadosapi.com:25108"
+                ]
+         },
+         {
+                "locator":"55ae4d45d2db0793d53f03e805f656e5+658395",
+                "servers":[
+                       "10.0.1.5:25107",
+                       "10.0.1.6:25107",
+                       "10.0.1.7:25108"
+                ]
+         },
+         ...
+   ]
+
+   Each pull request in the list consists of a block locator string
+   and an ordered list of servers.  Keepstore should try to fetch the
+   block from each server in turn.
+
+   If the request has not been sent by the Data Manager, return 401
+   Unauthorized.
+
+   If the JSON unmarshalling fails, return 400 Bad Request.
+*/
+
// PullRequest is one entry in the pull list sent to PullHandler: a
// block locator plus an ordered list of servers to fetch it from
// (tried in turn).  See the comment block above for the wire format.
type PullRequest struct {
	Locator string   `json:"locator"`
	Servers []string `json:"servers"`
}
+
+func PullHandler(resp http.ResponseWriter, req *http.Request) {
+       // Reject unauthorized requests.
+       if !IsDataManagerToken(GetApiToken(req)) {
+               http.Error(resp, UnauthorizedError.Error(), UnauthorizedError.HTTPCode)
+               return
+       }
+
+       // Parse the request body.
+       var pr []PullRequest
+       r := json.NewDecoder(req.Body)
+       if err := r.Decode(&pr); err != nil {
+               http.Error(resp, BadRequestError.Error(), BadRequestError.HTTPCode)
+               return
+       }
+
+       // We have a properly formatted pull list sent from the data
+       // manager.  Report success and send the list to the pull list
+       // manager for further handling.
+       resp.WriteHeader(http.StatusOK)
+       resp.Write([]byte(
+               fmt.Sprintf("Received %d pull requests\n", len(pr))))
+
+       plist := list.New()
+       for _, p := range pr {
+               plist.PushBack(p)
+       }
+
+       if pullq == nil {
+               pullq = NewWorkQueue()
+       }
+       pullq.ReplaceQueue(plist)
+}
+
// TrashRequest is one entry in the trash list sent to TrashHandler:
// a block locator plus the block's last-modified time (Unix seconds).
type TrashRequest struct {
	Locator    string `json:"locator"`
	BlockMtime int64  `json:"block_mtime"`
}
+
+func TrashHandler(resp http.ResponseWriter, req *http.Request) {
+       // Reject unauthorized requests.
+       if !IsDataManagerToken(GetApiToken(req)) {
+               http.Error(resp, UnauthorizedError.Error(), UnauthorizedError.HTTPCode)
+               return
+       }
+
+       // Parse the request body.
+       var trash []TrashRequest
+       r := json.NewDecoder(req.Body)
+       if err := r.Decode(&trash); err != nil {
+               http.Error(resp, BadRequestError.Error(), BadRequestError.HTTPCode)
+               return
+       }
+
+       // We have a properly formatted trash list sent from the data
+       // manager.  Report success and send the list to the trash work
+       // queue for further handling.
+       resp.WriteHeader(http.StatusOK)
+       resp.Write([]byte(
+               fmt.Sprintf("Received %d trash requests\n", len(trash))))
+
+       tlist := list.New()
+       for _, t := range trash {
+               tlist.PushBack(t)
+       }
+
+       if trashq == nil {
+               trashq = NewWorkQueue()
+       }
+       trashq.ReplaceQueue(tlist)
+}
+
+// ==============================
+// GetBlock and PutBlock implement lower-level code for handling
+// blocks by rooting through volumes connected to the local machine.
+// Once the handler has determined that system policy permits the
+// request, it calls these methods to perform the actual operation.
+//
+// TODO(twp): this code would probably be better located in the
+// VolumeManager interface. As an abstraction, the VolumeManager
+// should be the only part of the code that cares about which volume a
+// block is stored on, so it should be responsible for figuring out
+// which volume to check for fetching blocks, storing blocks, etc.
+
+// ==============================
+// GetBlock fetches and returns the block identified by "hash".  If
+// the update_timestamp argument is true, GetBlock also updates the
+// block's file modification time (for the sake of PutBlock, which
+// must update the file's timestamp when the block already exists).
+//
+// On success, GetBlock returns a byte slice with the block data, and
+// a nil error.
+//
+// If the block cannot be found on any volume, returns NotFoundError.
+//
+// If the block found does not have the correct MD5 hash, returns
+// DiskHashError.
+//
+
// GetBlock searches every volume for "hash" and returns the first
// copy whose content actually matches the hash; see the comment
// block above for the full contract.
func GetBlock(hash string, update_timestamp bool) ([]byte, error) {
	// Attempt to read the requested hash from a keep volume.
	// error_to_caller starts as NotFoundError and is upgraded to
	// DiskHashError if a corrupt copy is encountered along the way.
	error_to_caller := NotFoundError

	for _, vol := range KeepVM.Volumes() {
		if buf, err := vol.Get(hash); err != nil {
			// IsNotExist is an expected error and may be ignored.
			// (If all volumes report IsNotExist, we return a NotFoundError)
			// All other errors should be logged but we continue trying to
			// read.
			switch {
			case os.IsNotExist(err):
				continue
			default:
				log.Printf("GetBlock: reading %s: %s\n", hash, err)
			}
		} else {
			// Double check the file checksum.
			//
			filehash := fmt.Sprintf("%x", md5.Sum(buf))
			if filehash != hash {
				// TODO(twp): this condition probably represents a bad disk and
				// should raise major alarm bells for an administrator: e.g.
				// they should be sent directly to an event manager at high
				// priority or logged as urgent problems.
				//
				log.Printf("%s: checksum mismatch for request %s (actual %s)\n",
					vol, hash, filehash)
				error_to_caller = DiskHashError
			} else {
				// Success!
				// error_to_caller != NotFoundError here means an earlier
				// volume held a corrupt copy of this same block.
				if error_to_caller != NotFoundError {
					log.Printf("%s: checksum mismatch for request %s but a good copy was found on another volume and returned\n",
						vol, hash)
				}
				// Update the timestamp if the caller requested.
				// If we could not update the timestamp, continue looking on
				// other volumes.
				if update_timestamp {
					if vol.Touch(hash) != nil {
						continue
					}
				}
				return buf, nil
			}
		}
	}

	// Every volume either lacked the block or held a corrupt copy.
	if error_to_caller != NotFoundError {
		log.Printf("%s: checksum mismatch, no good copy found\n", hash)
	}
	return nil, error_to_caller
}
+
+/* PutBlock(block, hash)
+   Stores the BLOCK (identified by the content id HASH) in Keep.
+
+   The MD5 checksum of the block must be identical to the content id HASH.
+   If not, an error is returned.
+
+   PutBlock stores the BLOCK on the first Keep volume with free space.
+   A failure code is returned to the user only if all volumes fail.
+
+   On success, PutBlock returns nil.
+   On failure, it returns a KeepError with one of the following codes:
+
+   500 Collision
+          A different block with the same hash already exists on this
+          Keep server.
+   422 MD5Fail
+          The MD5 hash of the BLOCK does not match the argument HASH.
+   503 Full
+          There was not enough space left in any Keep volume to store
+          the object.
+   500 Fail
+          The object could not be stored for some other reason (e.g.
+          all writes failed). The text of the error message should
+          provide as much detail as possible.
+*/
+
+func PutBlock(block []byte, hash string) error {
+       // Check that BLOCK's checksum matches HASH.
+       blockhash := fmt.Sprintf("%x", md5.Sum(block))
+       if blockhash != hash {
+               log.Printf("%s: MD5 checksum %s did not match request", hash, blockhash)
+               return RequestHashError
+       }
+
+       // If we already have a block on disk under this identifier, return
+       // success (but check for MD5 collisions).  While fetching the block,
+       // update its timestamp.
+       // The only errors that GetBlock can return are DiskHashError and NotFoundError.
+       // In either case, we want to write our new (good) block to disk,
+       // so there is nothing special to do if err != nil.
+       //
+       if oldblock, err := GetBlock(hash, true); err == nil {
+               if bytes.Compare(block, oldblock) == 0 {
+                       // The block already exists; return success.
+                       return nil
+               } else {
+                       return CollisionError
+               }
+       }
+
+       // Choose a Keep volume to write to.
+       // If this volume fails, try all of the volumes in order.
+       vol := KeepVM.Choose()
+       if err := vol.Put(hash, block); err == nil {
+               return nil // success!
+       } else {
+               allFull := true
+               for _, vol := range KeepVM.Volumes() {
+                       err := vol.Put(hash, block)
+                       if err == nil {
+                               return nil // success!
+                       }
+                       if err != FullError {
+                               // The volume is not full but the write did not succeed.
+                               // Report the error and continue trying.
+                               allFull = false
+                               log.Printf("%s: Write(%s): %s\n", vol, hash, err)
+                       }
+               }
+
+               if allFull {
+                       log.Printf("all Keep volumes full")
+                       return FullError
+               } else {
+                       log.Printf("all Keep volumes failed")
+                       return GenericError
+               }
+       }
+}
+
// validLocatorRe matches a valid Keep locator: exactly 32 lowercase
// hex digits (an MD5 digest).  Compiled once at startup; the original
// recompiled the pattern (and handled an impossible error) per call.
var validLocatorRe = regexp.MustCompile(`^[0-9a-f]{32}$`)

// IsValidLocator returns true if the specified string is a valid
// Keep locator.  When Keep is extended to support hash types other
// than MD5, this should be updated to cover those as well.
func IsValidLocator(loc string) bool {
	return validLocatorRe.MatchString(loc)
}
+
+// GetApiToken returns the OAuth2 token from the Authorization
+// header of a HTTP request, or an empty string if no matching
+// token is found.
+func GetApiToken(req *http.Request) string {
+       if auth, ok := req.Header["Authorization"]; ok {
+               if pat, err := regexp.Compile(`^OAuth2\s+(.*)`); err != nil {
+                       log.Println(err)
+               } else if match := pat.FindStringSubmatch(auth[0]); match != nil {
+                       return match[1]
+               }
+       }
+       return ""
+}
+
// IsExpired returns true if the given Unix timestamp (expressed as a
// hexadecimal string) is in the past, or if timestamp_hex cannot be
// parsed as a hexadecimal string.
func IsExpired(timestamp_hex string) bool {
	sec, parseErr := strconv.ParseInt(timestamp_hex, 16, 0)
	if parseErr != nil {
		// Unparsable timestamps are treated as already expired.
		log.Printf("IsExpired: %s\n", parseErr)
		return true
	}
	expiry := time.Unix(sec, 0)
	return expiry.Before(time.Now())
}
+
+// CanDelete returns true if the user identified by api_token is
+// allowed to delete blocks.
+func CanDelete(api_token string) bool {
+       if api_token == "" {
+               return false
+       }
+       // Blocks may be deleted only when Keep has been configured with a
+       // data manager.
+       if IsDataManagerToken(api_token) {
+               return true
+       }
+       // TODO(twp): look up api_token with the API server
+       // return true if is_admin is true and if the token
+       // has unlimited scope
+       return false
+}
+
+// IsDataManagerToken returns true if api_token represents the data
+// manager's token.
+func IsDataManagerToken(api_token string) bool {
+       return data_manager_token != "" && api_token == data_manager_token
+}
diff --git a/services/keepstore/keepstore.go b/services/keepstore/keepstore.go
new file mode 100644 (file)
index 0000000..75b6c40
--- /dev/null
@@ -0,0 +1,306 @@
+package main
+
+import (
+       "bytes"
+       "flag"
+       "fmt"
+       "io/ioutil"
+       "log"
+       "net"
+       "net/http"
+       "os"
+       "os/signal"
+       "strings"
+       "syscall"
+       "time"
+)
+
// ======================
// Configuration settings
//
// TODO(twp): make all of these configurable via command line flags
// and/or configuration file settings.

// Default TCP address on which to listen for requests.
// Initialized by the --listen flag.
const DEFAULT_ADDR = ":25107"

// A Keep "block" is 64MB.
const BLOCKSIZE = 64 * 1024 * 1024

// A Keep volume must have at least MIN_FREE_KILOBYTES available
// in order to permit writes (i.e. room for at least one full block).
const MIN_FREE_KILOBYTES = BLOCKSIZE / 1024

// Path of the mount table scanned for Keep volumes.
// NOTE(review): declared as a var rather than a const, presumably so
// tests can substitute a fixture file — confirm against the tests.
var PROC_MOUNTS = "/proc/mounts"

// enforce_permissions controls whether permission signatures
// should be enforced (affecting GET and DELETE requests).
// Initialized by the --enforce-permissions flag.
var enforce_permissions bool

// permission_ttl is the time duration for which new permission
// signatures (returned by PUT requests) will be valid.
// Initialized by the --permission-ttl flag.
var permission_ttl time.Duration

// data_manager_token represents the API token used by the
// Data Manager, and is required on certain privileged operations.
// Initialized by the --data-manager-token-file flag.
var data_manager_token string

// never_delete can be used to prevent the DELETE handler from
// actually deleting anything.
// Initialized by the --never-delete flag.
var never_delete = false
+
// ==========
// Error types.
//

// KeepError pairs an HTTP status code with an error message, so
// handlers can report internal failures directly as HTTP responses.
type KeepError struct {
	HTTPCode int
	ErrMsg   string
}
+
// Canonical errors returned by the Keep handlers and block store.
// Each carries the HTTP status code used to report it to clients.
var (
	BadRequestError     = &KeepError{400, "Bad Request"}
	UnauthorizedError   = &KeepError{401, "Unauthorized"}
	CollisionError      = &KeepError{500, "Collision"}
	RequestHashError    = &KeepError{422, "Hash mismatch in request"}
	PermissionError     = &KeepError{403, "Forbidden"}
	DiskHashError       = &KeepError{500, "Hash mismatch in stored data"}
	ExpiredError        = &KeepError{401, "Expired permission signature"}
	NotFoundError       = &KeepError{404, "Not Found"}
	GenericError        = &KeepError{500, "Fail"}
	FullError           = &KeepError{503, "Full"}
	TooLongError        = &KeepError{504, "Timeout"}
	MethodDisabledError = &KeepError{405, "Method disabled"}
)
+
// Error implements the error interface for KeepError by returning
// the bare message text (the HTTP code is not included).
func (e *KeepError) Error() string {
	return e.ErrMsg
}
+
// ========================
// Internal data structures
//
// These global variables are used by multiple parts of the
// program. They are good candidates for moving into their own
// packages.

// The Keep VolumeManager maintains a list of available volumes.
// Initialized by the --volumes flag (or by FindKeepVolumes).
var KeepVM VolumeManager

// The pull list manager and trash queue are threadsafe queues which
// support atomic update operations. The PullHandler and TrashHandler
// store results from Data Manager /pull and /trash requests here.
// Both queues are created lazily by the first request that needs them.
//
// See the Keep and Data Manager design documents for more details:
// https://arvados.org/projects/arvados/wiki/Keep_Design_Doc
// https://arvados.org/projects/arvados/wiki/Data_Manager_Design_Doc
//
var pullq *WorkQueue
var trashq *WorkQueue
+
+// TODO(twp): continue moving as much code as possible out of main
+// so it can be effectively tested. Esp. handling and postprocessing
+// of command line flags (identifying Keep volumes and initializing
+// permission arguments).
+
// main parses command-line flags, discovers and validates Keep
// volumes, loads the data manager token and permission key, wires
// the REST router into net/http, and serves requests until SIGTERM.
func main() {
	log.Println("Keep started: pid", os.Getpid())

	// Parse command-line flags:
	//
	// -listen=ipaddr:port
	//    Interface on which to listen for requests. Use :port without
	//    an ipaddr to listen on all network interfaces.
	//    Examples:
	//      -listen=127.0.0.1:4949
	//      -listen=10.0.1.24:8000
	//      -listen=:25107 (to listen to port 25107 on all interfaces)
	//
	// -volumes
	//    A comma-separated list of directories to use as Keep volumes.
	//    Example:
	//      -volumes=/var/keep01,/var/keep02,/var/keep03/subdir
	//
	//    If -volumes is empty or is not present, Keep will select volumes
	//    by looking at currently mounted filesystems for /keep top-level
	//    directories.

	// Flags that only matter during startup are local variables here;
	// flags consulted later by the handlers (enforce_permissions,
	// never_delete) are bound to the package-level variables directly.
	var (
		data_manager_token_file string
		listen                  string
		permission_key_file     string
		permission_ttl_sec      int
		serialize_io            bool
		volumearg               string
		pidfile                 string
	)
	flag.StringVar(
		&data_manager_token_file,
		"data-manager-token-file",
		"",
		"File with the API token used by the Data Manager. All DELETE "+
			"requests or GET /index requests must carry this token.")
	flag.BoolVar(
		&enforce_permissions,
		"enforce-permissions",
		false,
		"Enforce permission signatures on requests.")
	flag.StringVar(
		&listen,
		"listen",
		DEFAULT_ADDR,
		"Interface on which to listen for requests, in the format "+
			"ipaddr:port. e.g. -listen=10.0.1.24:8000. Use -listen=:port "+
			"to listen on all network interfaces.")
	flag.BoolVar(
		&never_delete,
		"never-delete",
		false,
		"If set, nothing will be deleted. HTTP 405 will be returned "+
			"for valid DELETE requests.")
	flag.StringVar(
		&permission_key_file,
		"permission-key-file",
		"",
		"File containing the secret key for generating and verifying "+
			"permission signatures.")
	flag.IntVar(
		&permission_ttl_sec,
		"permission-ttl",
		1209600,
		"Expiration time (in seconds) for newly generated permission "+
			"signatures.")
	flag.BoolVar(
		&serialize_io,
		"serialize",
		false,
		"If set, all read and write operations on local Keep volumes will "+
			"be serialized.")
	flag.StringVar(
		&volumearg,
		"volumes",
		"",
		"Comma-separated list of directories to use for Keep volumes, "+
			"e.g. -volumes=/var/keep1,/var/keep2. If empty or not "+
			"supplied, Keep will scan mounted filesystems for volumes "+
			"with a /keep top-level directory.")

	flag.StringVar(
		&pidfile,
		"pid",
		"",
		"Path to write pid file")

	flag.Parse()

	// Look for local keep volumes.
	var keepvols []string
	if volumearg == "" {
		// TODO(twp): decide whether this is desirable default behavior.
		// In production we may want to require the admin to specify
		// Keep volumes explicitly.
		keepvols = FindKeepVolumes()
	} else {
		keepvols = strings.Split(volumearg, ",")
	}

	// Check that the specified volumes actually exist.
	var goodvols []Volume = nil
	for _, v := range keepvols {
		if _, err := os.Stat(v); err == nil {
			log.Println("adding Keep volume:", v)
			newvol := MakeUnixVolume(v, serialize_io)
			goodvols = append(goodvols, &newvol)
		} else {
			log.Printf("bad Keep volume: %s\n", err)
		}
	}

	if len(goodvols) == 0 {
		log.Fatal("could not find any keep volumes")
	}

	// Initialize data manager token and permission key.
	// If these tokens are specified but cannot be read,
	// raise a fatal error.
	if data_manager_token_file != "" {
		if buf, err := ioutil.ReadFile(data_manager_token_file); err == nil {
			data_manager_token = strings.TrimSpace(string(buf))
		} else {
			log.Fatalf("reading data manager token: %s\n", err)
		}
	}
	if permission_key_file != "" {
		if buf, err := ioutil.ReadFile(permission_key_file); err == nil {
			PermissionSecret = bytes.TrimSpace(buf)
		} else {
			log.Fatalf("reading permission key: %s\n", err)
		}
	}

	// Initialize permission TTL
	permission_ttl = time.Duration(permission_ttl_sec) * time.Second

	// If --enforce-permissions is true, we must have a permission key
	// to continue.
	if PermissionSecret == nil {
		if enforce_permissions {
			log.Fatal("--enforce-permissions requires a permission key")
		} else {
			log.Println("Running without a PermissionSecret. Block locators " +
				"returned by this server will not be signed, and will be rejected " +
				"by a server that enforces permissions.")
			log.Println("To fix this, run Keep with --permission-key-file=<path> " +
				"to define the location of a file containing the permission key.")
		}
	}

	// Start a round-robin VolumeManager with the volumes we have found.
	KeepVM = MakeRRVolumeManager(goodvols)

	// Tell the built-in HTTP server to direct all requests to the REST router.
	loggingRouter := MakeLoggingRESTRouter()
	http.HandleFunc("/", func(resp http.ResponseWriter, req *http.Request) {
		loggingRouter.ServeHTTP(resp, req)
	})

	// Set up a TCP listener.
	listener, err := net.Listen("tcp", listen)
	if err != nil {
		log.Fatal(err)
	}

	// Shut down the server gracefully (by closing the listener)
	// if SIGTERM is received.
	// The channel is buffered (capacity 1), so a signal delivered
	// before the goroutine is scheduled is not lost.
	term := make(chan os.Signal, 1)
	go func(sig <-chan os.Signal) {
		s := <-sig
		log.Println("caught signal:", s)
		listener.Close()
	}(term)
	signal.Notify(term, syscall.SIGTERM)

	if pidfile != "" {
		f, err := os.Create(pidfile)
		if err == nil {
			fmt.Fprint(f, os.Getpid())
			f.Close()
		} else {
			// A pid file failure is not fatal; the server runs anyway.
			log.Printf("Error writing pid file (%s): %s", pidfile, err.Error())
		}
	}

	// Start listening for requests.
	// Serve returns once the SIGTERM goroutine closes the listener.
	srv := &http.Server{Addr: listen}
	srv.Serve(listener)

	log.Println("shutting down")

	if pidfile != "" {
		os.Remove(pidfile)
	}
}
diff --git a/services/keepstore/keepstore_test.go b/services/keepstore/keepstore_test.go
new file mode 100644 (file)
index 0000000..686f502
--- /dev/null
@@ -0,0 +1,473 @@
+package main
+
+import (
+       "bytes"
+       "fmt"
+       "io/ioutil"
+       "os"
+       "path"
+       "regexp"
+       "sort"
+       "strings"
+       "testing"
+)
+
+// Test fixtures: blocks with precomputed MD5 digests.
+var TEST_BLOCK = []byte("The quick brown fox jumps over the lazy dog.")
+var TEST_HASH = "e4d909c290d0fb1ca068ffaddf22cbd0"
+// Body expected from a successful PUT: "<hash>+<size>\n".
+var TEST_HASH_PUT_RESPONSE = "e4d909c290d0fb1ca068ffaddf22cbd0+44\n"
+
+var TEST_BLOCK_2 = []byte("Pack my box with five dozen liquor jugs.")
+var TEST_HASH_2 = "f15ac516f788aec4f30932ffb6395c39"
+
+var TEST_BLOCK_3 = []byte("Now is the time for all good men to come to the aid of their country.")
+var TEST_HASH_3 = "eed29bbffbc2dbe5e5ee0bb71888e61f"
+
+// BAD_BLOCK is used to test collisions and corruption.
+// It must not match any test hashes.
+var BAD_BLOCK = []byte("The magic words are squeamish ossifrage.")
+
+// TODO(twp): Tests still to be written
+//
+//   * TestPutBlockFull
+//       - test that PutBlock returns 503 Full if the filesystem is full.
+//         (must mock FreeDiskSpace or Statfs? use a tmpfs?)
+//
+//   * TestPutBlockWriteErr
+//       - test the behavior when Write returns an error.
+//           - Possible solutions: use a small tmpfs and a high
+//             MIN_FREE_KILOBYTES to trick PutBlock into attempting
+//             to write a block larger than the amount of space left
+//           - use an interface to mock ioutil.TempFile with a File
+//             object that always returns an error on write
+//
+// ========================================
+// GetBlock tests.
+// ========================================
+
+// TestGetBlock
+//     Test that simple block reads succeed.
+//
+func TestGetBlock(t *testing.T) {
+       defer teardown()
+
+       // Prepare two test Keep volumes. Our block is stored on the second volume.
+       KeepVM = MakeTestVolumeManager(2)
+       defer func() { KeepVM.Quit() }()
+
+       vols := KeepVM.Volumes()
+       if err := vols[1].Put(TEST_HASH, TEST_BLOCK); err != nil {
+               t.Error(err)
+       }
+
+       // Check that GetBlock returns success.
+       result, err := GetBlock(TEST_HASH, false)
+       if err != nil {
+               t.Errorf("GetBlock error: %s", err)
+       }
+       // Compare fmt representations of the two byte slices.
+       if fmt.Sprint(result) != fmt.Sprint(TEST_BLOCK) {
+               t.Errorf("expected %s, got %s", TEST_BLOCK, result)
+       }
+}
+
+// TestGetBlockMissing
+//     GetBlock must return an error when the block is not found.
+//
+func TestGetBlockMissing(t *testing.T) {
+       defer teardown()
+
+       // Create two empty test Keep volumes.
+       KeepVM = MakeTestVolumeManager(2)
+       defer func() { KeepVM.Quit() }()
+
+       // Check that GetBlock returns failure.
+       result, err := GetBlock(TEST_HASH, false)
+       if err != NotFoundError {
+               t.Errorf("Expected NotFoundError, got %v", result)
+       }
+}
+
+// TestGetBlockCorrupt
+//     GetBlock must return an error when a corrupted block is requested
+//     (the contents of the file do not checksum to its hash).
+//
+func TestGetBlockCorrupt(t *testing.T) {
+       defer teardown()
+
+       // Create two test Keep volumes and store a corrupt block in one.
+       KeepVM = MakeTestVolumeManager(2)
+       defer func() { KeepVM.Quit() }()
+
+       // Deliberately store content that does not hash to TEST_HASH.
+       vols := KeepVM.Volumes()
+       vols[0].Put(TEST_HASH, BAD_BLOCK)
+
+       // Check that GetBlock returns failure.
+       result, err := GetBlock(TEST_HASH, false)
+       if err != DiskHashError {
+               t.Errorf("Expected DiskHashError, got %v (buf: %v)", err, result)
+       }
+}
+
+// ========================================
+// PutBlock tests
+// ========================================
+
+// TestPutBlockOK
+//     PutBlock can perform a simple block write and returns success.
+//
+func TestPutBlockOK(t *testing.T) {
+       defer teardown()
+
+       // Create two test Keep volumes.
+       KeepVM = MakeTestVolumeManager(2)
+       defer func() { KeepVM.Quit() }()
+
+       // Check that PutBlock stores the data as expected.
+       if err := PutBlock(TEST_BLOCK, TEST_HASH); err != nil {
+               t.Fatalf("PutBlock: %v", err)
+       }
+
+       // The write is expected on vols[0] — presumably the round-robin
+       // manager's first choice; confirm against PutBlock's routing.
+       vols := KeepVM.Volumes()
+       result, err := vols[0].Get(TEST_HASH)
+       if err != nil {
+               t.Fatalf("Volume #0 Get returned error: %v", err)
+       }
+       if string(result) != string(TEST_BLOCK) {
+               t.Fatalf("PutBlock stored '%s', Get retrieved '%s'",
+                       string(TEST_BLOCK), string(result))
+       }
+}
+
+// TestPutBlockOneVol
+//     PutBlock still returns success even when only one of the known
+//     volumes is online.
+//
+func TestPutBlockOneVol(t *testing.T) {
+       defer teardown()
+
+       // Create two test Keep volumes, but cripple one of them.
+       KeepVM = MakeTestVolumeManager(2)
+       defer func() { KeepVM.Quit() }()
+
+       vols := KeepVM.Volumes()
+       vols[0].(*MockVolume).Bad = true
+
+       // Check that PutBlock stores the data as expected.
+       if err := PutBlock(TEST_BLOCK, TEST_HASH); err != nil {
+               t.Fatalf("PutBlock: %v", err)
+       }
+
+       // vols[0] is Bad, so the write must be readable via the other volume.
+       result, err := GetBlock(TEST_HASH, false)
+       if err != nil {
+               t.Fatalf("GetBlock: %v", err)
+       }
+       if string(result) != string(TEST_BLOCK) {
+               t.Error("PutBlock/GetBlock mismatch")
+               t.Fatalf("PutBlock stored '%s', GetBlock retrieved '%s'",
+                       string(TEST_BLOCK), string(result))
+       }
+}
+
+// TestPutBlockMD5Fail
+//     Check that PutBlock returns an error if passed a block and hash that
+//     do not match.
+//
+func TestPutBlockMD5Fail(t *testing.T) {
+       defer teardown()
+
+       // Create two test Keep volumes.
+       KeepVM = MakeTestVolumeManager(2)
+       defer func() { KeepVM.Quit() }()
+
+       // Check that PutBlock returns the expected error when the hash does
+       // not match the block.
+       if err := PutBlock(BAD_BLOCK, TEST_HASH); err != RequestHashError {
+               t.Error("Expected RequestHashError, got %v", err)
+       }
+
+       // Confirm that GetBlock fails to return anything.
+       if result, err := GetBlock(TEST_HASH, false); err != NotFoundError {
+               t.Errorf("GetBlock succeeded after a corrupt block store (result = %s, err = %v)",
+                       string(result), err)
+       }
+}
+
+// TestPutBlockCorrupt
+//     PutBlock should overwrite corrupt blocks on disk when given
+//     a PUT request with a good block.
+//
+func TestPutBlockCorrupt(t *testing.T) {
+       defer teardown()
+
+       // Create two test Keep volumes.
+       KeepVM = MakeTestVolumeManager(2)
+       defer func() { KeepVM.Quit() }()
+
+       // Store a corrupted block under TEST_HASH.
+       vols := KeepVM.Volumes()
+       vols[0].Put(TEST_HASH, BAD_BLOCK)
+       // A PUT of the correct content must succeed despite the bad copy.
+       if err := PutBlock(TEST_BLOCK, TEST_HASH); err != nil {
+               t.Errorf("PutBlock: %v", err)
+       }
+
+       // The block on disk should now match TEST_BLOCK.
+       if block, err := GetBlock(TEST_HASH, false); err != nil {
+               t.Errorf("GetBlock: %v", err)
+       } else if bytes.Compare(block, TEST_BLOCK) != 0 {
+               t.Errorf("GetBlock returned: '%s'", string(block))
+       }
+}
+
+// TestPutBlockCollision
+//     PutBlock returns a 400 Collision error when attempting to
+//     store a block that collides with another block on disk.
+//
+func TestPutBlockCollision(t *testing.T) {
+       defer teardown()
+
+       // These blocks both hash to the MD5 digest cee9a457e790cf20d4bdaa6d69f01e41.
+       var b1 = []byte("\x0e0eaU\x9a\xa7\x87\xd0\x0b\xc6\xf7\x0b\xbd\xfe4\x04\xcf\x03e\x9epO\x854\xc0\x0f\xfbe\x9cL\x87@\xcc\x94/\xeb-\xa1\x15\xa3\xf4\x15\\\xbb\x86\x07Is\x86em}\x1f4\xa4 Y\xd7\x8fZ\x8d\xd1\xef")
+       var b2 = []byte("\x0e0eaU\x9a\xa7\x87\xd0\x0b\xc6\xf7\x0b\xbd\xfe4\x04\xcf\x03e\x9etO\x854\xc0\x0f\xfbe\x9cL\x87@\xcc\x94/\xeb-\xa1\x15\xa3\xf4\x15\xdc\xbb\x86\x07Is\x86em}\x1f4\xa4 Y\xd7\x8fZ\x8d\xd1\xef")
+       var locator = "cee9a457e790cf20d4bdaa6d69f01e41"
+
+       // Prepare two test Keep volumes.
+       KeepVM = MakeTestVolumeManager(2)
+       defer func() { KeepVM.Quit() }()
+
+       // Store one block, then attempt to store the other. Confirm that
+       // PutBlock reported a CollisionError.
+       if err := PutBlock(b1, locator); err != nil {
+               t.Error(err)
+       }
+       // b2 has the same digest but different content: must be refused.
+       if err := PutBlock(b2, locator); err == nil {
+               t.Error("PutBlock did not report a collision")
+       } else if err != CollisionError {
+               t.Errorf("PutBlock returned %v", err)
+       }
+}
+
+// TestPutBlockTouchFails
+//     When PutBlock is asked to PUT an existing block, but cannot
+//     modify the timestamp, it should write a second block.
+//
+func TestPutBlockTouchFails(t *testing.T) {
+       defer teardown()
+
+       // Prepare two test Keep volumes.
+       KeepVM = MakeTestVolumeManager(2)
+       defer func() { KeepVM.Quit() }()
+       vols := KeepVM.Volumes()
+
+       // Store a block and then make the underlying volume bad,
+       // so a subsequent attempt to update the file timestamp
+       // will fail.
+       vols[0].Put(TEST_HASH, BAD_BLOCK)
+       old_mtime, err := vols[0].Mtime(TEST_HASH)
+       if err != nil {
+               t.Fatalf("vols[0].Mtime(%s): %s\n", TEST_HASH, err)
+       }
+
+       // vols[0].Touch will fail on the next call, so the volume
+       // manager will store a copy on vols[1] instead.
+       vols[0].(*MockVolume).Touchable = false
+       if err := PutBlock(TEST_BLOCK, TEST_HASH); err != nil {
+               t.Fatalf("PutBlock: %v", err)
+       }
+       // Restore Touchable so later Mtime/Get calls behave normally.
+       vols[0].(*MockVolume).Touchable = true
+
+       // Now the mtime on the block on vols[0] should be unchanged, and
+       // there should be a copy of the block on vols[1].
+       new_mtime, err := vols[0].Mtime(TEST_HASH)
+       if err != nil {
+               t.Fatalf("vols[0].Mtime(%s): %s\n", TEST_HASH, err)
+       }
+       if !new_mtime.Equal(old_mtime) {
+               t.Errorf("mtime was changed on vols[0]:\nold_mtime = %v\nnew_mtime = %v\n",
+                       old_mtime, new_mtime)
+       }
+       result, err := vols[1].Get(TEST_HASH)
+       if err != nil {
+               t.Fatalf("vols[1]: %v", err)
+       }
+       if bytes.Compare(result, TEST_BLOCK) != 0 {
+               t.Errorf("new block does not match test block\nnew block = %v\n", result)
+       }
+}
+
+// ========================================
+// FindKeepVolumes tests.
+// ========================================
+
+// TestFindKeepVolumes
+//     Confirms that FindKeepVolumes finds tmpfs volumes with "/keep"
+//     directories at the top level.
+//
+func TestFindKeepVolumes(t *testing.T) {
+       var tempVols [2]string
+       var err error
+
+       defer func() {
+               for _, path := range tempVols {
+                       os.RemoveAll(path)
+               }
+       }()
+
+       // Create two directories suitable for using as keep volumes.
+       for i := range tempVols {
+               if tempVols[i], err = ioutil.TempDir("", "findvol"); err != nil {
+                       t.Fatal(err)
+               }
+               tempVols[i] = tempVols[i] + "/keep"
+               if err = os.Mkdir(tempVols[i], 0755); err != nil {
+                       t.Fatal(err)
+               }
+       }
+
+       // Set up a bogus PROC_MOUNTS file.
+       f, err := ioutil.TempFile("", "keeptest")
+       if err != nil {
+               // Fail loudly: previously a TempFile error made the whole
+               // test pass vacuously without calling FindKeepVolumes.
+               t.Fatal(err)
+       }
+       // Defer removal so the file is cleaned up even on Fatalf below.
+       defer os.Remove(f.Name())
+       for _, vol := range tempVols {
+               fmt.Fprintf(f, "tmpfs %s tmpfs opts\n", path.Dir(vol))
+       }
+       f.Close()
+       PROC_MOUNTS = f.Name()
+
+       // Check that FindKeepVolumes finds the temp volumes.
+       resultVols := FindKeepVolumes()
+       if len(tempVols) != len(resultVols) {
+               t.Fatalf("set up %d volumes, FindKeepVolumes found %d\n",
+                       len(tempVols), len(resultVols))
+       }
+       for i := range tempVols {
+               if tempVols[i] != resultVols[i] {
+                       t.Errorf("FindKeepVolumes returned %s, expected %s\n",
+                               resultVols[i], tempVols[i])
+               }
+       }
+}
+
+// TestFindKeepVolumesFail
+//     When no Keep volumes are present, FindKeepVolumes returns an empty slice.
+//
+func TestFindKeepVolumesFail(t *testing.T) {
+       defer teardown()
+
+       // Set up a bogus PROC_MOUNTS file with no Keep vols.
+       if f, err := ioutil.TempFile("", "keeptest"); err == nil {
+               fmt.Fprintln(f, "rootfs / rootfs opts 0 0")
+               fmt.Fprintln(f, "sysfs /sys sysfs opts 0 0")
+               fmt.Fprintln(f, "proc /proc proc opts 0 0")
+               fmt.Fprintln(f, "udev /dev devtmpfs opts 0 0")
+               fmt.Fprintln(f, "devpts /dev/pts devpts opts 0 0")
+               f.Close()
+               PROC_MOUNTS = f.Name()
+
+               // Check that FindKeepVolumes returns an empty array.
+               resultVols := FindKeepVolumes()
+               if len(resultVols) != 0 {
+                       t.Fatalf("FindKeepVolumes returned %v", resultVols)
+               }
+
+               os.Remove(PROC_MOUNTS)
+       }
+}
+
+// TestIndex
+//     Test an /index request.
+func TestIndex(t *testing.T) {
+       defer teardown()
+
+       // Set up Keep volumes and populate them.
+       // Include multiple blocks on different volumes, and
+       // some metadata files.
+       KeepVM = MakeTestVolumeManager(2)
+       defer func() { KeepVM.Quit() }()
+
+       vols := KeepVM.Volumes()
+       vols[0].Put(TEST_HASH, TEST_BLOCK)
+       vols[1].Put(TEST_HASH_2, TEST_BLOCK_2)
+       vols[0].Put(TEST_HASH_3, TEST_BLOCK_3)
+       vols[0].Put(TEST_HASH+".meta", []byte("metadata"))
+       vols[1].Put(TEST_HASH_2+".meta", []byte("metadata"))
+
+       // Sort the combined index lines so the expected pattern does not
+       // depend on volume order or map iteration order.
+       index := vols[0].Index("") + vols[1].Index("")
+       index_rows := strings.Split(index, "\n")
+       sort.Strings(index_rows)
+       sorted_index := strings.Join(index_rows, "\n")
+       // The ".meta" entries are not valid locators and must be absent.
+       expected := `^\n` + TEST_HASH + `\+\d+ \d+\n` +
+               TEST_HASH_3 + `\+\d+ \d+\n` +
+               TEST_HASH_2 + `\+\d+ \d+$`
+
+       match, err := regexp.MatchString(expected, sorted_index)
+       if err == nil {
+               if !match {
+                       t.Errorf("IndexLocators returned:\n%s", index)
+               }
+       } else {
+               t.Errorf("regexp.MatchString: %s", err)
+       }
+}
+
+// TestNodeStatus
+//     Test that GetNodeStatus returns valid info about available volumes.
+//
+//     TODO(twp): set up appropriate interfaces to permit more rigorous
+//     testing.
+//
+func TestNodeStatus(t *testing.T) {
+       defer teardown()
+
+       // Set up test Keep volumes with some blocks.
+       KeepVM = MakeTestVolumeManager(2)
+       defer func() { KeepVM.Quit() }()
+
+       // One block per volume, so every volume reports nonzero usage.
+       vols := KeepVM.Volumes()
+       vols[0].Put(TEST_HASH, TEST_BLOCK)
+       vols[1].Put(TEST_HASH_2, TEST_BLOCK_2)
+
+       // Get node status and make a basic sanity check.
+       // The expected values match MockVolume.Status (mount point
+       // "/bogo", device 123, 1000000-byte capacity).
+       st := GetNodeStatus()
+       for i := range vols {
+               volinfo := st.Volumes[i]
+               mtp := volinfo.MountPoint
+               if mtp != "/bogo" {
+                       t.Errorf("GetNodeStatus mount_point %s, expected /bogo", mtp)
+               }
+               if volinfo.DeviceNum == 0 {
+                       t.Errorf("uninitialized device_num in %v", volinfo)
+               }
+               if volinfo.BytesFree == 0 {
+                       t.Errorf("uninitialized bytes_free in %v", volinfo)
+               }
+               if volinfo.BytesUsed == 0 {
+                       t.Errorf("uninitialized bytes_used in %v", volinfo)
+               }
+       }
+}
+
+// ========================================
+// Helper functions for unit tests.
+// ========================================
+
+// MakeTestVolumeManager
+//     Creates and returns a RRVolumeManager with the specified number
+//     of MockVolumes.
+//
+func MakeTestVolumeManager(num_volumes int) VolumeManager {
+       vols := make([]Volume, num_volumes)
+       for i := range vols {
+               vols[i] = CreateMockVolume()
+       }
+       return MakeRRVolumeManager(vols)
+}
+
+// teardown
+//     Cleanup to perform after each test.
+//     Resets the package globals (auth token, permission settings,
+//     volume manager) so state cannot leak between tests.
+//
+func teardown() {
+       data_manager_token = ""
+       enforce_permissions = false
+       PermissionSecret = nil
+       KeepVM = nil
+}
diff --git a/services/keepstore/logging_router.go b/services/keepstore/logging_router.go
new file mode 100644 (file)
index 0000000..e30df87
--- /dev/null
@@ -0,0 +1,51 @@
+package main
+
+// LoggingRESTRouter
+// LoggingResponseWriter
+
+import (
+       "github.com/gorilla/mux"
+       "log"
+       "net/http"
+       "strings"
+)
+
+// LoggingResponseWriter wraps an http.ResponseWriter, recording the
+// response status and length, and retaining error-response bodies so
+// they can be included in the request log line.
+type LoggingResponseWriter struct {
+       Status int // status code passed to WriteHeader
+       Length int // total bytes written through Write
+       http.ResponseWriter
+       ResponseBody string // accumulated body, kept only when Status >= 400
+}
+
+// WriteHeader records the status code before delegating to the
+// underlying ResponseWriter.
+func (loggingWriter *LoggingResponseWriter) WriteHeader(code int) {
+       loggingWriter.Status = code
+       loggingWriter.ResponseWriter.WriteHeader(code)
+}
+
+// Write counts the bytes written and, for error responses (status >=
+// 400), accumulates the body text for logging, then delegates to the
+// underlying ResponseWriter.
+func (loggingWriter *LoggingResponseWriter) Write(data []byte) (int, error) {
+       loggingWriter.Length += len(data)
+       if loggingWriter.Status >= 400 {
+               loggingWriter.ResponseBody += string(data)
+       }
+       return loggingWriter.ResponseWriter.Write(data)
+}
+
+// LoggingRESTRouter wraps the REST mux.Router so that every request
+// and its response are logged (see ServeHTTP).
+type LoggingRESTRouter struct {
+       router *mux.Router
+}
+
+func MakeLoggingRESTRouter() *LoggingRESTRouter {
+       router := MakeRESTRouter()
+       return (&LoggingRESTRouter{router})
+}
+
+func (loggingRouter *LoggingRESTRouter) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+       loggingWriter := LoggingResponseWriter{200, 0, resp, ""}
+       loggingRouter.router.ServeHTTP(&loggingWriter, req)
+       statusText := "OK"
+       if loggingWriter.Status >= 400 {
+               statusText = strings.Replace(loggingWriter.ResponseBody, "\n", "", -1)
+       }
+       log.Printf("[%s] %s %s %d %d \"%s\"", req.RemoteAddr, req.Method, req.URL.Path[1:], loggingWriter.Status, loggingWriter.Length, statusText)
+
+}
diff --git a/services/keepstore/perms.go b/services/keepstore/perms.go
new file mode 100644 (file)
index 0000000..1048f53
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+Permissions management on Arvados locator hashes.
+
+The permissions structure for Arvados is as follows (from
+https://arvados.org/issues/2328)
+
+A Keep locator string has the following format:
+
+    [hash]+[size]+A[signature]@[timestamp]
+
+The "signature" string here is a cryptographic hash, expressed as a
+string of hexadecimal digits, and timestamp is a 32-bit Unix timestamp
+expressed as a hexadecimal number.  e.g.:
+
+    acbd18db4cc2f85cedef654fccc4a4d8+3+A257f3f5f5f0a4e4626a18fc74bd42ec34dcb228a@7fffffff
+
+The signature represents a guarantee that this locator was generated
+by either Keep or the API server for use with the supplied API token.
+If a request to Keep includes a locator with a valid signature and is
+accompanied by the proper API token, the user has permission to GET
+that object.
+
+The signature may be generated either by Keep (after the user writes a
+block) or by the API server (if the user has can_read permission on
+the specified object). Keep and API server share a secret that is used
+to generate signatures.
+
+To verify a permission hint, Keep generates a new hint for the
+requested object (using the locator string, the timestamp, the
+permission secret and the user's API token, which must appear in the
+request headers) and compares it against the hint included in the
+request. If the permissions do not match, or if the API token is not
+present, Keep returns a 401 error.
+*/
+
+package main
+
+import (
+       "crypto/hmac"
+       "crypto/sha1"
+       "fmt"
+       "regexp"
+       "strconv"
+       "strings"
+       "time"
+)
+
+// The PermissionSecret is the secret key used to generate SHA1
+// digests for permission hints. apiserver and Keep must use the same
+// key.  A nil secret disables signing (SignLocator returns locators
+// unsigned).
+var PermissionSecret []byte
+
+// MakePermSignature returns a string representing the signed permission
+// hint for the blob identified by blob_hash, api_token and expiration timestamp.
+func MakePermSignature(blob_hash string, api_token string, expiry string) string {
+       hmac := hmac.New(sha1.New, PermissionSecret)
+       hmac.Write([]byte(blob_hash))
+       hmac.Write([]byte("@"))
+       hmac.Write([]byte(api_token))
+       hmac.Write([]byte("@"))
+       hmac.Write([]byte(expiry))
+       digest := hmac.Sum(nil)
+       return fmt.Sprintf("%x", digest)
+}
+
+// SignLocator takes a blob_locator, an api_token and an expiry time, and
+// returns a signed locator string.
+func SignLocator(blob_locator string, api_token string, expiry time.Time) string {
+       // If no permission secret or API token is available,
+       // return an unsigned locator.
+       if PermissionSecret == nil || api_token == "" {
+               return blob_locator
+       }
+       // Extract the hash from the blob locator, omitting any size hint that may be present.
+       blob_hash := strings.Split(blob_locator, "+")[0]
+       // Return the signed locator string.
+       timestamp_hex := fmt.Sprintf("%08x", expiry.Unix())
+       return blob_locator +
+               "+A" + MakePermSignature(blob_hash, api_token, timestamp_hex) +
+               "@" + timestamp_hex
+}
+
+var signedLocatorRe = regexp.MustCompile(`^([[:xdigit:]]{32}).*\+A([[:xdigit:]]{40})@([[:xdigit:]]{8})`)
+
+// VerifySignature returns true if the signature on the signed_locator
+// can be verified using the given api_token.
+func VerifySignature(signed_locator string, api_token string) bool {
+       matches := signedLocatorRe.FindStringSubmatch(signed_locator)
+       if matches == nil {
+               // Could not find a permission signature at all
+               return false
+       }
+       blob_hash := matches[1]
+       sig_hex := matches[2]
+       exp_hex := matches[3]
+       if exp_time, err := ParseHexTimestamp(exp_hex); err != nil || exp_time.Before(time.Now()) {
+               // Signature is expired, or timestamp is unparseable
+               return false
+       }
+       return sig_hex == MakePermSignature(blob_hash, api_token, exp_hex)
+}
+
+// ParseHexTimestamp parses timestamp_hex as a hexadecimal Unix
+// timestamp.  On failure it returns the zero time and the strconv
+// parse error.
+func ParseHexTimestamp(timestamp_hex string) (ts time.Time, err error) {
+       // Parse with an explicit 64-bit size.  bitSize 0 (platform int)
+       // would reject timestamps above 0x7fffffff on 32-bit builds,
+       // even though 8 hex digits can encode up to 0xffffffff.
+       var ts_int int64
+       if ts_int, err = strconv.ParseInt(timestamp_hex, 16, 64); err == nil {
+               ts = time.Unix(ts_int, 0)
+       }
+       return ts, err
+}
diff --git a/services/keepstore/perms_test.go b/services/keepstore/perms_test.go
new file mode 100644 (file)
index 0000000..d0081cd
--- /dev/null
@@ -0,0 +1,125 @@
+package main
+
+import (
+       "testing"
+       "time"
+)
+
+// Fixed test vectors: a locator, API token and permission key, plus
+// the signature that key is known to produce for that locator, token
+// and timestamp (see https://arvados.org/issues/2328).
+const (
+       known_hash    = "acbd18db4cc2f85cedef654fccc4a4d8"
+       known_locator = known_hash + "+3"
+       known_token   = "hocfupkn2pjhrpgp2vxv8rsku7tvtx49arbc9s4bvu7p7wxqvk"
+       known_key     = "13u9fkuccnboeewr0ne3mvapk28epf68a3bhj9q8sb4l6e4e5mkk" +
+               "p6nhj2mmpscgu1zze5h5enydxfe3j215024u16ij4hjaiqs5u4pzsl3nczmaoxnc" +
+               "ljkm4875xqn4xv058koz3vkptmzhyheiy6wzevzjmdvxhvcqsvr5abhl15c2d4o4" +
+               "jhl0s91lojy1mtrzqqvprqcverls0xvy9vai9t1l1lvvazpuadafm71jl4mrwq2y" +
+               "gokee3eamvjy8qq1fvy238838enjmy5wzy2md7yvsitp5vztft6j4q866efym7e6" +
+               "vu5wm9fpnwjyxfldw3vbo01mgjs75rgo7qioh8z8ij7jpyp8508okhgbbex3ceei" +
+               "786u5rw2a9gx743dj3fgq2irk"
+       known_signature      = "257f3f5f5f0a4e4626a18fc74bd42ec34dcb228a"
+       known_timestamp      = "7fffffff"
+       known_sig_hint       = "+A" + known_signature + "@" + known_timestamp
+       known_signed_locator = known_locator + known_sig_hint
+)
+
+// TestSignLocator checks SignLocator against the precomputed
+// known_signed_locator test vector.
+func TestSignLocator(t *testing.T) {
+       PermissionSecret = []byte(known_key)
+       defer func() { PermissionSecret = nil }()
+
+       if ts, err := ParseHexTimestamp(known_timestamp); err != nil {
+               t.Errorf("bad known_timestamp %s", known_timestamp)
+       } else {
+               if known_signed_locator != SignLocator(known_locator, known_token, ts) {
+                       t.Fail()
+               }
+       }
+}
+
+// TestVerifySignature checks that the known-good signed locator
+// verifies under the known key and token.
+func TestVerifySignature(t *testing.T) {
+       PermissionSecret = []byte(known_key)
+       defer func() { PermissionSecret = nil }()
+
+       if !VerifySignature(known_signed_locator, known_token) {
+               t.Fail()
+       }
+}
+
+// TestVerifySignatureExtraHints checks that unrelated locator hints
+// (+K..., +Z...) before or after the +A hint do not break verification.
+func TestVerifySignatureExtraHints(t *testing.T) {
+       PermissionSecret = []byte(known_key)
+       defer func() { PermissionSecret = nil }()
+
+       if !VerifySignature(known_locator + "+K@xyzzy" + known_sig_hint, known_token) {
+               t.Fatal("Verify cannot handle hint before permission signature")
+       }
+
+       if !VerifySignature(known_locator + known_sig_hint + "+Zfoo", known_token) {
+               t.Fatal("Verify cannot handle hint after permission signature")
+       }
+
+       if !VerifySignature(known_locator + "+K@xyzzy" + known_sig_hint + "+Zfoo", known_token) {
+               t.Fatal("Verify cannot handle hints around permission signature")
+       }
+}
+
+// The size hint on the locator string should not affect signature
+// validation: only the bare hash is signed.
+func TestVerifySignatureWrongSize(t *testing.T) {
+       PermissionSecret = []byte(known_key)
+       defer func() { PermissionSecret = nil }()
+
+       if !VerifySignature(known_hash + "+999999" + known_sig_hint, known_token) {
+               t.Fatal("Verify cannot handle incorrect size hint")
+       }
+
+       if !VerifySignature(known_hash + known_sig_hint, known_token) {
+               t.Fatal("Verify cannot handle missing size hint")
+       }
+}
+
+func TestVerifySignatureBadSig(t *testing.T) {
+       PermissionSecret = []byte(known_key)
+       defer func() { PermissionSecret = nil }()
+
+       bad_locator := known_locator + "+Aaaaaaaaaaaaaaaa@" + known_timestamp
+       if VerifySignature(bad_locator, known_token) {
+               t.Fail()
+       }
+}
+
+// TestVerifySignatureBadTimestamp checks that a locator whose
+// timestamp differs from the one that was signed is rejected.
+// (Timestamp 00000000 is also in the past, so the expiry check
+// rejects it before the signature comparison.)
+func TestVerifySignatureBadTimestamp(t *testing.T) {
+       PermissionSecret = []byte(known_key)
+       defer func() { PermissionSecret = nil }()
+
+       bad_locator := known_locator + "+A" + known_signature + "@00000000"
+       if VerifySignature(bad_locator, known_token) {
+               t.Fail()
+       }
+}
+
+// TestVerifySignatureBadSecret checks that a signature made with the
+// known key does not verify under a different permission secret.
+func TestVerifySignatureBadSecret(t *testing.T) {
+       PermissionSecret = []byte("00000000000000000000")
+       defer func() { PermissionSecret = nil }()
+
+       if VerifySignature(known_signed_locator, known_token) {
+               t.Fail()
+       }
+}
+
+// TestVerifySignatureBadToken checks that the known-good locator does
+// not verify when presented with a different API token.
+func TestVerifySignatureBadToken(t *testing.T) {
+       PermissionSecret = []byte(known_key)
+       defer func() { PermissionSecret = nil }()
+
+       if VerifySignature(known_signed_locator, "00000000") {
+               t.Fail()
+       }
+}
+
+// TestVerifySignatureExpired checks that a correctly signed locator
+// whose expiry timestamp is in the past is rejected.
+func TestVerifySignatureExpired(t *testing.T) {
+       PermissionSecret = []byte(known_key)
+       defer func() { PermissionSecret = nil }()
+
+       yesterday := time.Now().AddDate(0, 0, -1)
+       expired_locator := SignLocator(known_hash, known_token, yesterday)
+       if VerifySignature(expired_locator, known_token) {
+               t.Fail()
+       }
+}
diff --git a/services/keepstore/volume.go b/services/keepstore/volume.go
new file mode 100644 (file)
index 0000000..e7683ee
--- /dev/null
@@ -0,0 +1,183 @@
+// A Volume is an interface representing a Keep back-end storage unit:
+// for example, a single mounted disk, a RAID array, an Amazon S3 volume,
+// etc.
+
+package main
+
+import (
+       "errors"
+       "fmt"
+       "os"
+       "strings"
+       "time"
+)
+
+type Volume interface {
+       // Get returns the block stored under loc.
+       Get(loc string) ([]byte, error)
+       // Put stores block under loc.
+       Put(loc string, block []byte) error
+       // Touch updates the stored block's modification time.
+       Touch(loc string) error
+       // Mtime reports the stored block's modification time.
+       Mtime(loc string) (time.Time, error)
+       // Index lists stored blocks whose locators begin with prefix.
+       Index(prefix string) string
+       // Delete removes the block stored under loc.
+       Delete(loc string) error
+       // Status reports utilization data for the volume.
+       Status() *VolumeStatus
+       String() string
+}
+
+// MockVolumes are Volumes used to test the Keep front end.
+//
+// If the Bad field is true, this volume should return an error
+// on all writes and puts.
+//
+// The Touchable field signifies whether the Touch method will
+// succeed.  Defaults to true.  Note that Bad and Touchable are
+// independent: a MockVolume may be set up so that Put fails but Touch
+// works or vice versa.
+//
+// TODO(twp): rename Bad to something more descriptive, e.g. Writable,
+// and make sure that the tests that rely on it are testing the right
+// thing.  We may need to simulate Writable, Touchable and Corrupt
+// volumes in different ways.
+//
+type MockVolume struct {
+       Store      map[string][]byte    // locator -> block data
+       Timestamps map[string]time.Time // locator -> last Touch time
+       Bad        bool                 // if true, Get/Put/Mtime fail
+       Touchable  bool                 // if false, Touch fails
+}
+
+func CreateMockVolume() *MockVolume {
+       return &MockVolume{
+               Store:      make(map[string][]byte),
+               Timestamps: make(map[string]time.Time),
+               Bad:        false,
+               Touchable:  true,
+       }
+}
+
+func (v *MockVolume) Get(loc string) ([]byte, error) {
+       if v.Bad {
+               return nil, errors.New("Bad volume")
+       } else if block, ok := v.Store[loc]; ok {
+               return block, nil
+       }
+       return nil, os.ErrNotExist
+}
+
+// Put stores block under loc and records its timestamp via Touch.
+// Returns Touch's error, or a generic error when the volume is Bad.
+func (v *MockVolume) Put(loc string, block []byte) error {
+       if v.Bad {
+               return errors.New("Bad volume")
+       }
+       v.Store[loc] = block
+       return v.Touch(loc)
+}
+
+func (v *MockVolume) Touch(loc string) error {
+       if v.Touchable {
+               v.Timestamps[loc] = time.Now()
+               return nil
+       }
+       return errors.New("Touch failed")
+}
+
+func (v *MockVolume) Mtime(loc string) (time.Time, error) {
+       var mtime time.Time
+       var err error
+       if v.Bad {
+               err = errors.New("Bad volume")
+       } else if t, ok := v.Timestamps[loc]; ok {
+               mtime = t
+       } else {
+               err = os.ErrNotExist
+       }
+       return mtime, err
+}
+
+func (v *MockVolume) Index(prefix string) string {
+       var result string
+       for loc, block := range v.Store {
+               if IsValidLocator(loc) && strings.HasPrefix(loc, prefix) {
+                       result = result + fmt.Sprintf("%s+%d %d\n",
+                               loc, len(block), 123456789)
+               }
+       }
+       return result
+}
+
+// Delete removes the block stored under loc, or returns
+// os.ErrNotExist when there is none.  Blocks younger than
+// permission_ttl are silently retained (nil is returned without
+// deleting) — presumably to mimic Keep's protection of recently
+// written blocks; confirm against the real volume implementation.
+func (v *MockVolume) Delete(loc string) error {
+       if _, ok := v.Store[loc]; ok {
+               if time.Since(v.Timestamps[loc]) < permission_ttl {
+                       return nil
+               }
+               delete(v.Store, loc)
+               return nil
+       }
+       return os.ErrNotExist
+}
+
+// Status reports a fabricated VolumeStatus: mount point "/bogo",
+// device number 123, and free/used byte counts derived from a
+// notional 1000000-byte capacity.
+func (v *MockVolume) Status() *VolumeStatus {
+       var used uint64
+       for _, block := range v.Store {
+               used += uint64(len(block))
+       }
+       return &VolumeStatus{"/bogo", 123, 1000000 - used, used}
+}
+
+// String returns a fixed human-readable tag identifying this volume type.
+func (v *MockVolume) String() string {
+       return "[MockVolume]"
+}
+
+// A VolumeManager manages a collection of volumes.
+//
+// - Volumes is a slice of available Volumes.
+// - Choose() returns a Volume suitable for writing to.
+// - Quit() instructs the VolumeManager to shut down gracefully.
+//
+type VolumeManager interface {
+       Volumes() []Volume
+       Choose() Volume
+       Quit()
+}
+
+// RRVolumeManager is a round-robin VolumeManager: successive Choose()
+// calls rotate through the volumes via the nextwrite channel.
+type RRVolumeManager struct {
+       volumes   []Volume
+       nextwrite chan Volume // fed by the round-robin goroutine
+       quit      chan int    // signals the round-robin goroutine to exit
+}
+
+// MakeRRVolumeManager returns an RRVolumeManager over vols and starts
+// its round-robin goroutine.  NOTE(review): the goroutine runs until
+// Quit() is called — callers must call Quit() to avoid leaking it.
+func MakeRRVolumeManager(vols []Volume) *RRVolumeManager {
+       // Create a new VolumeManager struct with the specified volumes,
+       // and with new Nextwrite and Quit channels.
+       // The Quit channel is buffered with a capacity of 1 so that
+       // another routine may write to it without blocking.
+       vm := &RRVolumeManager{vols, make(chan Volume), make(chan int, 1)}
+
+       // This goroutine implements round-robin volume selection.
+       // It sends each available Volume in turn to the Nextwrite
+       // channel, until receiving a notification on the Quit channel
+       // that it should terminate.
+       go func() {
+               var i int = 0
+               for {
+                       select {
+                       case <-vm.quit:
+                               return
+                       case vm.nextwrite <- vm.volumes[i]:
+                               i = (i + 1) % len(vm.volumes)
+                       }
+               }
+       }()
+
+       return vm
+}
+
+// Volumes returns the slice of managed volumes.
+func (vm *RRVolumeManager) Volumes() []Volume {
+       return vm.volumes
+}
+
+// Choose blocks until the round-robin goroutine supplies the next
+// volume to write to.
+func (vm *RRVolumeManager) Choose() Volume {
+       return <-vm.nextwrite
+}
+
+// Quit tells the round-robin goroutine to terminate.  The quit
+// channel is buffered (capacity 1), so one notification can be sent
+// without blocking.
+func (vm *RRVolumeManager) Quit() {
+       vm.quit <- 1
+}
diff --git a/services/keepstore/volume_unix.go b/services/keepstore/volume_unix.go
new file mode 100644 (file)
index 0000000..4db2a53
--- /dev/null
@@ -0,0 +1,352 @@
+// A UnixVolume is a Volume backed by a locally mounted disk.
+//
+package main
+
+import (
+       "fmt"
+       "io/ioutil"
+       "log"
+       "os"
+       "path/filepath"
+       "strconv"
+       "strings"
+       "syscall"
+       "time"
+)
+
+// IORequests are encapsulated Get or Put requests.  They are used to
+// implement serialized I/O (i.e. only one read/write operation per
+// volume). When running in serialized mode, the Keep front end sends
+// IORequests on a channel to an IORunner, which handles them one at a
+// time and returns an IOResponse.
+//
+type IOMethod int
+
+const (
+       KeepGet IOMethod = iota
+       KeepPut
+)
+
+type IORequest struct {
+       method IOMethod
+       loc    string
+       data   []byte
+       reply  chan *IOResponse
+}
+
+type IOResponse struct {
+       data []byte
+       err  error
+}
+
+// A UnixVolume has the following properties:
+//
+//   root
+//       the path to the volume's root directory
+//   queue
+//       A channel of IORequests. If non-nil, all I/O requests for
+//       this volume should be queued on this channel; the result
+//       will be delivered on the IOResponse channel supplied in the
+//       request.
+//
+type UnixVolume struct {
+       root  string // path to this volume
+       queue chan *IORequest
+}
+
+func (v *UnixVolume) IOHandler() {
+       for req := range v.queue {
+               var result IOResponse
+               switch req.method {
+               case KeepGet:
+                       result.data, result.err = v.Read(req.loc)
+               case KeepPut:
+                       result.err = v.Write(req.loc, req.data)
+               }
+               req.reply <- &result
+       }
+}
+
+func MakeUnixVolume(root string, serialize bool) (v UnixVolume) {
+       if serialize {
+               v = UnixVolume{root, make(chan *IORequest)}
+               go v.IOHandler()
+       } else {
+               v = UnixVolume{root, nil}
+       }
+       return
+}
+
+func (v *UnixVolume) Get(loc string) ([]byte, error) {
+       if v.queue == nil {
+               return v.Read(loc)
+       }
+       reply := make(chan *IOResponse)
+       v.queue <- &IORequest{KeepGet, loc, nil, reply}
+       response := <-reply
+       return response.data, response.err
+}
+
+func (v *UnixVolume) Put(loc string, block []byte) error {
+       if v.queue == nil {
+               return v.Write(loc, block)
+       }
+       reply := make(chan *IOResponse)
+       v.queue <- &IORequest{KeepPut, loc, block, reply}
+       response := <-reply
+       return response.err
+}
+
+func (v *UnixVolume) Touch(loc string) error {
+       p := v.blockPath(loc)
+       f, err := os.OpenFile(p, os.O_RDWR|os.O_APPEND, 0644)
+       if err != nil {
+               return err
+       }
+       defer f.Close()
+       if e := lockfile(f); e != nil {
+               return e
+       }
+       defer unlockfile(f)
+       now := time.Now().Unix()
+       utime := syscall.Utimbuf{now, now}
+       return syscall.Utime(p, &utime)
+}
+
+func (v *UnixVolume) Mtime(loc string) (time.Time, error) {
+       p := v.blockPath(loc)
+       if fi, err := os.Stat(p); err != nil {
+               return time.Time{}, err
+       } else {
+               return fi.ModTime(), nil
+       }
+}
+
+// Read retrieves a block identified by the locator string "loc", and
+// returns its contents as a byte slice.
+//
+// If the block could not be opened or read, Read returns a nil slice
+// and the os.Error that was generated.
+//
+// If the block is present but its content hash does not match loc,
+// Read returns the block and a CorruptError.  It is the caller's
+// responsibility to decide what (if anything) to do with the
+// corrupted data block.
+//
+func (v *UnixVolume) Read(loc string) ([]byte, error) {
+       buf, err := ioutil.ReadFile(v.blockPath(loc))
+       return buf, err
+}
+
+// Write stores a block of data identified by the locator string
+// "loc".  It returns nil on success.  If the volume is full, it
+// returns a FullError.  If the write fails due to some other error,
+// that error is returned.
+//
+func (v *UnixVolume) Write(loc string, block []byte) error {
+       if v.IsFull() {
+               return FullError
+       }
+       bdir := v.blockDir(loc)
+       if err := os.MkdirAll(bdir, 0755); err != nil {
+               log.Printf("%s: could not create directory %s: %s",
+                       loc, bdir, err)
+               return err
+       }
+
+       tmpfile, tmperr := ioutil.TempFile(bdir, "tmp"+loc)
+       if tmperr != nil {
+               log.Printf("ioutil.TempFile(%s, tmp%s): %s", bdir, loc, tmperr)
+               return tmperr
+       }
+       bpath := v.blockPath(loc)
+
+       if _, err := tmpfile.Write(block); err != nil {
+               log.Printf("%s: writing to %s: %s\n", v, bpath, err)
+               return err
+       }
+       if err := tmpfile.Close(); err != nil {
+               log.Printf("closing %s: %s\n", tmpfile.Name(), err)
+               os.Remove(tmpfile.Name())
+               return err
+       }
+       if err := os.Rename(tmpfile.Name(), bpath); err != nil {
+               log.Printf("rename %s %s: %s\n", tmpfile.Name(), bpath, err)
+               os.Remove(tmpfile.Name())
+               return err
+       }
+       return nil
+}
+
+// Status returns a VolumeStatus struct describing the volume's
+// current state.
+//
+func (v *UnixVolume) Status() *VolumeStatus {
+       var fs syscall.Statfs_t
+       var devnum uint64
+
+       if fi, err := os.Stat(v.root); err == nil {
+               devnum = fi.Sys().(*syscall.Stat_t).Dev
+       } else {
+               log.Printf("%s: os.Stat: %s\n", v, err)
+               return nil
+       }
+
+       err := syscall.Statfs(v.root, &fs)
+       if err != nil {
+               log.Printf("%s: statfs: %s\n", v, err)
+               return nil
+       }
+       // These calculations match the way df calculates disk usage:
+       // "free" space is measured by fs.Bavail, but "used" space
+       // uses fs.Blocks - fs.Bfree.
+       free := fs.Bavail * uint64(fs.Bsize)
+       used := (fs.Blocks - fs.Bfree) * uint64(fs.Bsize)
+       return &VolumeStatus{v.root, devnum, free, used}
+}
+
+// Index returns a list of blocks found on this volume which begin with
+// the specified prefix. If the prefix is an empty string, Index returns
+// a complete list of blocks.
+//
+// The return value is a multiline string (separated by
+// newlines). Each line is in the format
+//
+//     locator+size modification-time
+//
+// e.g.:
+//
+//     e4df392f86be161ca6ed3773a962b8f3+67108864 1388894303
+//     e4d41e6fd68460e0e3fc18cc746959d2+67108864 1377796043
+//     e4de7a2810f5554cd39b36d8ddb132ff+67108864 1388701136
+//
+func (v *UnixVolume) Index(prefix string) (output string) {
+       filepath.Walk(v.root,
+               func(path string, info os.FileInfo, err error) error {
+                       // This WalkFunc inspects each path in the volume
+                       // and prints an index line for all files that begin
+                       // with prefix.
+                       if err != nil {
+                               log.Printf("IndexHandler: %s: walking to %s: %s",
+                                       v, path, err)
+                               return nil
+                       }
+                       locator := filepath.Base(path)
+                       // Skip directories that do not match prefix.
+                       // We know there is nothing interesting inside.
+                       if info.IsDir() &&
+                               !strings.HasPrefix(locator, prefix) &&
+                               !strings.HasPrefix(prefix, locator) {
+                               return filepath.SkipDir
+                       }
+                       // Skip any file that is not apparently a locator, e.g. .meta files
+                       if !IsValidLocator(locator) {
+                               return nil
+                       }
+                       // Print filenames beginning with prefix
+                       if !info.IsDir() && strings.HasPrefix(locator, prefix) {
+                               output = output + fmt.Sprintf(
+                                       "%s+%d %d\n", locator, info.Size(), info.ModTime().Unix())
+                       }
+                       return nil
+               })
+
+       return
+}
+
+func (v *UnixVolume) Delete(loc string) error {
+       p := v.blockPath(loc)
+       f, err := os.OpenFile(p, os.O_RDWR|os.O_APPEND, 0644)
+       if err != nil {
+               return err
+       }
+       defer f.Close()
+       if e := lockfile(f); e != nil {
+               return e
+       }
+       defer unlockfile(f)
+
+       // If the block has been PUT more recently than -permission_ttl,
+       // return success without removing the block.  This guards against
+       // a race condition where a block is old enough that Data Manager
+       // has added it to the trash list, but the user submitted a PUT
+       // for the block since then.
+       if fi, err := os.Stat(p); err != nil {
+               return err
+       } else {
+               if time.Since(fi.ModTime()) < permission_ttl {
+                       return nil
+               }
+       }
+       return os.Remove(p)
+}
+
+// blockDir returns the fully qualified directory name for the directory
+// where loc is (or would be) stored on this volume.
+func (v *UnixVolume) blockDir(loc string) string {
+       return filepath.Join(v.root, loc[0:3])
+}
+
+// blockPath returns the fully qualified pathname for the path to loc
+// on this volume.
+func (v *UnixVolume) blockPath(loc string) string {
+       return filepath.Join(v.blockDir(loc), loc)
+}
+
+// IsFull returns true if the free space on the volume is less than
+// MIN_FREE_KILOBYTES.
+//
+func (v *UnixVolume) IsFull() (isFull bool) {
+       fullSymlink := v.root + "/full"
+
+       // Check if the volume has been marked as full in the last hour.
+       if link, err := os.Readlink(fullSymlink); err == nil {
+               if ts, err := strconv.Atoi(link); err == nil {
+                       fulltime := time.Unix(int64(ts), 0)
+                       if time.Since(fulltime).Hours() < 1.0 {
+                               return true
+                       }
+               }
+       }
+
+       if avail, err := v.FreeDiskSpace(); err == nil {
+               isFull = avail < MIN_FREE_KILOBYTES
+       } else {
+               log.Printf("%s: FreeDiskSpace: %s\n", v, err)
+               isFull = false
+       }
+
+       // If the volume is full, timestamp it.
+       if isFull {
+               now := fmt.Sprintf("%d", time.Now().Unix())
+               os.Symlink(now, fullSymlink)
+       }
+       return
+}
+
+// FreeDiskSpace returns the number of unused 1k blocks available on
+// the volume.
+//
+func (v *UnixVolume) FreeDiskSpace() (free uint64, err error) {
+       var fs syscall.Statfs_t
+       err = syscall.Statfs(v.root, &fs)
+       if err == nil {
+               // Statfs output is not guaranteed to measure free
+               // space in terms of 1K blocks.
+               free = fs.Bavail * uint64(fs.Bsize) / 1024
+       }
+       return
+}
+
+func (v *UnixVolume) String() string {
+       return fmt.Sprintf("[UnixVolume %s]", v.root)
+}
+
+// lockfile and unlockfile use flock(2) to manage kernel file locks.
+func lockfile(f *os.File) error {
+       return syscall.Flock(int(f.Fd()), syscall.LOCK_EX)
+}
+
+func unlockfile(f *os.File) error {
+       return syscall.Flock(int(f.Fd()), syscall.LOCK_UN)
+}
diff --git a/services/keepstore/volume_unix_test.go b/services/keepstore/volume_unix_test.go
new file mode 100644 (file)
index 0000000..7a10fc5
--- /dev/null
@@ -0,0 +1,294 @@
+package main
+
+import (
+       "bytes"
+       "fmt"
+       "io/ioutil"
+       "os"
+       "syscall"
+       "testing"
+       "time"
+)
+
+func TempUnixVolume(t *testing.T, serialize bool) UnixVolume {
+       d, err := ioutil.TempDir("", "volume_test")
+       if err != nil {
+               t.Fatal(err)
+       }
+       return MakeUnixVolume(d, serialize)
+}
+
+func _teardown(v UnixVolume) {
+       if v.queue != nil {
+               close(v.queue)
+       }
+       os.RemoveAll(v.root)
+}
+
+// store writes a Keep block directly into a UnixVolume, for testing
+// UnixVolume methods.
+//
+func _store(t *testing.T, vol UnixVolume, filename string, block []byte) {
+       blockdir := fmt.Sprintf("%s/%s", vol.root, filename[:3])
+       if err := os.MkdirAll(blockdir, 0755); err != nil {
+               t.Fatal(err)
+       }
+
+       blockpath := fmt.Sprintf("%s/%s", blockdir, filename)
+       if f, err := os.Create(blockpath); err == nil {
+               f.Write(block)
+               f.Close()
+       } else {
+               t.Fatal(err)
+       }
+}
+
+func TestGet(t *testing.T) {
+       v := TempUnixVolume(t, false)
+       defer _teardown(v)
+       _store(t, v, TEST_HASH, TEST_BLOCK)
+
+       buf, err := v.Get(TEST_HASH)
+       if err != nil {
+               t.Error(err)
+       }
+       if bytes.Compare(buf, TEST_BLOCK) != 0 {
+               t.Errorf("expected %s, got %s", string(TEST_BLOCK), string(buf))
+       }
+}
+
+func TestGetNotFound(t *testing.T) {
+       v := TempUnixVolume(t, false)
+       defer _teardown(v)
+       _store(t, v, TEST_HASH, TEST_BLOCK)
+
+       buf, err := v.Get(TEST_HASH_2)
+       switch {
+       case os.IsNotExist(err):
+               break
+       case err == nil:
+               t.Errorf("Read should have failed, returned %s", string(buf))
+       default:
+               t.Errorf("Read expected ErrNotExist, got: %s", err)
+       }
+}
+
+func TestPut(t *testing.T) {
+       v := TempUnixVolume(t, false)
+       defer _teardown(v)
+
+       err := v.Put(TEST_HASH, TEST_BLOCK)
+       if err != nil {
+               t.Error(err)
+       }
+       p := fmt.Sprintf("%s/%s/%s", v.root, TEST_HASH[:3], TEST_HASH)
+       if buf, err := ioutil.ReadFile(p); err != nil {
+               t.Error(err)
+       } else if bytes.Compare(buf, TEST_BLOCK) != 0 {
+               t.Errorf("Write should have stored %s, did store %s",
+                       string(TEST_BLOCK), string(buf))
+       }
+}
+
+func TestPutBadVolume(t *testing.T) {
+       v := TempUnixVolume(t, false)
+       defer _teardown(v)
+
+       os.Chmod(v.root, 000)
+       err := v.Put(TEST_HASH, TEST_BLOCK)
+       if err == nil {
+               t.Error("Write should have failed")
+       }
+}
+
+// TestPutTouch
+//     Test that when applying PUT to a block that already exists,
+//     the block's modification time is updated.
+func TestPutTouch(t *testing.T) {
+       v := TempUnixVolume(t, false)
+       defer _teardown(v)
+
+       if err := v.Put(TEST_HASH, TEST_BLOCK); err != nil {
+               t.Error(err)
+       }
+
+       // We'll verify { t0 < threshold < t1 }, where t0 is the
+       // existing block's timestamp on disk before Put() and t1 is
+       // its timestamp after Put().
+       threshold := time.Now().Add(-time.Second)
+
+       // Set the stored block's mtime far enough in the past that we
+       // can see the difference between "timestamp didn't change"
+       // and "timestamp granularity is too low".
+       {
+               oldtime := time.Now().Add(-20 * time.Second).Unix()
+               if err := syscall.Utime(v.blockPath(TEST_HASH),
+                       &syscall.Utimbuf{oldtime, oldtime}); err != nil {
+                       t.Error(err)
+               }
+
+               // Make sure v.Mtime() agrees the above Utime really worked.
+               if t0, err := v.Mtime(TEST_HASH); err != nil || t0.IsZero() || !t0.Before(threshold) {
+                       t.Errorf("Setting mtime failed: %v, %v", t0, err)
+               }
+       }
+
+       // Write the same block again.
+       if err := v.Put(TEST_HASH, TEST_BLOCK); err != nil {
+               t.Error(err)
+       }
+
+       // Verify threshold < t1
+       t1, err := v.Mtime(TEST_HASH)
+       if err != nil {
+               t.Error(err)
+       }
+       if t1.Before(threshold) {
+               t.Errorf("t1 %v must be >= threshold %v after v.Put ",
+                       t1, threshold)
+       }
+}
+
+// Serialization tests: launch a bunch of concurrent
+//
+// TODO(twp): show that the underlying Read/Write operations executed
+// serially and not concurrently. The easiest way to do this is
+// probably to activate verbose or debug logging, capture log output
+// and examine it to confirm that Reads and Writes did not overlap.
+//
+// TODO(twp): a proper test of I/O serialization requires that a
+// second request start while the first one is still underway.
+// Guaranteeing that the test behaves this way requires some tricky
+// synchronization and mocking.  For now we'll just launch a bunch of
+// requests simultaneously in goroutines and demonstrate that they
+// return accurate results.
+//
+func TestGetSerialized(t *testing.T) {
+       // Create a volume with I/O serialization enabled.
+       v := TempUnixVolume(t, true)
+       defer _teardown(v)
+
+       _store(t, v, TEST_HASH, TEST_BLOCK)
+       _store(t, v, TEST_HASH_2, TEST_BLOCK_2)
+       _store(t, v, TEST_HASH_3, TEST_BLOCK_3)
+
+       sem := make(chan int)
+       go func(sem chan int) {
+               buf, err := v.Get(TEST_HASH)
+               if err != nil {
+                       t.Errorf("err1: %v", err)
+               }
+               if bytes.Compare(buf, TEST_BLOCK) != 0 {
+                       t.Errorf("buf should be %s, is %s", string(TEST_BLOCK), string(buf))
+               }
+               sem <- 1
+       }(sem)
+
+       go func(sem chan int) {
+               buf, err := v.Get(TEST_HASH_2)
+               if err != nil {
+                       t.Errorf("err2: %v", err)
+               }
+               if bytes.Compare(buf, TEST_BLOCK_2) != 0 {
+                       t.Errorf("buf should be %s, is %s", string(TEST_BLOCK_2), string(buf))
+               }
+               sem <- 1
+       }(sem)
+
+       go func(sem chan int) {
+               buf, err := v.Get(TEST_HASH_3)
+               if err != nil {
+                       t.Errorf("err3: %v", err)
+               }
+               if bytes.Compare(buf, TEST_BLOCK_3) != 0 {
+                       t.Errorf("buf should be %s, is %s", string(TEST_BLOCK_3), string(buf))
+               }
+               sem <- 1
+       }(sem)
+
+       // Wait for all goroutines to finish
+       for done := 0; done < 3; {
+               done += <-sem
+       }
+}
+
+func TestPutSerialized(t *testing.T) {
+       // Create a volume with I/O serialization enabled.
+       v := TempUnixVolume(t, true)
+       defer _teardown(v)
+
+       sem := make(chan int)
+       go func(sem chan int) {
+               err := v.Put(TEST_HASH, TEST_BLOCK)
+               if err != nil {
+                       t.Errorf("err1: %v", err)
+               }
+               sem <- 1
+       }(sem)
+
+       go func(sem chan int) {
+               err := v.Put(TEST_HASH_2, TEST_BLOCK_2)
+               if err != nil {
+                       t.Errorf("err2: %v", err)
+               }
+               sem <- 1
+       }(sem)
+
+       go func(sem chan int) {
+               err := v.Put(TEST_HASH_3, TEST_BLOCK_3)
+               if err != nil {
+                       t.Errorf("err3: %v", err)
+               }
+               sem <- 1
+       }(sem)
+
+       // Wait for all three goroutines to finish before reading back.
+       for done := 0; done < 3; {
+               done += <-sem
+       }
+
+       // Double check that we actually wrote the blocks we expected to write.
+       buf, err := v.Get(TEST_HASH)
+       if err != nil {
+               t.Errorf("Get #1: %v", err)
+       }
+       if bytes.Compare(buf, TEST_BLOCK) != 0 {
+               t.Errorf("Get #1: expected %s, got %s", string(TEST_BLOCK), string(buf))
+       }
+
+       buf, err = v.Get(TEST_HASH_2)
+       if err != nil {
+               t.Errorf("Get #2: %v", err)
+       }
+       if bytes.Compare(buf, TEST_BLOCK_2) != 0 {
+               t.Errorf("Get #2: expected %s, got %s", string(TEST_BLOCK_2), string(buf))
+       }
+
+       buf, err = v.Get(TEST_HASH_3)
+       if err != nil {
+               t.Errorf("Get #3: %v", err)
+       }
+       if bytes.Compare(buf, TEST_BLOCK_3) != 0 {
+               t.Errorf("Get #3: expected %s, got %s", string(TEST_BLOCK_3), string(buf))
+       }
+}
+
+func TestIsFull(t *testing.T) {
+       v := TempUnixVolume(t, false)
+       defer _teardown(v)
+
+       full_path := v.root + "/full"
+       now := fmt.Sprintf("%d", time.Now().Unix())
+       os.Symlink(now, full_path)
+       if !v.IsFull() {
+               t.Errorf("%s: claims not to be full", v)
+       }
+       os.Remove(full_path)
+
+       // Test with an expired /full link.
+       expired := fmt.Sprintf("%d", time.Now().Unix()-3605)
+       os.Symlink(expired, full_path)
+       if v.IsFull() {
+               t.Errorf("%s: should no longer be full", v)
+       }
+}
diff --git a/services/keepstore/work_queue.go b/services/keepstore/work_queue.go
new file mode 100644 (file)
index 0000000..9509cac
--- /dev/null
@@ -0,0 +1,160 @@
+package main
+
+/* A WorkQueue is an asynchronous thread-safe queue manager.  It
+   provides a channel from which items can be read off the queue, and
+   permits replacing the contents of the queue at any time.
+
+   The overall work flow for a WorkQueue is as follows:
+
+     1. A WorkQueue is created with NewWorkQueue().  This
+        function instantiates a new WorkQueue and starts a manager
+        goroutine.  The manager listens on an input channel
+        (manager.newlist) and an output channel (manager.NextItem).
+
+     2. The manager first waits for a new list of requests on the
+        newlist channel.  When another goroutine calls
+        manager.ReplaceQueue(lst), it sends lst over the newlist
+        channel to the manager.  The manager goroutine now has
+        ownership of the list.
+
+     3. Once the manager has this initial list, it listens on both the
+        input and output channels for one of the following to happen:
+
+          a. A worker attempts to read an item from the NextItem
+             channel.  The manager sends the next item from the list
+             over this channel to the worker, and loops.
+
+          b. New data is sent to the manager on the newlist channel.
+             This happens when another goroutine calls
+             manager.ReplaceQueue() with a new list.  The manager
+             discards the current list, replaces it with the new one,
+             and begins looping again.
+
+          c. The input channel is closed.  The manager closes its
+             output channel (signalling any workers to quit) and
+             terminates.
+
+   Tasks currently handled by WorkQueue:
+     * the pull list
+     * the trash list
+
+   Example usage:
+
+        // Any kind of user-defined type can be used with the
+        // WorkQueue.
+               type FrobRequest struct {
+                       frob string
+               }
+
+               // Make a work list.
+               froblist := NewWorkQueue()
+
+               // Start a concurrent worker to read items from the NextItem
+               // channel until it is closed, deleting each one.
+               go func(list WorkQueue) {
+                       for i := range list.NextItem {
+                               req := i.(FrobRequest)
+                               frob.Run(req)
+                       }
+               }(froblist)
+
+               // Set up a HTTP handler for PUT /frob
+               router.HandleFunc(`/frob`,
+                       func(w http.ResponseWriter, req *http.Request) {
+                               // Parse the request body into a list.List
+                               // of FrobRequests, and give this list to the
+                               // frob manager.
+                               newfrobs := parseBody(req.Body)
+                               froblist.ReplaceQueue(newfrobs)
+                       }).Methods("PUT")
+
+   Methods available on a WorkQueue:
+
+               ReplaceQueue(list)
+                       Replaces the current item list with a new one.  The list
+            manager discards any unprocessed items on the existing
+            list and replaces it with the new one. If the worker is
+            processing a list item when ReplaceQueue is called, it
+            finishes processing before receiving items from the new
+            list.
+               Close()
+                       Shuts down the manager goroutine. When Close is called,
+                       the manager closes the NextItem channel.
+*/
+
+import "container/list"
+
+type WorkQueue struct {
+       newlist  chan *list.List
+       NextItem chan interface{}
+}
+
+// NewWorkQueue returns a new worklist, and launches a listener
+// goroutine that waits for work and farms it out to workers.
+//
+func NewWorkQueue() *WorkQueue {
+       b := WorkQueue{
+               newlist:  make(chan *list.List),
+               NextItem: make(chan interface{}),
+       }
+       go b.listen()
+       return &b
+}
+
+// ReplaceQueue sends a new list of pull requests to the manager goroutine.
+// The manager will discard any outstanding pull list and begin
+// working on the new list.
+//
+func (b *WorkQueue) ReplaceQueue(list *list.List) {
+       b.newlist <- list
+}
+
+// Close shuts down the manager and terminates the goroutine, which
+// completes any pull request in progress and abandons any pending
+// requests.
+//
+func (b *WorkQueue) Close() {
+       close(b.newlist)
+}
+
+// listen is run in a goroutine. It reads new pull lists from its
+// input queue until the queue is closed.
+// listen takes ownership of the list that is passed to it.
+//
+// Note that the routine does not ever need to access the list
+// itself once the current_item has been initialized, so we do
+// not bother to keep a pointer to the list. Because it is a
+// doubly linked list, holding on to the current item will keep
+// it from garbage collection.
+//
+func (b *WorkQueue) listen() {
+       var current_item *list.Element
+
+       // When we're done, close the output channel to shut down any
+       // workers.
+       defer close(b.NextItem)
+
+       for {
+               // If the current list is empty, wait for a new list before
+               // even checking if workers are ready.
+               if current_item == nil {
+                       if p, ok := <-b.newlist; ok {
+                               current_item = p.Front()
+                       } else {
+                               // The channel was closed; shut down.
+                               return
+                       }
+               }
+               select {
+               case p, ok := <-b.newlist:
+                       if ok {
+                               current_item = p.Front()
+                       } else {
+                               // The input channel is closed; time to shut down
+                               return
+                       }
+               case b.NextItem <- current_item.Value:
+                       current_item = current_item.Next()
+               }
+       }
+}
diff --git a/services/keepstore/work_queue_test.go b/services/keepstore/work_queue_test.go
new file mode 100644 (file)
index 0000000..144e4c2
--- /dev/null
@@ -0,0 +1,149 @@
+package main
+
+import (
+       "container/list"
+       "testing"
+)
+
+func makeTestWorkList(ary []int) *list.List {
+       l := list.New()
+       for _, n := range ary {
+               l.PushBack(n)
+       }
+       return l
+}
+
+func expectChannelEmpty(t *testing.T, c <-chan interface{}) {
+       select {
+       case item := <-c:
+               t.Fatalf("Received value (%v) from channel that we expected to be empty", item)
+       default:
+               // no-op
+       }
+}
+
+func expectChannelNotEmpty(t *testing.T, c <-chan interface{}) {
+       if item, ok := <-c; !ok {
+               t.Fatal("expected data on a closed channel")
+       } else if item == nil {
+               t.Fatal("expected data on an empty channel")
+       }
+}
+
+func expectChannelClosed(t *testing.T, c <-chan interface{}) {
+       received, ok := <-c
+       if ok {
+               t.Fatalf("Expected channel to be closed, but received %v instead", received)
+       }
+}
+
+func expectFromChannel(t *testing.T, c <-chan interface{}, expected []int) {
+       for i := range expected {
+               actual, ok := <-c
+               t.Logf("received %v", actual)
+               if !ok {
+                       t.Fatalf("Expected %v but channel was closed after receiving the first %d elements correctly.", expected, i)
+               } else if actual.(int) != expected[i] {
+                       t.Fatalf("Expected %v but received '%v' after receiving the first %d elements correctly.", expected[i], actual, i)
+               }
+       }
+}
+
+// Create a WorkQueue, generate a list for it, and instantiate a worker.
+func TestWorkQueueReadWrite(t *testing.T) {
+       var input = []int{1, 1, 2, 3, 5, 8, 13, 21, 34}
+
+       b := NewWorkQueue()
+       b.ReplaceQueue(makeTestWorkList(input))
+
+       expectFromChannel(t, b.NextItem, input)
+       expectChannelEmpty(t, b.NextItem)
+       b.Close()
+}
+
+// Start a worker before the list has any input.
+func TestWorkQueueEarlyRead(t *testing.T) {
+       var input = []int{1, 1, 2, 3, 5, 8, 13, 21, 34}
+
+       b := NewWorkQueue()
+
+       // First, demonstrate that nothing is available on the NextItem
+       // channel.
+       expectChannelEmpty(t, b.NextItem)
+
+       // Start a reader in a goroutine. The reader will block until the
+       // block work list has been initialized.
+       //
+       done := make(chan int)
+       go func() {
+               expectFromChannel(t, b.NextItem, input)
+               b.Close()
+               done <- 1
+       }()
+
+       // Feed the blocklist a new worklist, and wait for the worker to
+       // finish.
+       b.ReplaceQueue(makeTestWorkList(input))
+       <-done
+
+       expectChannelClosed(t, b.NextItem)
+}
+
+// Show that a reader may block when the manager's list is exhausted,
+// and that the reader resumes automatically when new data is
+// available.
+func TestWorkQueueReaderBlocks(t *testing.T) {
+       var (
+               inputBeforeBlock = []int{1, 2, 3, 4, 5}
+               inputAfterBlock  = []int{6, 7, 8, 9, 10}
+       )
+
+       b := NewWorkQueue()
+       sendmore := make(chan int)
+       done := make(chan int)
+       go func() {
+               expectFromChannel(t, b.NextItem, inputBeforeBlock)
+
+               // Confirm that the channel is empty, so a subsequent read
+               // on it will block.
+               expectChannelEmpty(t, b.NextItem)
+
+               // Signal that we're ready for more input.
+               sendmore <- 1
+               expectFromChannel(t, b.NextItem, inputAfterBlock)
+               b.Close()
+               done <- 1
+       }()
+
+       // Write a slice of the first five elements and wait for the
+       // reader to signal that it's ready for us to send more input.
+       b.ReplaceQueue(makeTestWorkList(inputBeforeBlock))
+       <-sendmore
+
+       b.ReplaceQueue(makeTestWorkList(inputAfterBlock))
+
+       // Wait for the reader to complete.
+       <-done
+}
+
+// Replace one active work list with another.
+func TestWorkQueueReplaceQueue(t *testing.T) {
+       var firstInput = []int{1, 1, 2, 3, 5, 8, 13, 21, 34}
+       var replaceInput = []int{1, 4, 9, 16, 25, 36, 49, 64, 81}
+
+       b := NewWorkQueue()
+       b.ReplaceQueue(makeTestWorkList(firstInput))
+
+       // Read just the first five elements from the work list.
+       // Confirm that the channel is not empty.
+       expectFromChannel(t, b.NextItem, firstInput[0:5])
+       expectChannelNotEmpty(t, b.NextItem)
+
+       // Replace the work list and read five more elements.
+       // The old list should have been discarded and all new
+       // elements come from the new list.
+       b.ReplaceQueue(makeTestWorkList(replaceInput))
+       expectFromChannel(t, b.NextItem, replaceInput[0:5])
+
+       b.Close()
+}
diff --git a/services/nodemanager/.gitignore b/services/nodemanager/.gitignore
new file mode 120000 (symlink)
index 0000000..ed3b362
--- /dev/null
@@ -0,0 +1 @@
+../../sdk/python/.gitignore
\ No newline at end of file
diff --git a/services/nodemanager/README.rst b/services/nodemanager/README.rst
new file mode 100644 (file)
index 0000000..8713260
--- /dev/null
@@ -0,0 +1,39 @@
+====================
+Arvados Node Manager
+====================
+
+Overview
+--------
+
+This package provides ``arvados-node-manager``.  It dynamically starts
+and stops compute nodes on an Arvados_ cloud installation based on job
+demand.
+
+.. _Arvados: https://arvados.org/
+
+Setup
+-----
+
+1. Install the package.
+
+2. Write a configuration file.  ``doc/ec2.example.cfg`` documents all
+   of the options available, with specific tunables for EC2 clouds.
+
+3. Run ``arvados-node-manager --config YOURCONFIGFILE`` using whatever
+   supervisor you like (e.g., runit).
+
+Testing and Development
+-----------------------
+
+To run tests, just run::
+
+  python setup.py test
+
+Our `hacking guide
+<https://arvados.org/projects/arvados/wiki/Hacking_Node_Manager>`_
+provides an architectural overview of the Arvados Node Manager to help
+you find your way around the source.  The `Lifecycle of an Arvados
+compute node
+<https://arvados.org/projects/arvados/wiki/Lifecycle_of_an_Arvados_compute_node>`_
+page explains how it works in concert with other Arvados components to
+prepare a node for compute work.
diff --git a/services/nodemanager/arvnodeman/__init__.py b/services/nodemanager/arvnodeman/__init__.py
new file mode 100644 (file)
index 0000000..a1ecac7
--- /dev/null
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+# Pre-import _strptime at package load so threaded time.strptime calls do
+# not race on the first import; see the referenced bug for details.
+import _strptime  # See <http://bugs.python.org/issue7980#msg221094>.
+import logging
+
+# Package-wide logger.  Attaching a NullHandler avoids "no handlers could
+# be found" warnings when the embedding program has not configured logging.
+logger = logging.getLogger('arvnodeman')
+logger.addHandler(logging.NullHandler())
diff --git a/services/nodemanager/arvnodeman/clientactor.py b/services/nodemanager/arvnodeman/clientactor.py
new file mode 100644 (file)
index 0000000..6319f4b
--- /dev/null
@@ -0,0 +1,111 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+import logging
+import time
+
+import pykka
+
+from .config import actor_class
+
+def _notify_subscribers(response, subscribers):
+    """Send the response to all the subscriber methods.
+
+    If any of the subscriber actors have stopped, remove them from the
+    subscriber set.
+
+    :param response: value passed to each subscriber callable.
+    :param subscribers: mutable set of callables; pruned in place.
+    """
+    dead_subscribers = set()
+    for subscriber in subscribers:
+        try:
+            subscriber(response)
+        except pykka.ActorDeadError:
+            dead_subscribers.add(subscriber)
+    # Mutate the caller's set so dead actors are not notified again.
+    subscribers.difference_update(dead_subscribers)
+
+class RemotePollLoopActor(actor_class):
+    """Abstract actor class to regularly poll a remote service.
+
+    This actor sends regular requests to a remote service, and sends each
+    response to subscribers.  It takes care of error handling, and retrying
+    requests with exponential backoff.
+
+    To use this actor, define the _send_request method.  If you also
+    define an _item_key method, this class will support subscribing to
+    a specific item by key in responses.
+
+    Subclasses must also define a LOGGER_NAME class attribute.
+    """
+    def __init__(self, client, timer_actor, poll_wait=60, max_poll_wait=180):
+        super(RemotePollLoopActor, self).__init__()
+        self._client = client
+        self._timer = timer_actor
+        self._logger = logging.getLogger(self.LOGGER_NAME)
+        # Proxy to this actor, so methods can send messages to themselves.
+        self._later = self.actor_ref.proxy()
+        self._polling_started = False
+        self.log_prefix = "{} (at {})".format(self.__class__.__name__, id(self))
+        self.min_poll_wait = poll_wait
+        self.max_poll_wait = max_poll_wait
+        self.poll_wait = self.min_poll_wait
+        self.all_subscribers = set()
+        self.key_subscribers = {}
+        if hasattr(self, '_item_key'):
+            self.subscribe_to = self._subscribe_to
+
+    def _start_polling(self):
+        # The poll loop starts lazily, on the first subscription.
+        if not self._polling_started:
+            self._polling_started = True
+            self._later.poll()
+
+    def subscribe(self, subscriber):
+        """Register a callable to receive every poll response."""
+        self.all_subscribers.add(subscriber)
+        self._logger.debug("%r subscribed to all events", subscriber)
+        self._start_polling()
+
+    # __init__ exposes this method to the proxy if the subclass defines
+    # _item_key.
+    def _subscribe_to(self, key, subscriber):
+        self.key_subscribers.setdefault(key, set()).add(subscriber)
+        self._logger.debug("%r subscribed to events for '%s'", subscriber, key)
+        self._start_polling()
+
+    def _send_request(self):
+        raise NotImplementedError("subclasses must implement request method")
+
+    def _got_response(self, response):
+        self._logger.debug("%s got response with %d items",
+                           self.log_prefix, len(response))
+        # A successful poll resets the backoff clock.
+        self.poll_wait = self.min_poll_wait
+        _notify_subscribers(response, self.all_subscribers)
+        if hasattr(self, '_item_key'):
+            # Key subscribers get their specific item, or None if it was
+            # absent from this response.  (iteritems: Python 2 code.)
+            items = {self._item_key(x): x for x in response}
+            for key, subscribers in self.key_subscribers.iteritems():
+                _notify_subscribers(items.get(key), subscribers)
+
+    def _got_error(self, error):
+        # Exponential backoff, capped at max_poll_wait.  Returns the log
+        # message; poll() decides the severity to log it at.
+        self.poll_wait = min(self.poll_wait * 2, self.max_poll_wait)
+        return "{} got error: {} - waiting {} seconds".format(
+            self.log_prefix, error, self.poll_wait)
+
+    def is_common_error(self, exception):
+        # Subclasses may return True for errors that deserve a warning
+        # instead of a full traceback in the log.
+        return False
+
+    def poll(self, scheduled_start=None):
+        self._logger.debug("%s sending poll", self.log_prefix)
+        start_time = time.time()
+        if scheduled_start is None:
+            scheduled_start = start_time
+        try:
+            response = self._send_request()
+        except Exception as error:
+            errmsg = self._got_error(error)
+            if self.is_common_error(error):
+                self._logger.warning(errmsg)
+            else:
+                self._logger.exception(errmsg)
+            next_poll = start_time + self.poll_wait
+        else:
+            self._got_response(response)
+            next_poll = scheduled_start + self.poll_wait
+        end_time = time.time()
+        if next_poll < end_time:  # We've drifted too much; start fresh.
+            next_poll = end_time + self.poll_wait
+        self._timer.schedule(next_poll, self._later.poll, next_poll)
diff --git a/services/nodemanager/arvnodeman/computenode/__init__.py b/services/nodemanager/arvnodeman/computenode/__init__.py
new file mode 100644 (file)
index 0000000..4955992
--- /dev/null
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+import itertools
+import time
+
+def arvados_node_fqdn(arvados_node, default_hostname='dynamic.compute'):
+    """Return the fully qualified domain name for an Arvados node record.
+
+    Falls back to default_hostname when the record has no hostname yet.
+    """
+    hostname = arvados_node.get('hostname') or default_hostname
+    return '{}.{}'.format(hostname, arvados_node['domain'])
+
+def arvados_node_mtime(node):
+    """Return the record's modified_at time as a Unix timestamp.
+
+    modified_at is parsed as a UTC ISO-8601 string; subtracting
+    time.timezone undoes mktime's local-time interpretation.
+    """
+    return time.mktime(time.strptime(node['modified_at'] + 'UTC',
+                                     '%Y-%m-%dT%H:%M:%SZ%Z')) - time.timezone
+
+def timestamp_fresh(timestamp, fresh_time):
+    """Return True if timestamp is less than fresh_time seconds old."""
+    return (time.time() - timestamp) < fresh_time
+
+class ShutdownTimer(object):
+    """Keep track of a cloud node's shutdown windows.
+
+    Instantiate this class with a timestamp of when a cloud node started,
+    and a list of durations (in minutes) of when the node must not and may
+    be shut down, alternating.  The class will tell you when a shutdown
+    window is open, and when the next open window will start.
+    """
+    def __init__(self, start_time, shutdown_windows):
+        # The implementation is easiest if we have an even number of windows,
+        # because then windows always alternate between open and closed.
+        # Rig that up: calculate the first shutdown window based on what's
+        # passed in.  Then, if we were given an odd number of windows, merge
+        # that first window into the last one, since they both represent
+        # closed state.
+        first_window = shutdown_windows[0]
+        shutdown_windows = list(shutdown_windows[1:])
+        self._next_opening = start_time + (60 * first_window)
+        if len(shutdown_windows) % 2:
+            shutdown_windows.append(first_window)
+        else:
+            shutdown_windows[-1] += first_window
+        # Durations are given in minutes; cycle stores them in seconds.
+        self.shutdown_windows = itertools.cycle([60 * n
+                                                 for n in shutdown_windows])
+        self._open_start = self._next_opening
+        self._open_for = next(self.shutdown_windows)
+
+    def _advance_opening(self):
+        # Roll window state forward until _next_opening is in the future.
+        while self._next_opening < time.time():
+            self._open_start = self._next_opening
+            self._next_opening += self._open_for + next(self.shutdown_windows)
+            self._open_for = next(self.shutdown_windows)
+
+    def next_opening(self):
+        """Return the timestamp when the next shutdown window opens."""
+        self._advance_opening()
+        return self._next_opening
+
+    def window_open(self):
+        """Return True if a shutdown window is open right now."""
+        self._advance_opening()
+        return 0 < (time.time() - self._open_start) < self._open_for
diff --git a/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py b/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py
new file mode 100644 (file)
index 0000000..48e8dcf
--- /dev/null
@@ -0,0 +1,344 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+import functools
+import logging
+import time
+
+import libcloud.common.types as cloud_types
+import pykka
+
+from .. import arvados_node_fqdn, arvados_node_mtime, timestamp_fresh
+from ...clientactor import _notify_subscribers
+from ... import config
+
+class ComputeNodeStateChangeBase(config.actor_class):
+    """Base class for actors that change a compute node's state.
+
+    This base class takes care of retrying changes and notifying
+    subscribers when the change is finished.
+    """
+    def __init__(self, logger_name, cloud_client, timer_actor,
+                 retry_wait, max_retry_wait):
+        super(ComputeNodeStateChangeBase, self).__init__()
+        self._later = self.actor_ref.proxy()
+        self._logger = logging.getLogger(logger_name)
+        self._cloud = cloud_client
+        self._timer = timer_actor
+        self.min_retry_wait = retry_wait
+        self.max_retry_wait = max_retry_wait
+        self.retry_wait = retry_wait
+        # Becomes None once the state change finishes (see _finished).
+        self.subscribers = set()
+
+    @staticmethod
+    def _retry(errors=()):
+        """Retry decorator for an actor method that makes remote requests.
+
+        Use this function to decorate an actor method, and pass in a
+        tuple of exceptions to catch.  This decorator will schedule
+        retries of that method with exponential backoff if the
+        original method raises a known cloud driver error, or any of the
+        given exception types.
+        """
+        def decorator(orig_func):
+            @functools.wraps(orig_func)
+            def retry_wrapper(self, *args, **kwargs):
+                start_time = time.time()
+                try:
+                    orig_func(self, *args, **kwargs)
+                except Exception as error:
+                    # Anything that is neither a declared retryable error
+                    # nor a cloud driver error propagates to the caller.
+                    if not (isinstance(error, errors) or
+                            self._cloud.is_cloud_exception(error)):
+                        raise
+                    self._logger.warning(
+                        "Client error: %s - waiting %s seconds",
+                        error, self.retry_wait)
+                    # Re-schedule the same method (via the proxy) with the
+                    # same arguments after the backoff interval.
+                    self._timer.schedule(start_time + self.retry_wait,
+                                         getattr(self._later,
+                                                 orig_func.__name__),
+                                         *args, **kwargs)
+                    self.retry_wait = min(self.retry_wait * 2,
+                                          self.max_retry_wait)
+                else:
+                    # Success resets the backoff clock.
+                    self.retry_wait = self.min_retry_wait
+            return retry_wrapper
+        return decorator
+
+    def _finished(self):
+        # Notify subscribers with this actor's proxy, then drop the set so
+        # later subscribe() calls get an immediate callback instead.
+        _notify_subscribers(self._later, self.subscribers)
+        self.subscribers = None
+
+    def subscribe(self, subscriber):
+        if self.subscribers is None:
+            # Already finished: call the subscriber back right away.
+            try:
+                subscriber(self._later)
+            except pykka.ActorDeadError:
+                pass
+        else:
+            self.subscribers.add(subscriber)
+
+
+class ComputeNodeSetupActor(ComputeNodeStateChangeBase):
+    """Actor to create and set up a cloud compute node.
+
+    This actor prepares an Arvados node record for a new compute node
+    (either creating one or cleaning one passed in), then boots the
+    actual compute node.  It notifies subscribers when the cloud node
+    is successfully created (the last step in the process for Node
+    Manager to handle).
+    """
+    def __init__(self, timer_actor, arvados_client, cloud_client,
+                 cloud_size, arvados_node=None,
+                 retry_wait=1, max_retry_wait=180):
+        super(ComputeNodeSetupActor, self).__init__(
+            'arvnodeman.nodeup', cloud_client, timer_actor,
+            retry_wait, max_retry_wait)
+        self._arvados = arvados_client
+        self.cloud_size = cloud_size
+        self.arvados_node = None
+        self.cloud_node = None
+        # Kick off the first step asynchronously via the actor proxy.
+        if arvados_node is None:
+            self._later.create_arvados_node()
+        else:
+            self._later.prepare_arvados_node(arvados_node)
+
+    @ComputeNodeStateChangeBase._retry()
+    def create_arvados_node(self):
+        # Make a brand-new Arvados node record, then boot a cloud node.
+        self.arvados_node = self._arvados.nodes().create(body={}).execute()
+        self._later.create_cloud_node()
+
+    @ComputeNodeStateChangeBase._retry()
+    def prepare_arvados_node(self, node):
+        # Clear out an existing Arvados node record so the new cloud node
+        # can pair with it cleanly.
+        self.arvados_node = self._arvados.nodes().update(
+            uuid=node['uuid'],
+            body={'hostname': None,
+                  'ip_address': None,
+                  'slot_number': None,
+                  'first_ping_at': None,
+                  'last_ping_at': None,
+                  'info': {'ec2_instance_id': None,
+                           'last_action': "Prepared by Node Manager"}}
+            ).execute()
+        self._later.create_cloud_node()
+
+    @ComputeNodeStateChangeBase._retry()
+    def create_cloud_node(self):
+        self._logger.info("Creating cloud node with size %s.",
+                          self.cloud_size.name)
+        self.cloud_node = self._cloud.create_node(self.cloud_size,
+                                                  self.arvados_node)
+        self._logger.info("Cloud node %s created.", self.cloud_node.id)
+        self._later.post_create()
+
+    @ComputeNodeStateChangeBase._retry()
+    def post_create(self):
+        # Driver-specific post-creation work (e.g., tagging), then done.
+        self._cloud.post_create_node(self.cloud_node)
+        self._logger.info("%s post-create work done.", self.cloud_node.id)
+        self._finished()
+
+    def stop_if_no_cloud_node(self):
+        # Lets callers abort setup before a cloud node has been created.
+        if self.cloud_node is None:
+            self.stop()
+
+
+class ComputeNodeShutdownActor(ComputeNodeStateChangeBase):
+    """Actor to shut down a compute node.
+
+    This actor simply destroys a cloud node, retrying as needed.
+    """
+    def __init__(self, timer_actor, cloud_client, node_monitor,
+                 cancellable=True, retry_wait=1, max_retry_wait=180):
+        # If a ShutdownActor is cancellable, it will ask the
+        # ComputeNodeMonitorActor if it's still eligible before taking each
+        # action, and stop the shutdown process if the node is no longer
+        # eligible.  Normal shutdowns based on job demand should be
+        # cancellable; shutdowns based on node misbehavior should not.
+        super(ComputeNodeShutdownActor, self).__init__(
+            'arvnodeman.nodedown', cloud_client, timer_actor,
+            retry_wait, max_retry_wait)
+        self._monitor = node_monitor.proxy()
+        self.cloud_node = self._monitor.cloud_node.get()
+        self.cancellable = cancellable
+        # None while in progress; True/False once finished or cancelled.
+        self.success = None
+
+    def on_start(self):
+        self._later.shutdown_node()
+
+    def cancel_shutdown(self):
+        self.success = False
+        self._finished()
+
+    def _stop_if_window_closed(orig_func):
+        # Decorator: skip the wrapped step and cancel the whole shutdown
+        # when the monitored node is no longer eligible for shutdown.
+        @functools.wraps(orig_func)
+        def stop_wrapper(self, *args, **kwargs):
+            if (self.cancellable and
+                  (not self._monitor.shutdown_eligible().get())):
+                self._logger.info(
+                    "Cloud node %s shutdown cancelled - no longer eligible.",
+                    self.cloud_node.id)
+                self._later.cancel_shutdown()
+                return None
+            else:
+                return orig_func(self, *args, **kwargs)
+        return stop_wrapper
+
+    @_stop_if_window_closed
+    @ComputeNodeStateChangeBase._retry()
+    def shutdown_node(self):
+        if self._cloud.destroy_node(self.cloud_node):
+            self._logger.info("Cloud node %s shut down.", self.cloud_node.id)
+            self.success = True
+            self._finished()
+        else:
+            # Force a retry.
+            raise cloud_types.LibcloudError("destroy_node failed")
+
+    # Make the decorator available to subclasses.
+    _stop_if_window_closed = staticmethod(_stop_if_window_closed)
+
+
+class ComputeNodeUpdateActor(config.actor_class):
+    """Actor to dispatch one-off cloud management requests.
+
+    This actor receives requests for small cloud updates, and
+    dispatches them to a real driver.  ComputeNodeMonitorActors use
+    this to perform maintenance tasks on themselves.  Having a
+    dedicated actor for this gives us the opportunity to control the
+    flow of requests; e.g., by backing off when errors occur.
+
+    This actor is most like a "traditional" Pykka actor: there's no
+    subscribing, but instead methods return real driver results.  If
+    you're interested in those results, you should get them from the
+    Future that the proxy method returns.  Be prepared to handle exceptions
+    from the cloud driver when you do.
+    """
+    def __init__(self, cloud_factory, max_retry_wait=180):
+        super(ComputeNodeUpdateActor, self).__init__()
+        self._cloud = cloud_factory()
+        self.max_retry_wait = max_retry_wait
+        # Consecutive-failure count; drives the backoff below.
+        self.error_streak = 0
+        self.next_request_time = time.time()
+
+    def _throttle_errors(orig_func):
+        # Decorator: sleep until next_request_time before dispatching, and
+        # push next_request_time out exponentially on consecutive errors.
+        @functools.wraps(orig_func)
+        def throttle_wrapper(self, *args, **kwargs):
+            throttle_time = self.next_request_time - time.time()
+            if throttle_time > 0:
+                time.sleep(throttle_time)
+            self.next_request_time = time.time()
+            try:
+                result = orig_func(self, *args, **kwargs)
+            except Exception as error:
+                self.error_streak += 1
+                self.next_request_time += min(2 ** self.error_streak,
+                                              self.max_retry_wait)
+                raise
+            else:
+                self.error_streak = 0
+                return result
+        return throttle_wrapper
+
+    @_throttle_errors
+    def sync_node(self, cloud_node, arvados_node):
+        # Propagate Arvados node attributes to the cloud node record.
+        return self._cloud.sync_node(cloud_node, arvados_node)
+
+
+class ComputeNodeMonitorActor(config.actor_class):
+    """Actor to manage a running compute node.
+
+    This actor gets updates about a compute node's cloud and Arvados records.
+    It uses this information to notify subscribers when the node is eligible
+    for shutdown.
+    """
+    def __init__(self, cloud_node, cloud_node_start_time, shutdown_timer,
+                 timer_actor, update_actor, arvados_node=None,
+                 poll_stale_after=600, node_stale_after=3600):
+        super(ComputeNodeMonitorActor, self).__init__()
+        self._later = self.actor_ref.proxy()
+        self._logger = logging.getLogger('arvnodeman.computenode')
+        # Last debug message sent, used to suppress repeats (see _debug).
+        self._last_log = None
+        self._shutdowns = shutdown_timer
+        self._timer = timer_actor
+        self._update = update_actor
+        self.cloud_node = cloud_node
+        self.cloud_node_start_time = cloud_node_start_time
+        self.poll_stale_after = poll_stale_after
+        self.node_stale_after = node_stale_after
+        self.subscribers = set()
+        self.arvados_node = None
+        self._later.update_arvados_node(arvados_node)
+        self.last_shutdown_opening = None
+        self._later.consider_shutdown()
+
+    def subscribe(self, subscriber):
+        self.subscribers.add(subscriber)
+
+    def _debug(self, msg, *args):
+        # Suppress consecutive duplicate debug messages.
+        if msg == self._last_log:
+            return
+        self._last_log = msg
+        self._logger.debug(msg, *args)
+
+    def in_state(self, *states):
+        # Return a boolean to say whether or not our Arvados node record is in
+        # one of the given states.  If state information is not
+        # available--because this node has no Arvados record, the record is
+        # stale, or the record has no state information--return None.
+        if (self.arvados_node is None) or not timestamp_fresh(
+              arvados_node_mtime(self.arvados_node), self.node_stale_after):
+            return None
+        state = self.arvados_node['info'].get('slurm_state')
+        if not state:
+            return None
+        result = state in states
+        if state == 'idle':
+            # An "idle" node that still has a job assigned doesn't count.
+            result = result and not self.arvados_node['job_uuid']
+        return result
+
+    def shutdown_eligible(self):
+        """Return whether this node may be shut down right now."""
+        if not self._shutdowns.window_open():
+            return False
+        elif self.arvados_node is None:
+            # If this is a new, unpaired node, it's eligible for
+            # shutdown--we figure there was an error during bootstrap.
+            return timestamp_fresh(self.cloud_node_start_time,
+                                   self.node_stale_after)
+        else:
+            return self.in_state('idle')
+
+    def consider_shutdown(self):
+        # Notify subscribers if shutdown is possible now; otherwise arrange
+        # to re-check when the next shutdown window opens.
+        next_opening = self._shutdowns.next_opening()
+        if self.shutdown_eligible():
+            self._debug("Node %s suggesting shutdown.", self.cloud_node.id)
+            _notify_subscribers(self._later, self.subscribers)
+        elif self._shutdowns.window_open():
+            self._debug("Node %s shutdown window open but node busy.",
+                        self.cloud_node.id)
+        elif self.last_shutdown_opening != next_opening:
+            self._debug("Node %s shutdown window closed.  Next at %s.",
+                        self.cloud_node.id, time.ctime(next_opening))
+            self._timer.schedule(next_opening, self._later.consider_shutdown)
+            self.last_shutdown_opening = next_opening
+
+    def offer_arvados_pair(self, arvados_node):
+        """Pair with arvados_node if its IP matches; return our node id.
+
+        Returns None when already paired or when the IP doesn't match.
+        """
+        if self.arvados_node is not None:
+            return None
+        elif arvados_node['ip_address'] in self.cloud_node.private_ips:
+            self._later.update_arvados_node(arvados_node)
+            return self.cloud_node.id
+        else:
+            return None
+
+    def update_cloud_node(self, cloud_node):
+        if cloud_node is not None:
+            self.cloud_node = cloud_node
+            self._later.consider_shutdown()
+
+    def update_arvados_node(self, arvados_node):
+        if arvados_node is not None:
+            self.arvados_node = arvados_node
+            # Keep the cloud node's name in sync with the Arvados FQDN.
+            new_hostname = arvados_node_fqdn(self.arvados_node)
+            if new_hostname != self.cloud_node.name:
+                self._update.sync_node(self.cloud_node, self.arvados_node)
+            self._later.consider_shutdown()
diff --git a/services/nodemanager/arvnodeman/computenode/dispatch/slurm.py b/services/nodemanager/arvnodeman/computenode/dispatch/slurm.py
new file mode 100644 (file)
index 0000000..6eaa8b9
--- /dev/null
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+import subprocess
+import time
+
+from . import \
+    ComputeNodeSetupActor, ComputeNodeUpdateActor, ComputeNodeMonitorActor
+from . import ComputeNodeShutdownActor as ShutdownActorBase
+
+class ComputeNodeShutdownActor(ShutdownActorBase):
+    """Shutdown actor that drains the node in SLURM before destroying it."""
+    # Each state keeps the trailing newline so it matches the raw sinfo
+    # output compared in await_slurm_drain.
+    SLURM_END_STATES = frozenset(['down\n', 'down*\n', 'drain\n', 'fail\n'])
+
+    def on_start(self):
+        arv_node = self._monitor.arvados_node.get()
+        if arv_node is None:
+            # No Arvados record -- presumably the node never registered
+            # with SLURM; fall back to a plain cloud shutdown.
+            return super(ComputeNodeShutdownActor, self).on_start()
+        else:
+            self._nodename = arv_node['hostname']
+            self._logger.info("Draining SLURM node %s", self._nodename)
+            self._later.issue_slurm_drain()
+
+    def _set_node_state(self, state, *args):
+        # Run `scontrol update NodeName=... State=...` plus any extra args.
+        cmd = ['scontrol', 'update', 'NodeName=' + self._nodename,
+               'State=' + state]
+        cmd.extend(args)
+        subprocess.check_output(cmd)
+
+    @ShutdownActorBase._retry((subprocess.CalledProcessError,))
+    def cancel_shutdown(self):
+        # Return the node to service in SLURM before cancelling.
+        self._set_node_state('RESUME')
+        return super(ComputeNodeShutdownActor, self).cancel_shutdown()
+
+    @ShutdownActorBase._stop_if_window_closed
+    @ShutdownActorBase._retry((subprocess.CalledProcessError,))
+    def issue_slurm_drain(self):
+        self._set_node_state('DRAIN', 'Reason=Node Manager shutdown')
+        self._logger.info("Waiting for SLURM node %s to drain", self._nodename)
+        self._later.await_slurm_drain()
+
+    @ShutdownActorBase._stop_if_window_closed
+    @ShutdownActorBase._retry((subprocess.CalledProcessError,))
+    def await_slurm_drain(self):
+        # Poll sinfo every 10 seconds until the node reaches an end state.
+        output = subprocess.check_output(
+            ['sinfo', '--noheader', '-o', '%t', '-n', self._nodename])
+        if output in self.SLURM_END_STATES:
+            self._later.shutdown_node()
+        else:
+            self._timer.schedule(time.time() + 10,
+                                 self._later.await_slurm_drain)
diff --git a/services/nodemanager/arvnodeman/computenode/driver/__init__.py b/services/nodemanager/arvnodeman/computenode/driver/__init__.py
new file mode 100644 (file)
index 0000000..3a0c206
--- /dev/null
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+import libcloud.common.types as cloud_types
+
+from ...config import NETWORK_ERRORS
+
+class BaseComputeNodeDriver(object):
+    """Abstract base class for compute node drivers.
+
+    libcloud abstracts away many of the differences between cloud providers,
+    but managing compute nodes requires some cloud-specific features (e.g.,
+    on EC2 we use tags to identify compute nodes).  Compute node drivers
+    are responsible for translating the node manager's cloud requests to a
+    specific cloud's vocabulary.
+
+    Subclasses must implement arvados_create_kwargs (to update node
+    creation kwargs with information about the specific Arvados node
+    record), sync_node, and node_start_time.
+    """
+    CLOUD_ERRORS = NETWORK_ERRORS + (cloud_types.LibcloudError,)
+
+    def __init__(self, auth_kwargs, list_kwargs, create_kwargs, driver_class):
+        self.real = driver_class(**auth_kwargs)
+        self.list_kwargs = list_kwargs
+        self.create_kwargs = create_kwargs
+
+    def __getattr__(self, name):
+        # Proxy non-extension methods to the real driver.
+        if (not name.startswith('_') and not name.startswith('ex_')
+              and hasattr(self.real, name)):
+            return getattr(self.real, name)
+        else:
+            # NOTE(review): object defines no __getattr__, so this super()
+            # call raises AttributeError about '__getattr__' itself rather
+            # than about `name` -- confirm that message is intended.
+            return super(BaseComputeNodeDriver, self).__getattr__(name)
+
+    def search_for(self, term, list_method, key=lambda item: item.id):
+        """Return the one item from list_method whose key matches term.
+
+        Raises ValueError unless exactly one match is found.  Results are
+        memoized in SEARCH_CACHE.
+        """
+        # NOTE(review): SEARCH_CACHE is not defined on this base class;
+        # subclasses that call search_for must supply it (the EC2 driver
+        # defines a class-level dict).
+        cache_key = (list_method, term)
+        if cache_key not in self.SEARCH_CACHE:
+            results = [item for item in getattr(self.real, list_method)()
+                       if key(item) == term]
+            count = len(results)
+            if count != 1:
+                raise ValueError("{} returned {} results for '{}'".format(
+                        list_method, count, term))
+            self.SEARCH_CACHE[cache_key] = results[0]
+        return self.SEARCH_CACHE[cache_key]
+
+    def list_nodes(self):
+        return self.real.list_nodes(**self.list_kwargs)
+
+    def arvados_create_kwargs(self, arvados_node):
+        raise NotImplementedError("BaseComputeNodeDriver.arvados_create_kwargs")
+
+    def create_node(self, size, arvados_node):
+        # Merge configured kwargs with driver-specific Arvados kwargs.
+        kwargs = self.create_kwargs.copy()
+        kwargs.update(self.arvados_create_kwargs(arvados_node))
+        kwargs['size'] = size
+        return self.real.create_node(**kwargs)
+
+    def post_create_node(self, cloud_node):
+        # ComputeNodeSetupActor calls this method after the cloud node is
+        # created.  Any setup tasks that need to happen afterward (e.g.,
+        # tagging) should be done in this method.
+        pass
+
+    def sync_node(self, cloud_node, arvados_node):
+        # When a compute node first pings the API server, the API server
+        # will automatically assign some attributes on the corresponding
+        # node record, like hostname.  This method should propagate that
+        # information back to the cloud node appropriately.
+        raise NotImplementedError("BaseComputeNodeDriver.sync_node")
+
+    @classmethod
+    def node_start_time(cls, node):
+        raise NotImplementedError("BaseComputeNodeDriver.node_start_time")
+
+    @classmethod
+    def is_cloud_exception(cls, exception):
+        # libcloud compute drivers typically raise bare Exceptions to
+        # represent API errors.  Return True for any exception that is
+        # exactly an Exception, or a better-known higher-level exception.
+        return (isinstance(exception, cls.CLOUD_ERRORS) or
+                getattr(exception, '__class__', None) is Exception)
diff --git a/services/nodemanager/arvnodeman/computenode/driver/dummy.py b/services/nodemanager/arvnodeman/computenode/driver/dummy.py
new file mode 100644 (file)
index 0000000..3a286bb
--- /dev/null
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+import time
+
+import libcloud.compute.providers as cloud_provider
+import libcloud.compute.types as cloud_types
+
+from . import BaseComputeNodeDriver
+from .. import arvados_node_fqdn
+
+class ComputeNodeDriver(BaseComputeNodeDriver):
+    """Compute node driver wrapper for libcloud's dummy driver.
+
+    This class provides the glue necessary to run the node manager with a
+    dummy cloud.  It's useful for testing.
+    """
+    DEFAULT_DRIVER = cloud_provider.get_driver(cloud_types.Provider.DUMMY)
+    # One shared dummy driver instance, so every default-constructed
+    # wrapper sees the same simulated cloud state (see __init__).
+    DEFAULT_REAL = DEFAULT_DRIVER('ComputeNodeDriver')
+    DUMMY_START_TIME = time.time()
+
+    def __init__(self, auth_kwargs, list_kwargs, create_kwargs,
+                 driver_class=DEFAULT_DRIVER):
+        super(ComputeNodeDriver, self).__init__(
+            auth_kwargs, list_kwargs, create_kwargs, driver_class)
+        if driver_class is self.DEFAULT_DRIVER:
+            self.real = self.DEFAULT_REAL
+
+    def _ensure_private_ip(self, node):
+        # The dummy driver assigns no private IPs; fabricate one from the
+        # node id so IP-based pairing still works.
+        if not node.private_ips:
+            node.private_ips = ['10.10.0.{}'.format(node.id)]
+
+    def arvados_create_kwargs(self, arvados_node):
+        return {}
+
+    def list_nodes(self):
+        nodelist = super(ComputeNodeDriver, self).list_nodes()
+        for node in nodelist:
+            self._ensure_private_ip(node)
+        return nodelist
+
+    def create_node(self, size, arvados_node):
+        node = super(ComputeNodeDriver, self).create_node(size, arvados_node)
+        self._ensure_private_ip(node)
+        return node
+
+    def sync_node(self, cloud_node, arvados_node):
+        cloud_node.name = arvados_node_fqdn(arvados_node)
+
+    @classmethod
+    def node_start_time(cls, node):
+        # The dummy driver has no real boot times; report module load time.
+        return cls.DUMMY_START_TIME
diff --git a/services/nodemanager/arvnodeman/computenode/driver/ec2.py b/services/nodemanager/arvnodeman/computenode/driver/ec2.py
new file mode 100644 (file)
index 0000000..255a948
--- /dev/null
@@ -0,0 +1,102 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+import time
+
+import libcloud.compute.base as cloud_base
+import libcloud.compute.providers as cloud_provider
+import libcloud.compute.types as cloud_types
+from libcloud.compute.drivers import ec2 as cloud_ec2
+
+from . import BaseComputeNodeDriver
+from .. import arvados_node_fqdn
+
+### Monkeypatch libcloud to support AWS' new SecurityGroup API.
+# These classes can be removed when libcloud support specifying
+# security groups with the SecurityGroupId parameter.
+class ANMEC2Connection(cloud_ec2.EC2Connection):
+    # Rewrites RunInstances requests so security groups are passed by id
+    # (SecurityGroupId.N) instead of by name (SecurityGroup.N), which the
+    # newer AWS API requires inside a VPC.
+    def request(self, *args, **kwargs):
+        params = kwargs.get('params')
+        if (params is not None) and (params.get('Action') == 'RunInstances'):
+            for key in params.keys():
+                if key.startswith('SecurityGroup.'):
+                    new_key = key.replace('Group.', 'GroupId.', 1)
+                    # The stored value is a security group object; send its id.
+                    params[new_key] = params.pop(key).id
+            kwargs['params'] = params
+        return super(ANMEC2Connection, self).request(*args, **kwargs)
+
+
+class ANMEC2NodeDriver(cloud_ec2.EC2NodeDriver):
+    # EC2 driver wired to the monkeypatched connection class above.
+    connectionCls = ANMEC2Connection
+
+
+class ComputeNodeDriver(BaseComputeNodeDriver):
+    """Compute node driver wrapper for EC2.
+
+    This translates cloud driver requests to EC2's specific parameters.
+    """
+    DEFAULT_DRIVER = ANMEC2NodeDriver
+### End monkeypatch
+    SEARCH_CACHE = {}
+
+    def __init__(self, auth_kwargs, list_kwargs, create_kwargs,
+                 driver_class=DEFAULT_DRIVER):
+        # We need full lists of keys up front because these loops modify
+        # dictionaries in-place.
+        # Config keys use '_' where EC2 filters use ':' (e.g. tag_Name ->
+        # tag:Name); translate before building ex_filters.
+        for key in list_kwargs.keys():
+            list_kwargs[key.replace('_', ':')] = list_kwargs.pop(key)
+        self.tags = {key[4:]: value
+                     for key, value in list_kwargs.iteritems()
+                     if key.startswith('tag:')}
+        super(ComputeNodeDriver, self).__init__(
+            auth_kwargs, {'ex_filters': list_kwargs}, create_kwargs,
+            driver_class)
+        # Let each _init_<key> hook consume its create_kwargs entry and
+        # optionally replace it with a (new_key, new_value) pair.
+        for key in self.create_kwargs.keys():
+            init_method = getattr(self, '_init_' + key, None)
+            if init_method is not None:
+                new_pair = init_method(self.create_kwargs.pop(key))
+                if new_pair is not None:
+                    self.create_kwargs[new_pair[0]] = new_pair[1]
+
+    def _init_image_id(self, image_id):
+        # Resolve the configured image id to a libcloud NodeImage.
+        return 'image', self.search_for(image_id, 'list_images')
+
+    def _init_ping_host(self, ping_host):
+        # Consumed for our own use (see arvados_create_kwargs); returns
+        # None so it isn't passed to the cloud driver.
+        self.ping_host = ping_host
+
+    def _init_security_groups(self, group_names):
+        # Comma-separated names -> list of security group objects.
+        return 'ex_security_groups', [
+            self.search_for(gname.strip(), 'ex_get_security_groups')
+            for gname in group_names.split(',')]
+
+    def _init_subnet_id(self, subnet_id):
+        return 'ex_subnet', self.search_for(subnet_id, 'ex_list_subnets')
+
+    def _init_ssh_key(self, filename):
+        # Load the public key that cloud-init installs on new nodes.
+        with open(filename) as ssh_file:
+            key = cloud_base.NodeAuthSSHKey(ssh_file.read())
+        return 'auth', key
+
+    def arvados_create_kwargs(self, arvados_node):
+        # Name the instance after its Arvados FQDN, and pass the node ping
+        # URL as user data so the booted node can register itself.
+        result = {'name': arvados_node_fqdn(arvados_node)}
+        ping_secret = arvados_node['info'].get('ping_secret')
+        if ping_secret is not None:
+            ping_url = ('https://{}/arvados/v1/nodes/{}/ping?ping_secret={}'.
+                        format(self.ping_host, arvados_node['uuid'],
+                               ping_secret))
+            result['ex_userdata'] = ping_url
+        return result
+
+    def post_create_node(self, cloud_node):
+        # Apply the configured tags so list filters match the new node.
+        self.real.ex_create_tags(cloud_node, self.tags)
+
+    def sync_node(self, cloud_node, arvados_node):
+        # EC2 instance names live in the 'Name' tag.
+        self.real.ex_create_tags(cloud_node,
+                                 {'Name': arvados_node_fqdn(arvados_node)})
+
+    @classmethod
+    def node_start_time(cls, node):
+        # launch_time looks like '2014-01-01T00:00:00.000Z'; strip the
+        # fractional seconds, parse as UTC, and return a Unix timestamp.
+        time_str = node.extra['launch_time'].split('.', 2)[0] + 'UTC'
+        return time.mktime(time.strptime(
+                time_str,'%Y-%m-%dT%H:%M:%S%Z')) - time.timezone
diff --git a/services/nodemanager/arvnodeman/config.py b/services/nodemanager/arvnodeman/config.py
new file mode 100644 (file)
index 0000000..b7ec1fc
--- /dev/null
@@ -0,0 +1,120 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+import ConfigParser
+import importlib
+import logging
+import ssl
+import sys
+
+import arvados
+import httplib2
+import pykka
+from apiclient import errors as apierror
+
+# IOError is the base class for socket.error and friends.
+# It seems like it hits the sweet spot for operations we want to retry:
+# it's low-level, but unlikely to catch code bugs.
+NETWORK_ERRORS = (IOError, ssl.SSLError)
+ARVADOS_ERRORS = NETWORK_ERRORS + (apierror.Error,)
+
+actor_class = pykka.ThreadingActor
+
+class NodeManagerConfig(ConfigParser.SafeConfigParser):
+    """Node Manager Configuration class.
+
+    This a standard Python ConfigParser, with additional helper methods to
+    create objects instantiated with configuration information.
+    """
+
+    # 'Logging' options that name files rather than log levels.
+    LOGGING_NONLEVELS = frozenset(['file'])
+
+    def __init__(self, *args, **kwargs):
+        # Can't use super() because SafeConfigParser is an old-style class.
+        ConfigParser.SafeConfigParser.__init__(self, *args, **kwargs)
+        # Seed defaults for any section/option the user's file omits.
+        for sec_name, settings in {
+            'Arvados': {'insecure': 'no',
+                        'timeout': '15'},
+            'Daemon': {'min_nodes': '0',
+                       'max_nodes': '1',
+                       'poll_time': '60',
+                       'max_poll_time': '300',
+                       'poll_stale_after': '600',
+                       'boot_fail_after': str(sys.maxint),
+                       'node_stale_after': str(60 * 60 * 2)},
+            'Logging': {'file': '/dev/stderr',
+                        'level': 'WARNING'},
+        }.iteritems():
+            if not self.has_section(sec_name):
+                self.add_section(sec_name)
+            for opt_name, value in settings.iteritems():
+                if not self.has_option(sec_name, opt_name):
+                    self.set(sec_name, opt_name, value)
+
+    def get_section(self, section, transformer=None):
+        # Return one section as a dict.  If transformer is given, coerce
+        # each value with it; values it can't coerce stay as strings.
+        result = self._dict()
+        for key, value in self.items(section):
+            if transformer is not None:
+                try:
+                    value = transformer(value)
+                except (TypeError, ValueError):
+                    pass
+            result[key] = value
+        return result
+
+    def log_levels(self):
+        # Map each logger name in [Logging] to its logging level constant.
+        return {key: getattr(logging, self.get('Logging', key).upper())
+                for key in self.options('Logging')
+                if key not in self.LOGGING_NONLEVELS}
+
+    def dispatch_classes(self):
+        # Import the dispatch module (or the configured dispatcher
+        # submodule) and return its four actor classes.
+        mod_name = 'arvnodeman.computenode.dispatch'
+        if self.has_option('Daemon', 'dispatcher'):
+            mod_name = '{}.{}'.format(mod_name,
+                                      self.get('Daemon', 'dispatcher'))
+        module = importlib.import_module(mod_name)
+        return (module.ComputeNodeSetupActor,
+                module.ComputeNodeShutdownActor,
+                module.ComputeNodeUpdateActor,
+                module.ComputeNodeMonitorActor)
+
+    def new_arvados_client(self):
+        # Build a fresh Arvados API client from the [Arvados] section.
+        if self.has_option('Daemon', 'certs_file'):
+            certs_file = self.get('Daemon', 'certs_file')
+        else:
+            certs_file = None
+        insecure = self.getboolean('Arvados', 'insecure')
+        http = httplib2.Http(timeout=self.getint('Arvados', 'timeout'),
+                             ca_certs=certs_file,
+                             disable_ssl_certificate_validation=insecure)
+        return arvados.api('v1',
+                           cache=False,  # Don't reuse an existing client.
+                           host=self.get('Arvados', 'host'),
+                           token=self.get('Arvados', 'token'),
+                           insecure=insecure,
+                           http=http)
+
+    def new_cloud_client(self):
+        # Instantiate the ComputeNodeDriver for the configured provider.
+        module = importlib.import_module('arvnodeman.computenode.driver.' +
+                                         self.get('Cloud', 'provider'))
+        auth_kwargs = self.get_section('Cloud Credentials')
+        if 'timeout' in auth_kwargs:
+            auth_kwargs['timeout'] = int(auth_kwargs['timeout'])
+        return module.ComputeNodeDriver(auth_kwargs,
+                                        self.get_section('Cloud List'),
+                                        self.get_section('Cloud Create'))
+
+    def node_sizes(self, all_sizes):
+        # Pair each libcloud size with kwargs from its '[Size <id>]'
+        # section; sizes with no matching section are dropped.
+        size_kwargs = {}
+        for sec_name in self.sections():
+            sec_words = sec_name.split(None, 2)
+            if sec_words[0] != 'Size':
+                continue
+            size_kwargs[sec_words[1]] = self.get_section(sec_name, int)
+        return [(size, size_kwargs[size.id]) for size in all_sizes
+                if size.id in size_kwargs]
+
+    def shutdown_windows(self):
+        # [Cloud] shutdown_windows: comma-separated numbers -> list of ints.
+        return [int(n)
+                for n in self.get('Cloud', 'shutdown_windows').split(',')]
diff --git a/services/nodemanager/arvnodeman/daemon.py b/services/nodemanager/arvnodeman/daemon.py
new file mode 100644 (file)
index 0000000..0e48078
--- /dev/null
@@ -0,0 +1,355 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+import functools
+import logging
+import time
+
+import pykka
+
+from . import computenode as cnode
+from .computenode import dispatch
+from .config import actor_class
+
+class _ComputeNodeRecord(object):
+    # Bundles everything the daemon knows about one compute node: its
+    # monitor actor, the cloud node object, the Arvados node record, and
+    # when the Arvados record was last assigned to a booting node.
+    def __init__(self, actor=None, cloud_node=None, arvados_node=None,
+                 assignment_time=float('-inf')):
+        self.actor = actor
+        self.cloud_node = cloud_node
+        self.arvados_node = arvados_node
+        self.assignment_time = assignment_time
+
+
+class _BaseNodeTracker(object):
+    # Dict-like collection of _ComputeNodeRecords keyed by the identity of
+    # the attribute named by RECORD_ATTR (subclasses set RECORD_ATTR,
+    # PAIR_ATTR, and item_key).
+    def __init__(self):
+        self.nodes = {}
+        self.orphans = {}
+
+    # Proxy the methods listed below to self.nodes.
+    def _proxy_method(name):
+        method = getattr(dict, name)
+        @functools.wraps(method, ('__name__', '__doc__'))
+        def wrapper(self, *args, **kwargs):
+            return method(self.nodes, *args, **kwargs)
+        return wrapper
+
+    for _method_name in ['__contains__', '__getitem__', '__len__', 'get']:
+        locals()[_method_name] = _proxy_method(_method_name)
+
+    def record_key(self, record):
+        return self.item_key(getattr(record, self.RECORD_ATTR))
+
+    def add(self, record):
+        self.nodes[self.record_key(record)] = record
+
+    def update_record(self, key, item):
+        setattr(self.nodes[key], self.RECORD_ATTR, item)
+
+    def update_from(self, response):
+        # Refresh tracked records from a poll response.  Yields (key, item)
+        # for previously-unknown items; records absent from the response
+        # are moved from self.nodes into self.orphans.
+        unseen = set(self.nodes.iterkeys())
+        for item in response:
+            key = self.item_key(item)
+            if key in unseen:
+                unseen.remove(key)
+                self.update_record(key, item)
+            else:
+                yield key, item
+        self.orphans = {key: self.nodes.pop(key) for key in unseen}
+
+    def unpaired(self):
+        # Records not yet matched with their counterpart (PAIR_ATTR unset).
+        return (record for record in self.nodes.itervalues()
+                if getattr(record, self.PAIR_ATTR) is None)
+
+
+class _CloudNodeTracker(_BaseNodeTracker):
+    # Tracks records by cloud node id; "unpaired" means no Arvados node.
+    RECORD_ATTR = 'cloud_node'
+    PAIR_ATTR = 'arvados_node'
+    item_key = staticmethod(lambda cloud_node: cloud_node.id)
+
+
+class _ArvadosNodeTracker(_BaseNodeTracker):
+    # Tracks records by Arvados node UUID; "unpaired" means no cloud node.
+    RECORD_ATTR = 'arvados_node'
+    PAIR_ATTR = 'cloud_node'
+    item_key = staticmethod(lambda arvados_node: arvados_node['uuid'])
+
+    def find_stale_node(self, stale_time):
+        # Return an Arvados node record that hasn't been updated recently
+        # AND hasn't recently been assigned to a booting node, so it can be
+        # reused for a new cloud node.  Returns None if none qualify.
+        for record in self.nodes.itervalues():
+            node = record.arvados_node
+            if (not cnode.timestamp_fresh(cnode.arvados_node_mtime(node),
+                                          stale_time) and
+                  not cnode.timestamp_fresh(record.assignment_time,
+                                            stale_time)):
+                return node
+        return None
+
+
+class NodeManagerDaemonActor(actor_class):
+    """Node Manager daemon.
+
+    This actor subscribes to all information polls about cloud nodes,
+    Arvados nodes, and the job queue.  It creates a ComputeNodeMonitorActor
+    for every cloud node, subscribing them to poll updates
+    appropriately.  It creates and destroys cloud nodes based on job queue
+    demand, and stops the corresponding ComputeNode actors when their work
+    is done.
+    """
+    def __init__(self, server_wishlist_actor, arvados_nodes_actor,
+                 cloud_nodes_actor, cloud_update_actor, timer_actor,
+                 arvados_factory, cloud_factory,
+                 shutdown_windows, min_size, min_nodes, max_nodes,
+                 poll_stale_after=600,
+                 boot_fail_after=1800,
+                 node_stale_after=7200,
+                 node_setup_class=dispatch.ComputeNodeSetupActor,
+                 node_shutdown_class=dispatch.ComputeNodeShutdownActor,
+                 node_actor_class=dispatch.ComputeNodeMonitorActor):
+        super(NodeManagerDaemonActor, self).__init__()
+        self._node_setup = node_setup_class
+        self._node_shutdown = node_shutdown_class
+        self._node_actor = node_actor_class
+        self._cloud_updater = cloud_update_actor
+        self._timer = timer_actor
+        self._new_arvados = arvados_factory
+        self._new_cloud = cloud_factory
+        self._cloud_driver = self._new_cloud()
+        self._logger = logging.getLogger('arvnodeman.daemon')
+        # Proxy to ourselves: messages sent through it are handled
+        # asynchronously after the current message completes.
+        self._later = self.actor_ref.proxy()
+        self.shutdown_windows = shutdown_windows
+        self.min_cloud_size = min_size
+        self.min_nodes = min_nodes
+        self.max_nodes = max_nodes
+        self.poll_stale_after = poll_stale_after
+        self.boot_fail_after = boot_fail_after
+        self.node_stale_after = node_stale_after
+        self.last_polls = {}
+        # Subscribe to each poller and mark its data as initially stale.
+        for poll_name in ['server_wishlist', 'arvados_nodes', 'cloud_nodes']:
+            poll_actor = locals()[poll_name + '_actor']
+            poll_actor.subscribe(getattr(self._later, 'update_' + poll_name))
+            setattr(self, '_{}_actor'.format(poll_name), poll_actor)
+            self.last_polls[poll_name] = -self.poll_stale_after
+        self.cloud_nodes = _CloudNodeTracker()
+        self.arvados_nodes = _ArvadosNodeTracker()
+        self.booting = {}       # Actor IDs to ComputeNodeSetupActors
+        self.booted = {}        # Cloud node IDs to _ComputeNodeRecords
+        self.shutdowns = {}     # Cloud node IDs to ComputeNodeShutdownActors
+        self._logger.debug("Daemon initialized")
+
+    def _update_poll_time(self, poll_key):
+        self.last_polls[poll_key] = time.time()
+
+    def _pair_nodes(self, node_record, arvados_node):
+        # Associate a cloud node record with its Arvados node record and
+        # route future Arvados updates to the node's monitor actor.
+        self._logger.info("Cloud node %s has associated with Arvados node %s",
+                          node_record.cloud_node.id, arvados_node['uuid'])
+        self._arvados_nodes_actor.subscribe_to(
+            arvados_node['uuid'], node_record.actor.update_arvados_node)
+        node_record.arvados_node = arvados_node
+        self.arvados_nodes.add(node_record)
+
+    def _new_node(self, cloud_node):
+        # Start a monitor actor for a cloud node and wire up its
+        # subscriptions; returns the new (unpaired) record.
+        start_time = self._cloud_driver.node_start_time(cloud_node)
+        shutdown_timer = cnode.ShutdownTimer(start_time,
+                                             self.shutdown_windows)
+        actor = self._node_actor.start(
+            cloud_node=cloud_node,
+            cloud_node_start_time=start_time,
+            shutdown_timer=shutdown_timer,
+            update_actor=self._cloud_updater,
+            timer_actor=self._timer,
+            arvados_node=None,
+            poll_stale_after=self.poll_stale_after,
+            node_stale_after=self.node_stale_after).proxy()
+        actor.subscribe(self._later.node_can_shutdown)
+        self._cloud_nodes_actor.subscribe_to(cloud_node.id,
+                                             actor.update_cloud_node)
+        record = _ComputeNodeRecord(actor, cloud_node)
+        return record
+
+    def update_cloud_nodes(self, nodelist):
+        # Handle a fresh cloud node list: register new nodes (reusing any
+        # record from self.booted), try to pair them with Arvados records,
+        # and clean up nodes that disappeared from the cloud.
+        self._update_poll_time('cloud_nodes')
+        for key, node in self.cloud_nodes.update_from(nodelist):
+            self._logger.info("Registering new cloud node %s", key)
+            if key in self.booted:
+                record = self.booted.pop(key)
+            else:
+                record = self._new_node(node)
+            self.cloud_nodes.add(record)
+            for arv_rec in self.arvados_nodes.unpaired():
+                if record.actor.offer_arvados_pair(arv_rec.arvados_node).get():
+                    self._pair_nodes(record, arv_rec.arvados_node)
+                    break
+        for key, record in self.cloud_nodes.orphans.iteritems():
+            record.actor.stop()
+            record.cloud_node = None
+            self.shutdowns.pop(key, None)
+
+    def update_arvados_nodes(self, nodelist):
+        # Handle a fresh Arvados node list: register new records, then try
+        # to pair every unpaired Arvados node with an unpaired cloud node.
+        self._update_poll_time('arvados_nodes')
+        for key, node in self.arvados_nodes.update_from(nodelist):
+            self._logger.info("Registering new Arvados node %s", key)
+            record = _ComputeNodeRecord(arvados_node=node)
+            self.arvados_nodes.add(record)
+        for arv_rec in self.arvados_nodes.unpaired():
+            arv_node = arv_rec.arvados_node
+            for cloud_rec in self.cloud_nodes.unpaired():
+                if cloud_rec.actor.offer_arvados_pair(arv_node).get():
+                    self._pair_nodes(cloud_rec, arv_node)
+                    break
+
+    def _nodes_up(self):
+        # Running, booted-but-unlisted, and still-booting nodes all count.
+        return sum(len(nodelist) for nodelist in
+                   [self.cloud_nodes, self.booted, self.booting])
+
+    def _nodes_busy(self):
+        # Count monitored nodes that are not idle.
+        return sum(1 for idle in
+                   pykka.get_all(rec.actor.in_state('idle') for rec in
+                                 self.cloud_nodes.nodes.itervalues())
+                   if idle is False)
+
+    def _nodes_wanted(self):
+        # How many more nodes to boot (positive) or shed (negative),
+        # bounded by min_nodes/max_nodes, otherwise driven by the wishlist
+        # minus nodes already available or shutting down.
+        up_count = self._nodes_up()
+        under_min = self.min_nodes - up_count
+        over_max = up_count - self.max_nodes
+        if over_max >= 0:
+            return -over_max
+        elif under_min > 0:
+            return under_min
+        else:
+            up_count -= len(self.shutdowns) + self._nodes_busy()
+            return len(self.last_wishlist) - up_count
+
+    def _nodes_excess(self):
+        # How many idle nodes could be shut down without dropping below
+        # min_nodes or starving the wishlist.
+        up_count = self._nodes_up() - len(self.shutdowns)
+        over_min = up_count - self.min_nodes
+        if over_min <= 0:
+            return over_min
+        else:
+            return up_count - self._nodes_busy() - len(self.last_wishlist)
+
+    def update_server_wishlist(self, wishlist):
+        self._update_poll_time('server_wishlist')
+        self.last_wishlist = wishlist
+        nodes_wanted = self._nodes_wanted()
+        if nodes_wanted > 0:
+            self._later.start_node()
+        elif (nodes_wanted < 0) and self.booting:
+            self._later.stop_booting_node()
+
+    def _check_poll_freshness(orig_func):
+        """Decorator to inhibit a method when poll information is stale.
+
+        This decorator checks the timestamps of all the poll information the
+        daemon has received.  The decorated method is only called if none
+        of the timestamps are considered stale.
+        """
+        @functools.wraps(orig_func)
+        def wrapper(self, *args, **kwargs):
+            now = time.time()
+            if all(now - t < self.poll_stale_after
+                   for t in self.last_polls.itervalues()):
+                return orig_func(self, *args, **kwargs)
+            else:
+                return None
+        return wrapper
+
+    @_check_poll_freshness
+    def start_node(self):
+        # Boot one new cloud node, reusing a stale Arvados record if one is
+        # available; re-queues itself if more nodes are still wanted.
+        nodes_wanted = self._nodes_wanted()
+        if nodes_wanted < 1:
+            return None
+        arvados_node = self.arvados_nodes.find_stale_node(self.node_stale_after)
+        try:
+            cloud_size = self.last_wishlist[self._nodes_up()]
+        except IndexError:
+            cloud_size = self.min_cloud_size
+        self._logger.info("Want %s more nodes.  Booting a %s node.",
+                          nodes_wanted, cloud_size.name)
+        new_setup = self._node_setup.start(
+            timer_actor=self._timer,
+            arvados_client=self._new_arvados(),
+            arvados_node=arvados_node,
+            cloud_client=self._new_cloud(),
+            cloud_size=cloud_size).proxy()
+        self.booting[new_setup.actor_ref.actor_urn] = new_setup
+        if arvados_node is not None:
+            # Record the assignment so find_stale_node won't hand this
+            # record to another setup actor right away.
+            self.arvados_nodes[arvados_node['uuid']].assignment_time = (
+                time.time())
+        new_setup.subscribe(self._later.node_up)
+        if nodes_wanted > 1:
+            self._later.start_node()
+
+    def _get_actor_attrs(self, actor, *attr_names):
+        return pykka.get_all([getattr(actor, name) for name in attr_names])
+
+    def node_up(self, setup_proxy):
+        # A setup actor finished booting a node.  Track it in self.booted
+        # until the cloud listing reports it, and schedule a shutdown if it
+        # never pairs with an Arvados record.
+        cloud_node = setup_proxy.cloud_node.get()
+        del self.booting[setup_proxy.actor_ref.actor_urn]
+        setup_proxy.stop()
+        record = self.cloud_nodes.get(cloud_node.id)
+        if record is None:
+            record = self._new_node(cloud_node)
+            self.booted[cloud_node.id] = record
+        self._timer.schedule(time.time() + self.boot_fail_after,
+                             self._later.shutdown_unpaired_node, cloud_node.id)
+
+    @_check_poll_freshness
+    def stop_booting_node(self):
+        # Try to cancel one in-progress boot; only setup actors that have
+        # not yet created a cloud node can be stopped.
+        nodes_excess = self._nodes_excess()
+        if (nodes_excess < 1) or not self.booting:
+            return None
+        for key, node in self.booting.iteritems():
+            node.stop_if_no_cloud_node().get()
+            if not node.actor_ref.is_alive():
+                del self.booting[key]
+                if nodes_excess > 1:
+                    self._later.stop_booting_node()
+                break
+
+    def _begin_node_shutdown(self, node_actor, cancellable):
+        # Start a shutdown actor for a node, unless one is already running.
+        # cancellable=True lets the shutdown abort if the node gets work.
+        cloud_node_id = node_actor.cloud_node.get().id
+        if cloud_node_id in self.shutdowns:
+            return None
+        shutdown = self._node_shutdown.start(
+            timer_actor=self._timer, cloud_client=self._new_cloud(),
+            node_monitor=node_actor.actor_ref, cancellable=cancellable).proxy()
+        self.shutdowns[cloud_node_id] = shutdown
+        shutdown.subscribe(self._later.node_finished_shutdown)
+
+    @_check_poll_freshness
+    def node_can_shutdown(self, node_actor):
+        if self._nodes_excess() > 0:
+            self._begin_node_shutdown(node_actor, cancellable=True)
+
+    def shutdown_unpaired_node(self, cloud_node_id):
+        # Called boot_fail_after seconds after boot: if the node still has
+        # no Arvados pairing, consider the boot failed and shut it down.
+        for record_dict in [self.cloud_nodes, self.booted]:
+            if cloud_node_id in record_dict:
+                record = record_dict[cloud_node_id]
+                break
+        else:
+            return None
+        if record.arvados_node is None:
+            self._begin_node_shutdown(record.actor, cancellable=False)
+
+    def node_finished_shutdown(self, shutdown_actor):
+        # A shutdown actor completed.  On failure, forget the shutdown so
+        # it can be retried; on success for a booted-only node, drop its
+        # record (listed nodes are cleaned up by update_cloud_nodes).
+        success, cloud_node = self._get_actor_attrs(shutdown_actor, 'success',
+                                                    'cloud_node')
+        shutdown_actor.stop()
+        cloud_node_id = cloud_node.id
+        if not success:
+            del self.shutdowns[cloud_node_id]
+        elif cloud_node_id in self.booted:
+            self.booted.pop(cloud_node_id).actor.stop()
+            del self.shutdowns[cloud_node_id]
+
+    def shutdown(self):
+        # Graceful exit: stop booting anything new, cancel boots that
+        # haven't created cloud nodes, then wait for them to finish.
+        self._logger.info("Shutting down after signal.")
+        self.poll_stale_after = -1  # Inhibit starting/stopping nodes
+        for bootnode in self.booting.itervalues():
+            bootnode.stop_if_no_cloud_node()
+        self._later.await_shutdown()
+
+    def await_shutdown(self):
+        # Poll once a second until all setup actors have exited, then stop.
+        if any(node.actor_ref.is_alive() for node in self.booting.itervalues()):
+            self._timer.schedule(time.time() + 1, self._later.await_shutdown)
+        else:
+            self.stop()
diff --git a/services/nodemanager/arvnodeman/jobqueue.py b/services/nodemanager/arvnodeman/jobqueue.py
new file mode 100644 (file)
index 0000000..06f66b7
--- /dev/null
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+import logging
+
+from . import clientactor
+from .config import ARVADOS_ERRORS
+
+class ServerCalculator(object):
+    """Generate cloud server wishlists from an Arvados job queue.
+
+    Instantiate this class with a list of cloud node sizes you're willing to
+    use, plus keyword overrides from the configuration.  Then you can pass
+    job queues to servers_for_queue.  It will return a list of node sizes
+    that would best satisfy the jobs, choosing the cheapest size that
+    satisfies each job, and ignoring jobs that can't be satisfied.
+    """
+
+    class CloudSizeWrapper(object):
+        # Wraps a libcloud NodeSize, copying its standard fields and
+        # layering on config overrides plus the extra 'cores'/'scratch'
+        # attributes used for constraint matching.
+        def __init__(self, real_size, **kwargs):
+            self.real = real_size
+            for name in ['id', 'name', 'ram', 'disk', 'bandwidth', 'price',
+                         'extra']:
+                setattr(self, name, getattr(self.real, name))
+            self.cores = kwargs.pop('cores')
+            self.scratch = self.disk
+            for name, override in kwargs.iteritems():
+                if not hasattr(self, name):
+                    raise ValueError("unrecognized size field '%s'" % (name,))
+                setattr(self, name, override)
+
+        def meets_constraints(self, **kwargs):
+            # A field value of 0 means "unknown/unlimited" and always passes.
+            for name, want_value in kwargs.iteritems():
+                have_value = getattr(self, name)
+                if (have_value != 0) and (have_value < want_value):
+                    return False
+            return True
+
+
+    def __init__(self, server_list, max_nodes=None):
+        self.cloud_sizes = [self.CloudSizeWrapper(s, **kws)
+                            for s, kws in server_list]
+        # Sorted cheapest-first so constraint search finds the cheapest fit.
+        self.cloud_sizes.sort(key=lambda s: s.price)
+        self.max_nodes = max_nodes or float('inf')
+        self.logger = logging.getLogger('arvnodeman.jobqueue')
+        # UUIDs of jobs already logged as unsatisfiable, to avoid repeats.
+        self.logged_jobs = set()
+
+    @staticmethod
+    def coerce_int(x, fallback):
+        try:
+            return int(x)
+        except (TypeError, ValueError):
+            return fallback
+
+    def cloud_size_for_constraints(self, constraints):
+        # Cheapest size meeting the job's runtime constraints, or None.
+        want_value = lambda key: self.coerce_int(constraints.get(key), 0)
+        wants = {'cores': want_value('min_cores_per_node'),
+                 'ram': want_value('min_ram_mb_per_node'),
+                 'scratch': want_value('min_scratch_mb_per_node')}
+        for size in self.cloud_sizes:
+            if size.meets_constraints(**wants):
+                return size
+        return None
+
+    def servers_for_queue(self, queue):
+        # Build the wishlist: one size entry per node each queued job
+        # wants.  Unsatisfiable jobs are logged once and skipped.
+        servers = []
+        seen_jobs = set()
+        for job in queue:
+            seen_jobs.add(job['uuid'])
+            constraints = job['runtime_constraints']
+            want_count = self.coerce_int(constraints.get('min_nodes'), 1)
+            cloud_size = self.cloud_size_for_constraints(constraints)
+            if cloud_size is None:
+                if job['uuid'] not in self.logged_jobs:
+                    self.logged_jobs.add(job['uuid'])
+                    self.logger.debug("job %s not satisfiable", job['uuid'])
+            elif (want_count <= self.max_nodes):
+                servers.extend([cloud_size.real] * max(1, want_count))
+        # Drop log suppression for jobs no longer in the queue.
+        self.logged_jobs.intersection_update(seen_jobs)
+        return servers
+
+    def cheapest_size(self):
+        return self.cloud_sizes[0]
+
+
+class JobQueueMonitorActor(clientactor.RemotePollLoopActor):
+    """Actor to generate server wishlists from the job queue.
+
+    This actor regularly polls Arvados' job queue, and uses the provided
+    ServerCalculator to turn that into a list of requested node sizes.  That
+    list is sent to subscribers on every poll.
+    """
+
+    CLIENT_ERRORS = ARVADOS_ERRORS
+    LOGGER_NAME = 'arvnodeman.jobqueue'
+
+    def __init__(self, client, timer_actor, server_calc, *args, **kwargs):
+        super(JobQueueMonitorActor, self).__init__(
+            client, timer_actor, *args, **kwargs)
+        self._calculator = server_calc
+
+    def _send_request(self):
+        # Fetch the current queued jobs from the Arvados API.
+        return self._client.jobs().queue().execute()['items']
+
+    def _got_response(self, queue):
+        # Translate the queue into a size wishlist before notifying
+        # subscribers via the base class.
+        server_list = self._calculator.servers_for_queue(queue)
+        self._logger.debug("Sending server wishlist: %s",
+                           ', '.join(s.name for s in server_list) or "(empty)")
+        return super(JobQueueMonitorActor, self)._got_response(server_list)
diff --git a/services/nodemanager/arvnodeman/launcher.py b/services/nodemanager/arvnodeman/launcher.py
new file mode 100644 (file)
index 0000000..8801582
--- /dev/null
@@ -0,0 +1,133 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+import argparse
+import logging
+import signal
+import sys
+import time
+
+import daemon
+import pykka
+
+from . import config as nmconfig
+from .daemon import NodeManagerDaemonActor
+from .jobqueue import JobQueueMonitorActor, ServerCalculator
+from .nodelist import ArvadosNodeListMonitorActor, CloudNodeListMonitorActor
+from .timedcallback import TimedCallBackActor
+
+node_daemon = None
+
+def abort(msg, code=1):
+    # Print a fatal error and exit with the given status code.
+    print("arvados-node-manager: " + msg)
+    sys.exit(code)
+
+def parse_cli(args):
+    # Parse command-line arguments (args=None means sys.argv[1:]).
+    parser = argparse.ArgumentParser(
+        prog='arvados-node-manager',
+        description="Dynamically allocate Arvados cloud compute nodes")
+    parser.add_argument(
+        '--foreground', action='store_true', default=False,
+        help="Run in the foreground.  Don't daemonize.")
+    parser.add_argument(
+        '--config', help="Path to configuration file")
+    return parser.parse_args(args)
+
+def load_config(path):
+    # Read the configuration file, aborting the program on any error.
+    if not path:
+        abort("No --config file specified", 2)
+    config = nmconfig.NodeManagerConfig()
+    try:
+        with open(path) as config_file:
+            config.readfp(config_file)
+    except (IOError, OSError) as error:
+        abort("Error reading configuration file {}: {}".format(path, error))
+    return config
+
+def setup_logging(path, level, **sublevels):
+    # Configure the root logger to write to `path` at `level`; each extra
+    # keyword sets the level of the named sub-logger.
+    handler = logging.FileHandler(path)
+    handler.setFormatter(logging.Formatter(
+            '%(asctime)s %(name)s[%(process)d] %(levelname)s: %(message)s',
+            '%Y-%m-%d %H:%M:%S'))
+    root_logger = logging.getLogger()
+    root_logger.addHandler(handler)
+    root_logger.setLevel(level)
+    for logger_name, sublevel in sublevels.iteritems():
+        sublogger = logging.getLogger(logger_name)
+        sublogger.setLevel(sublevel)
+
+def build_server_calculator(config):
+    # Build a ServerCalculator from the configured node sizes; abort if
+    # no configured size matches anything the cloud offers.
+    cloud_size_list = config.node_sizes(config.new_cloud_client().list_sizes())
+    if not cloud_size_list:
+        abort("No valid node sizes configured")
+    return ServerCalculator(cloud_size_list,
+                            config.getint('Daemon', 'max_nodes'))
+
+def launch_pollers(config, server_calculator):
+    # Start the timer actor and the three poll-loop actors (cloud nodes,
+    # Arvados nodes, job queue); returns their proxies.
+    poll_time = config.getint('Daemon', 'poll_time')
+    max_poll_time = config.getint('Daemon', 'max_poll_time')
+
+    # Timer granularity is a tenth of the poll interval.
+    timer = TimedCallBackActor.start(poll_time / 10.0).proxy()
+    cloud_node_poller = CloudNodeListMonitorActor.start(
+        config.new_cloud_client(), timer, poll_time, max_poll_time).proxy()
+    arvados_node_poller = ArvadosNodeListMonitorActor.start(
+        config.new_arvados_client(), timer, poll_time, max_poll_time).proxy()
+    job_queue_poller = JobQueueMonitorActor.start(
+        config.new_arvados_client(), timer, server_calculator,
+        poll_time, max_poll_time).proxy()
+    return timer, cloud_node_poller, arvados_node_poller, job_queue_poller
+
+_caught_signals = {}
+def shutdown_signal(signal_code, frame):
+    # Escalating signal handler: first signal asks the daemon to shut
+    # down gracefully, second stops all actors, third (or a signal before
+    # the daemon exists) exits immediately.
+    current_count = _caught_signals.get(signal_code, 0)
+    _caught_signals[signal_code] = current_count + 1
+    if node_daemon is None:
+        pykka.ActorRegistry.stop_all()
+        sys.exit(-signal_code)
+    elif current_count == 0:
+        node_daemon.shutdown()
+    elif current_count == 1:
+        pykka.ActorRegistry.stop_all()
+    else:
+        sys.exit(-signal_code)
+
+def main(args=None):
+    # Program entry point: parse arguments, daemonize unless --foreground,
+    # wire up signals/logging, start all actors, then wait for shutdown.
+    global node_daemon
+    args = parse_cli(args)
+    config = load_config(args.config)
+
+    if not args.foreground:
+        daemon.DaemonContext().open()
+    for sigcode in [signal.SIGINT, signal.SIGQUIT, signal.SIGTERM]:
+        signal.signal(sigcode, shutdown_signal)
+
+    setup_logging(config.get('Logging', 'file'), **config.log_levels())
+    node_setup, node_shutdown, node_update, node_monitor = \
+        config.dispatch_classes()
+    server_calculator = build_server_calculator(config)
+    timer, cloud_node_poller, arvados_node_poller, job_queue_poller = \
+        launch_pollers(config, server_calculator)
+    cloud_node_updater = node_update.start(config.new_cloud_client).proxy()
+    node_daemon = NodeManagerDaemonActor.start(
+        job_queue_poller, arvados_node_poller, cloud_node_poller,
+        cloud_node_updater, timer,
+        config.new_arvados_client, config.new_cloud_client,
+        config.shutdown_windows(),
+        server_calculator.cheapest_size(),
+        config.getint('Daemon', 'min_nodes'),
+        config.getint('Daemon', 'max_nodes'),
+        config.getint('Daemon', 'poll_stale_after'),
+        config.getint('Daemon', 'boot_fail_after'),
+        config.getint('Daemon', 'node_stale_after'),
+        node_setup, node_shutdown, node_monitor).proxy()
+
+    # Sleep until a signal arrives, then wait for the daemon actor to
+    # finish its shutdown sequence before stopping everything else.
+    signal.pause()
+    daemon_stopped = node_daemon.actor_ref.actor_stopped.is_set
+    while not daemon_stopped():
+        time.sleep(1)
+    pykka.ActorRegistry.stop_all()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/services/nodemanager/arvnodeman/nodelist.py b/services/nodemanager/arvnodeman/nodelist.py
new file mode 100644 (file)
index 0000000..83dd93f
--- /dev/null
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+from . import clientactor
+from . import config
+
+class ArvadosNodeListMonitorActor(clientactor.RemotePollLoopActor):
+    """Actor to poll the Arvados node list.
+
+    This actor regularly polls the list of Arvados node records, and
+    sends it to subscribers.
+    """
+    LOGGER_NAME = 'arvnodeman.arvados_nodes'
+
+    def is_common_error(self, exception):
+        return isinstance(exception, config.ARVADOS_ERRORS)
+
+    def _item_key(self, node):
+        return node['uuid']
+
+    def _send_request(self):
+        return self._client.nodes().list(limit=10000).execute()['items']
+
+
+class CloudNodeListMonitorActor(clientactor.RemotePollLoopActor):
+    """Actor to poll the cloud node list.
+
+    This actor regularly polls the cloud to get a list of running compute
+    nodes, and sends it to subscribers.
+    """
+    LOGGER_NAME = 'arvnodeman.cloud_nodes'
+
+    def is_common_error(self, exception):
+        return self._client.is_cloud_exception(exception)
+
+    def _item_key(self, node):
+        return node.id
+
+    def _send_request(self):
+        return self._client.list_nodes()
diff --git a/services/nodemanager/arvnodeman/timedcallback.py b/services/nodemanager/arvnodeman/timedcallback.py
new file mode 100644 (file)
index 0000000..615f798
--- /dev/null
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+import heapq
+import time
+
+import pykka
+
+from .config import actor_class
+
+class TimedCallBackActor(actor_class):
+    """Send messages to other actors on a schedule.
+
+    Other actors can call the schedule() method to schedule delivery of a
+    message at a later time.  This actor runs the necessary event loop for
+    delivery.
+    """
+    def __init__(self, max_sleep=1):
+        super(TimedCallBackActor, self).__init__()
+        self._proxy = self.actor_ref.proxy()
+        self.messages = []
+        self.max_sleep = max_sleep
+
+    def schedule(self, delivery_time, receiver, *args, **kwargs):
+        if not self.messages:
+            self._proxy.deliver()
+        heapq.heappush(self.messages, (delivery_time, receiver, args, kwargs))
+
+    def deliver(self):
+        if not self.messages:
+            return None
+        til_next = self.messages[0][0] - time.time()
+        if til_next < 0:
+            t, receiver, args, kwargs = heapq.heappop(self.messages)
+            try:
+                receiver(*args, **kwargs)
+            except pykka.ActorDeadError:
+                pass
+        else:
+            time.sleep(min(til_next, self.max_sleep))
+        self._proxy.deliver()
diff --git a/services/nodemanager/bin/arvados-node-manager b/services/nodemanager/bin/arvados-node-manager
new file mode 100644 (file)
index 0000000..3a91288
--- /dev/null
@@ -0,0 +1,6 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+from arvnodeman.launcher import main
+main()
diff --git a/services/nodemanager/doc/ec2.example.cfg b/services/nodemanager/doc/ec2.example.cfg
new file mode 100644 (file)
index 0000000..024ed2b
--- /dev/null
@@ -0,0 +1,136 @@
+# EC2 configuration for Arvados Node Manager.
+# All times are in seconds unless specified otherwise.
+
+[Daemon]
+# The dispatcher can customize the start and stop procedure for
+# cloud nodes.  For example, the SLURM dispatcher drains nodes
+# through SLURM before shutting them down.
+#dispatcher = slurm
+
+# Node Manager will ensure that there are at least this many nodes
+# running at all times.
+min_nodes = 0
+
+# Node Manager will not start any compute nodes when at least this
+# many are running.
+max_nodes = 8
+
+# Poll EC2 nodes and Arvados for new information every N seconds.
+poll_time = 60
+
+# Polls have exponential backoff when services fail to respond.
+# This is the longest time to wait between polls.
+max_poll_time = 300
+
+# If Node Manager can't successfully poll a service for this long,
+# it will never start or stop compute nodes, on the assumption that its
+# information is too outdated.
+poll_stale_after = 600
+
+# If Node Manager boots a cloud node, and it does not pair with an Arvados
+# node before this long, assume that there was a cloud bootstrap failure and
+# shut it down.  Note that normal shutdown windows apply (see the Cloud
+# section), so this should be shorter than the first shutdown window value.
+boot_fail_after = 1800
+
+# "Node stale time" affects two related behaviors.
+# 1. If a compute node has been running for at least this long, but it
+# isn't paired with an Arvados node, do not shut it down, but leave it alone.
+# This prevents the node manager from shutting down a node that might
+# actually be doing work, but is having temporary trouble contacting the
+# API server.
+# 2. When the Node Manager starts a new compute node, it will try to reuse
+# an Arvados node that hasn't been updated for this long.
+node_stale_after = 14400
+
+# File path for Certificate Authorities
+certs_file = /etc/ssl/certs/ca-certificates.crt
+
+[Logging]
+# Log file path
+file = /var/log/arvados/node-manager.log
+
+# Log level for most Node Manager messages.
+# Choose one of DEBUG, INFO, WARNING, ERROR, or CRITICAL.
+# WARNING lets you know when polling a service fails.
+# INFO additionally lets you know when a compute node is started or stopped.
+level = INFO
+
+# You can also set different log levels for specific libraries.
+# Pykka is the Node Manager's actor library.
+# Setting this to DEBUG will display tracebacks for uncaught
+# exceptions in the actors, but it's also very chatty.
+pykka = WARNING
+
+# Setting apiclient to INFO will log the URL of every Arvados API request.
+apiclient = WARNING
+
+[Arvados]
+host = zyxwv.arvadosapi.com
+token = ARVADOS_TOKEN
+timeout = 15
+
+# Accept an untrusted SSL certificate from the API server?
+insecure = no
+
+[Cloud]
+provider = ec2
+
+# It's usually most cost-effective to shut down compute nodes during narrow
+# windows of time.  For example, EC2 bills each node by the hour, so the best
+# time to shut down a node is right before a new hour of uptime starts.
+# Shutdown windows define these periods of time.  These are windows in
+# full minutes, separated by commas.  Counting from the time the node is
+# booted, the node WILL NOT shut down for N1 minutes; then it MAY shut down
+# for N2 minutes; then it WILL NOT shut down for N3 minutes; and so on.
+# For example, "54, 5, 1" means the node may shut down from the 54th to the
+# 59th minute of each hour of uptime.
+# Specify at least two windows.  You can add as many as you need beyond that.
+shutdown_windows = 54, 5, 1
+
+[Cloud Credentials]
+key = KEY
+secret = SECRET_KEY
+region = us-east-1
+timeout = 60
+
+[Cloud List]
+# This section defines filters that find compute nodes.
+# Tags that you specify here will automatically be added to nodes you create.
+# Replace colons in Amazon filters with underscores
+# (e.g., write "tag:mytag" as "tag_mytag").
+instance-state-name = running
+tag_arvados-class = dynamic-compute
+tag_cluster = zyxwv
+
+[Cloud Create]
+# New compute nodes will send pings to Arvados at this host.
+# You may specify a port, and use brackets to disambiguate IPv6 addresses.
+ping_host = hostname:port
+
+# Give the name of an SSH key on AWS...
+ex_keyname = string
+
+# ... or a file path for an SSH key that can log in to the compute node.
+# (One or the other, not both.)
+# ssh_key = path
+
+# The EC2 IDs of the image and subnet compute nodes should use.
+image_id = idstring
+subnet_id = idstring
+
+# Comma-separated EC2 IDs for the security group(s) assigned to each
+# compute node.
+security_groups = idstring1, idstring2
+
+[Size t2.medium]
+# You can define any number of Size sections to list EC2 sizes you're
+# willing to use.  The Node Manager should boot the cheapest size(s) that
+# can run jobs in the queue (N.B.: defining more than one size has not been
+# tested yet).
+# Each size section MUST define the number of cores it has.  You may also
+# want to define the number of mebibytes of scratch space for Crunch jobs.
+# You can also override Amazon's provided data fields by setting the same
+# names here.
+cores = 2
+scratch = 100
\ No newline at end of file
diff --git a/services/nodemanager/doc/local.example.cfg b/services/nodemanager/doc/local.example.cfg
new file mode 100644 (file)
index 0000000..314750e
--- /dev/null
@@ -0,0 +1,42 @@
+# You can use this configuration to run a development Node Manager for
+# testing.  It uses libcloud's dummy driver and your own development API server.
+# When new cloud nodes are created, you'll need to simulate the ping that
+# they send to the Arvados API server.  The easiest way I've found to do that
+# is through the API server Rails console: load the Node object, set its
+# IP address to 10.10.0.N (where N is the cloud node's ID), and save.
+
+[Daemon]
+min_nodes = 0
+max_nodes = 8
+poll_time = 15
+max_poll_time = 60
+poll_stale_after = 600
+node_stale_after = 300
+certs_file = /etc/ssl/certs/ca-certificates.crt
+
+[Logging]
+level = DEBUG
+pykka = DEBUG
+apiclient = WARNING
+
+[Arvados]
+host = localhost:3030
+# This is the token for the test fixture's admin user.
+token = 4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h
+insecure = yes
+timeout = 15
+
+[Cloud]
+provider = dummy
+shutdown_windows = 1, 1
+timeout = 15
+
+[Cloud Credentials]
+creds = dummycreds
+
+[Cloud List]
+[Cloud Create]
+
+[Size 2]
+cores = 4
+scratch = 1234
diff --git a/services/nodemanager/setup.py b/services/nodemanager/setup.py
new file mode 100644 (file)
index 0000000..5fc4294
--- /dev/null
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+
+import os
+import subprocess
+import time
+
+from setuptools import setup, find_packages
+from setuptools.command.egg_info import egg_info
+
+SETUP_DIR = os.path.dirname(__file__) or "."
+
+class TagBuildWithCommit(egg_info):
+    """Tag the build with the sha1 and date of the last git commit.
+
+    If a build tag has already been set (e.g., "egg_info -b", building
+    from source package), leave it alone.
+    """
+    def tags(self):
+        if self.tag_build is None:
+            git_tags = subprocess.check_output(
+                ['git', 'log', '--first-parent', '--max-count=1',
+                 '--format=format:%ct %h', SETUP_DIR]).split()
+            assert len(git_tags) == 2
+            git_tags[0] = time.strftime(
+                '%Y%m%d%H%M%S', time.gmtime(int(git_tags[0])))
+            self.tag_build = '.{}+{}'.format(*git_tags)
+        return egg_info.tags(self)
+
+
+setup(name='arvados-node-manager',
+      version='0.1',
+      description='Arvados compute node manager',
+      long_description=open(os.path.join(SETUP_DIR, 'README.rst')).read(),
+      author='Arvados',
+      author_email='info@arvados.org',
+      url="https://arvados.org",
+      license='GNU Affero General Public License, version 3.0',
+      packages=find_packages(),
+      install_requires=[
+        'apache-libcloud',
+        'arvados-python-client',
+        'pykka',
+        'python-daemon',
+        ],
+      scripts=['bin/arvados-node-manager'],
+      test_suite='tests',
+      tests_require=['mock>=1.0'],
+      zip_safe=False,
+      cmdclass={'egg_info': TagBuildWithCommit},
+      )
diff --git a/services/nodemanager/tests/__init__.py b/services/nodemanager/tests/__init__.py
new file mode 100644 (file)
index 0000000..c5eaf76
--- /dev/null
@@ -0,0 +1,15 @@
+#!/usr/bin/env python
+
+import logging
+import os
+
+# Set the ANMTEST_LOGLEVEL environment variable to enable logging at that level.
+loglevel = os.environ.get('ANMTEST_LOGLEVEL', 'CRITICAL')
+logging.basicConfig(level=getattr(logging, loglevel.upper()))
+
+# Set the ANM_TIMEOUT environment variable to the maximum amount of time to
+# wait for tested actors to respond to important messages.  The default value
+# is very conservative, because a small value may produce false negatives on
+# slower systems.  If you're debugging a known timeout issue, however, you may
+# want to set this lower to speed up tests.
+pykka_timeout = int(os.environ.get('ANMTEST_TIMEOUT', '10'))
diff --git a/services/nodemanager/tests/test_clientactor.py b/services/nodemanager/tests/test_clientactor.py
new file mode 100644 (file)
index 0000000..57a0d32
--- /dev/null
@@ -0,0 +1,149 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+import unittest
+
+import mock
+import pykka
+
+import arvnodeman.clientactor as clientactor
+from . import testutil
+
+class RemotePollLoopActorTestCase(testutil.RemotePollLoopActorTestMixin,
+                                  unittest.TestCase):
+    class MockClientError(Exception):
+        pass
+
+    class TestActor(clientactor.RemotePollLoopActor):
+        LOGGER_NAME = 'arvnodeman.testpoll'
+
+        def _send_request(self):
+            return self._client()
+    TestActor.CLIENT_ERRORS = (MockClientError,)
+    TEST_CLASS = TestActor
+
+
+    def build_monitor(self, side_effect, *args, **kwargs):
+        super(RemotePollLoopActorTestCase, self).build_monitor(*args, **kwargs)
+        self.client.side_effect = side_effect
+
+    def test_poll_loop_starts_after_subscription(self):
+        self.build_monitor(['test1'])
+        self.monitor.subscribe(self.subscriber).get(self.TIMEOUT)
+        self.stop_proxy(self.monitor)
+        self.subscriber.assert_called_with('test1')
+        self.assertTrue(self.timer.schedule.called)
+
+    def test_poll_loop_continues_after_failure(self):
+        self.build_monitor(self.MockClientError)
+        self.monitor.subscribe(self.subscriber).get(self.TIMEOUT)
+        self.assertTrue(self.stop_proxy(self.monitor),
+                        "poll loop died after error")
+        self.assertTrue(self.timer.schedule.called,
+                        "poll loop did not reschedule after error")
+        self.assertFalse(self.subscriber.called,
+                         "poll loop notified subscribers after error")
+
+    def test_late_subscribers_get_responses(self):
+        self.build_monitor(['pre_late_test', 'late_test'])
+        self.monitor.subscribe(lambda response: None).get(self.TIMEOUT)
+        self.monitor.subscribe(self.subscriber)
+        self.monitor.poll().get(self.TIMEOUT)
+        self.stop_proxy(self.monitor)
+        self.subscriber.assert_called_with('late_test')
+
+    def test_survive_dead_subscriptions(self):
+        self.build_monitor(['survive1', 'survive2'])
+        dead_subscriber = mock.Mock(name='dead_subscriber')
+        dead_subscriber.side_effect = pykka.ActorDeadError
+        self.monitor.subscribe(dead_subscriber)
+        self.monitor.subscribe(self.subscriber)
+        self.monitor.poll().get(self.TIMEOUT)
+        self.assertTrue(self.stop_proxy(self.monitor),
+                        "poll loop died from dead subscriber")
+        self.subscriber.assert_called_with('survive2')
+
+    def check_poll_timers(self, *test_times):
+        schedule_mock = self.timer.schedule
+        last_expect = None
+        with mock.patch('time.time') as time_mock:
+            for fake_time, expect_next in test_times:
+                time_mock.return_value = fake_time
+                self.monitor.poll(last_expect).get(self.TIMEOUT)
+                self.assertTrue(schedule_mock.called)
+                self.assertEqual(expect_next, schedule_mock.call_args[0][0])
+                schedule_mock.reset_mock()
+                last_expect = expect_next
+
+    def test_poll_timing_on_consecutive_successes_with_drift(self):
+        self.build_monitor(['1', '2'], poll_wait=3, max_poll_wait=14)
+        self.check_poll_timers((0, 3), (4, 6))
+
+    def test_poll_backoff_on_failures(self):
+        self.build_monitor(self.MockClientError, poll_wait=3, max_poll_wait=14)
+        self.check_poll_timers((0, 6), (6, 18), (18, 32))
+
+    def test_poll_timing_after_error_recovery(self):
+        self.build_monitor(['a', self.MockClientError(), 'b'],
+                           poll_wait=3, max_poll_wait=14)
+        self.check_poll_timers((0, 3), (4, 10), (10, 13))
+
+    def test_no_subscriptions_by_key_without_support(self):
+        self.build_monitor([])
+        with self.assertRaises(AttributeError):
+            self.monitor.subscribe_to('key')
+
+
+class RemotePollLoopActorWithKeysTestCase(testutil.RemotePollLoopActorTestMixin,
+                                          unittest.TestCase):
+    class TestActor(RemotePollLoopActorTestCase.TestActor):
+        def _item_key(self, item):
+            return item['key']
+    TEST_CLASS = TestActor
+
+
+    def build_monitor(self, side_effect, *args, **kwargs):
+        super(RemotePollLoopActorWithKeysTestCase, self).build_monitor(
+            *args, **kwargs)
+        self.client.side_effect = side_effect
+
+    def test_key_subscription(self):
+        self.build_monitor([[{'key': 1}, {'key': 2}]])
+        self.monitor.subscribe_to(2, self.subscriber).get(self.TIMEOUT)
+        self.stop_proxy(self.monitor)
+        self.subscriber.assert_called_with({'key': 2})
+
+    def test_survive_dead_key_subscriptions(self):
+        item = {'key': 3}
+        self.build_monitor([[item], [item]])
+        dead_subscriber = mock.Mock(name='dead_subscriber')
+        dead_subscriber.side_effect = pykka.ActorDeadError
+        self.monitor.subscribe_to(3, dead_subscriber)
+        self.monitor.subscribe_to(3, self.subscriber)
+        self.monitor.poll().get(self.TIMEOUT)
+        self.assertTrue(self.stop_proxy(self.monitor),
+                        "poll loop died from dead key subscriber")
+        self.subscriber.assert_called_with(item)
+
+    def test_mixed_subscriptions(self):
+        item = {'key': 4}
+        self.build_monitor([[item], [item]])
+        key_subscriber = mock.Mock(name='key_subscriber')
+        self.monitor.subscribe(self.subscriber)
+        self.monitor.subscribe_to(4, key_subscriber)
+        self.monitor.poll().get(self.TIMEOUT)
+        self.stop_proxy(self.monitor)
+        self.subscriber.assert_called_with([item])
+        key_subscriber.assert_called_with(item)
+
+    def test_subscription_to_missing_key(self):
+        self.build_monitor([[]])
+        self.monitor.subscribe_to('nonesuch', self.subscriber).get(self.TIMEOUT)
+        self.stop_proxy(self.monitor)
+        self.subscriber.assert_called_with(None)
+
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/services/nodemanager/tests/test_computenode.py b/services/nodemanager/tests/test_computenode.py
new file mode 100644 (file)
index 0000000..e22cccc
--- /dev/null
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+import time
+import unittest
+
+import arvados.errors as arverror
+import mock
+
+import arvnodeman.computenode as cnode
+from . import testutil
+
+@mock.patch('time.time', return_value=1)
+class ShutdownTimerTestCase(unittest.TestCase):
+    def test_two_length_window(self, time_mock):
+        timer = cnode.ShutdownTimer(time_mock.return_value, [8, 2])
+        self.assertEqual(481, timer.next_opening())
+        self.assertFalse(timer.window_open())
+        time_mock.return_value += 500
+        self.assertEqual(1081, timer.next_opening())
+        self.assertTrue(timer.window_open())
+        time_mock.return_value += 200
+        self.assertEqual(1081, timer.next_opening())
+        self.assertFalse(timer.window_open())
+
+    def test_three_length_window(self, time_mock):
+        timer = cnode.ShutdownTimer(time_mock.return_value, [6, 3, 1])
+        self.assertEqual(361, timer.next_opening())
+        self.assertFalse(timer.window_open())
+        time_mock.return_value += 400
+        self.assertEqual(961, timer.next_opening())
+        self.assertTrue(timer.window_open())
+        time_mock.return_value += 200
+        self.assertEqual(961, timer.next_opening())
+        self.assertFalse(timer.window_open())
diff --git a/services/nodemanager/tests/test_computenode_dispatch.py b/services/nodemanager/tests/test_computenode_dispatch.py
new file mode 100644 (file)
index 0000000..a1dfde3
--- /dev/null
@@ -0,0 +1,349 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+import time
+import unittest
+
+import arvados.errors as arverror
+import httplib2
+import mock
+import pykka
+
+import arvnodeman.computenode.dispatch as dispatch
+from . import testutil
+
+class ComputeNodeSetupActorTestCase(testutil.ActorTestMixin, unittest.TestCase):
+    def make_mocks(self, arvados_effect=None):
+        if arvados_effect is None:
+            arvados_effect = [testutil.arvados_node_mock()]
+        self.arvados_effect = arvados_effect
+        self.timer = testutil.MockTimer()
+        self.api_client = mock.MagicMock(name='api_client')
+        self.api_client.nodes().create().execute.side_effect = arvados_effect
+        self.api_client.nodes().update().execute.side_effect = arvados_effect
+        self.cloud_client = mock.MagicMock(name='cloud_client')
+        self.cloud_client.create_node.return_value = testutil.cloud_node_mock(1)
+
+    def make_actor(self, arv_node=None):
+        if not hasattr(self, 'timer'):
+            self.make_mocks(arvados_effect=[arv_node])
+        self.setup_actor = dispatch.ComputeNodeSetupActor.start(
+            self.timer, self.api_client, self.cloud_client,
+            testutil.MockSize(1), arv_node).proxy()
+
+    def test_creation_without_arvados_node(self):
+        self.make_actor()
+        self.assertEqual(self.arvados_effect[-1],
+                         self.setup_actor.arvados_node.get(self.TIMEOUT))
+        self.assertTrue(self.api_client.nodes().create().execute.called)
+        self.assertEqual(self.cloud_client.create_node(),
+                         self.setup_actor.cloud_node.get(self.TIMEOUT))
+
+    def test_creation_with_arvados_node(self):
+        self.make_actor(testutil.arvados_node_mock())
+        self.assertEqual(self.arvados_effect[-1],
+                         self.setup_actor.arvados_node.get(self.TIMEOUT))
+        self.assertTrue(self.api_client.nodes().update().execute.called)
+        self.assertEqual(self.cloud_client.create_node(),
+                         self.setup_actor.cloud_node.get(self.TIMEOUT))
+
+    def test_failed_arvados_calls_retried(self):
+        self.make_mocks([
+                arverror.ApiError(httplib2.Response({'status': '500'}), ""),
+                testutil.arvados_node_mock(),
+                ])
+        self.make_actor()
+        self.wait_for_assignment(self.setup_actor, 'arvados_node')
+
+    def test_failed_cloud_calls_retried(self):
+        self.make_mocks()
+        self.cloud_client.create_node.side_effect = [
+            Exception("test cloud creation error"),
+            self.cloud_client.create_node.return_value,
+            ]
+        self.make_actor()
+        self.wait_for_assignment(self.setup_actor, 'cloud_node')
+
+    def test_failed_post_create_retried(self):
+        self.make_mocks()
+        self.cloud_client.post_create_node.side_effect = [
+            Exception("test cloud post-create error"), None]
+        self.make_actor()
+        done = self.FUTURE_CLASS()
+        self.setup_actor.subscribe(done.set)
+        done.get(self.TIMEOUT)
+        self.assertEqual(2, self.cloud_client.post_create_node.call_count)
+
+    def test_stop_when_no_cloud_node(self):
+        self.make_mocks(
+            arverror.ApiError(httplib2.Response({'status': '500'}), ""))
+        self.make_actor()
+        self.setup_actor.stop_if_no_cloud_node()
+        self.assertTrue(
+            self.setup_actor.actor_ref.actor_stopped.wait(self.TIMEOUT))
+
+    def test_no_stop_when_cloud_node(self):
+        self.make_actor()
+        self.wait_for_assignment(self.setup_actor, 'cloud_node')
+        self.setup_actor.stop_if_no_cloud_node().get(self.TIMEOUT)
+        self.assertTrue(self.stop_proxy(self.setup_actor),
+                        "actor was stopped by stop_if_no_cloud_node")
+
+    def test_subscribe(self):
+        self.make_mocks(
+            arverror.ApiError(httplib2.Response({'status': '500'}), ""))
+        self.make_actor()
+        subscriber = mock.Mock(name='subscriber_mock')
+        self.setup_actor.subscribe(subscriber)
+        self.api_client.nodes().create().execute.side_effect = [
+            testutil.arvados_node_mock()]
+        self.wait_for_assignment(self.setup_actor, 'cloud_node')
+        self.assertEqual(self.setup_actor.actor_ref.actor_urn,
+                         subscriber.call_args[0][0].actor_ref.actor_urn)
+
+    def test_late_subscribe(self):
+        self.make_actor()
+        subscriber = mock.Mock(name='subscriber_mock')
+        self.wait_for_assignment(self.setup_actor, 'cloud_node')
+        self.setup_actor.subscribe(subscriber).get(self.TIMEOUT)
+        self.stop_proxy(self.setup_actor)
+        self.assertEqual(self.setup_actor.actor_ref.actor_urn,
+                         subscriber.call_args[0][0].actor_ref.actor_urn)
+
+
+class ComputeNodeShutdownActorMixin(testutil.ActorTestMixin):
+    def make_mocks(self, cloud_node=None, arvados_node=None,
+                   shutdown_open=True):
+        self.timer = testutil.MockTimer()
+        self.shutdowns = testutil.MockShutdownTimer()
+        self.shutdowns._set_state(shutdown_open, 300)
+        self.cloud_client = mock.MagicMock(name='cloud_client')
+        self.updates = mock.MagicMock(name='update_mock')
+        if cloud_node is None:
+            cloud_node = testutil.cloud_node_mock()
+        self.cloud_node = cloud_node
+        self.arvados_node = arvados_node
+
+    def make_actor(self, cancellable=True):
+        if not hasattr(self, 'timer'):
+            self.make_mocks()
+        monitor_actor = dispatch.ComputeNodeMonitorActor.start(
+            self.cloud_node, time.time(), self.shutdowns, self.timer,
+            self.updates, self.arvados_node)
+        self.shutdown_actor = self.ACTOR_CLASS.start(
+            self.timer, self.cloud_client, monitor_actor, cancellable).proxy()
+        self.monitor_actor = monitor_actor.proxy()
+
+    def check_success_flag(self, expected, allow_msg_count=1):
+        # allow_msg_count is the number of internal messages that may
+        # need to be handled for shutdown to finish.
+        for try_num in range(1 + allow_msg_count):
+            last_flag = self.shutdown_actor.success.get(self.TIMEOUT)
+            if last_flag is expected:
+                break
+        else:
+            self.fail("success flag {} is not {}".format(last_flag, expected))
+
+    def test_uncancellable_shutdown(self, *mocks):
+        self.make_mocks(shutdown_open=False)
+        self.cloud_client.destroy_node.return_value = False
+        self.make_actor(cancellable=False)
+        self.check_success_flag(None, 0)
+        self.shutdowns._set_state(True, 600)
+        self.cloud_client.destroy_node.return_value = True
+        self.check_success_flag(True)
+
+
+class ComputeNodeShutdownActorTestCase(ComputeNodeShutdownActorMixin,
+                                       unittest.TestCase):
+    ACTOR_CLASS = dispatch.ComputeNodeShutdownActor
+
+    def test_easy_shutdown(self):
+        self.make_actor()
+        self.check_success_flag(True)
+        self.assertTrue(self.cloud_client.destroy_node.called)
+
+    def test_shutdown_cancelled_when_window_closes(self):
+        self.make_mocks(shutdown_open=False)
+        self.make_actor()
+        self.check_success_flag(False, 2)
+        self.assertFalse(self.cloud_client.destroy_node.called)
+
+    def test_shutdown_retries_when_cloud_fails(self):
+        self.make_mocks()
+        self.cloud_client.destroy_node.return_value = False
+        self.make_actor()
+        self.assertIsNone(self.shutdown_actor.success.get(self.TIMEOUT))
+        self.cloud_client.destroy_node.return_value = True
+        self.check_success_flag(True)
+
+    def test_late_subscribe(self):
+        self.make_actor()
+        subscriber = mock.Mock(name='subscriber_mock')
+        self.shutdown_actor.subscribe(subscriber).get(self.TIMEOUT)
+        self.stop_proxy(self.shutdown_actor)
+        self.assertTrue(subscriber.called)
+        self.assertEqual(self.shutdown_actor.actor_ref.actor_urn,
+                         subscriber.call_args[0][0].actor_ref.actor_urn)
+
+
+class ComputeNodeUpdateActorTestCase(testutil.ActorTestMixin,
+                                     unittest.TestCase):
+    def make_actor(self):
+        self.driver = mock.MagicMock(name='driver_mock')
+        self.updater = dispatch.ComputeNodeUpdateActor.start(self.driver).proxy()
+
+    def test_node_sync(self):
+        self.make_actor()
+        cloud_node = testutil.cloud_node_mock()
+        arv_node = testutil.arvados_node_mock()
+        self.updater.sync_node(cloud_node, arv_node).get(self.TIMEOUT)
+        self.driver().sync_node.assert_called_with(cloud_node, arv_node)
+
+
+class ComputeNodeMonitorActorTestCase(testutil.ActorTestMixin,
+                                      unittest.TestCase):
+    def make_mocks(self, node_num):
+        self.shutdowns = testutil.MockShutdownTimer()
+        self.shutdowns._set_state(False, 300)
+        self.timer = mock.MagicMock(name='timer_mock')
+        self.updates = mock.MagicMock(name='update_mock')
+        self.cloud_mock = testutil.cloud_node_mock(node_num)
+        self.subscriber = mock.Mock(name='subscriber_mock')
+
+    def make_actor(self, node_num=1, arv_node=None, start_time=None):
+        if not hasattr(self, 'cloud_mock'):
+            self.make_mocks(node_num)
+        if start_time is None:
+            start_time = time.time()
+        self.node_actor = dispatch.ComputeNodeMonitorActor.start(
+            self.cloud_mock, start_time, self.shutdowns, self.timer,
+            self.updates, arv_node).proxy()
+        self.node_actor.subscribe(self.subscriber).get(self.TIMEOUT)
+
+    def node_state(self, *states):
+        return self.node_actor.in_state(*states).get(self.TIMEOUT)
+
+    def test_in_state_when_unpaired(self):
+        self.make_actor()
+        self.assertIsNone(self.node_state('idle', 'alloc'))
+
+    def test_in_state_when_pairing_stale(self):
+        self.make_actor(arv_node=testutil.arvados_node_mock(
+                job_uuid=None, age=90000))
+        self.assertIsNone(self.node_state('idle', 'alloc'))
+
+    def test_in_state_when_no_state_available(self):
+        self.make_actor(arv_node=testutil.arvados_node_mock(info={}))
+        self.assertIsNone(self.node_state('idle', 'alloc'))
+
+    def test_in_idle_state(self):
+        self.make_actor(2, arv_node=testutil.arvados_node_mock(job_uuid=None))
+        self.assertTrue(self.node_state('idle'))
+        self.assertFalse(self.node_state('alloc'))
+        self.assertTrue(self.node_state('idle', 'alloc'))
+
+    def test_in_alloc_state(self):
+        self.make_actor(3, arv_node=testutil.arvados_node_mock(job_uuid=True))
+        self.assertFalse(self.node_state('idle'))
+        self.assertTrue(self.node_state('alloc'))
+        self.assertTrue(self.node_state('idle', 'alloc'))
+
+    def test_init_shutdown_scheduling(self):
+        self.make_actor()
+        self.assertTrue(self.timer.schedule.called)
+        self.assertEqual(300, self.timer.schedule.call_args[0][0])
+
+    def test_shutdown_window_close_scheduling(self):
+        self.make_actor()
+        self.shutdowns._set_state(False, 600)
+        self.timer.schedule.reset_mock()
+        self.node_actor.consider_shutdown().get(self.TIMEOUT)
+        self.stop_proxy(self.node_actor)
+        self.assertTrue(self.timer.schedule.called)
+        self.assertEqual(600, self.timer.schedule.call_args[0][0])
+        self.assertFalse(self.subscriber.called)
+
+    def test_shutdown_subscription(self):
+        self.make_actor()
+        self.shutdowns._set_state(True, 600)
+        self.node_actor.consider_shutdown().get(self.TIMEOUT)
+        self.assertTrue(self.subscriber.called)
+        self.assertEqual(self.node_actor.actor_ref.actor_urn,
+                         self.subscriber.call_args[0][0].actor_ref.actor_urn)
+
+    def test_shutdown_without_arvados_node(self):
+        self.make_actor()
+        self.shutdowns._set_state(True, 600)
+        self.assertTrue(self.node_actor.shutdown_eligible().get(self.TIMEOUT))
+
+    def test_no_shutdown_without_arvados_node_and_old_cloud_node(self):
+        self.make_actor(start_time=0)
+        self.shutdowns._set_state(True, 600)
+        self.assertFalse(self.node_actor.shutdown_eligible().get(self.TIMEOUT))
+
+    def test_no_shutdown_when_window_closed(self):
+        self.make_actor(3, testutil.arvados_node_mock(3, job_uuid=None))
+        self.assertFalse(self.node_actor.shutdown_eligible().get(self.TIMEOUT))
+
+    def test_no_shutdown_when_node_running_job(self):
+        self.make_actor(4, testutil.arvados_node_mock(4, job_uuid=True))
+        self.shutdowns._set_state(True, 600)
+        self.assertFalse(self.node_actor.shutdown_eligible().get(self.TIMEOUT))
+
+    def test_no_shutdown_when_node_state_unknown(self):
+        self.make_actor(5, testutil.arvados_node_mock(5, info={}))
+        self.shutdowns._set_state(True, 600)
+        self.assertFalse(self.node_actor.shutdown_eligible().get(self.TIMEOUT))
+
+    def test_no_shutdown_when_node_state_stale(self):
+        self.make_actor(6, testutil.arvados_node_mock(6, age=90000))
+        self.shutdowns._set_state(True, 600)
+        self.assertFalse(self.node_actor.shutdown_eligible().get(self.TIMEOUT))
+
+    def test_arvados_node_match(self):
+        self.make_actor(2)
+        arv_node = testutil.arvados_node_mock(
+            2, hostname='compute-two.zzzzz.arvadosapi.com')
+        pair_id = self.node_actor.offer_arvados_pair(arv_node).get(self.TIMEOUT)
+        self.assertEqual(self.cloud_mock.id, pair_id)
+        self.stop_proxy(self.node_actor)
+        self.updates.sync_node.assert_called_with(self.cloud_mock, arv_node)
+
+    def test_arvados_node_mismatch(self):
+        self.make_actor(3)
+        arv_node = testutil.arvados_node_mock(1)
+        self.assertIsNone(
+            self.node_actor.offer_arvados_pair(arv_node).get(self.TIMEOUT))
+
+    def test_update_cloud_node(self):
+        self.make_actor(1)
+        self.make_mocks(2)
+        self.cloud_mock.id = '1'
+        self.node_actor.update_cloud_node(self.cloud_mock)
+        current_cloud = self.node_actor.cloud_node.get(self.TIMEOUT)
+        self.assertEqual([testutil.ip_address_mock(2)],
+                         current_cloud.private_ips)
+
+    def test_missing_cloud_node_update(self):
+        self.make_actor(1)
+        self.node_actor.update_cloud_node(None)
+        current_cloud = self.node_actor.cloud_node.get(self.TIMEOUT)
+        self.assertEqual([testutil.ip_address_mock(1)],
+                         current_cloud.private_ips)
+
+    def test_update_arvados_node(self):
+        self.make_actor(3)
+        job_uuid = 'zzzzz-jjjjj-updatejobnode00'
+        new_arvados = testutil.arvados_node_mock(3, job_uuid)
+        self.node_actor.update_arvados_node(new_arvados)
+        current_arvados = self.node_actor.arvados_node.get(self.TIMEOUT)
+        self.assertEqual(job_uuid, current_arvados['job_uuid'])
+
+    def test_missing_arvados_node_update(self):
+        self.make_actor(4, testutil.arvados_node_mock(4))
+        self.node_actor.update_arvados_node(None)
+        current_arvados = self.node_actor.arvados_node.get(self.TIMEOUT)
+        self.assertEqual(testutil.ip_address_mock(4),
+                         current_arvados['ip_address'])
diff --git a/services/nodemanager/tests/test_computenode_dispatch_slurm.py b/services/nodemanager/tests/test_computenode_dispatch_slurm.py
new file mode 100644 (file)
index 0000000..93cc60d
--- /dev/null
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+import subprocess
+import unittest
+
+import mock
+
+import arvnodeman.computenode.dispatch.slurm as slurm_dispatch
+from . import testutil
+from .test_computenode_dispatch import ComputeNodeShutdownActorMixin
+
+@mock.patch('subprocess.check_output')
+class SLURMComputeNodeShutdownActorTestCase(ComputeNodeShutdownActorMixin,
+                                            unittest.TestCase):
+    ACTOR_CLASS = slurm_dispatch.ComputeNodeShutdownActor
+
+    def check_slurm_got_args(self, proc_mock, *args):
+        self.assertTrue(proc_mock.called)
+        slurm_cmd = proc_mock.call_args[0][0]
+        for s in args:
+            self.assertIn(s, slurm_cmd)
+
+    def check_success_after_reset(self, proc_mock, end_state='drain\n'):
+        self.make_mocks(arvados_node=testutil.arvados_node_mock(63))
+        self.make_actor()
+        self.check_success_flag(None, 0)
+        self.check_success_flag(None, 0)
+        # Order is critical here: if the mock gets called when no return value
+        # or side effect is set, we may invoke a real subprocess.
+        proc_mock.return_value = end_state
+        proc_mock.side_effect = None
+        self.check_success_flag(True, 3)
+        self.check_slurm_got_args(proc_mock, 'compute63')
+
+    def make_wait_state_test(start_state='drng\n', end_state='drain\n'):
+        def test(self, proc_mock):
+            proc_mock.return_value = start_state
+            self.check_success_after_reset(proc_mock, end_state)
+        return test
+
+    for wait_state in ['alloc\n', 'drng\n', 'idle*\n']:
+        locals()['test_wait_while_' + wait_state.strip()
+                 ] = make_wait_state_test(start_state=wait_state)
+
+    for end_state in ['down\n', 'down*\n', 'drain\n', 'fail\n']:
+        locals()['test_wait_until_' + end_state.strip()
+                 ] = make_wait_state_test(end_state=end_state)
+
+    def test_retry_failed_slurm_calls(self, proc_mock):
+        proc_mock.side_effect = subprocess.CalledProcessError(1, ["mock"])
+        self.check_success_after_reset(proc_mock)
+
+    def test_slurm_bypassed_when_no_arvados_node(self, proc_mock):
+        # Test we correctly handle a node that failed to bootstrap.
+        proc_mock.return_value = 'idle\n'
+        self.make_actor()
+        self.check_success_flag(True)
+        self.assertFalse(proc_mock.called)
+
+    def test_node_undrained_when_shutdown_window_closes(self, proc_mock):
+        proc_mock.return_value = 'alloc\n'
+        self.make_mocks(arvados_node=testutil.arvados_node_mock(job_uuid=True))
+        self.make_actor()
+        self.check_success_flag(False, 2)
+        self.check_slurm_got_args(proc_mock, 'NodeName=compute99',
+                                  'State=RESUME')
diff --git a/services/nodemanager/tests/test_computenode_driver_ec2.py b/services/nodemanager/tests/test_computenode_driver_ec2.py
new file mode 100644 (file)
index 0000000..fae63a5
--- /dev/null
@@ -0,0 +1,115 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+import ssl
+import time
+import unittest
+
+import libcloud.common.types as cloud_types
+import mock
+
+import arvnodeman.computenode.driver.ec2 as ec2
+from . import testutil
+
+class EC2ComputeNodeDriverTestCase(unittest.TestCase):
+    def setUp(self):
+        self.driver_mock = mock.MagicMock(name='driver_mock')
+
+    def new_driver(self, auth_kwargs={}, list_kwargs={}, create_kwargs={}):
+        create_kwargs.setdefault('ping_host', '100::')
+        return ec2.ComputeNodeDriver(
+            auth_kwargs, list_kwargs, create_kwargs,
+            driver_class=self.driver_mock)
+
+    def test_driver_instantiation(self):
+        kwargs = {'key': 'testkey'}
+        driver = self.new_driver(auth_kwargs=kwargs)
+        self.assertTrue(self.driver_mock.called)
+        self.assertEqual(kwargs, self.driver_mock.call_args[1])
+
+    def test_list_kwargs_become_filters(self):
+        # We're also testing tag name translation.
+        driver = self.new_driver(list_kwargs={'tag_test': 'true'})
+        driver.list_nodes()
+        list_method = self.driver_mock().list_nodes
+        self.assertTrue(list_method.called)
+        self.assertEqual({'tag:test': 'true'},
+                          list_method.call_args[1].get('ex_filters'))
+
+    def test_create_location_loaded_at_initialization(self):
+        kwargs = {'location': 'testregion'}
+        driver = self.new_driver(create_kwargs=kwargs)
+        self.assertTrue(self.driver_mock().list_locations)
+
+    def test_create_image_loaded_at_initialization(self):
+        kwargs = {'image': 'testimage'}
+        driver = self.new_driver(create_kwargs=kwargs)
+        self.assertTrue(self.driver_mock().list_images)
+
+    def test_create_includes_ping_secret(self):
+        arv_node = testutil.arvados_node_mock(info={'ping_secret': 'ssshh'})
+        driver = self.new_driver()
+        driver.create_node(testutil.MockSize(1), arv_node)
+        create_method = self.driver_mock().create_node
+        self.assertTrue(create_method.called)
+        self.assertIn('ping_secret=ssshh',
+                      create_method.call_args[1].get('ex_userdata',
+                                                     'arg missing'))
+
+    def test_hostname_from_arvados_node(self):
+        arv_node = testutil.arvados_node_mock(8)
+        driver = self.new_driver()
+        self.assertEqual('compute8.zzzzz.arvadosapi.com',
+                         driver.arvados_create_kwargs(arv_node)['name'])
+
+    def test_default_hostname_from_new_arvados_node(self):
+        arv_node = testutil.arvados_node_mock(hostname=None)
+        driver = self.new_driver()
+        self.assertEqual('dynamic.compute.zzzzz.arvadosapi.com',
+                         driver.arvados_create_kwargs(arv_node)['name'])
+
+    def check_node_tagged(self, cloud_node, expected_tags):
+        tag_mock = self.driver_mock().ex_create_tags
+        self.assertTrue(tag_mock.called)
+        self.assertIs(cloud_node, tag_mock.call_args[0][0])
+        self.assertEqual(expected_tags, tag_mock.call_args[0][1])
+
+    def test_post_create_node_tags_from_list_kwargs(self):
+        expect_tags = {'key1': 'test value 1', 'key2': 'test value 2'}
+        list_kwargs = {('tag_' + key): value
+                       for key, value in expect_tags.iteritems()}
+        list_kwargs['instance-state-name'] = 'running'
+        cloud_node = testutil.cloud_node_mock()
+        driver = self.new_driver(list_kwargs=list_kwargs)
+        driver.post_create_node(cloud_node)
+        self.check_node_tagged(cloud_node, expect_tags)
+
+    def test_sync_node(self):
+        arv_node = testutil.arvados_node_mock(1)
+        cloud_node = testutil.cloud_node_mock(2)
+        driver = self.new_driver()
+        driver.sync_node(cloud_node, arv_node)
+        self.check_node_tagged(cloud_node,
+                               {'Name': 'compute1.zzzzz.arvadosapi.com'})
+
+    def test_node_create_time(self):
+        refsecs = int(time.time())
+        reftuple = time.gmtime(refsecs)
+        node = testutil.cloud_node_mock()
+        node.extra = {'launch_time': time.strftime('%Y-%m-%dT%H:%M:%S.000Z',
+                                                   reftuple)}
+        self.assertEqual(refsecs, ec2.ComputeNodeDriver.node_start_time(node))
+
+    def test_cloud_exceptions(self):
+        for error in [Exception("test exception"),
+                      IOError("test exception"),
+                      ssl.SSLError("test exception"),
+                      cloud_types.LibcloudError("test exception")]:
+            self.assertTrue(ec2.ComputeNodeDriver.is_cloud_exception(error),
+                            "{} not flagged as cloud exception".format(error))
+
+    def test_noncloud_exceptions(self):
+        self.assertFalse(
+            ec2.ComputeNodeDriver.is_cloud_exception(ValueError("test error")),
+            "ValueError flagged as cloud exception")
diff --git a/services/nodemanager/tests/test_config.py b/services/nodemanager/tests/test_config.py
new file mode 100644 (file)
index 0000000..d43491e
--- /dev/null
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+import io
+import logging
+import unittest
+
+import arvnodeman.computenode.dispatch as dispatch
+import arvnodeman.computenode.dispatch.slurm as slurm_dispatch
+import arvnodeman.config as nmconfig
+
+class NodeManagerConfigTestCase(unittest.TestCase):
+    TEST_CONFIG = u"""
+[Cloud]
+provider = dummy
+shutdown_windows = 52, 6, 2
+
+[Cloud Credentials]
+creds = dummy_creds
+
+[Cloud List]
+[Cloud Create]
+
+[Size 1]
+cores = 1
+
+[Logging]
+file = /dev/null
+level = DEBUG
+testlogger = INFO
+"""
+
+    def load_config(self, config=None, config_str=None):
+        if config is None:
+            config = nmconfig.NodeManagerConfig()
+        if config_str is None:
+            config_str = self.TEST_CONFIG
+        with io.StringIO(config_str) as config_fp:
+            config.readfp(config_fp)
+        return config
+
+    def test_seeded_defaults(self):
+        config = nmconfig.NodeManagerConfig()
+        sec_names = set(config.sections())
+        self.assertIn('Arvados', sec_names)
+        self.assertIn('Daemon', sec_names)
+        self.assertFalse(any(name.startswith('Size ') for name in sec_names))
+
+    def test_list_sizes(self):
+        config = self.load_config()
+        client = config.new_cloud_client()
+        sizes = config.node_sizes(client.list_sizes())
+        self.assertEqual(1, len(sizes))
+        size, kwargs = sizes[0]
+        self.assertEqual('Small', size.name)
+        self.assertEqual(1, kwargs['cores'])
+
+    def test_shutdown_windows(self):
+        config = self.load_config()
+        self.assertEqual([52, 6, 2], config.shutdown_windows())
+
+    def test_log_levels(self):
+        config = self.load_config()
+        self.assertEqual({'level': logging.DEBUG,
+                          'testlogger': logging.INFO},
+                         config.log_levels())
+
+    def check_dispatch_classes(self, config, module):
+        setup, shutdown, update, monitor = config.dispatch_classes()
+        self.assertIs(setup, module.ComputeNodeSetupActor)
+        self.assertIs(shutdown, module.ComputeNodeShutdownActor)
+        self.assertIs(update, module.ComputeNodeUpdateActor)
+        self.assertIs(monitor, module.ComputeNodeMonitorActor)
+
+    def test_default_dispatch(self):
+        config = self.load_config()
+        self.check_dispatch_classes(config, dispatch)
+
+    def test_custom_dispatch(self):
+        config = self.load_config(
+            config_str=self.TEST_CONFIG + "[Daemon]\ndispatcher=slurm\n")
+        self.check_dispatch_classes(config, slurm_dispatch)
diff --git a/services/nodemanager/tests/test_daemon.py b/services/nodemanager/tests/test_daemon.py
new file mode 100644 (file)
index 0000000..96fcde9
--- /dev/null
@@ -0,0 +1,391 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+import time
+import unittest
+
+import mock
+import pykka
+
+import arvnodeman.daemon as nmdaemon
+from arvnodeman.computenode.dispatch import ComputeNodeMonitorActor
+from . import testutil
+
+class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
+                                     unittest.TestCase):
+    def new_setup_proxy(self):
+        # Make sure that every time the daemon starts a setup actor,
+        # it gets a new mock object back.
+        self.last_setup = mock.MagicMock(name='setup_proxy_mock')
+        return self.last_setup
+
+    def make_daemon(self, cloud_nodes=[], arvados_nodes=[], want_sizes=[],
+                    min_size=testutil.MockSize(1), min_nodes=0, max_nodes=8):
+        for name in ['cloud_nodes', 'arvados_nodes', 'server_wishlist']:
+            setattr(self, name + '_poller', mock.MagicMock(name=name + '_mock'))
+        self.arv_factory = mock.MagicMock(name='arvados_mock')
+        self.cloud_factory = mock.MagicMock(name='cloud_mock')
+        self.cloud_factory().node_start_time.return_value = time.time()
+        self.cloud_updates = mock.MagicMock(name='updates_mock')
+        self.timer = testutil.MockTimer(deliver_immediately=False)
+        self.node_setup = mock.MagicMock(name='setup_mock')
+        self.node_setup.start().proxy.side_effect = self.new_setup_proxy
+        self.node_setup.reset_mock()
+        self.node_shutdown = mock.MagicMock(name='shutdown_mock')
+        self.daemon = nmdaemon.NodeManagerDaemonActor.start(
+            self.server_wishlist_poller, self.arvados_nodes_poller,
+            self.cloud_nodes_poller, self.cloud_updates, self.timer,
+            self.arv_factory, self.cloud_factory,
+            [54, 5, 1], min_size, min_nodes, max_nodes, 600, 1800, 3600,
+            self.node_setup, self.node_shutdown).proxy()
+        if cloud_nodes is not None:
+            self.daemon.update_cloud_nodes(cloud_nodes).get(self.TIMEOUT)
+        if arvados_nodes is not None:
+            self.daemon.update_arvados_nodes(arvados_nodes).get(self.TIMEOUT)
+        if want_sizes is not None:
+            self.daemon.update_server_wishlist(want_sizes).get(self.TIMEOUT)
+
+    def monitor_list(self):
+        return pykka.ActorRegistry.get_by_class(ComputeNodeMonitorActor)
+
+    def alive_monitor_count(self):
+        return sum(1 for actor in self.monitor_list() if actor.is_alive())
+
+    def assertShutdownCancellable(self, expected=True):
+        self.assertTrue(self.node_shutdown.start.called)
+        self.assertIs(expected,
+                      self.node_shutdown.start.call_args[1]['cancellable'],
+                      "ComputeNodeShutdownActor incorrectly cancellable")
+
+    def test_easy_node_creation(self):
+        size = testutil.MockSize(1)
+        self.make_daemon(want_sizes=[size])
+        self.stop_proxy(self.daemon)
+        self.assertTrue(self.node_setup.start.called)
+
+    def check_monitors_arvados_nodes(self, *arv_nodes):
+        pairings = [monitor.proxy().arvados_node
+                    for monitor in self.monitor_list() if monitor.is_alive()]
+        self.assertItemsEqual(arv_nodes, pykka.get_all(pairings, self.TIMEOUT))
+
+    def test_node_pairing(self):
+        cloud_node = testutil.cloud_node_mock(1)
+        arv_node = testutil.arvados_node_mock(1)
+        self.make_daemon([cloud_node], [arv_node])
+        self.stop_proxy(self.daemon)
+        self.check_monitors_arvados_nodes(arv_node)
+
+    def test_node_pairing_after_arvados_update(self):
+        cloud_node = testutil.cloud_node_mock(2)
+        self.make_daemon([cloud_node],
+                         [testutil.arvados_node_mock(2, ip_address=None)])
+        arv_node = testutil.arvados_node_mock(2)
+        self.daemon.update_arvados_nodes([arv_node]).get(self.TIMEOUT)
+        self.stop_proxy(self.daemon)
+        self.check_monitors_arvados_nodes(arv_node)
+
+    def test_arvados_node_un_and_re_paired(self):
+        arv_node = testutil.arvados_node_mock(3)
+        self.make_daemon([testutil.cloud_node_mock(3)], [arv_node])
+        self.check_monitors_arvados_nodes(arv_node)
+        self.daemon.update_cloud_nodes([]).get(self.TIMEOUT)
+        self.assertEqual(0, self.alive_monitor_count())
+        self.daemon.update_cloud_nodes([testutil.cloud_node_mock(3)])
+        self.stop_proxy(self.daemon)
+        self.check_monitors_arvados_nodes(arv_node)
+
+    def test_old_arvados_node_not_double_assigned(self):
+        arv_node = testutil.arvados_node_mock(3, age=9000)
+        size = testutil.MockSize(3)
+        self.make_daemon(arvados_nodes=[arv_node])
+        self.daemon.update_server_wishlist([size]).get(self.TIMEOUT)
+        self.daemon.update_server_wishlist([size, size]).get(self.TIMEOUT)
+        self.stop_proxy(self.daemon)
+        used_nodes = [call[1].get('arvados_node')
+                      for call in self.node_setup.start.call_args_list]
+        self.assertEqual(2, len(used_nodes))
+        self.assertIn(arv_node, used_nodes)
+        self.assertIn(None, used_nodes)
+
+    def test_node_count_satisfied(self):
+        self.make_daemon([testutil.cloud_node_mock()],
+                         want_sizes=[testutil.MockSize(1)])
+        self.stop_proxy(self.daemon)
+        self.assertFalse(self.node_setup.called)
+
+    def test_booting_nodes_counted(self):
+        cloud_node = testutil.cloud_node_mock(1)
+        arv_node = testutil.arvados_node_mock(1)
+        server_wishlist = [testutil.MockSize(1)] * 2
+        self.make_daemon([cloud_node], [arv_node], server_wishlist)
+        self.daemon.max_nodes.get(self.TIMEOUT)
+        self.assertTrue(self.node_setup.start.called)
+        self.daemon.update_server_wishlist(server_wishlist).get(self.TIMEOUT)
+        self.stop_proxy(self.daemon)
+        self.assertEqual(1, self.node_setup.start.call_count)
+
+    def test_boot_new_node_when_all_nodes_busy(self):
+        arv_node = testutil.arvados_node_mock(2, job_uuid=True)
+        self.make_daemon([testutil.cloud_node_mock(2)], [arv_node],
+                         [testutil.MockSize(2)])
+        self.stop_proxy(self.daemon)
+        self.assertTrue(self.node_setup.start.called)
+
+    def test_boot_new_node_below_min_nodes(self):
+        min_size = testutil.MockSize(1)
+        wish_size = testutil.MockSize(3)
+        self.make_daemon([], [], None, min_size=min_size, min_nodes=2)
+        self.daemon.update_server_wishlist([wish_size]).get(self.TIMEOUT)
+        self.daemon.update_cloud_nodes([]).get(self.TIMEOUT)
+        self.daemon.update_server_wishlist([wish_size]).get(self.TIMEOUT)
+        self.stop_proxy(self.daemon)
+        self.assertEqual([wish_size, min_size],
+                         [call[1].get('cloud_size')
+                          for call in self.node_setup.start.call_args_list])
+
+    def test_no_new_node_when_ge_min_nodes_busy(self):
+        cloud_nodes = [testutil.cloud_node_mock(n) for n in range(1, 4)]
+        arv_nodes = [testutil.arvados_node_mock(n, job_uuid=True)
+                     for n in range(1, 4)]
+        self.make_daemon(cloud_nodes, arv_nodes, [], min_nodes=2)
+        self.stop_proxy(self.daemon)
+        self.assertEqual(0, self.node_setup.start.call_count)
+
+    def test_no_new_node_when_max_nodes_busy(self):
+        self.make_daemon([testutil.cloud_node_mock(3)],
+                         [testutil.arvados_node_mock(3, job_uuid=True)],
+                         [testutil.MockSize(3)],
+                         max_nodes=1)
+        self.stop_proxy(self.daemon)
+        self.assertFalse(self.node_setup.start.called)
+
+    def start_node_boot(self, cloud_node=None, arv_node=None, id_num=1):
+        if cloud_node is None:
+            cloud_node = testutil.cloud_node_mock(id_num)
+        if arv_node is None:
+            arv_node = testutil.arvados_node_mock(id_num)
+        self.make_daemon(want_sizes=[testutil.MockSize(id_num)])
+        self.daemon.max_nodes.get(self.TIMEOUT)
+        self.assertEqual(1, self.node_setup.start.call_count)
+        self.last_setup.cloud_node.get.return_value = cloud_node
+        self.last_setup.arvados_node.get.return_value = arv_node
+        return self.last_setup
+
+    def test_no_duplication_when_booting_node_listed_fast(self):
+        # Test that we don't start two ComputeNodeMonitorActors when
+        # we learn about a booting node through a listing before we
+        # get the "node up" message from CloudNodeSetupActor.
+        cloud_node = testutil.cloud_node_mock(1)
+        setup = self.start_node_boot(cloud_node)
+        self.daemon.update_cloud_nodes([cloud_node])
+        self.daemon.node_up(setup).get(self.TIMEOUT)
+        self.assertEqual(1, self.alive_monitor_count())
+
+    def test_no_duplication_when_booted_node_listed(self):
+        cloud_node = testutil.cloud_node_mock(2)
+        setup = self.start_node_boot(cloud_node, id_num=2)
+        self.daemon.node_up(setup)
+        self.daemon.update_cloud_nodes([cloud_node]).get(self.TIMEOUT)
+        self.assertEqual(1, self.alive_monitor_count())
+
+    def test_node_counted_after_boot_with_slow_listing(self):
+        # Test that, after we boot a compute node, we assume it exists
+        # even if it doesn't appear in the listing (e.g., because of delays
+        # propagating tags).
+        setup = self.start_node_boot()
+        self.daemon.node_up(setup).get(self.TIMEOUT)
+        self.assertEqual(1, self.alive_monitor_count())
+        self.daemon.update_cloud_nodes([]).get(self.TIMEOUT)
+        self.assertEqual(1, self.alive_monitor_count())
+
+    def test_booted_unlisted_node_counted(self):
+        setup = self.start_node_boot(id_num=1)
+        self.daemon.node_up(setup)
+        self.daemon.update_server_wishlist(
+            [testutil.MockSize(1)]).get(self.TIMEOUT)
+        self.stop_proxy(self.daemon)
+        self.assertEqual(1, self.node_setup.start.call_count)
+
+    def test_booted_node_can_shutdown(self):
+        setup = self.start_node_boot()
+        self.daemon.node_up(setup).get(self.TIMEOUT)
+        self.assertEqual(1, self.alive_monitor_count())
+        monitor = self.monitor_list()[0].proxy()
+        self.daemon.update_server_wishlist([])
+        self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
+        self.stop_proxy(self.daemon)
+        self.assertTrue(self.node_shutdown.start.called,
+                        "daemon did not shut down booted node on offer")
+
+    def test_booted_node_lifecycle(self):
+        cloud_node = testutil.cloud_node_mock(6)
+        setup = self.start_node_boot(cloud_node, id_num=6)
+        self.daemon.node_up(setup).get(self.TIMEOUT)
+        self.assertEqual(1, self.alive_monitor_count())
+        monitor = self.monitor_list()[0].proxy()
+        self.daemon.update_server_wishlist([])
+        self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
+        self.assertShutdownCancellable(True)
+        shutdown = self.node_shutdown.start().proxy()
+        shutdown.cloud_node.get.return_value = cloud_node
+        self.daemon.node_finished_shutdown(shutdown).get(self.TIMEOUT)
+        self.assertTrue(shutdown.stop.called,
+                        "shutdown actor not stopped after finishing")
+        self.assertTrue(monitor.actor_ref.actor_stopped.wait(self.TIMEOUT),
+                        "monitor for booted node not stopped after shutdown")
+        self.daemon.update_server_wishlist(
+            [testutil.MockSize(2)]).get(self.TIMEOUT)
+        self.stop_proxy(self.daemon)
+        self.assertTrue(self.node_setup.start.called,
+                        "second node not started after booted node stopped")
+
+    def test_booted_node_shut_down_when_never_listed(self):
+        setup = self.start_node_boot()
+        self.daemon.node_up(setup).get(self.TIMEOUT)
+        self.assertEqual(1, self.alive_monitor_count())
+        self.assertFalse(self.node_shutdown.start.called)
+        self.timer.deliver()
+        self.stop_proxy(self.daemon)
+        self.assertShutdownCancellable(False)
+
+    def test_booted_node_shut_down_when_never_paired(self):
+        cloud_node = testutil.cloud_node_mock(2)
+        setup = self.start_node_boot(cloud_node)
+        self.daemon.node_up(setup).get(self.TIMEOUT)
+        self.assertEqual(1, self.alive_monitor_count())
+        self.daemon.update_cloud_nodes([cloud_node])
+        self.timer.deliver()
+        self.stop_proxy(self.daemon)
+        self.assertShutdownCancellable(False)
+
+    def test_node_that_pairs_not_considered_failed_boot(self):
+        cloud_node = testutil.cloud_node_mock(3)
+        arv_node = testutil.arvados_node_mock(3)
+        setup = self.start_node_boot(cloud_node, arv_node)
+        self.daemon.node_up(setup).get(self.TIMEOUT)
+        self.assertEqual(1, self.alive_monitor_count())
+        self.daemon.update_cloud_nodes([cloud_node])
+        self.daemon.update_arvados_nodes([arv_node]).get(self.TIMEOUT)
+        self.timer.deliver()
+        self.stop_proxy(self.daemon)
+        self.assertFalse(self.node_shutdown.start.called)
+
+    def test_booting_nodes_shut_down(self):
+        self.make_daemon(want_sizes=[testutil.MockSize(1)])
+        self.daemon.update_server_wishlist([]).get(self.TIMEOUT)
+        self.stop_proxy(self.daemon)
+        self.assertTrue(self.last_setup.stop_if_no_cloud_node.called)
+
+    def test_shutdown_declined_at_wishlist_capacity(self):
+        cloud_node = testutil.cloud_node_mock(1)
+        size = testutil.MockSize(1)
+        self.make_daemon(cloud_nodes=[cloud_node], want_sizes=[size])
+        self.assertEqual(1, self.alive_monitor_count())
+        monitor = self.monitor_list()[0].proxy()
+        self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
+        self.stop_proxy(self.daemon)
+        self.assertFalse(self.node_shutdown.start.called)
+
+    def test_shutdown_declined_below_min_nodes(self):
+        cloud_node = testutil.cloud_node_mock(1)
+        self.make_daemon(cloud_nodes=[cloud_node], min_nodes=1)
+        self.assertEqual(1, self.alive_monitor_count())
+        monitor = self.monitor_list()[0].proxy()
+        self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
+        self.stop_proxy(self.daemon)
+        self.assertFalse(self.node_shutdown.start.called)
+
+    def test_shutdown_accepted_below_capacity(self):
+        self.make_daemon(cloud_nodes=[testutil.cloud_node_mock()])
+        self.assertEqual(1, self.alive_monitor_count())
+        monitor = self.monitor_list()[0].proxy()
+        self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
+        self.stop_proxy(self.daemon)
+        self.assertTrue(self.node_shutdown.start.called)
+
+    def test_shutdown_declined_when_idle_and_job_queued(self):
+        cloud_nodes = [testutil.cloud_node_mock(n) for n in [3, 4]]
+        arv_nodes = [testutil.arvados_node_mock(3, job_uuid=True),
+                     testutil.arvados_node_mock(4, job_uuid=None)]
+        self.make_daemon(cloud_nodes, arv_nodes, [testutil.MockSize(1)])
+        self.assertEqual(2, self.alive_monitor_count())
+        for mon_ref in self.monitor_list():
+            monitor = mon_ref.proxy()
+            if monitor.cloud_node.get(self.TIMEOUT) is cloud_nodes[-1]:
+                break
+        else:
+            self.fail("monitor for idle node not found")
+        self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
+        self.stop_proxy(self.daemon)
+        self.assertFalse(self.node_shutdown.start.called)
+
+    def test_node_shutdown_after_cancelled_shutdown(self):
+        cloud_node = testutil.cloud_node_mock(5)
+        self.make_daemon([cloud_node], [testutil.arvados_node_mock(5)])
+        self.assertEqual(1, self.alive_monitor_count())
+        monitor = self.monitor_list()[0].proxy()
+        shutdown_proxy = self.node_shutdown.start().proxy
+        shutdown_proxy().cloud_node.get.return_value = cloud_node
+        shutdown_proxy().success.get.return_value = False
+        shutdown_proxy.reset_mock()
+        self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
+        self.assertTrue(shutdown_proxy.called)
+        self.daemon.node_finished_shutdown(shutdown_proxy()).get(self.TIMEOUT)
+        shutdown_proxy().success.get.return_value = True
+        shutdown_proxy.reset_mock()
+        self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
+        self.assertTrue(shutdown_proxy.called)
+
+    def test_nodes_shutting_down_replaced_below_max_nodes(self):
+        cloud_node = testutil.cloud_node_mock(6)
+        self.make_daemon([cloud_node], [testutil.arvados_node_mock(6)])
+        self.assertEqual(1, self.alive_monitor_count())
+        monitor = self.monitor_list()[0].proxy()
+        self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
+        self.assertTrue(self.node_shutdown.start.called)
+        self.daemon.update_server_wishlist(
+            [testutil.MockSize(6)]).get(self.TIMEOUT)
+        self.stop_proxy(self.daemon)
+        self.assertTrue(self.node_setup.start.called)
+
+    def test_nodes_shutting_down_not_replaced_at_max_nodes(self):
+        cloud_node = testutil.cloud_node_mock(7)
+        self.make_daemon([cloud_node], [testutil.arvados_node_mock(7)],
+                         max_nodes=1)
+        self.assertEqual(1, self.alive_monitor_count())
+        monitor = self.monitor_list()[0].proxy()
+        self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
+        self.assertTrue(self.node_shutdown.start.called)
+        self.daemon.update_server_wishlist(
+            [testutil.MockSize(7)]).get(self.TIMEOUT)
+        self.stop_proxy(self.daemon)
+        self.assertFalse(self.node_setup.start.called)
+
+    def test_nodes_shutting_down_count_against_excess(self):
+        cloud_nodes = [testutil.cloud_node_mock(n) for n in [8, 9]]
+        arv_nodes = [testutil.arvados_node_mock(n) for n in [8, 9]]
+        self.make_daemon(cloud_nodes, arv_nodes, [testutil.MockSize(8)])
+        self.assertEqual(2, self.alive_monitor_count())
+        for mon_ref in self.monitor_list():
+            self.daemon.node_can_shutdown(mon_ref.proxy()).get(self.TIMEOUT)
+        self.assertEqual(1, self.node_shutdown.start.call_count)
+
+    def test_clean_shutdown_waits_for_node_setup_finish(self):
+        new_node = self.start_node_boot()
+        self.daemon.shutdown().get(self.TIMEOUT)
+        self.assertTrue(new_node.stop_if_no_cloud_node.called)
+        self.daemon.node_up(new_node).get(self.TIMEOUT)
+        self.assertTrue(new_node.stop.called)
+        self.timer.deliver()
+        self.assertTrue(
+            self.daemon.actor_ref.actor_stopped.wait(self.TIMEOUT))
+
+    def test_wishlist_ignored_after_shutdown(self):
+        size = testutil.MockSize(2)
+        self.make_daemon(want_sizes=[size])
+        self.daemon.shutdown().get(self.TIMEOUT)
+        self.daemon.update_server_wishlist([size] * 2).get(self.TIMEOUT)
+        self.timer.deliver()
+        self.stop_proxy(self.daemon)
+        self.assertEqual(1, self.node_setup.start.call_count)
diff --git a/services/nodemanager/tests/test_jobqueue.py b/services/nodemanager/tests/test_jobqueue.py
new file mode 100644 (file)
index 0000000..4c97aed
--- /dev/null
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+import unittest
+
+import arvnodeman.jobqueue as jobqueue
+from . import testutil
+
+class ServerCalculatorTestCase(unittest.TestCase):
+    """Tests for jobqueue.ServerCalculator's queue-to-server-list logic."""
+
+    def make_calculator(self, factors, **kwargs):
+        # Build a calculator with one MockSize per factor, each
+        # advertising that many cores; kwargs pass through (max_nodes).
+        return jobqueue.ServerCalculator(
+            [(testutil.MockSize(n), {'cores': n}) for n in factors], **kwargs)
+
+    def calculate(self, servcalc, *constraints):
+        # Wrap each constraint dict in a minimal job record and ask the
+        # calculator what servers the resulting queue needs.
+        return servcalc.servers_for_queue(
+            [{'uuid': 'zzzzz-jjjjj-{:015x}'.format(index),
+              'runtime_constraints': cdict}
+             for index, cdict in enumerate(constraints)])
+
+    def test_empty_queue_needs_no_servers(self):
+        servcalc = self.make_calculator([1])
+        self.assertEqual([], servcalc.servers_for_queue([]))
+
+    def test_easy_server_count(self):
+        servcalc = self.make_calculator([1])
+        servlist = self.calculate(servcalc, {'min_nodes': 3})
+        self.assertEqual(3, len(servlist))
+
+    def test_implicit_server_count(self):
+        # A job with no min_nodes still needs one node: 1 + 3 = 4.
+        servcalc = self.make_calculator([1])
+        servlist = self.calculate(servcalc, {}, {'min_nodes': 3})
+        self.assertEqual(4, len(servlist))
+
+    def test_bad_min_nodes_override(self):
+        # Invalid min_nodes values (negative, non-numeric) each yield
+        # one node -- presumably clamped to a floor of 1; confirm
+        # against ServerCalculator's implementation.
+        servcalc = self.make_calculator([1])
+        servlist = self.calculate(servcalc,
+                                  {'min_nodes': -2}, {'min_nodes': 'foo'})
+        self.assertEqual(2, len(servlist))
+
+    def test_ignore_unsatisfiable_jobs(self):
+        # Only {'min_nodes': 6} fits a MockSize(1) (1 core, ram 128,
+        # disk 100) within max_nodes=9; the other jobs are dropped
+        # rather than blocking the queue.
+        servcalc = self.make_calculator([1], max_nodes=9)
+        servlist = self.calculate(servcalc,
+                                  {'min_cores_per_node': 2},
+                                  {'min_ram_mb_per_node': 256},
+                                  {'min_nodes': 6},
+                                  {'min_nodes': 12},
+                                  {'min_scratch_mb_per_node': 200})
+        self.assertEqual(6, len(servlist))
+
+    def test_job_requesting_max_nodes_accepted(self):
+        # Requesting exactly max_nodes is allowed (boundary case).
+        servcalc = self.make_calculator([1], max_nodes=4)
+        servlist = self.calculate(servcalc, {'min_nodes': 4})
+        self.assertEqual(4, len(servlist))
+
+    def test_cheapest_size(self):
+        # MockSize.price equals its factor, so factor 1 is cheapest.
+        servcalc = self.make_calculator([2, 4, 1, 3])
+        self.assertEqual(testutil.MockSize(1), servcalc.cheapest_size())
+
+
+class JobQueueMonitorActorTestCase(testutil.RemotePollLoopActorTestMixin,
+                                   unittest.TestCase):
+    """Tests for the job-queue poller's subscription delivery."""
+
+    TEST_CLASS = jobqueue.JobQueueMonitorActor
+
+    class MockCalculator(object):
+        # Stand-in for ServerCalculator: maps each queue entry n to
+        # MockSize(n), so the emitted server list mirrors the queue.
+        @staticmethod
+        def servers_for_queue(queue):
+            return [testutil.MockSize(n) for n in queue]
+
+
+    def build_monitor(self, side_effect, *args, **kwargs):
+        # Wire the mocked API client's jobs().queue() call to return
+        # the canned responses in side_effect, one per poll.
+        super(JobQueueMonitorActorTestCase, self).build_monitor(*args, **kwargs)
+        self.client.jobs().queue().execute.side_effect = side_effect
+
+    def test_subscribers_get_server_lists(self):
+        # Queue items [1, 2] become [MockSize(1), MockSize(2)] via the
+        # mock calculator and are pushed to the subscriber.
+        self.build_monitor([{'items': [1, 2]}], self.MockCalculator())
+        self.monitor.subscribe(self.subscriber).get(self.TIMEOUT)
+        self.stop_proxy(self.monitor)
+        self.subscriber.assert_called_with([testutil.MockSize(1),
+                                            testutil.MockSize(2)])
+
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/services/nodemanager/tests/test_nodelist.py b/services/nodemanager/tests/test_nodelist.py
new file mode 100644 (file)
index 0000000..5346e7a
--- /dev/null
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+import unittest
+
+import arvnodeman.nodelist as nodelist
+from . import testutil
+
+class ArvadosNodeListMonitorActorTestCase(testutil.RemotePollLoopActorTestMixin,
+                                          unittest.TestCase):
+    """Tests for the Arvados node-list poller."""
+
+    TEST_CLASS = nodelist.ArvadosNodeListMonitorActor
+
+    def build_monitor(self, side_effect, *args, **kwargs):
+        # Feed canned nodes().list() responses to the mocked API client.
+        super(ArvadosNodeListMonitorActorTestCase, self).build_monitor(
+            *args, **kwargs)
+        self.client.nodes().list().execute.side_effect = side_effect
+
+    def test_uuid_is_subscription_key(self):
+        # A subscriber keyed by a node's UUID receives that node record.
+        node = testutil.arvados_node_mock()
+        self.build_monitor([{'items': [node]}])
+        self.monitor.subscribe_to(node['uuid'],
+                                  self.subscriber).get(self.TIMEOUT)
+        self.stop_proxy(self.monitor)
+        self.subscriber.assert_called_with(node)
+
+
+class CloudNodeListMonitorActorTestCase(testutil.RemotePollLoopActorTestMixin,
+                                        unittest.TestCase):
+    """Tests for the cloud node-list poller."""
+
+    TEST_CLASS = nodelist.CloudNodeListMonitorActor
+
+    class MockNode(object):
+        # Minimal stand-in for a cloud driver's node object, carrying
+        # only the attributes the poller is expected to read.
+        def __init__(self, count):
+            self.id = str(count)
+            self.name = 'test{}.example.com'.format(count)
+            self.private_ips = ['10.0.0.{}'.format(count)]
+            self.public_ips = []
+            self.size = None
+            self.state = 0
+
+
+    def build_monitor(self, side_effect, *args, **kwargs):
+        # Feed canned list_nodes() responses to the mocked cloud client.
+        super(CloudNodeListMonitorActorTestCase, self).build_monitor(
+            *args, **kwargs)
+        self.client.list_nodes.side_effect = side_effect
+
+    def test_id_is_subscription_key(self):
+        # A subscriber keyed by a node's id receives that node object.
+        node = self.MockNode(1)
+        self.build_monitor([[node]])
+        self.monitor.subscribe_to('1', self.subscriber).get(self.TIMEOUT)
+        self.stop_proxy(self.monitor)
+        self.subscriber.assert_called_with(node)
+
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/services/nodemanager/tests/test_timedcallback.py b/services/nodemanager/tests/test_timedcallback.py
new file mode 100644 (file)
index 0000000..1d1e6c3
--- /dev/null
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+import time
+import unittest
+
+import mock
+import pykka
+
+import arvnodeman.timedcallback as timedcallback
+from . import testutil
+
+@testutil.no_sleep
+class TimedCallBackActorTestCase(testutil.ActorTestMixin, unittest.TestCase):
+    """Tests for TimedCallBackActor (time.sleep is patched to a no-op)."""
+
+    def test_immediate_turnaround(self):
+        # A callback scheduled in the past is delivered right away.
+        receiver = mock.Mock()
+        deliverer = timedcallback.TimedCallBackActor.start().proxy()
+        deliverer.schedule(time.time() - 1, receiver,
+                           'immediate').get(self.TIMEOUT)
+        self.stop_proxy(deliverer)
+        receiver.assert_called_with('immediate')
+
+    def test_delayed_turnaround(self):
+        # Control the clock with a patched time.time: nothing fires at
+        # t=0; after moving to t=2, the t=1 callback is delivered.  The
+        # 'failure' messages at t=3 are sentinels that must never fire.
+        receiver = mock.Mock()
+        with mock.patch('time.time', return_value=0) as mock_now:
+            deliverer = timedcallback.TimedCallBackActor.start().proxy()
+            deliverer.schedule(1, receiver, 'delayed')
+            deliverer.schedule(3, receiver, 'failure').get(self.TIMEOUT)
+            self.assertFalse(receiver.called)
+            mock_now.return_value = 2
+            deliverer.schedule(3, receiver, 'failure').get(self.TIMEOUT)
+            self.stop_proxy(deliverer)
+        receiver.assert_called_with('delayed')
+
+    def test_out_of_order_scheduling(self):
+        # Callbacks fire in time order even when scheduled out of order:
+        # 'first' (t=1) before 'second' (t=2) as the clock advances.
+        receiver = mock.Mock()
+        with mock.patch('time.time', return_value=1.5) as mock_now:
+            deliverer = timedcallback.TimedCallBackActor.start().proxy()
+            deliverer.schedule(2, receiver, 'second')
+            deliverer.schedule(1, receiver, 'first')
+            deliverer.schedule(3, receiver, 'failure').get(self.TIMEOUT)
+            receiver.assert_called_with('first')
+            mock_now.return_value = 2.5
+            deliverer.schedule(3, receiver, 'failure').get(self.TIMEOUT)
+            self.stop_proxy(deliverer)
+        receiver.assert_called_with('second')
+
+    def test_dead_actors_ignored(self):
+        # Delivering to a dead actor raises ActorDeadError inside the
+        # deliverer; it must survive (stop_proxy returns True) anyway.
+        receiver = mock.Mock(name='dead_actor', spec=pykka.ActorRef)
+        receiver.tell.side_effect = pykka.ActorDeadError
+        deliverer = timedcallback.TimedCallBackActor.start().proxy()
+        deliverer.schedule(time.time() - 1, receiver.tell,
+                           'error').get(self.TIMEOUT)
+        self.assertTrue(self.stop_proxy(deliverer), "deliverer died")
+        receiver.tell.assert_called_with('error')
+
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/services/nodemanager/tests/testutil.py b/services/nodemanager/tests/testutil.py
new file mode 100644 (file)
index 0000000..30808ac
--- /dev/null
@@ -0,0 +1,113 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import, print_function
+
+import threading
+import time
+
+import mock
+import pykka
+
+from . import pykka_timeout
+
+# Patch decorator that turns time.sleep into a no-op for a test case.
+no_sleep = mock.patch('time.sleep', lambda n: None)
+
+def arvados_node_mock(node_num=99, job_uuid=None, age=0, **kwargs):
+    """Return a dict shaped like an Arvados node API record.
+
+    job_uuid=True is replaced with a fixed fake job UUID.  slurm_state
+    is 'idle' only when job_uuid is None, otherwise 'alloc'.  `age`
+    (seconds) is subtracted from the current time to form modified_at.
+    Any extra keyword arguments override the generated fields.
+    """
+    if job_uuid is True:
+        job_uuid = 'zzzzz-jjjjj-jobjobjobjobjob'
+    slurm_state = 'idle' if (job_uuid is None) else 'alloc'
+    node = {'uuid': 'zzzzz-yyyyy-{:015x}'.format(node_num),
+            'created_at': '2014-01-01T01:02:03Z',
+            'modified_at': time.strftime('%Y-%m-%dT%H:%M:%SZ',
+                                         time.gmtime(time.time() - age)),
+            'hostname': 'compute{}'.format(node_num),
+            'domain': 'zzzzz.arvadosapi.com',
+            'ip_address': ip_address_mock(node_num),
+            'job_uuid': job_uuid,
+            'info': {'slurm_state': slurm_state}}
+    node.update(kwargs)
+    return node
+
+def cloud_node_mock(node_num=99):
+    """Return a MagicMock imitating a cloud driver node (libcloud-style
+    attribute set), with id/name/private IP derived from node_num."""
+    node = mock.NonCallableMagicMock(
+        ['id', 'name', 'state', 'public_ips', 'private_ips', 'driver', 'size',
+         'image', 'extra'],
+        name='cloud_node')
+    node.id = str(node_num)
+    node.name = node.id
+    node.public_ips = []
+    node.private_ips = [ip_address_mock(node_num)]
+    return node
+
+def ip_address_mock(last_octet):
+    """Return a fake 10.20.30.x address ending in last_octet."""
+    return '10.20.30.{}'.format(last_octet)
+
+class MockShutdownTimer(object):
+    """Test double for a shutdown-window timer."""
+
+    def _set_state(self, is_open, next_opening):
+        # Freeze window_open() / next_opening() to return fixed values.
+        self.window_open = lambda: is_open
+        self.next_opening = lambda: next_opening
+
+
+class MockSize(object):
+    """Fake cloud node size whose figures scale linearly with `factor`."""
+
+    def __init__(self, factor):
+        self.id = 'z{}.test'.format(factor)
+        self.name = self.id
+        self.ram = 128 * factor
+        self.disk = 100 * factor
+        self.bandwidth = 16 * factor
+        self.price = float(factor)
+        self.extra = {}
+
+    def __eq__(self, other):
+        # Sizes with the same factor compare equal via their ids.
+        # NOTE(review): no __ne__ (or __hash__) is defined, so under
+        # Python 2 `!=` falls back to identity -- confirm tests only
+        # ever use ==.
+        return self.id == other.id
+
+
+class MockTimer(object):
+    """Timer test double that queues callbacks for manual delivery.
+
+    With deliver_immediately=True (the default) each schedule() call
+    runs the queued callbacks at once; otherwise the test drives
+    delivery explicitly via deliver().
+    """
+
+    def __init__(self, deliver_immediately=True):
+        self.deliver_immediately = deliver_immediately
+        self.messages = []
+        self.lock = threading.Lock()
+
+    def deliver(self):
+        # Swap out the queue under the lock, then run callbacks outside
+        # it so a callback may schedule more work without deadlocking.
+        with self.lock:
+            to_deliver = self.messages
+            self.messages = []
+        for callback, args, kwargs in to_deliver:
+            callback(*args, **kwargs)
+
+    def schedule(self, want_time, callback, *args, **kwargs):
+        # want_time is accepted for interface compatibility but ignored.
+        with self.lock:
+            self.messages.append((callback, args, kwargs))
+        if self.deliver_immediately:
+            self.deliver()
+
+
+class ActorTestMixin(object):
+    """Common pykka plumbing for actor test cases."""
+
+    FUTURE_CLASS = pykka.ThreadingFuture
+    # Shared timeout (seconds) for future .get() calls and actor stops.
+    TIMEOUT = pykka_timeout
+
+    def tearDown(self):
+        # Stop every actor started during the test, even on failure.
+        pykka.ActorRegistry.stop_all()
+
+    def stop_proxy(self, proxy):
+        # Returns whether the actor stopped within the timeout.
+        return proxy.actor_ref.stop(timeout=self.TIMEOUT)
+
+    def wait_for_assignment(self, proxy, attr_name, unassigned=None,
+                            timeout=TIMEOUT):
+        """Poll proxy.<attr_name> until it differs from `unassigned`.
+
+        Fails the test if the deadline passes first.  Note the default
+        timeout is bound once, at class-definition time.
+        """
+        deadline = time.time() + timeout
+        while True:
+            loop_timeout = deadline - time.time()
+            if loop_timeout <= 0:
+                self.fail("actor did not assign {} in time".format(attr_name))
+            result = getattr(proxy, attr_name).get(loop_timeout)
+            if result is not unassigned:
+                return result
+
+
+class RemotePollLoopActorTestMixin(ActorTestMixin):
+    def build_monitor(self, *args, **kwargs):
+        self.timer = mock.MagicMock(name='timer_mock')
+        self.client = mock.MagicMock(name='client_mock')
+        self.subscriber = mock.Mock(name='subscriber_mock')
+        self.monitor = self.TEST_CLASS.start(
+            self.client, self.timer, *args, **kwargs).proxy()