Merge branch '15317-metrics'
author Tom Clegg <tom@curii.com>
Tue, 12 Mar 2024 15:12:37 +0000 (11:12 -0400)
committer Tom Clegg <tom@curii.com>
Tue, 12 Mar 2024 15:12:37 +0000 (11:12 -0400)
closes #15317

Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom@curii.com>

308 files changed:
.licenseignore
build/README
build/build-dev-docker-jobs-image.sh
build/get-package-version.sh
build/package-build-dockerfiles/Makefile
build/package-build-dockerfiles/centos7/Dockerfile [deleted file]
build/package-build-dockerfiles/debian10/Dockerfile [deleted file]
build/package-build-dockerfiles/debian11/Dockerfile
build/package-build-dockerfiles/debian12/Dockerfile
build/package-build-dockerfiles/rocky8/Dockerfile
build/package-build-dockerfiles/ubuntu1804/Dockerfile [deleted file]
build/package-build-dockerfiles/ubuntu2004/Dockerfile
build/package-build-dockerfiles/ubuntu2204/Dockerfile
build/package-test-dockerfiles/Makefile
build/package-test-dockerfiles/centos7/Dockerfile [deleted file]
build/package-test-dockerfiles/centos7/localrepo.repo [deleted file]
build/package-test-dockerfiles/debian10/Dockerfile [deleted file]
build/package-test-dockerfiles/ubuntu1804/Dockerfile [deleted file]
build/package-test-dockerfiles/ubuntu1804/etc-apt-preferences.d-arvados [deleted file]
build/package-testing/common-test-rails-server-package.sh
build/package-testing/rpm-common-test-packages.sh
build/package-testing/test-package-python3-arvados-python-client.sh
build/run-build-docker-images.sh
build/run-build-packages-one-target.sh
build/run-build-packages.sh
build/run-build-test-packages-one-target.sh
build/run-library.sh
build/run-tests.sh
cmd/arvados-server/arvados-controller.service
cmd/arvados-server/arvados-dispatch-cloud.service
cmd/arvados-server/arvados-dispatch-lsf.service
cmd/arvados-server/arvados-git-httpd.service
cmd/arvados-server/arvados-health.service
cmd/arvados-server/arvados-ws.service
cmd/arvados-server/crunch-dispatch-slurm.service
cmd/arvados-server/keep-balance.service
cmd/arvados-server/keep-web.service
cmd/arvados-server/keepproxy.service
cmd/arvados-server/keepstore.service
doc/README.textile
doc/Rakefile
doc/_config.yml
doc/_includes/_container_runtime_constraints.liquid
doc/admin/upgrading.html.textile.liquid
doc/install/install-shell-server.html.textile.liquid
doc/pysdk_pdoc.py
doc/sdk/cli/index.html.textile.liquid
doc/sdk/cli/install.html.textile.liquid
doc/sdk/cli/reference.html.textile.liquid
doc/sdk/cli/subcommands.html.textile.liquid
doc/sdk/fuse/install.html.textile.liquid [new file with mode: 0644]
doc/sdk/fuse/options.html.textile.liquid [new file with mode: 0644]
doc/sdk/index.html.textile.liquid
doc/sdk/java-v2/example.html.textile.liquid
doc/sdk/java-v2/index.html.textile.liquid
doc/sdk/java-v2/javadoc.html.textile.liquid
doc/sdk/python/arvados-fuse.html.textile.liquid [deleted file]
doc/sdk/python/sdk-python.html.textile.liquid
doc/user/cwl/crunchstat-summary.html.textile.liquid
doc/user/cwl/cwl-extensions.html.textile.liquid
doc/user/cwl/cwl-run-options.html.textile.liquid
doc/user/cwl/images/crunchstat-summary-html.png
doc/user/getting_started/setup-cli.html.textile.liquid
doc/user/tutorials/wgs-tutorial.html.textile.liquid
docker/jobs/Dockerfile
lib/cloud/ec2/ec2.go
lib/cloud/ec2/ec2_test.go
lib/config/config.default.yml
lib/controller/integration_test.go
lib/controller/localdb/login_ldap_docker_test.sh
lib/crunchrun/integration_test.go
lib/install/arvados.service
lib/install/arvadostest_docker_build.sh
lib/install/deps.go
lib/install/example_from_scratch.sh
lib/pam/docker_test.go
sdk/R/install_deps.R
sdk/cli/arvados-cli.gemspec
sdk/cwl/arvados_cwl/__init__.py
sdk/cwl/arvados_cwl/arv-cwl-schema-v1.0.yml
sdk/cwl/arvados_cwl/arv-cwl-schema-v1.1.yml
sdk/cwl/arvados_cwl/arv-cwl-schema-v1.2.yml
sdk/cwl/arvados_cwl/arvcontainer.py
sdk/cwl/arvados_cwl/context.py
sdk/cwl/arvados_cwl/executor.py
sdk/cwl/arvados_version.py
sdk/cwl/setup.py
sdk/cwl/test_with_arvbox.sh
sdk/cwl/tests/arvados-tests.sh
sdk/cwl/tests/arvados-tests.yml
sdk/cwl/tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-arv-mount.txt [new file with mode: 0644]
sdk/cwl/tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-crunchstat.txt [new file with mode: 0644]
sdk/cwl/tests/oom/19975-oom-mispelled.cwl [new file with mode: 0644]
sdk/cwl/tests/oom/19975-oom.cwl
sdk/cwl/tests/oom/19975-oom3.cwl
sdk/cwl/tests/test_container.py
sdk/dev-jobs.dockerfile
sdk/go/arvados/api.go
sdk/go/arvados/client.go
sdk/go/keepclient/keepclient.go
sdk/go/keepclient/keepclient_test.go
sdk/go/keepclient/support.go
sdk/java-v2/src/main/java/org/arvados/client/api/client/BaseStandardApiClient.java
sdk/java-v2/src/main/java/org/arvados/client/api/client/CollectionsApiClient.java
sdk/java-v2/src/main/java/org/arvados/client/api/client/KeepWebApiClient.java
sdk/java-v2/src/main/java/org/arvados/client/api/model/CollectionReplaceFiles.java [new file with mode: 0644]
sdk/java-v2/src/main/java/org/arvados/client/facade/ArvadosFacade.java
sdk/java-v2/src/main/java/org/arvados/client/logic/keep/FileDownloader.java
sdk/java-v2/src/test/java/org/arvados/client/api/client/CollectionsApiClientTest.java
sdk/java-v2/src/test/java/org/arvados/client/api/client/KeepWebApiClientTest.java
sdk/java-v2/src/test/java/org/arvados/client/logic/keep/FileDownloaderTest.java
sdk/python/arvados/commands/_util.py
sdk/python/arvados/commands/keepdocker.py
sdk/python/arvados/http_to_keep.py
sdk/python/setup.py
sdk/python/tests/data/hello-world-ManifestV2-OCILayout.tar [new file with mode: 0644]
sdk/python/tests/data/hello-world-ManifestV2.tar [new file with mode: 0644]
sdk/python/tests/data/hello-world-README.txt [new file with mode: 0644]
sdk/python/tests/fed-migrate/jenkins.sh
sdk/python/tests/test_arv_keepdocker.py
sdk/python/tests/test_cmd_util.py [new file with mode: 0644]
sdk/python/tests/test_keep_client.py
sdk/ruby-google-api-client/arvados-google-api-client.gemspec
sdk/ruby-google-api-client/lib/google/api_client/version.rb
sdk/ruby/arvados.gemspec
services/api/Gemfile.lock
services/api/app/models/user.rb
services/api/fpm-info.sh
services/api/test/fixtures/collections.yml
services/api/test/fixtures/groups.yml
services/api/test/functional/arvados/v1/users_controller_test.rb
services/crunch-dispatch-local/crunch-dispatch-local.service
services/dockercleaner/arvados-docker-cleaner.service
services/dockercleaner/setup.py
services/fuse/arvados_fuse/command.py
services/fuse/arvados_fuse/fusedir.py
services/fuse/setup.py
services/fuse/tests/mount_test_base.py
services/fuse/tests/test_mount.py
services/fuse/tests/test_mount_filters.py [new file with mode: 0644]
services/keep-balance/change_set.go
services/keep-balance/change_set_test.go
services/keepproxy/keepproxy_test.go
services/keepstore/azure_blob_volume.go
services/keepstore/azure_blob_volume_test.go
services/keepstore/bufferpool.go
services/keepstore/bufferpool_test.go
services/keepstore/collision.go [deleted file]
services/keepstore/collision_test.go [deleted file]
services/keepstore/command.go
services/keepstore/count.go
services/keepstore/gocheck_test.go [deleted file]
services/keepstore/handler_test.go [deleted file]
services/keepstore/handlers.go [deleted file]
services/keepstore/hashcheckwriter.go [new file with mode: 0644]
services/keepstore/keepstore.go
services/keepstore/keepstore_test.go [new file with mode: 0644]
services/keepstore/metrics.go
services/keepstore/metrics_test.go [new file with mode: 0644]
services/keepstore/mock_mutex_for_test.go [deleted file]
services/keepstore/mounts_test.go
services/keepstore/perms.go [deleted file]
services/keepstore/perms_test.go [deleted file]
services/keepstore/pipe_adapters.go [deleted file]
services/keepstore/proxy_remote.go [deleted file]
services/keepstore/proxy_remote_test.go
services/keepstore/pull_worker.go
services/keepstore/pull_worker_integration_test.go [deleted file]
services/keepstore/pull_worker_test.go
services/keepstore/putprogress.go [new file with mode: 0644]
services/keepstore/router.go [new file with mode: 0644]
services/keepstore/router_test.go [new file with mode: 0644]
services/keepstore/s3_volume.go [moved from services/keepstore/s3aws_volume.go with 75% similarity]
services/keepstore/s3_volume_test.go [moved from services/keepstore/s3aws_volume_test.go with 77% similarity]
services/keepstore/status_test.go [deleted file]
services/keepstore/streamwriterat.go [new file with mode: 0644]
services/keepstore/streamwriterat_test.go [new file with mode: 0644]
services/keepstore/trash_worker.go
services/keepstore/trash_worker_test.go
services/keepstore/unix_volume.go
services/keepstore/unix_volume_test.go
services/keepstore/volume.go
services/keepstore/volume_generic_test.go
services/keepstore/volume_test.go
services/keepstore/work_queue.go [deleted file]
services/keepstore/work_queue_test.go [deleted file]
services/workbench2/Makefile
services/workbench2/README.md
services/workbench2/cypress.config.ts [new file with mode: 0644]
services/workbench2/cypress.json [deleted file]
services/workbench2/cypress/e2e/banner-tooltip.cy.js [new file with mode: 0644]
services/workbench2/cypress/e2e/collection.cy.js [moved from services/workbench2/cypress/integration/collection.spec.js with 99% similarity]
services/workbench2/cypress/e2e/create-workflow.cy.js [moved from services/workbench2/cypress/integration/create-workflow.spec.js with 99% similarity]
services/workbench2/cypress/e2e/delete-multiple-files.cy.js [moved from services/workbench2/cypress/integration/delete-multiple-files.spec.js with 97% similarity]
services/workbench2/cypress/e2e/favorites.cy.js [moved from services/workbench2/cypress/integration/favorites.spec.js with 99% similarity]
services/workbench2/cypress/e2e/group-manage.cy.js [moved from services/workbench2/cypress/integration/group-manage.spec.js with 98% similarity]
services/workbench2/cypress/e2e/login.cy.js [moved from services/workbench2/cypress/integration/login.spec.js with 98% similarity]
services/workbench2/cypress/e2e/multiselect-toolbar.cy.js [moved from services/workbench2/cypress/integration/multiselect-toolbar.spec.js with 91% similarity]
services/workbench2/cypress/e2e/page-not-found.cy.js [moved from services/workbench2/cypress/integration/page-not-found.spec.js with 87% similarity]
services/workbench2/cypress/e2e/process.cy.js [moved from services/workbench2/cypress/integration/process.spec.js with 92% similarity]
services/workbench2/cypress/e2e/project.cy.js [moved from services/workbench2/cypress/integration/project.spec.js with 91% similarity]
services/workbench2/cypress/e2e/search.cy.js [moved from services/workbench2/cypress/integration/search.spec.js with 98% similarity]
services/workbench2/cypress/e2e/sharing.cy.js [moved from services/workbench2/cypress/integration/sharing.spec.js with 98% similarity]
services/workbench2/cypress/e2e/side-panel.cy.js [moved from services/workbench2/cypress/integration/side-panel.spec.js with 98% similarity]
services/workbench2/cypress/e2e/user-profile.cy.js [moved from services/workbench2/cypress/integration/user-profile.spec.js with 100% similarity]
services/workbench2/cypress/e2e/virtual-machine-admin.cy.js [moved from services/workbench2/cypress/integration/virtual-machine-admin.spec.js with 100% similarity]
services/workbench2/cypress/e2e/workflow.cy.js [moved from services/workbench2/cypress/integration/workflow.spec.js with 99% similarity]
services/workbench2/cypress/integration/banner-tooltip.spec.js [deleted file]
services/workbench2/cypress/support/commands.js
services/workbench2/cypress/support/e2e.js [moved from services/workbench2/cypress/support/index.js with 100% similarity]
services/workbench2/docker/Dockerfile
services/workbench2/package.json
services/workbench2/src/components/data-explorer/data-explorer.tsx
services/workbench2/src/components/data-table/data-table.test.tsx
services/workbench2/src/components/data-table/data-table.tsx
services/workbench2/src/components/default-view/default-view.tsx
services/workbench2/src/components/details-attribute/details-attribute.tsx
services/workbench2/src/components/icon/icon.tsx
services/workbench2/src/components/loading/inline-pulser.tsx [new file with mode: 0644]
services/workbench2/src/components/multi-panel-view/multi-panel-view.tsx
services/workbench2/src/components/multiselect-toolbar/MultiselectToolbar.tsx
services/workbench2/src/components/multiselect-toolbar/ms-toolbar-overflow-menu.tsx [new file with mode: 0644]
services/workbench2/src/components/multiselect-toolbar/ms-toolbar-overflow-wrapper.tsx [new file with mode: 0644]
services/workbench2/src/components/search-input/search-input.tsx
services/workbench2/src/components/subprocess-progress-bar/subprocess-progress-bar.tsx
services/workbench2/src/models/group.ts
services/workbench2/src/store/all-processes-panel/all-processes-panel-middleware-service.ts
services/workbench2/src/store/data-explorer/data-explorer-action.ts
services/workbench2/src/store/data-explorer/data-explorer-reducer.ts
services/workbench2/src/store/favorite-panel/favorite-panel-middleware-service.ts
services/workbench2/src/store/group-details-panel/group-details-panel-members-middleware-service.test.js [new file with mode: 0644]
services/workbench2/src/store/group-details-panel/group-details-panel-members-middleware-service.ts
services/workbench2/src/store/group-details-panel/group-details-panel-permissions-middleware-service.test.js [new file with mode: 0644]
services/workbench2/src/store/group-details-panel/group-details-panel-permissions-middleware-service.ts
services/workbench2/src/store/groups-panel/groups-panel-middleware-service.test.ts [new file with mode: 0644]
services/workbench2/src/store/groups-panel/groups-panel-middleware-service.ts
services/workbench2/src/store/multiselect/multiselect-reducer.tsx
services/workbench2/src/store/process-panel/process-panel-actions.ts
services/workbench2/src/store/process-panel/process-panel-reducer.ts
services/workbench2/src/store/process-panel/process-panel.ts
services/workbench2/src/store/processes/processes-middleware-service.ts [new file with mode: 0644]
services/workbench2/src/store/project-panel/project-panel-middleware-service.ts
services/workbench2/src/store/resource-type-filters/resource-type-filters.ts
services/workbench2/src/store/search-bar/search-bar-actions.ts
services/workbench2/src/store/search-bar/search-bar-reducer.ts
services/workbench2/src/store/search-results-panel/search-results-middleware-service.ts
services/workbench2/src/store/store.ts
services/workbench2/src/store/subprocess-panel/subprocess-panel-middleware-service.ts
services/workbench2/src/store/workbench/workbench-actions.ts
services/workbench2/src/store/workflow-panel/workflow-middleware-service.ts
services/workbench2/src/store/workflow-panel/workflow-panel-actions.ts
services/workbench2/src/views-components/context-menu/action-sets/project-action-set.ts
services/workbench2/src/views-components/data-explorer/data-explorer.tsx
services/workbench2/src/views-components/data-explorer/renderers.test.tsx
services/workbench2/src/views-components/data-explorer/renderers.tsx
services/workbench2/src/views/all-processes-panel/all-processes-panel.tsx
services/workbench2/src/views/groups-panel/groups-panel.tsx
services/workbench2/src/views/process-panel/process-io-card.test.tsx [new file with mode: 0644]
services/workbench2/src/views/process-panel/process-io-card.tsx
services/workbench2/src/views/process-panel/process-log-code-snippet.tsx
services/workbench2/src/views/process-panel/process-panel-root.tsx
services/workbench2/src/views/process-panel/process-panel.tsx
services/workbench2/src/views/process-panel/process-resource-card.tsx
services/workbench2/src/views/project-panel/project-panel.tsx
services/workbench2/src/views/search-results-panel/search-results-panel-view.tsx
services/workbench2/src/views/workflow-panel/registered-workflow-panel.tsx
services/workbench2/src/views/workflow-panel/workflow-processes-panel-root.tsx [new file with mode: 0644]
services/workbench2/src/views/workflow-panel/workflow-processes-panel.tsx [new file with mode: 0644]
services/workbench2/yarn.lock
tools/arvbox/lib/arvbox/docker/Dockerfile.base
tools/arvbox/lib/arvbox/docker/common.sh
tools/arvbox/lib/arvbox/docker/createusers.sh
tools/arvbox/lib/arvbox/docker/edit_users.py
tools/arvbox/lib/arvbox/docker/service/doc/run-service
tools/arvbox/lib/arvbox/docker/service/sdk/run-service
tools/arvbox/lib/arvbox/docker/service/vm/run-service
tools/arvbox/lib/arvbox/docker/yml_override.py
tools/compute-images/scripts/base.sh
tools/compute-images/scripts/usr-local-bin-ensure-encrypted-partitions-aws-ebs-autoscale.sh
tools/compute-images/scripts/usr-local-bin-ensure-encrypted-partitions.sh
tools/crunchstat-summary/crunchstat_summary/command.py
tools/crunchstat-summary/crunchstat_summary/dygraphs.js
tools/crunchstat-summary/crunchstat_summary/reader.py
tools/crunchstat-summary/crunchstat_summary/summarizer.py
tools/crunchstat-summary/crunchstat_summary/webchart.py
tools/crunchstat-summary/setup.py
tools/crunchstat-summary/tests/container_9tee4-dz642-lymtndkpy39eibk.txt.gz.report
tools/crunchstat-summary/tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-arv-mount.txt.gz.report
tools/crunchstat-summary/tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-crunchstat.txt.gz.report
tools/crunchstat-summary/tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y.txt.gz.report
tools/crunchstat-summary/tests/crunchstat_error_messages.txt
tools/crunchstat-summary/tests/logfile_20151204190335.txt.gz [deleted file]
tools/crunchstat-summary/tests/logfile_20151204190335.txt.gz.report [deleted file]
tools/crunchstat-summary/tests/logfile_20151210063411.txt.gz [deleted file]
tools/crunchstat-summary/tests/logfile_20151210063411.txt.gz.report [deleted file]
tools/crunchstat-summary/tests/logfile_20151210063439.txt.gz [deleted file]
tools/crunchstat-summary/tests/logfile_20151210063439.txt.gz.report [deleted file]
tools/crunchstat-summary/tests/test_examples.py
tools/keep-block-check/keep-block-check_test.go
tools/keep-rsync/keep-rsync_test.go
tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/arvados.sls
tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/postgresql.sls
tools/salt-install/config_examples/single_host/multiple_hostnames/states/snakeoil_certs.sls
tools/salt-install/config_examples/single_host/single_hostname/pillars/arvados.sls
tools/salt-install/config_examples/single_host/single_hostname/pillars/postgresql.sls
tools/salt-install/config_examples/single_host/single_hostname/states/snakeoil_certs.sls
tools/salt-install/provision.sh
tools/user-activity/setup.py

index d7faa0c3f181ce5c1e110ce5a2e1175c2c00c048..1e1c12a53a79a2a46a0865bf863f8933691e3ba6 100644 (file)
@@ -53,6 +53,8 @@ sdk/cwl/tests/tool/blub.txt
 sdk/cwl/tests/19109-upload-secondary/*
 sdk/cwl/tests/federation/data/*
 sdk/cwl/tests/fake-keep-mount/fake_collection_dir/.arvados#collection
+sdk/cwl/tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-arv-mount.txt
+sdk/cwl/tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-crunchstat.txt
 sdk/go/manifest/testdata/*_manifest
 sdk/java/.classpath
 sdk/java/pom.xml
index 66ca5095524665b1537e140d7f8d0c3cfd9842f0..e6d14cf66404009c8a6ec6f44606fb6e40f5d5d2 100644 (file)
@@ -11,15 +11,14 @@ In order to build packages, you will need:
 Quickstart
 ==========
 
-Build and test all the packages for debian10 on your architecture by
+Build and test all the packages for a distribution on your architecture by
 running:
 
-    ./run-build-test-packages-one-target.sh
+    ./run-build-test-packages-one-target.sh --target DISTRO
 
-This will build package build and test Docker images for debian10, build all
-packages in a build container, then test all packages in a test container.
-
-Use a different distro by adding the `--target TARGET` option.
+This will build package build and test Docker images for the named target
+distribution, build all packages in a build container, then test all
+packages in a test container.
 
 Limit the build to a single architecture by adding the `--arch ARCH`
 option. Supported architectures are amd64 and arm64. Note cross-compilation
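[Editor's illustration, not part of the diff above: a concrete invocation of the updated quickstart. The target debian11 and architecture amd64 are illustrative choices; both appear in the supported target/architecture lists in this commit.]

    WORKSPACE=/path/to/arvados ./run-build-test-packages-one-target.sh --target debian11 --arch amd64
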
index b0990d0c49e6dd5e4335797ff0de193457671620..583b7a54f7e015d4d071c0b6c1042703c95327b6 100755 (executable)
@@ -30,7 +30,7 @@ fi
 context_dir="$(mktemp --directory --tmpdir dev-jobs.XXXXXXXX)"
 trap 'rm -rf "$context_dir"' EXIT INT TERM QUIT
 
-for src_dir in "$WORKSPACE/sdk/python" "${CWLTOOL:-}" "${CWL_UTILS:-}" "${SALAD:-}" "$WORKSPACE/sdk/cwl"; do
+for src_dir in "$WORKSPACE/sdk/python" "${CWLTOOL:-}" "${CWL_UTILS:-}" "${SALAD:-}" "$WORKSPACE/tools/crunchstat-summary" "$WORKSPACE/sdk/cwl"; do
     if [[ -z "$src_dir" ]]; then
         continue
     fi
index 5147f7bba6adc9bbd0795ad76f6b6e16a8fcb130..390b5dd828f456fa1e8aa0867968814295f888fd 100755 (executable)
@@ -44,12 +44,6 @@ elif [[ "$TYPE_LANG" == "python3" ]]; then
 
   rm -rf dist/*
 
-  # Get the latest setuptools
-  if ! pip3 install $DASHQ_UNLESS_DEBUG $CACHE_FLAG -U 'setuptools<45'; then
-    echo "Error, unable to upgrade setuptools with"
-    echo "  pip3 install $DASHQ_UNLESS_DEBUG $CACHE_FLAG -U 'setuptools<45'"
-    exit 1
-  fi
   # filter a useless warning (when building the cwltest package) from the stderr output
   if ! python3 setup.py $DASHQ_UNLESS_DEBUG sdist 2> >(grep -v 'warning: no previously-included files matching' |grep -v 'for version number calculation'); then
     echo "Error, unable to run python3 setup.py sdist for $SRC_PATH"
index 1d8066312ba54269429974390655ebe255367f8e..be27fffab75037f4095bd5b17464b603095871db 100644 (file)
@@ -4,16 +4,6 @@
 
 SHELL := '/bin/bash'
 
-all: centos7/generated
-centos7/generated: common-generated-all
-       test -d centos7/generated || mkdir centos7/generated
-       cp -f -rlt centos7/generated common-generated/*
-
-all: debian10/generated
-debian10/generated: common-generated-all
-       test -d debian10/generated || mkdir debian10/generated
-       cp -f -rlt debian10/generated common-generated/*
-
 all: debian11/generated
 debian11/generated: common-generated-all
        test -d debian11/generated || mkdir debian11/generated
@@ -29,11 +19,6 @@ rocky8/generated: common-generated-all
        test -d rocky8/generated || mkdir rocky8/generated
        cp -f -rlt rocky8/generated common-generated/*
 
-all: ubuntu1804/generated
-ubuntu1804/generated: common-generated-all
-       test -d ubuntu1804/generated || mkdir ubuntu1804/generated
-       cp -f -rlt ubuntu1804/generated common-generated/*
-
 all: ubuntu2004/generated
 ubuntu2004/generated: common-generated-all
        test -d ubuntu2004/generated || mkdir ubuntu2004/generated
diff --git a/build/package-build-dockerfiles/centos7/Dockerfile b/build/package-build-dockerfiles/centos7/Dockerfile
deleted file mode 100644 (file)
index 3c73ad9..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-ARG HOSTTYPE
-ARG BRANCH
-ARG GOVERSION
-
-FROM centos:7 as build_x86_64
-ONBUILD ARG BRANCH
-# Install go
-ONBUILD ARG GOVERSION
-ONBUILD ADD generated/go${GOVERSION}.linux-amd64.tar.gz /usr/local/
-ONBUILD RUN ln -s /usr/local/go/bin/go /usr/local/bin/
-# Install nodejs and npm
-ONBUILD ADD generated/node-v12.22.12-linux-x64.tar.xz /usr/local/
-ONBUILD RUN ln -s /usr/local/node-v12.22.12-linux-x64/bin/* /usr/local/bin/
-ONBUILD RUN npm install -g yarn
-ONBUILD RUN ln -sf /usr/local/node-v12.22.12-linux-x64/bin/* /usr/local/bin/
-
-FROM centos:7 as build_aarch64
-ONBUILD ARG BRANCH
-# Install go
-ONBUILD ARG GOVERSION
-ONBUILD ADD generated/go${GOVERSION}.linux-arm64.tar.gz /usr/local/
-ONBUILD RUN ln -s /usr/local/go/bin/go /usr/local/bin/
-# Install nodejs and npm
-ONBUILD ADD generated/node-v12.22.12-linux-arm64.tar.xz /usr/local/
-ONBUILD RUN ln -s /usr/local/node-v12.22.12-linux-arm64/bin/* /usr/local/bin/
-ONBUILD RUN npm install -g yarn
-ONBUILD RUN ln -sf /usr/local/node-v12.22.12-linux-arm64/bin/* /usr/local/bin/
-
-FROM build_${HOSTTYPE}
-
-MAINTAINER Arvados Package Maintainers <packaging@arvados.org>
-
-ENV DEBIAN_FRONTEND noninteractive
-
-SHELL ["/bin/bash", "-c"]
-# Install dependencies.
-RUN yum -q -y install make automake gcc gcc-c++ libyaml-devel patch readline-devel zlib-devel libffi-devel openssl-devel bzip2 libtool bison sqlite-devel rpm-build git libattr-devel nss-devel libcurl-devel which tar unzip scl-utils centos-release-scl postgresql-devel fuse-devel xz-libs git wget pam-devel
-
-# Install RVM
-ADD generated/mpapis.asc /tmp/
-ADD generated/pkuczynski.asc /tmp/
-RUN gpg --import --no-tty /tmp/mpapis.asc && \
-    gpg --import --no-tty /tmp/pkuczynski.asc && \
-    curl -L https://get.rvm.io | bash -s stable && \
-    /usr/local/rvm/bin/rvm install 2.7 -j $(grep -c processor /proc/cpuinfo) && \
-    /usr/local/rvm/bin/rvm alias create default ruby-2.7 && \
-    echo "gem: --no-document" >> ~/.gemrc && \
-    /usr/local/rvm/bin/rvm-exec default gem install bundler --version 2.2.19 && \
-    /usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.15.1
-
-# Install Bash 4.4.12 // see https://dev.arvados.org/issues/15612
-RUN cd /usr/local/src \
-&& wget http://ftp.gnu.org/gnu/bash/bash-4.4.12.tar.gz \
-&& wget http://ftp.gnu.org/gnu/bash/bash-4.4.12.tar.gz.sig \
-&& tar xzf bash-4.4.12.tar.gz \
-&& cd bash-4.4.12 \
-&& ./configure --prefix=/usr/local/$( basename $( pwd ) ) \
-&& make \
-&& make install \
-&& ln -sf /usr/local/src/bash-4.4.12/bash /bin/bash
-
-# Need to "touch" RPM database to workaround bug in interaction between
-# overlayfs and yum (https://bugzilla.redhat.com/show_bug.cgi?id=1213602)
-RUN touch /var/lib/rpm/* && yum -q -y install python3 python3-pip python3-devel
-
-# Install virtualenv
-RUN /usr/bin/pip3 install 'virtualenv<20'
-
-RUN /usr/local/rvm/bin/rvm-exec default bundle config --global jobs $(let a=$(grep -c processor /proc/cpuinfo )-1; echo $a)
-# Cf. https://build.betterup.com/one-weird-trick-that-will-speed-up-your-bundle-install/
-ENV MAKE "make --jobs $(grep -c processor /proc/cpuinfo)"
-
-# Preseed the go module cache and the ruby gems, using the currently checked
-# out branch of the source tree. This avoids potential compatibility issues
-# between the version of Ruby and certain gems.
-RUN git clone git://git.arvados.org/arvados.git /tmp/arvados && \
-    cd /tmp/arvados && \
-    if [[ -n "${BRANCH}" ]]; then git checkout ${BRANCH}; fi && \
-    cd /tmp/arvados/services/api && \
-    /usr/local/rvm/bin/rvm-exec default bundle install && \
-    cd /tmp/arvados && \
-    go mod download
-
-# The version of setuptools that comes with CentOS is way too old
-RUN pip3 install 'setuptools<45'
-
-ENV WORKSPACE /arvados
-CMD ["/usr/local/rvm/bin/rvm-exec", "default", "bash", "/jenkins/run-build-packages.sh", "--target", "centos7"]
diff --git a/build/package-build-dockerfiles/debian10/Dockerfile b/build/package-build-dockerfiles/debian10/Dockerfile
deleted file mode 100644 (file)
index d8b3d17..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-ARG HOSTTYPE
-ARG BRANCH
-ARG GOVERSION
-
-## dont use debian:10 here since the word 'buster' is used for rvm precompiled binaries
-FROM debian:buster as build_x86_64
-ONBUILD ARG BRANCH
-# Install go
-ONBUILD ARG GOVERSION
-ONBUILD ADD generated/go${GOVERSION}.linux-amd64.tar.gz /usr/local/
-ONBUILD RUN ln -s /usr/local/go/bin/go /usr/local/bin/
-# Install nodejs and npm
-ONBUILD ADD generated/node-v12.22.12-linux-x64.tar.xz /usr/local/
-ONBUILD RUN ln -s /usr/local/node-v12.22.12-linux-x64/bin/* /usr/local/bin/
-ONBUILD RUN npm install -g yarn
-ONBUILD RUN ln -sf /usr/local/node-v12.22.12-linux-x64/bin/* /usr/local/bin/
-# No cross compilation support for debian10 because of https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=983477
-
-FROM debian:buster as build_aarch64
-ONBUILD ARG BRANCH
-# Install go
-ONBUILD ARG GOVERSION
-ONBUILD ADD generated/go${GOVERSION}.linux-arm64.tar.gz /usr/local/
-ONBUILD RUN ln -s /usr/local/go/bin/go /usr/local/bin/
-# Install nodejs and npm
-ONBUILD ADD generated/node-v12.22.12-linux-arm64.tar.xz /usr/local/
-ONBUILD RUN ln -s /usr/local/node-v12.22.12-linux-arm64/bin/* /usr/local/bin/
-ONBUILD RUN npm install -g yarn
-ONBUILD RUN ln -sf /usr/local/node-v12.22.12-linux-arm64/bin/* /usr/local/bin/
-
-FROM build_${HOSTTYPE}
-
-MAINTAINER Arvados Package Maintainers <packaging@arvados.org>
-
-ENV DEBIAN_FRONTEND noninteractive
-
-SHELL ["/bin/bash", "-c"]
-# Install dependencies.
-RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python3 python3-setuptools python3-pip libcurl4-gnutls-dev curl git procps libattr1-dev libfuse-dev libgnutls28-dev libpq-dev unzip python3-venv python3-dev libpam-dev equivs
-
-# Install virtualenv
-RUN /usr/bin/pip3 install 'virtualenv<20'
-
-# Install RVM
-ADD generated/mpapis.asc /tmp/
-ADD generated/pkuczynski.asc /tmp/
-RUN gpg --import --no-tty /tmp/mpapis.asc && \
-    gpg --import --no-tty /tmp/pkuczynski.asc && \
-    curl -L https://get.rvm.io | bash -s stable && \
-    /usr/local/rvm/bin/rvm install 2.7 -j $(grep -c processor /proc/cpuinfo) && \
-    /usr/local/rvm/bin/rvm alias create default ruby-2.7 && \
-    echo "gem: --no-document" >> ~/.gemrc && \
-    /usr/local/rvm/bin/rvm-exec default gem install bundler --version 2.2.19 && \
-    /usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.15.1
-
-RUN /usr/local/rvm/bin/rvm-exec default bundle config --global jobs $(let a=$(grep -c processor /proc/cpuinfo )-1; echo $a)
-# Cf. https://build.betterup.com/one-weird-trick-that-will-speed-up-your-bundle-install/
-ENV MAKE "make --jobs $(grep -c processor /proc/cpuinfo)"
-
-# Preseed the go module cache and the ruby gems, using the currently checked
-# out branch of the source tree. This avoids potential compatibility issues
-# between the version of Ruby and certain gems.
-RUN git clone git://git.arvados.org/arvados.git /tmp/arvados && \
-    cd /tmp/arvados && \
-    if [[ -n "${BRANCH}" ]]; then git checkout ${BRANCH}; fi && \
-    cd /tmp/arvados/services/api && \
-    /usr/local/rvm/bin/rvm-exec default bundle install && \
-    cd /tmp/arvados && \
-    go mod download
-
-ENV WORKSPACE /arvados
-CMD ["/usr/local/rvm/bin/rvm-exec", "default", "bash", "/jenkins/run-build-packages.sh", "--target", "debian10"]
index be818587b322ee6c2492a4de25fd55b48a540463..5ca7e1f2434ad4db50e6aa5b587aea5a93de1b15 100644 (file)
@@ -46,14 +46,12 @@ ENV DEBIAN_FRONTEND noninteractive
 
 SHELL ["/bin/bash", "-c"]
 # Install dependencies.
-RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python3 python3-setuptools python3-pip libcurl4-gnutls-dev curl git procps libattr1-dev libfuse-dev libgnutls28-dev libpq-dev unzip python3-venv python3-dev libpam-dev equivs
+RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python3 libcurl4-gnutls-dev curl git procps libattr1-dev libfuse-dev libgnutls28-dev libpq-dev unzip python3-venv python3-dev libpam-dev equivs
 
-# Install virtualenv
-RUN /usr/bin/pip3 install 'virtualenv<20'
-
-# Install RVM
 ADD generated/mpapis.asc /tmp/
 ADD generated/pkuczynski.asc /tmp/
+# fpm depends on dotenv, but version 3.0 of that gem dropped support for
+# Ruby 2.7, so we need to specifically install an older version.
 RUN gpg --import --no-tty /tmp/mpapis.asc && \
     gpg --import --no-tty /tmp/pkuczynski.asc && \
     curl -L https://get.rvm.io | bash -s stable && \
@@ -61,6 +59,7 @@ RUN gpg --import --no-tty /tmp/mpapis.asc && \
     /usr/local/rvm/bin/rvm alias create default ruby-2.7 && \
     echo "gem: --no-document" >> ~/.gemrc && \
     /usr/local/rvm/bin/rvm-exec default gem install bundler --version 2.2.19 && \
+    /usr/local/rvm/bin/rvm-exec default gem install dotenv --version '~> 2.8' && \
     /usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.15.1
 
 RUN /usr/local/rvm/bin/rvm-exec default bundle config --global jobs $(let a=$(grep -c processor /proc/cpuinfo )-1; echo $a)
index 859c1a8597dcc52b8670b79bb4a7b17a39cdb473..fa1d095e79fa74f23c134e9d657056b281543a8f 100644 (file)
@@ -44,7 +44,7 @@ ENV DEBIAN_FRONTEND noninteractive
 
 SHELL ["/bin/bash", "-c"]
 # Install dependencies.
-RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python3 python3-setuptools python3-pip libcurl4-gnutls-dev curl git procps libattr1-dev libfuse-dev libgnutls28-dev libpq-dev unzip python3-venv python3-virtualenv python3-dev libpam-dev equivs
+RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python3 libcurl4-gnutls-dev curl git procps libattr1-dev libfuse-dev libgnutls28-dev libpq-dev unzip python3-venv python3-dev libpam-dev equivs
 
 # Install RVM
 ADD generated/mpapis.asc /tmp/
index 0eab1f5d36311a3e97a6e6d1d2e6cfa5d679250e..a1038a9b8860d185db3db08305b32fcd2e8a6a88 100644 (file)
@@ -68,9 +68,10 @@ RUN microdnf --assumeyes --enablerepo=devel install \
     xz-libs \
     zlib-devel
 
-# Install RVM
 ADD generated/mpapis.asc /tmp/
 ADD generated/pkuczynski.asc /tmp/
+# fpm depends on dotenv, but version 3.0 of that gem dropped support for
+# Ruby 2.7, so we need to specifically install an older version.
 RUN gpg --import --no-tty /tmp/mpapis.asc && \
     gpg --import --no-tty /tmp/pkuczynski.asc && \
     curl -L https://get.rvm.io | bash -s stable && \
@@ -78,6 +79,7 @@ RUN gpg --import --no-tty /tmp/mpapis.asc && \
     /usr/local/rvm/bin/rvm alias create default ruby-2.7 && \
     echo "gem: --no-document" >> ~/.gemrc && \
     /usr/local/rvm/bin/rvm-exec default gem install bundler --version 2.2.19 && \
+    /usr/local/rvm/bin/rvm-exec default gem install dotenv --version '~> 2.8' && \
     /usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.15.1
 
 RUN /usr/local/rvm/bin/rvm-exec default bundle config --global jobs $(let a=$(grep -c processor /proc/cpuinfo )-1; echo $a)
diff --git a/build/package-build-dockerfiles/ubuntu1804/Dockerfile b/build/package-build-dockerfiles/ubuntu1804/Dockerfile
deleted file mode 100644 (file)
index 4754adb..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-ARG HOSTTYPE
-ARG BRANCH
-ARG GOVERSION
-
-FROM ubuntu:bionic as build_x86_64
-ONBUILD ARG BRANCH
-# Install go
-ONBUILD ARG GOVERSION
-ONBUILD ADD generated/go${GOVERSION}.linux-amd64.tar.gz /usr/local/
-ONBUILD RUN ln -s /usr/local/go/bin/go /usr/local/bin/
-# Install nodejs and npm
-ONBUILD ADD generated/node-v12.22.12-linux-x64.tar.xz /usr/local/
-ONBUILD RUN ln -s /usr/local/node-v12.22.12-linux-x64/bin/* /usr/local/bin/
-ONBUILD RUN npm install -g yarn
-ONBUILD RUN ln -sf /usr/local/node-v12.22.12-linux-x64/bin/* /usr/local/bin/
-# No cross compilation support for ubuntu1804 because of https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=983477
-
-FROM ubuntu:bionic as build_aarch64
-ONBUILD ARG BRANCH
-# Install go
-ONBUILD ARG GOVERSION
-ONBUILD ADD generated/go${GOVERSION}.linux-arm64.tar.gz /usr/local/
-ONBUILD RUN ln -s /usr/local/go/bin/go /usr/local/bin/
-# Install nodejs and npm
-ONBUILD ADD generated/node-v12.22.12-linux-arm64.tar.xz /usr/local/
-ONBUILD RUN ln -s /usr/local/node-v12.22.12-linux-arm64/bin/* /usr/local/bin/
-ONBUILD RUN npm install -g yarn
-ONBUILD RUN ln -sf /usr/local/node-v12.22.12-linux-arm64/bin/* /usr/local/bin/
-
-FROM build_${HOSTTYPE}
-
-MAINTAINER Arvados Package Maintainers <packaging@arvados.org>
-
-ENV DEBIAN_FRONTEND noninteractive
-
-SHELL ["/bin/bash", "-c"]
-# Install dependencies.
-RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python3.8 python3-pip libcurl4-gnutls-dev libgnutls28-dev curl git libattr1-dev libfuse-dev libpq-dev unzip tzdata python3.8-venv python3.8-dev libpam-dev equivs
-
-# Install virtualenv
-RUN /usr/bin/pip3 install 'virtualenv<20'
-
-# Install RVM
-ADD generated/mpapis.asc /tmp/
-ADD generated/pkuczynski.asc /tmp/
-RUN gpg --import --no-tty /tmp/mpapis.asc && \
-    gpg --import --no-tty /tmp/pkuczynski.asc && \
-    curl -L https://get.rvm.io | bash -s stable && \
-    /usr/local/rvm/bin/rvm install 2.7 -j $(grep -c processor /proc/cpuinfo) && \
-    /usr/local/rvm/bin/rvm alias create default ruby-2.7 && \
-    echo "gem: --no-document" >> ~/.gemrc && \
-    /usr/local/rvm/bin/rvm-exec default gem install bundler --version 2.2.19 && \
-    /usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.15.1
-
-RUN /usr/local/rvm/bin/rvm-exec default bundle config --global jobs $(let a=$(grep -c processor /proc/cpuinfo )-1; echo $a)
-# Cf. https://build.betterup.com/one-weird-trick-that-will-speed-up-your-bundle-install/
-ENV MAKE "make --jobs $(grep -c processor /proc/cpuinfo)"
-
-# Preseed the go module cache and the ruby gems, using the currently checked
-# out branch of the source tree. This avoids potential compatibility issues
-# between the version of Ruby and certain gems.
-RUN git clone git://git.arvados.org/arvados.git /tmp/arvados && \
-    cd /tmp/arvados && \
-    if [[ -n "${BRANCH}" ]]; then git checkout ${BRANCH}; fi && \
-    cd /tmp/arvados/services/api && \
-    /usr/local/rvm/bin/rvm-exec default bundle install && \
-    cd /tmp/arvados && \
-    go mod download
-
-ENV WORKSPACE /arvados
-CMD ["/usr/local/rvm/bin/rvm-exec", "default", "bash", "/jenkins/run-build-packages.sh", "--target", "ubuntu1804"]
index a21f8a2ab9e641f62e7014582ec727b94ad2f41e..576b6021c0595a0dbd9a72ed147f6dc289e5810a 100644 (file)
@@ -50,14 +50,12 @@ ENV DEBIAN_FRONTEND noninteractive
 
 SHELL ["/bin/bash", "-c"]
 # Install dependencies.
-RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python3 python3-pip libcurl4-gnutls-dev libgnutls28-dev curl git libattr1-dev libfuse-dev libpq-dev unzip tzdata python3-venv python3-dev libpam-dev shared-mime-info equivs
+RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python3 libcurl4-gnutls-dev libgnutls28-dev curl git libattr1-dev libfuse-dev libpq-dev unzip tzdata python3-venv python3-dev libpam-dev shared-mime-info equivs
 
-# Install virtualenv
-RUN /usr/bin/pip3 install 'virtualenv<20'
-
-# Install RVM
 ADD generated/mpapis.asc /tmp/
 ADD generated/pkuczynski.asc /tmp/
+# fpm depends on dotenv, but version 3.0 of that gem dropped support for
+# Ruby 2.7, so we need to specifically install an older version.
 RUN gpg --import --no-tty /tmp/mpapis.asc && \
     gpg --import --no-tty /tmp/pkuczynski.asc && \
     curl -L https://get.rvm.io | bash -s stable && \
@@ -65,6 +63,7 @@ RUN gpg --import --no-tty /tmp/mpapis.asc && \
     /usr/local/rvm/bin/rvm alias create default ruby-2.7 && \
     echo "gem: --no-document" >> ~/.gemrc && \
     /usr/local/rvm/bin/rvm-exec default gem install bundler --version 2.2.19 && \
+    /usr/local/rvm/bin/rvm-exec default gem install dotenv --version '~> 2.8' && \
     /usr/local/rvm/bin/rvm-exec default gem install fpm --version 1.15.1
 
 RUN /usr/local/rvm/bin/rvm-exec default bundle config --global jobs $(let a=$(grep -c processor /proc/cpuinfo )-1; echo $a)
index 533cdd8641886d381d312cc4f4f09d598e62fd88..79664fea6b3572dcfd68adfb15c504b3ce223807 100644 (file)
@@ -44,10 +44,7 @@ ENV DEBIAN_FRONTEND noninteractive
 
 SHELL ["/bin/bash", "-c"]
 # Install dependencies.
-RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python3 python3-pip libcurl4-gnutls-dev libgnutls28-dev curl git libattr1-dev libfuse-dev libpq-dev unzip tzdata python3-venv python3-dev libpam-dev shared-mime-info equivs
-
-# Install virtualenv
-RUN /usr/bin/pip3 install 'virtualenv<20'
+RUN /usr/bin/apt-get update && /usr/bin/apt-get install -q -y python3 libcurl4-gnutls-dev libgnutls28-dev curl git libattr1-dev libfuse-dev libpq-dev unzip tzdata python3-venv python3-dev libpam-dev shared-mime-info equivs
 
 # Install RVM
 ADD generated/mpapis.asc /tmp/
index f5287c53ea1fbdf339e968617ad21bfc36cee781..02e2846a2a66dc8c0627b229297e70b02089decc 100644 (file)
@@ -2,16 +2,6 @@
 #
 # SPDX-License-Identifier: AGPL-3.0
 
-all: centos7/generated
-centos7/generated: common-generated-all
-       test -d centos7/generated || mkdir centos7/generated
-       cp -f -rlt centos7/generated common-generated/*
-
-all: debian10/generated
-debian10/generated: common-generated-all
-       test -d debian10/generated || mkdir debian10/generated
-       cp -f -rlt debian10/generated common-generated/*
-
 all: debian11/generated
 debian11/generated: common-generated-all
        test -d debian11/generated || mkdir debian11/generated
@@ -27,11 +17,6 @@ rocky8/generated: common-generated-all
        test -d rocky8/generated || mkdir rocky8/generated
        cp -f -rlt rocky8/generated common-generated/*
 
-all: ubuntu1804/generated
-ubuntu1804/generated: common-generated-all
-       test -d ubuntu1804/generated || mkdir ubuntu1804/generated
-       cp -f -rlt ubuntu1804/generated common-generated/*
-
 all: ubuntu2004/generated
 ubuntu2004/generated: common-generated-all
        test -d ubuntu2004/generated || mkdir ubuntu2004/generated
diff --git a/build/package-test-dockerfiles/centos7/Dockerfile b/build/package-test-dockerfiles/centos7/Dockerfile
deleted file mode 100644 (file)
index 1010ef8..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-FROM centos:7
-MAINTAINER Arvados Package Maintainers <packaging@arvados.org>
-
-# Install dependencies.
-RUN yum -q -y install scl-utils centos-release-scl which tar wget
-
-# Install RVM
-ADD generated/mpapis.asc /tmp/
-ADD generated/pkuczynski.asc /tmp/
-RUN touch /var/lib/rpm/* && \
-    gpg --import --no-tty /tmp/mpapis.asc && \
-    gpg --import --no-tty /tmp/pkuczynski.asc && \
-    curl -L https://get.rvm.io | bash -s stable && \
-    /usr/local/rvm/bin/rvm install 2.7 -j $(grep -c processor /proc/cpuinfo) && \
-    /usr/local/rvm/bin/rvm alias create default ruby-2.7 && \
-    /usr/local/rvm/bin/rvm-exec default gem install bundler --version 2.2.9
-
-# Install Bash 4.4.12  // see https://dev.arvados.org/issues/15612
-RUN cd /usr/local/src \
-&& wget http://ftp.gnu.org/gnu/bash/bash-4.4.12.tar.gz \
-&& wget http://ftp.gnu.org/gnu/bash/bash-4.4.12.tar.gz.sig \
-&& tar xzf bash-4.4.12.tar.gz \
-&& cd bash-4.4.12 \
-&& ./configure --prefix=/usr/local/$( basename $( pwd ) ) \
-&& make \
-&& make install \
-&& ln -sf /usr/local/src/bash-4.4.12/bash /bin/bash
-
-# Add epel, we need it for the python-pam dependency
-RUN wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
-RUN rpm -ivh epel-release-latest-7.noarch.rpm
-
-COPY localrepo.repo /etc/yum.repos.d/localrepo.repo
diff --git a/build/package-test-dockerfiles/centos7/localrepo.repo b/build/package-test-dockerfiles/centos7/localrepo.repo
deleted file mode 100644 (file)
index ebb8765..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-[localrepo]
-name=Arvados Test
-baseurl=file:///arvados/packages/centos7
-gpgcheck=0
-enabled=1
diff --git a/build/package-test-dockerfiles/debian10/Dockerfile b/build/package-test-dockerfiles/debian10/Dockerfile
deleted file mode 100644 (file)
index e4b7993..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-FROM debian:buster
-MAINTAINER Arvados Package Maintainers <packaging@arvados.org>
-
-ENV DEBIAN_FRONTEND noninteractive
-
-# Install dependencies
-RUN apt-get update && \
-    apt-get -y install --no-install-recommends curl ca-certificates gpg procps gpg-agent
-
-# Install RVM
-ADD generated/mpapis.asc /tmp/
-ADD generated/pkuczynski.asc /tmp/
-RUN gpg --import --no-tty /tmp/mpapis.asc && \
-    gpg --import --no-tty /tmp/pkuczynski.asc && \
-    curl -L https://get.rvm.io | bash -s stable && \
-    /usr/local/rvm/bin/rvm install 2.7 -j $(grep -c processor /proc/cpuinfo) && \
-    /usr/local/rvm/bin/rvm alias create default ruby-2.7 && \
-    /usr/local/rvm/bin/rvm-exec default gem install bundler --version 2.2.19
-
-# udev daemon can't start in a container, so don't try.
-RUN mkdir -p /etc/udev/disabled
-
-RUN echo "deb file:///arvados/packages/debian10/ /" >>/etc/apt/sources.list
diff --git a/build/package-test-dockerfiles/ubuntu1804/Dockerfile b/build/package-test-dockerfiles/ubuntu1804/Dockerfile
deleted file mode 100644 (file)
index 64894d7..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-FROM ubuntu:bionic
-MAINTAINER Arvados Package Maintainers <packaging@arvados.org>
-
-ENV DEBIAN_FRONTEND noninteractive
-
-# Install dependencies
-RUN apt-get update && \
-    apt-get -y install --no-install-recommends curl ca-certificates gnupg2
-
-# Install RVM
-ADD generated/mpapis.asc /tmp/
-ADD generated/pkuczynski.asc /tmp/
-RUN gpg --import --no-tty /tmp/mpapis.asc && \
-    gpg --import --no-tty /tmp/pkuczynski.asc && \
-    curl -L https://get.rvm.io | bash -s stable && \
-    /usr/local/rvm/bin/rvm install 2.7 -j $(grep -c processor /proc/cpuinfo) && \
-    /usr/local/rvm/bin/rvm alias create default ruby-2.7 && \
-    /usr/local/rvm/bin/rvm-exec default gem install bundler --version 2.2.19
-
-# udev daemon can't start in a container, so don't try.
-RUN mkdir -p /etc/udev/disabled
-
-RUN echo "deb [trusted=yes] file:///arvados/packages/ubuntu1804/ /" >>/etc/apt/sources.list
-
-# Add preferences file for the Arvados packages. This pins Arvados
-# packages at priority 501, so that older python dependency versions
-# are preferred in those cases where we need them
-ADD etc-apt-preferences.d-arvados /etc/apt/preferences.d/arvados
diff --git a/build/package-test-dockerfiles/ubuntu1804/etc-apt-preferences.d-arvados b/build/package-test-dockerfiles/ubuntu1804/etc-apt-preferences.d-arvados
deleted file mode 100644 (file)
index 9e24695..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-Package: *
-Pin: release o=Arvados
-Pin-Priority: 501
index d50bd50bb50e79678204947839c74e99f41f1781..ee855d8012d6adccfb0670492f73152b5c78e5be 100755 (executable)
@@ -19,10 +19,6 @@ case "$TARGET" in
         apt-get install -y nginx
         dpkg-reconfigure "$PACKAGE_NAME"
         ;;
-    centos*)
-        yum install --assumeyes httpd
-        yum reinstall --assumeyes "$PACKAGE_NAME"
-        ;;
     rocky*)
         microdnf --assumeyes install httpd
         microdnf --assumeyes reinstall "$PACKAGE_NAME"
index fb13eff33cfdc33051d1d14c58fca95d16747dd5..cd41f1d920f9e787a3b5bd75fdfea5e6c83fd8ea 100755 (executable)
@@ -17,41 +17,16 @@ fi
 target="$(basename "$0" .sh)"
 target="${target##*-}"
 
-case "$target" in
-    centos*) yum -q clean all ;;
-    rocky*) microdnf --assumeyes clean all ;;
-esac
+microdnf --assumeyes clean all
 touch /var/lib/rpm/*
 
 export ARV_PACKAGES_DIR="/arvados/packages/$target"
 
 rpm -qa | sort > "$ARV_PACKAGES_DIR/$1.before"
-
-case "$target" in
-    centos*) yum install --assumeyes -e 0 $1 ;;
-    rocky*) microdnf --assumeyes install $1 ;;
-esac
-
+microdnf --assumeyes install "$1"
 rpm -qa | sort > "$ARV_PACKAGES_DIR/$1.after"
-
 diff "$ARV_PACKAGES_DIR/$1".{before,after} >"$ARV_PACKAGES_DIR/$1.diff" || true
 
-# Enable any Software Collections that the package depended on.
-if [[ -d /opt/rh ]]; then
-    # We have to stage the list to a file, because `ls | while read` would
-    # make a subshell, causing the `source` lines to have no effect.
-    scl_list=$(mktemp)
-    ls /opt/rh >"$scl_list"
-
-    # SCL scripts aren't designed to run with -eu.
-    set +eu
-    while read scl; do
-        source scl_source enable "$scl"
-    done <"$scl_list"
-    set -eu
-    rm "$scl_list"
-fi
-
 mkdir -p /tmp/opts
 cd /tmp/opts
 
index 1e294fe0a8be0e4b67511e7f4648116f822f5562..71668d099c44c9161ea7d6cb0158d28081457c0d 100755 (executable)
@@ -7,9 +7,7 @@ set -e
 
 arv-put --version >/dev/null
 
-PYTHON=`ls /usr/share/python3*/dist/python3-arvados-python-client/bin/python3 |head -n1`
-
-$PYTHON << EOF
+/usr/lib/python3-arvados-python-client/bin/python <<EOF
 import arvados
 print("Successfully imported arvados")
 EOF
index 00ef2de417d77a230d5abad0975c4376317b5872..d7ee41743f99836b52fde679f2a0f818663515db 100755 (executable)
@@ -90,25 +90,28 @@ docker_push () {
 
     if [[ ! -z "$tags" ]]
     then
-        for tag in $( echo $tags|tr "," " " )
+        for tag in $(echo $tags|tr "," " " )
         do
              $DOCKER tag $1:$GITHEAD $1:$tag
         done
     fi
 
-    # Sometimes docker push fails; retry it a few times if necessary.
-    for i in `seq 1 5`; do
-        $DOCKER push $*
-        ECODE=$?
-        if [[ "$ECODE" == "0" ]]; then
-            break
-        fi
+    for tag in $(echo $tags|tr "," " " )
+    do
+       # Sometimes docker push fails; retry it a few times if necessary.
+       for i in `seq 1 5`; do
+             $DOCKER push $1:$tag
+             ECODE=$?
+             if [[ "$ECODE" == "0" ]]; then
+                break
+             fi
+       done
+
+       if [[ "$ECODE" != "0" ]]; then
+            title "!!!!!! docker push $1:$tag failed !!!!!!"
+            EXITCODE=$(($EXITCODE + $ECODE))
+       fi
     done
-
-    if [[ "$ECODE" != "0" ]]; then
-        title "!!!!!! docker push $* failed !!!!!!"
-        EXITCODE=$(($EXITCODE + $ECODE))
-    fi
 }
 
 timer_reset() {
index be97ef0d130e1c197bfbdf6d9cc4a2b79002998e..37fe7052413c95b118f97b7a390f5bbfb14dee0f 100755 (executable)
@@ -7,10 +7,10 @@ read -rd "\000" helpmessage <<EOF
 $(basename $0): Orchestrate run-build-packages.sh for one target
 
 Syntax:
-        WORKSPACE=/path/to/arvados $(basename $0) [options]
+        WORKSPACE=/path/to/arvados $(basename $0) --target <target> [options]
 
 --target <target>
-    Distribution to build packages for (default: debian10)
+    Distribution to build packages for
 --command
     Build command to execute (default: use built-in Docker image command)
 --test-packages
@@ -64,10 +64,10 @@ if [ $? -ne 0 ]; then
     exit 1
 fi
 
-TARGET=debian10
 FORCE_BUILD=0
 COMMAND=
 DEBUG=
+TARGET=
 
 eval set -- "$PARSEDOPTS"
 while [ $# -gt 0 ]; do
@@ -139,6 +139,14 @@ done
 set -e
 orig_umask="$(umask)"
 
+if [[ -z "$TARGET" ]]; then
+    echo "FATAL: --target must be specified" >&2
+    exit 2
+elif [[ ! -d "$WORKSPACE/build/package-build-dockerfiles/$TARGET" ]]; then
+    echo "FATAL: unknown build target '$TARGET'" >&2
+    exit 2
+fi
+
 if [[ -n "$ARVADOS_BUILDING_VERSION" ]]; then
     echo "build version='$ARVADOS_BUILDING_VERSION', package iteration='$ARVADOS_BUILDING_ITERATION'"
 fi
@@ -234,22 +242,20 @@ if test -z "$packages" ; then
         crunch-dispatch-local
         crunch-dispatch-slurm
         crunch-run
-        keepproxy
-        keepstore
         keep-balance
         keep-block-check
-        keep-rsync
         keep-exercise
         keep-rsync
-        keep-block-check
         keep-web
+        keepproxy
+        keepstore
         libpam-arvados-go
-        python3-cwltest
+        python3-arvados-cwl-runner
         python3-arvados-fuse
         python3-arvados-python-client
-        python3-arvados-cwl-runner
+        python3-arvados-user-activity
         python3-crunchstat-summary
-        python3-arvados-user-activity"
+        python3-cwltest"
 fi
 
 FINAL_EXITCODE=0
index df7031fca593393b733d4617d72180a6b30fdd1c..77ce054318eb24c1437a2eeeaacd1e7d793f51b1 100755 (executable)
@@ -9,7 +9,7 @@ read -rd "\000" helpmessage <<EOF
 $(basename "$0"): Build Arvados packages
 
 Syntax:
-        WORKSPACE=/path/to/arvados $(basename "$0") [options]
+        WORKSPACE=/path/to/arvados $(basename "$0") --target <target> [options]
 
 Options:
 
@@ -18,7 +18,7 @@ Options:
 --debug
     Output debug information (default: false)
 --target <target>
-    Distribution to build packages for (default: debian10)
+    Distribution to build packages for
 --only-build <package>
     Build only a specific package (or ONLY_BUILD from environment)
 --arch <arch>
@@ -47,8 +47,8 @@ VENDOR="The Arvados Project"
 DEBUG=${ARVADOS_DEBUG:-0}
 FORCE_BUILD=${FORCE_BUILD:-0}
 EXITCODE=0
-TARGET=debian10
 COMMAND=
+TARGET=
 
 PARSEDOPTS=$(getopt --name "$0" --longoptions \
     help,build-bundle-packages,debug,target:,only-build:,arch:,force-build \
@@ -93,6 +93,14 @@ while [ $# -gt 0 ]; do
     shift
 done
 
+if [[ -z "$TARGET" ]]; then
+    echo "FATAL: --target must be specified" >&2
+    exit 2
+elif [[ ! -d "$WORKSPACE/build/package-build-dockerfiles/$TARGET" ]]; then
+    echo "FATAL: unknown build target '$TARGET'" >&2
+    exit 2
+fi
+
 if [[ "$COMMAND" != "" ]]; then
   COMMAND="/usr/local/rvm/bin/rvm-exec default bash /jenkins/$COMMAND --target $TARGET"
 fi
index 9af95d13d34de9dba9658156762244e540a8a55f..d1217162e6109ad74bddb27ba8ea847092d074cb 100755 (executable)
@@ -7,10 +7,10 @@ read -rd "\000" helpmessage <<EOF
 $(basename $0): Build, test and (optionally) upload packages for one target
 
 Syntax:
-        WORKSPACE=/path/to/arvados $(basename $0) [options]
+        WORKSPACE=/path/to/arvados $(basename $0) --target <target> [options]
 
 --target <target>
-    Distribution to build packages for (default: debian10)
+    Distribution to build packages for
 --only-build <package>
     Build only a specific package (or ONLY_BUILD from environment)
 --arch <arch>
@@ -61,10 +61,10 @@ if [ $? -ne 0 ]; then
     exit 1
 fi
 
-TARGET=debian10
 UPLOAD=0
 RC=0
 DEBUG=
+TARGET=
 
 declare -a build_args=()
 
@@ -117,6 +117,14 @@ while [ $# -gt 0 ]; do
     shift
 done
 
+if [[ -z "$TARGET" ]]; then
+    echo "FATAL: --target must be specified" >&2
+    exit 2
+elif [[ ! -d "$WORKSPACE/build/package-build-dockerfiles/$TARGET" ]]; then
+    echo "FATAL: unknown build target '$TARGET'" >&2
+    exit 2
+fi
+
 build_args+=(--target "$TARGET")
 
 if [[ -n "$ONLY_BUILD" ]]; then
index bb224a71724ed79a91e87e99eb91c96c06817a78..a395db8b773b30a781a5606e736ec92c4b33c875 100755 (executable)
@@ -190,12 +190,12 @@ package_go_binary() {
   fi
 
   case "$package_format-$TARGET" in
-    # Older Debian/Ubuntu do not support cross compilation because the
+    # Ubuntu 20.04 does not support cross compilation because the
     # libfuse package does not support multiarch. See
     # <https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=983477>.
     # Red Hat-based distributions do not support native cross compilation at
     # all (they use a qemu-based solution we haven't implemented yet).
-    deb-debian10|deb-ubuntu1804|deb-ubuntu2004|rpm-*)
+    deb-ubuntu2004|rpm-*)
       cross_compilation=0
       if [[ "$native_arch" == "amd64" ]] && [[ -n "$target_arch" ]] && [[ "$native_arch" != "$target_arch" ]]; then
         echo "Error: no cross compilation support for Go on $native_arch for $TARGET, can not build $prog for $target_arch"
@@ -440,10 +440,8 @@ test_package_presence() {
       echo "Package $full_pkgname build forced with --force-build, building"
     elif [[ "$FORMAT" == "deb" ]]; then
       declare -A dd
-      dd[debian10]=buster
       dd[debian11]=bullseye
       dd[debian12]=bookworm
-      dd[ubuntu1804]=bionic
       dd[ubuntu2004]=focal
       dd[ubuntu2204]=jammy
       D=${dd[$TARGET]}
@@ -469,7 +467,6 @@ test_package_presence() {
     else
       local rpm_root
       case "$TARGET" in
-        centos7) rpm_root="CentOS/7/dev" ;;
         rocky8) rpm_root="CentOS/8/dev" ;;
         *)
           echo "FIXME: Don't know RPM URL path for $TARGET, building"
@@ -705,7 +702,7 @@ fpm_build_virtualenv_worker () {
   cd $WORKSPACE/$PKG_DIR
 
   rm -rf dist/*
-  local venv_dir="dist/build/usr/share/python$PYTHON3_VERSION/dist/$PYTHON_PKG"
+  local venv_dir="dist/build/usr/lib/$PYTHON_PKG"
   echo "Creating virtualenv..."
   if ! "$PYTHON3_EXECUTABLE" -m venv "$venv_dir"; then
     printf "Error, unable to run\n  %s -m venv %s\n" "$PYTHON3_EXECUTABLE" "$venv_dir"
@@ -868,17 +865,18 @@ fpm_build_virtualenv_worker () {
   # make sure the systemd service file ends up in the right place
   # used by arvados-docker-cleaner
   if [[ -e "${systemd_unit}" ]]; then
-    COMMAND_ARR+=("usr/share/python$PYTHON3_VERSION/dist/$PKG/share/doc/$PKG/$PKG.service=/lib/systemd/system/$PKG.service")
+    COMMAND_ARR+=("$sys_venv_dir/share/doc/$PKG/$PKG.service=/lib/systemd/system/$PKG.service")
   fi
 
   COMMAND_ARR+=("${fpm_args[@]}")
 
-  # Make sure to install all our package binaries in /usr/bin.
-  # We have to walk $WORKSPACE/$PKG_DIR/bin rather than
-  # $WORKSPACE/build/usr/share/$python/dist/$PYTHON_PKG/bin/ to get the list
-  # because the latter also includes all the python binaries for the virtualenv.
-  # We have to take the copies of our binaries from the latter directory, though,
-  # because those are the ones we rewrote the shebang line of, above.
+  # Make sure to install all our package binaries in /usr/bin. We have to
+  # walk $WORKSPACE/$PKG_DIR/bin rather than $venv_dir/bin to get the list
+  # because the latter also includes scripts installed by all the
+  # dependencies in the virtualenv, which may conflict with other
+  # packages. We have to take the copies of our binaries from the latter
+  # directory, though, because those are the ones we rewrote the shebang
+  # line of, above.
   if [[ -e "$WORKSPACE/$PKG_DIR/bin" ]]; then
     for binary in `ls $WORKSPACE/$PKG_DIR/bin`; do
       COMMAND_ARR+=("$sys_venv_dir/bin/$binary=/usr/bin/")
index 70437bd21cdff07da8f279a3999b8f58bb0fb3ac..28e9e1cf7be17d98704a312f8841fa946db9fd43 100755 (executable)
@@ -208,7 +208,8 @@ sanity_checks() {
     find /usr/include -path '*gnutls/gnutls.h' | egrep --max-count=1 . \
         || fatal "No gnutls/gnutls.h. Try: apt-get install libgnutls28-dev"
     echo -n 'virtualenv: '
-    python3 -m venv -h | egrep --max-count=1 . \
+    python3 -m venv --help | grep -q '^usage: venv ' \
+        && echo "venv module found" \
         || fatal "No virtualenv. Try: apt-get install python3-venv"
     echo -n 'Python3 pyconfig.h: '
     find /usr/include -path '*/python3*/pyconfig.h' | egrep --max-count=1 . \
@@ -986,14 +987,13 @@ install_services/workbench2() {
 }
 
 test_doc() {
-    (
-        set -e
-        cd "$WORKSPACE/doc"
-        ARVADOS_API_HOST=pirca.arvadosapi.com
-        # Make sure python-epydoc is installed or the next line won't
-        # do much good!
-        PYTHONPATH=$WORKSPACE/sdk/python/ "$bundle" exec rake linkchecker baseurl=file://$WORKSPACE/doc/.site/ arvados_workbench_host=https://workbench.$ARVADOS_API_HOST arvados_api_host=$ARVADOS_API_HOST
-    )
+    local arvados_api_host=pirca.arvadosapi.com && \
+        env -C "$WORKSPACE/doc" \
+        "$bundle" exec rake linkchecker \
+        arvados_api_host="$arvados_api_host" \
+        arvados_workbench_host="https://workbench.$arvados_api_host" \
+        baseurl="file://$WORKSPACE/doc/.site/" \
+        ${testargs[doc]}
 }
 
 test_gofmt() {
index f96532de5ef30d167944dfc23b958a16e26bcce9..a66db787a765241c551e85a63d9a8e187ad9a4c8 100644 (file)
@@ -7,8 +7,6 @@ Description=Arvados controller
 Documentation=https://doc.arvados.org/
 After=network.target
 AssertPathExists=/etc/arvados/config.yml
-
-# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
 StartLimitIntervalSec=0
 
 [Service]
@@ -21,8 +19,5 @@ Restart=always
 RestartSec=1
 RestartPreventExitStatus=2
 
-# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
-StartLimitInterval=0
-
 [Install]
 WantedBy=multi-user.target
index 11887b8f8c5ca99324cd4c82075d83a11325042c..09b0ba94a91eb4eb30665cfa46d5d4ab4e88a074 100644 (file)
@@ -7,8 +7,6 @@ Description=arvados-dispatch-cloud
 Documentation=https://doc.arvados.org/
 After=network.target
 AssertPathExists=/etc/arvados/config.yml
-
-# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
 StartLimitIntervalSec=0
 
 [Service]
@@ -21,8 +19,5 @@ Restart=always
 RestartSec=1
 RestartPreventExitStatus=2
 
-# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
-StartLimitInterval=0
-
 [Install]
 WantedBy=multi-user.target
index f90cd9033d4b444932519cb8c230f242eaea5c1c..a683e856885a5d1696507a1fe87b410374701027 100644 (file)
@@ -7,8 +7,6 @@ Description=arvados-dispatch-lsf
 Documentation=https://doc.arvados.org/
 After=network.target
 AssertPathExists=/etc/arvados/config.yml
-
-# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
 StartLimitIntervalSec=0
 
 [Service]
@@ -21,8 +19,5 @@ Restart=always
 RestartSec=1
 RestartPreventExitStatus=2
 
-# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
-StartLimitInterval=0
-
 [Install]
 WantedBy=multi-user.target
index 6e5b0dc8e284d64ff5bf2b50bfceded013dfbcd0..517a75c03d027d66268773fbcf1e7c711c8dfd26 100644 (file)
@@ -7,8 +7,6 @@ Description=Arvados git server
 Documentation=https://doc.arvados.org/
 After=network.target
 AssertPathExists=/etc/arvados/config.yml
-
-# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
 StartLimitIntervalSec=0
 
 [Service]
@@ -21,8 +19,5 @@ Restart=always
 RestartSec=1
 RestartPreventExitStatus=2
 
-# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
-StartLimitInterval=0
-
 [Install]
 WantedBy=multi-user.target
index ef145e26ebcb1989dc3c3f60d6e7a7e69f8cb0b5..899bfac219c879fec682cdcb71b0725b10445f05 100644 (file)
@@ -7,8 +7,6 @@ Description=Arvados healthcheck server
 Documentation=https://doc.arvados.org/
 After=network.target
 AssertPathExists=/etc/arvados/config.yml
-
-# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
 StartLimitIntervalSec=0
 
 [Service]
@@ -21,8 +19,5 @@ Restart=always
 RestartSec=1
 RestartPreventExitStatus=2
 
-# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
-StartLimitInterval=0
-
 [Install]
 WantedBy=multi-user.target
index 2e884495998280936a4dc4375bcac6d9d26006ae..fc6eb4978a156a28fec5df3ce4a362a151a70152 100644 (file)
@@ -7,8 +7,6 @@ Description=Arvados websocket server
 Documentation=https://doc.arvados.org/
 After=network.target
 AssertPathExists=/etc/arvados/config.yml
-
-# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
 StartLimitIntervalSec=0
 
 [Service]
@@ -20,8 +18,5 @@ Restart=always
 RestartSec=1
 RestartPreventExitStatus=2
 
-# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
-StartLimitInterval=0
-
 [Install]
 WantedBy=multi-user.target
index d2a2fb39d9dca39a3df99f57989ae9d7db1c4fbf..83933c17604376a45589c76edcbcbea930735515 100644 (file)
@@ -7,8 +7,6 @@ Description=Arvados Crunch Dispatcher for SLURM
 Documentation=https://doc.arvados.org/
 After=network.target
 AssertPathExists=/etc/arvados/config.yml
-
-# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
 StartLimitIntervalSec=0
 
 [Service]
@@ -21,8 +19,5 @@ Restart=always
 RestartSec=1
 RestartPreventExitStatus=2
 
-# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
-StartLimitInterval=0
-
 [Install]
 WantedBy=multi-user.target
index f282f0a65021ad78e7f628ee3d94fef976231836..1d759d623786cbfb89a55351bcc32fbeb67568d3 100644 (file)
@@ -7,8 +7,6 @@ Description=Arvados Keep Balance
 Documentation=https://doc.arvados.org/
 After=network.target
 AssertPathExists=/etc/arvados/config.yml
-
-# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
 StartLimitIntervalSec=0
 
 [Service]
@@ -22,8 +20,5 @@ RestartSec=10s
 Nice=19
 RestartPreventExitStatus=2
 
-# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
-StartLimitInterval=0
-
 [Install]
 WantedBy=multi-user.target
index 4ecd0b49782561da91c49f0a3dea38e9306a2b7b..d94124c6de69c4c520037144e1c63f45a53bf9d5 100644 (file)
@@ -7,8 +7,6 @@ Description=Arvados Keep WebDAV and S3 gateway
 Documentation=https://doc.arvados.org/
 After=network.target
 AssertPathExists=/etc/arvados/config.yml
-
-# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
 StartLimitIntervalSec=0
 
 [Service]
@@ -21,8 +19,5 @@ Restart=always
 RestartSec=1
 RestartPreventExitStatus=2
 
-# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
-StartLimitInterval=0
-
 [Install]
 WantedBy=multi-user.target
index 139df1c3fade823cf8950e8b8e7d3c4678a80c19..c4083f23c95d2ef22150ef7f2c80a6d6b72b0a42 100644 (file)
@@ -7,8 +7,6 @@ Description=Arvados Keep Proxy
 Documentation=https://doc.arvados.org/
 After=network.target
 AssertPathExists=/etc/arvados/config.yml
-
-# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
 StartLimitIntervalSec=0
 
 [Service]
@@ -21,8 +19,5 @@ Restart=always
 RestartSec=1
 RestartPreventExitStatus=2
 
-# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
-StartLimitInterval=0
-
 [Install]
 WantedBy=multi-user.target
index de0fd1dbd7e989a9ad7e33e93ee5149c034b4217..aa5e013dee5dd81e30a7bb39d29e440032c41437 100644 (file)
@@ -7,8 +7,6 @@ Description=Arvados Keep Storage Daemon
 Documentation=https://doc.arvados.org/
 After=network.target
 AssertPathExists=/etc/arvados/config.yml
-
-# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
 StartLimitIntervalSec=0
 
 [Service]
@@ -25,8 +23,5 @@ Restart=always
 RestartSec=1
 RestartPreventExitStatus=2
 
-# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
-StartLimitInterval=0
-
 [Install]
 WantedBy=multi-user.target
index 4c807a3dd61180d4d041bb729d6e0ab41dc0dff6..84275955454c2f3f40ca7d3827539d98d84dbe69 100644 (file)
@@ -12,21 +12,37 @@ Additional information is available on the "'Documentation' page on the Arvados
 
 h2. Install dependencies
 
+To build the core Arvados documentation:
+
 <pre>
 arvados/doc$ sudo apt-get install build-essential libcurl4-openssl-dev libgnutls28-dev libssl-dev
 arvados/doc$ bundle install
 </pre>
 
-To generate the Python SDK documentation, these additional dependencies are needed:
+SDK reference documentation has additional, optional build requirements.
+
+h3. Java SDK documentation
 
 <pre>
-arvados/doc$ sudo apt install python3-pip python3-venv
+$ sudo apt install gradle
+</pre>
+
+h3. Python SDK documentation
+
+<pre>
+arvados/doc$ sudo apt install python3-venv
 arvados/doc$ python3 -m venv .venv
-arvados/doc$ .venv/bin/pip install pdoc
+arvados/doc$ .venv/bin/pip install pdoc setuptools
 </pre>
 
 Then you must activate the virtualenv (e.g., run @. .venv/bin/activate@) before you run the @bundle exec rake@ commands below.
 
+h3. R SDK documentation
+
+<pre>
+$ sudo apt install r-cran-devtools r-cran-roxygen2 r-cran-knitr r-cran-markdown r-cran-xml
+</pre>
+
 h2. Generate HTML pages
 
 <pre>
@@ -39,6 +55,18 @@ Alternately, to make the documentation browsable on the local filesystem:
 arvados/doc$ bundle exec rake generate baseurl=$PWD/.site
 </pre>
 
+h3. Selecting SDK documentation to build
+
+By default, the build process detects which SDK documentation it can build, builds all of that, and skips the rest. You can specify exactly what to build using the @sdks@ environment variable: a comma- or space-separated list of the SDKs you want to build documentation for. Valid values are @java@, @python@, @r@, @all@, or @none@. @all@ is a shortcut for listing all the valid SDKs. @none@ means do not build documentation for any SDK. For example, to build documentation for the Java and Python SDKs, but skip R:
+
+<pre>
+arvados/doc$ bundle exec rake generate baseurl=$PWD/.site sdks=java,python
+</pre>
+
+Specifying @sdks@ skips the build detection logic. If the Rakefile cannot build the requested SDK documentation, the build will fail.
+
+For backwards compatibility, if you do not specify @sdks@, but the @NO_SDK@ environment variable is set, or the @no-sdk@ file exists, the build will run as if you set @sdks=none@.
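+
+For example, to skip all SDK documentation explicitly:
+
+<pre>
+arvados/doc$ bundle exec rake generate baseurl=$PWD/.site sdks=none
+</pre>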
+
 h2. Run linkchecker
 
 If you have "Linkchecker":http://wummel.github.io/linkchecker/ installed on
index 13e87167b67726c8d21ce798f19a04110ce1c4b8..f2932284d9715d8fb95471b20e7ca4c240bf5458 100644 (file)
 #
 # and then visit http://localhost:8000 in a browser.
 
+require "uri"
+
 require "rubygems"
 require "colorize"
 
+def can_run?(*command, **options)
+  options = {
+    :in => :close,
+    :out => [File::NULL, "w"],
+  }.merge(options)
+  system(*command, **options)
+end
+
+class JavaSDK
+  def self.build_path
+    "sdk/java-v2"
+  end
+
+  def self.can_build?
+    can_run?("gradle", "--version")
+  end
+end
+
+class PythonSDK
+  def self.build_path
+    "sdk/python/arvados"
+  end
+
+  def self.can_build?
+    can_run?("./pysdk_pdoc.py", "--version")
+  end
+end
+
+class RSDK
+  def self.build_path
+    "sdk/R"
+  end
+
+  def self.can_build?
+    can_run?("R", "--version")
+  end
+end
+
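+# Decide which SDKs to build documentation for: honor the sdks and NO_SDK
+# environment variables (and the no-sdk file) when set, otherwise detect
+# which build tools are available and skip the rest.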
+$build_sdks = begin
+  no_sdk_env = ENV.fetch("NO_SDK", "")
+  sdks_env = ENV.fetch("sdks", "")
+  all_sdks = Hash[[JavaSDK, PythonSDK, RSDK].map { |c| [c.name, c] }]
+
+  if no_sdk_env != "" and sdks_env != ""
+    fail "both NO_SDK and sdks defined in environment"
+  elsif sdks_env != ""
+    # Nothing to do
+  elsif no_sdk_env != "" or File.exist?("no-sdk")
+    sdks_env = "none"
+  end
+
+  if sdks_env == ""
+    all_sdks.each_pair.filter_map do |name, sdk|
+      if sdk.can_build?
+        sdk
+      else
+        puts "Warning: cannot build #{name.gsub(/SDK$/, ' SDK')} documentation, skipping".colorize(:light_red)
+      end
+    end
+  else
+    wanted_sdks = []
+    sdks_env.split(/\s*[,\s]\s*/).each do |key|
+      key = "#{key.capitalize}SDK"
+      if key == "AllSDK"
+        wanted_sdks = all_sdks.values
+      elsif key == "NoneSDK"
+        wanted_sdks.clear
+      elsif sdk = all_sdks[key]
+        wanted_sdks << sdk
+      else
+        fail "cannot build documentation for unknown #{key}"
+      end
+    end
+    wanted_sdks
+  end
+end
+
 module Zenweb
   class Site
     @binary_files = %w[png jpg gif eot svg ttf woff2? ico pdf m4a t?gz xlsx]
@@ -51,46 +130,33 @@ file ["install/new_cluster_checklist_Azure.xlsx", "install/new_cluster_checklist
 end
 
 file "sdk/python/arvados.html" do |t|
-  if ENV['NO_SDK'] || File.exist?("no-sdk")
-    next
-  end
-  # pysdk_pdoc.py is a wrapper around the pdoc CLI. `which pdoc` is an easy
-  # and good-enough test to check whether it's installed at all.
-  `which pdoc`
-  if $? == 0
-    raise unless system("python3", "setup.py", "build",
-                        chdir: "../sdk/python", out: :err)
-    raise unless system("python3", "pysdk_pdoc.py",
-                        out: :err)
-  else
-    puts "Warning: pdoc not found, Python documentation will not be generated".colorize(:light_red)
-  end
+  next unless $build_sdks.include?(PythonSDK)
+  raise unless system("python3", "setup.py", "build",
+                      chdir: "../sdk/python", out: :err)
+  raise unless system("python3", "pysdk_pdoc.py",
+                      out: :err)
 end
 
 file "sdk/R/arvados/index.html" do |t|
-  if ENV['NO_SDK'] || File.exist?("no-sdk")
-    next
-  end
-  `which R`
-  if $? == 0
-    tgt = Dir.pwd
-    Dir.mkdir("sdk/R")
-    Dir.mkdir("sdk/R/arvados")
-    puts("tgt", tgt)
-    cp('css/R.css', 'sdk/R/arvados')
-    docfiles = []
-    Dir.chdir("../sdk/R/") do
-      Dir.entries("man").each do |rd|
-        if rd[-3..-1] == ".Rd"
-          htmlfile = "#{rd[0..-4]}.html"
-          `R CMD Rdconv -t html man/#{rd} > #{tgt}/sdk/R/arvados/#{htmlfile}`
-          docfiles << htmlfile
-        end
+  next unless $build_sdks.include?(RSDK)
+  tgt = Dir.pwd
+  Dir.mkdir("sdk/R")
+  Dir.mkdir("sdk/R/arvados")
+  puts("tgt", tgt)
+  cp('css/R.css', 'sdk/R/arvados')
+  docfiles = []
+  Dir.chdir("../sdk/R/") do
+    Dir.entries("man").each do |rd|
+      if rd[-3..-1] == ".Rd"
+        htmlfile = "#{rd[0..-4]}.html"
+        `R CMD Rdconv -t html man/#{rd} > #{tgt}/sdk/R/arvados/#{htmlfile}`
+        docfiles << htmlfile
       end
     end
-    raise if $? != 0
+  end
+  raise if $? != 0
 
-    File.open("../sdk/R/README.md", "r") do |rd|
+  File.open("../sdk/R/README.md", "r") do |rd|
     File.open("sdk/R/index.html.md", "w") do |fn|
       fn.write(<<-EOF
 ---
@@ -103,11 +169,11 @@ title: "R SDK Overview"
 #{rd.read}
 EOF
               )
-      end
     end
+  end
 
-    File.open("sdk/R/arvados/index.html.textile.liquid", "w") do |fn|
-      fn.write(<<-EOF
+  File.open("sdk/R/arvados/index.html.textile.liquid", "w") do |fn|
+    fn.write(<<-EOF
 ---
 layout: default
 navsection: sdk
@@ -121,53 +187,46 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
 EOF
-              )
-
-      docfiles.sort.each do |d|
-        fn.write("* \"#{d[0..-6]}\":#{d}\n")
-      end
-
+            )
+    docfiles.sort.each do |d|
+      fn.write("* \"#{d[0..-6]}\":#{d}\n")
     end
-  else
-    puts "Warning: R not found, R documentation will not be generated".colorize(:light_red)
   end
 end
 
 file "sdk/java-v2/javadoc/index.html" do |t|
-  if ENV['NO_SDK'] || File.exist?("no-sdk")
-    next
-  end
-  `which java`
-  if $? == 0
-    `which gradle`
-    if $? != 0
-      puts "Warning: gradle not found, java sdk documentation will not be generated".colorize(:light_red)
-    else
-      tgt = Dir.pwd
-      docfiles = []
-      Dir.chdir("../sdk/java-v2") do
-        STDERR.puts `gradle javadoc 2>&1`
-        raise if $? != 0
-        puts `sed -i "s/@import.*dejavu.css.*//g" build/docs/javadoc/stylesheet.css`
-        raise if $? != 0
-      end
-      cp_r("../sdk/java-v2/build/docs/javadoc", "sdk/java-v2")
-      raise if $? != 0
-    end
-  else
-    puts "Warning: java not found, java sdk documentation will not be generated".colorize(:light_red)
+  next unless $build_sdks.include?(JavaSDK)
+  tgt = Dir.pwd
+  docfiles = []
+  Dir.chdir("../sdk/java-v2") do
+    STDERR.puts `gradle javadoc 2>&1`
+    raise if $? != 0
+    puts `sed -i "s/@import.*dejavu.css.*//g" build/docs/javadoc/stylesheet.css`
+    raise if $? != 0
   end
+  cp_r("../sdk/java-v2/build/docs/javadoc", "sdk/java-v2")
+  raise if $? != 0
 end
 
 task :linkchecker => [ :generate ] do
-  Dir.chdir(".site") do
-    `which linkchecker`
-    if $? == 0
-      # we need --check-extern to check relative links, weird but true
-      system "linkchecker index.html --check-extern --ignore-url='!file://'" or exit $?.exitstatus
-    else
-      puts "Warning: linkchecker not found, skipping run".colorize(:light_red)
-    end
+  # we need --check-extern to check relative links, weird but true
+  opts = [
+    "--check-extern",
+    "--ignore-url=!^file://",
+  ]
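+  # Skip link checking for SDK documentation we did not build.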
+  ([JavaSDK, PythonSDK, RSDK] - $build_sdks).map(&:build_path).each do |sdk_path|
+    sdk_url = URI.join(ENV["baseurl"], sdk_path)
+    url_re = Regexp.escape(sdk_url.to_s)
+    opts << "--ignore-url=^#{url_re}[./]"
+  end
+  result = system(
+    "linkchecker", *opts, "index.html",
+    chdir: ".site",
+  )
+  if result.nil?
+    fail "could not run linkchecker command (is it installed?)"
+  elsif !result
+    fail "linkchecker exited #{$?.exitstatus}"
   end
 end
 
index ad2864ca3a2361e526197c046bf184bc4eda8e18..053922a24a4890ad4c26f21dd0f34036aaf5aa36 100644 (file)
@@ -81,28 +81,29 @@ navbar:
       - sdk/python/api-client.html.textile.liquid
       - sdk/python/cookbook.html.textile.liquid
       - sdk/python/python.html.textile.liquid
-      - sdk/python/arvados-fuse.html.textile.liquid
       - sdk/python/arvados-cwl-runner.html.textile.liquid
       - sdk/python/events.html.textile.liquid
-    - CLI:
+    - Command line tools (CLI SDK):
       - sdk/cli/install.html.textile.liquid
       - sdk/cli/index.html.textile.liquid
       - sdk/cli/reference.html.textile.liquid
       - sdk/cli/subcommands.html.textile.liquid
-      - sdk/cli/project-management.html.textile.liquid
+    - FUSE Driver:
+      - sdk/fuse/install.html.textile.liquid
+      - sdk/fuse/options.html.textile.liquid
     - Go:
       - sdk/go/index.html.textile.liquid
       - sdk/go/example.html.textile.liquid
+    - Java:
+      - sdk/java-v2/index.html.textile.liquid
+      - sdk/java-v2/example.html.textile.liquid
+      - sdk/java-v2/javadoc.html.textile.liquid
     - R:
       - sdk/R/index.html.md
       - sdk/R/arvados/index.html.textile.liquid
     - Ruby:
       - sdk/ruby/index.html.textile.liquid
       - sdk/ruby/example.html.textile.liquid
-    - Java v2:
-      - sdk/java-v2/index.html.textile.liquid
-      - sdk/java-v2/example.html.textile.liquid
-      - sdk/java-v2/javadoc.html.textile.liquid
   api:
     - Concepts:
       - api/index.html.textile.liquid
index f6f42d25502f259c1e4c2a0ebc3c630c095a9e4e..1c62dbb239f5b0ed81f52e622f1d33d8b8f2ea00 100644 (file)
@@ -20,6 +20,7 @@ table(table table-bordered table-condensed).
 h3. CUDA GPU support
 
 table(table table-bordered table-condensed).
+|_. Key|_. Type|_. Description|_. Notes|
 |device_count|int|Number of GPUs to request.|Count greater than 0 enables CUDA GPU support.|
 |driver_version|string|Minimum CUDA driver version, in "X.Y" format.|Required when device_count > 0|
 |hardware_capability|string|Minimum CUDA hardware capability, in "X.Y" format.|Required when device_count > 0|
index c80209822508e10c5dfd0d0c06b44e8c4e3dc12c..64a113b6f8aba840814119be4486de02870984dc 100644 (file)
@@ -32,6 +32,30 @@ h2(#main). development main
 
 "previous: Upgrading to 2.7.1":#v2_7_1
 
+h3. Virtual environments inside distribution Python packages have moved
+
+The distribution packages that we publish for Python packages include an entire virtualenv with all required libraries. In Arvados 3.0 these virtualenvs have moved from @/usr/share/python3/dist/PACKAGE_NAME@ to @/usr/lib/PACKAGE_NAME@ to prevent conflicts with distribution packages and better conform to filesystem standards.
+
+If you only run the executables installed by these packages, you don't need to change anything. Those are still installed under @/usr/bin@ and will use the new location when you upgrade. If you have written your own scripts or tools that rely on these virtualenvs, you may need to update those with the new location. For example, if you have a shell script that activates the virtualenv by running:
+
+<pre><code class="shell">source /usr/share/python3/dist/python3-arvados-python-client/bin/activate</code></pre>
+
+You must update it to:
+
+<notextile>
+<pre><code class="shell">source <span class="userinput">/usr/lib/python3-arvados-python-client</span>/bin/activate</code></pre>
+</notextile>
+
+If you have a Python script with this shebang line:
+
+<pre><code class="shell">#!/usr/share/python3/dist/python3-arvados-python-client/bin/python</code></pre>
+
+You must update it to:
+
+<notextile>
+<pre><code class="shell">#!<span class="userinput">/usr/lib/python3-arvados-python-client</span>/bin/python</code></pre>
+</notextile>
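+
+One way to locate scripts that still reference the old path is to search for it (adjust the search locations for your site):
+
+<pre><code class="shell">grep -rl /usr/share/python3/dist/ /usr/local/bin /etc</code></pre>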
+
 h3. WebDAV service uses @/var/cache@ for file content
 
 @keep-web@ now stores copies of recently accessed data blocks in @/var/cache/arvados/keep@ instead of in memory. That directory will be created automatically. The default cache size is 10% of the filesystem size. Use the new @Collections.WebDAVCache.DiskCacheSize@ config to specify a different percentage or an absolute size.
index 57b79d2042311805b1b7ea909e17d2e6e7e8fcc4..f864f37563ba42b83cb8e7c54fbfcae2c425b3e6 100644 (file)
@@ -35,7 +35,7 @@ h2(#dependencies). Install Dependencies and SDKs
 
 # "Install Ruby and Bundler":ruby.html
 # "Install the Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html
-# "Install the FUSE driver":{{site.baseurl}}/sdk/python/arvados-fuse.html
+# "Install the FUSE driver":{{site.baseurl}}/sdk/fuse/install.html
 # "Install the CLI":{{site.baseurl}}/sdk/cli/install.html
 # "Install the R SDK":{{site.baseurl}}/sdk/R/index.html (optional)
 # "Install Docker":install-docker.html (optional)
index 0fe584d08b9abda9a0fc6bd065f1a72d8cdb8d8a..b246a83fd6b567bbf5931cccb827cc23d7046888 100755 (executable)
@@ -17,11 +17,17 @@ import functools
 import os
 import sys
 
-import pdoc
-import pdoc.__main__
-import pdoc.markdown2
-import pdoc.render
-import pdoc.render_helpers
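+# When this file is run as a script, defer pdoc import errors so main() can
+# report them; when imported as a module, re-raise immediately.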
+try:
+    import pdoc.__main__
+    import pdoc.markdown2
+    import pdoc.render_helpers
+except ImportError as err:
+    if __name__ == '__main__':
+        _imp_err = err
+    else:
+        raise
+else:
+    _imp_err = None
 
 DEFAULT_ARGLIST = [
     '--output-directory=sdk/python',
@@ -33,17 +39,19 @@ MD_EXTENSIONS = {
 }
 
 def main(arglist=None):
+    if _imp_err is not None:
+        print("error: failed to import pdoc:", _imp_err, file=sys.stderr)
+        return os.EX_SOFTWARE
+    # Ensure markdown2 is new enough to support our desired extras.
+    elif pdoc.markdown2.__version_info__ < (2, 4, 3):
+        print("error: need markdown2>=2.4.3 to render admonitions", file=sys.stderr)
+        return os.EX_SOFTWARE
+
     # Configure pdoc to use extras we want.
     pdoc.render_helpers.markdown_extensions = collections.ChainMap(
         pdoc.render_helpers.markdown_extensions,
         MD_EXTENSIONS,
     )
-
-    # Ensure markdown2 is new enough to support our desired extras.
-    if pdoc.markdown2.__version_info__ < (2, 4, 3):
-        print("error: need markdown2>=2.4.3 to render admonitions", file=sys.stderr)
-        return os.EX_SOFTWARE
-
     pdoc.__main__.cli(arglist)
     return os.EX_OK
 
index 511a41e0b82043dd1197e4f0fc9aa902e35f4a71..ea10c830bc44006363d6270e91cb9bf951b40c38 100644 (file)
@@ -1,7 +1,7 @@
 ---
 layout: default
 navsection: sdk
-navmenu: CLI
+navmenu: Command line tools (CLI SDK)
 title: "Overview"
 
 ...
index 9657d236addf3c2dd89d154ac9dd28b801cfd064..e0d50b874b9251ea3074e31a3f91538bb282f4ab 100644 (file)
@@ -1,7 +1,7 @@
 ---
 layout: default
 navsection: sdk
-navmenu: CLI
+navmenu: Command line tools (CLI SDK)
 title: "Installation"
 ...
 {% comment %}
index 735ba5ca8719af5b39fb876bfde9e4b1a45f9ecb..307fecd9a045e6708902b4ac67fecadebd20f3bb 100644 (file)
@@ -1,7 +1,7 @@
 ---
 layout: default
 navsection: sdk
-navmenu: CLI
+navmenu: Command line tools (CLI SDK)
 title: "arv reference"
 ...
 {% comment %}
index 5dda77ab5ee65cdf3700be3404f53455c0c25f28..dadb1d56c728404a9a8134478a60961d2966696b 100644 (file)
@@ -1,7 +1,7 @@
 ---
 layout: default
 navsection: sdk
-navmenu: CLI
+navmenu: Command line tools (CLI SDK)
 title: "arv subcommands"
 
 ...
diff --git a/doc/sdk/fuse/install.html.textile.liquid b/doc/sdk/fuse/install.html.textile.liquid
new file mode 100644 (file)
index 0000000..52ffb2b
--- /dev/null
@@ -0,0 +1,42 @@
+---
+layout: default
+navsection: sdk
+navmenu: FUSE Driver
+title: Installing the FUSE Driver
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The Arvados FUSE driver is a Python utility that allows you to browse Arvados projects and collections in a filesystem, so you can access that data using existing Unix tools.
+
+h2. Installation
+
+If you are logged in to a managed Arvados VM, the @arv-mount@ utility should already be installed.
+
+To use the FUSE driver elsewhere, you can install from a distribution package or pip.
+
+h2. Option 1: Install from distribution packages
+
+First, "add the appropriate package repository for your distribution":{{ site.baseurl }}/install/packages.html.
+
+{% assign arvados_component = 'python3-arvados-fuse' %}
+
+{% include 'install_packages' %}
+
+h2. Option 2: Install with pip
+
+Run @pip install arvados_fuse@ in an appropriate installation environment, such as a virtualenv.
+
+Note: The FUSE driver depends on the @libcurl@ and @libfuse@ C libraries.  To build and install the package you may need to install development headers and tools from your distribution.  On Debian-based distributions you can install them by running:
+
+<notextile>
+<pre><code># <span class="userinput">apt install build-essential python3-dev libcurl4-openssl-dev libfuse-dev libssl-dev</span>
+</code></pre>
+</notextile>
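+
+For example, a minimal installation into a dedicated virtualenv (the @~/arv-fuse@ path here is only an illustration) looks like this; @arv-mount@ is then available as @~/arv-fuse/bin/arv-mount@:
+
+<notextile>
+<pre><code>~$ <span class="userinput">python3 -m venv ~/arv-fuse</span>
+~$ <span class="userinput">~/arv-fuse/bin/pip install arvados_fuse</span>
+</code></pre>
+</notextile>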
+
+h2. Usage
+
+For an introduction to mounting and navigating data, refer to the "Access Keep as a GNU/Linux filesystem":{{site.baseurl}}/user/tutorials/tutorial-keep-mount-gnu-linux.html tutorial.
diff --git a/doc/sdk/fuse/options.html.textile.liquid b/doc/sdk/fuse/options.html.textile.liquid
new file mode 100644 (file)
index 0000000..1ebfa24
--- /dev/null
@@ -0,0 +1,193 @@
+---
+layout: default
+navsection: sdk
+navmenu: FUSE Driver
+title: arv-mount options
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+This page documents all available @arv-mount@ options with some usage examples.
+
+# "Mount contents":#contents
+# "Mount custom layout and filtering":#layout
+## "@--filters@ usage and limitations":#filters
+# "Mount access and permissions":#access
+# "Mount lifecycle management":#lifecycle
+# "Mount logging and statistics":#logging
+# "Mount local cache setup":#cache
+# "Mount interactions with Arvados and Linux":#plumbing
+# "Examples":#examples
+## "Using @--exec@":#exec
+## "Running arv-mount as a systemd service":#systemd
+
+h2(#contents). Mount contents
+
+table(table table-bordered table-condensed).
+|_. Option(s)|_. Description|
+|@--all@|Mount a subdirectory for each mode: @home@, @shared@, @by_id@, and @by_tag@ (default if no @--mount-*@ options are given)|
+|@--custom@|Mount a subdirectory for each mode specified by a @--mount-*@ option (default if any @--mount-*@ options are given; see "Mount custom layout and filtering":#layout section)|
+|@--collection UUID_OR_PDH@|Mount the specified collection|
+|@--home@|Mount your home project|
+|@--project UUID@|Mount the specified project|
+|@--shared@|Mount a subdirectory for each project shared with you|
+|@--by-id@|Mount a magic directory where collections and projects are accessible through subdirectories named after their UUID or portable data hash|
+|@--by-pdh@|Mount a magic directory where collections are accessible through subdirectories named after their portable data hash|
+|@--by-tag@|Mount a subdirectory for each tag attached to a collection or project|
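+
+For example, to mount everything under a single directory (the @~/arvados@ mount point is only an illustration), run the following; you should then see @home@, @shared@, @by_id@, and @by_tag@ subdirectories under the mount:
+
+<notextile>
+<pre><code>$ <span class="userinput">mkdir -p ~/arvados</span>
+$ <span class="userinput">arv-mount --all ~/arvados</span>
+</code></pre>
+</notextile>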
+
+h2(#layout). Mount custom layout and filtering
+
+table(table table-bordered table-condensed).
+|_. Option(s)|_. Description|
+|@--filters FILTERS@|Filters to apply to all project, shared, and tag directory contents. Pass filters as either a JSON string or a path to a JSON file. The JSON object should be a list of filters in "Arvados API list filter syntax":{{ site.baseurl }}/api/methods.html#filters. See the "example filters":#filters.|
+|@--mount-home PATH@|Make your home project available under the mount at @PATH@|
+|@--mount-shared PATH@|Make projects shared with you available under the mount at @PATH@|
+|@--mount-tmp PATH@|Make a new temporary writable collection available under the mount at @PATH@. This collection is deleted when the mount is unmounted.|
+|@--mount-by-id PATH@|Make a magic directory available under the mount at @PATH@ where collections and projects are accessible through subdirectories named after their UUID or portable data hash|
+|@--mount-by-pdh PATH@|Make a magic directory available under the mount at @PATH@ where collections are accessible through subdirectories named after portable data hash|
+|@--mount-by-tag PATH@|Make a subdirectory for each tag attached to a collection or project available under the mount at @PATH@|
+
+h3(#filters). @--filters@ usage and limitations
+
+Your argument to @--filters@ should be a JSON list of filters in "Arvados API list filter syntax":{{ site.baseurl }}/api/methods.html#filters. If a filter checks any field besides @uuid@, prefix the field name with the resource type and a period (e.g., @collections.@). Putting this together, here's an example that mounts your home project while excluding filter groups, workflow intermediate output collections, and workflow log collections:
+
+<notextile>
+<pre><code>$ arv-mount --home <span class="userinput">--filters '[["groups.group_class", "!=", "filter"], ["collections.properties.type", "not in", ["intermediate", "log"]]]'</span> ...
+</code></pre>
+</notextile>
+
+Because filters can be awkward to write on the command line, you can also write them in a file, and pass that file path to the @--filters@ option. This example does the same filtering:
+
+<notextile>
+<pre><code>$ <span class="userinput">cat &gt;~/arv-mount-filters.json &lt;&lt;EOF
+[
+  [
+    "groups.group_class",
+    "!=",
+    "filter"
+  ],
+  [
+    "collections.properties.type",
+    "not in",
+    [
+      "intermediate",
+      "log"
+    ]
+  ]
+]
+EOF</span>
+$ arv-mount --home <span class="userinput">--filters ~/arv-mount-filters.json</span> ...
+</code></pre>
+</notextile>
+
+The current implementation of @--filters@ has a few limitations. These may be lifted in a future release:
+
+* You can always access any project or collection by UUID or portable data hash under a magic directory. If you access a project this way, your filters _will_ apply to the project contents.
+* Tag directory listings are generated by querying tags alone. Only filters that apply to @links@ will affect these listings.
+
+h2(#access). Mount access and permissions
+
+table(table table-bordered table-condensed).
+|_. Option(s)|_. Description|
+|@--allow-other@|Let other users on this system read mounted data (default false)|
+|@--read-only@|Mounted data cannot be modified from the mount (default)|
+|@--read-write@|Mounted data can be modified from the mount|
+
+h2(#lifecycle). Mount lifecycle management
+
+table(table table-bordered table-condensed).
+|_. Option(s)|_. Description|
+|@--exec ...@|Mount data, run the specified command, then unmount and exit. @--exec@ reads all remaining options as the command to run, so it must be the last option you specify. Either end your command arguments (and other options) with a @--@ argument, or specify @--exec@ after your mount point.|
+|@--foreground@|Run mount process in the foreground instead of daemonizing (default false)|
+|@--subtype SUBTYPE@|Set mounted filesystem type to @fuse.SUBTYPE@ (default is just @fuse@)|
+|@--replace@|If a FUSE mount is already mounted at the given directory, unmount it before mounting the requested data. If @--subtype@ is specified, unmount only if the mount has that subtype. WARNING: This command can affect any kind of FUSE mount, not just arv-mount.|
+|@--unmount@|If a FUSE mount is already mounted at the given directory, unmount it and exit. If @--subtype@ is specified, unmount only if the mount has that subtype. WARNING: This command can affect any kind of FUSE mount, not just arv-mount.|
+|@--unmount-all@|Unmount all FUSE mounts at or below the given directory, then exit. If @--subtype@ is specified, unmount only if the mount has that subtype. WARNING: This command can affect any kind of FUSE mount, not just arv-mount.|
+|@--unmount-timeout SECONDS@|The number of seconds to wait for a clean unmount after an @--exec@ command has exited (default 2.0). After this time, the mount will be forcefully unmounted.|
+
+h2(#logging). Mount logging and statistics
+
+table(table table-bordered table-condensed).
+|_. Option(s)|_. Description|
+|@--crunchstat-interval SECONDS@|Write stats to stderr every N seconds (default disabled)|
+|@--debug@|Log debug information|
+|@--logfile LOGFILE@|Write debug logs and errors to the specified file (default stderr)|
+
+h2(#cache). Mount local cache setup
+
+table(table table-bordered table-condensed).
+|_. Option(s)|_. Description|
+|@--disk-cache@|Cache data on the local filesystem (default)|
+|@--ram-cache@|Cache data in memory|
+|@--disk-cache-dir DIRECTORY@|Filesystem cache location (default @~/.cache/arvados/keep@)|
+|@--directory-cache BYTES@|Size of directory data cache in bytes (default 128 MiB)|
+|@--file-cache BYTES@|Size of file data cache in bytes (default 8 GiB for filesystem cache, 256 MiB for memory cache)|
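+
+For example, to cap the file data cache at 2 GiB and keep it on a scratch volume (the @/scratch/keep-cache@ path is only an illustration):
+
+<notextile>
+<pre><code>$ <span class="userinput">arv-mount --disk-cache --disk-cache-dir /scratch/keep-cache --file-cache 2147483648 ...</span>
+</code></pre>
+</notextile>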
+
+h2(#plumbing). Mount interactions with Arvados and Linux
+
+table(table table-bordered table-condensed).
+|_. Option(s)|_. Description|
+|@--disable-event-listening@|Don't subscribe to events on the API server to update mount contents|
+|@--encoding ENCODING@|Filesystem character encoding (default 'utf-8'; specify a name from the "Python codec registry":https://docs.python.org/3/library/codecs.html#standard-encodings)|
+|@--retries RETRIES@|Maximum number of times to retry server requests that encounter temporary failures (e.g., server down). Default 10.|
+|@--storage-classes CLASSES@|Comma-separated list of storage classes to request for new collections|
+
+h2(#examples). Examples
+
+h3(#exec). Using @--exec@
+
+There are a couple of details that are important to understand when you use @--exec@:
+
+* @--exec@ reads all remaining options as the command to run, so it must be the last option you specify. Either end your command arguments (and other options) with a @--@ argument, or specify @--exec@ after your mount point.
+* The command you specify runs from the same directory that you started @arv-mount@ from. To access data inside the mount, you will generally need to pass the path to the mount as an argument.
+
+For example, this generates a recursive listing of all the projects and collections under your home project:
+
+<notextile>
+<pre><code>$ <span class="userinput">arv-mount --home --exec find -type d ArvadosHome -- ArvadosHome</span>
+</code></pre>
+</notextile>
+
+The first @ArvadosHome@ is a path argument to @find@. The second is the mount point argument to @arv-mount@.
+
+h3(#systemd). Running arv-mount as a systemd service
+
+If you want to run @arv-mount@ as a long-running service, it's easy to write a systemd service definition for it. We do not publish one because the entire definition tends to be site-specific, but you can start from this template. You must change the @ExecStart@ path. Comments detail other changes you might want to make.
+
+<notextile>
+<pre><code>[Unit]
+Description=Arvados FUSE mount
+Documentation={{ site.baseurl }}/sdk/fuse/options.html
+
+[Service]
+Type=simple
+CacheDirectory=arvados/keep
+CacheDirectoryMode=0700
+
+# This unit makes the mount available as `Arvados` under the runtime directory root.
+# If this is a system service installed under /etc/systemd/system,
+# the mount will be at /run/Arvados.
+# If this is a user service installed under ~/.config/systemd/user,
+# the mount will be at $XDG_RUNTIME_DIR/Arvados.
+# If you want to mount at another location on the filesystem, remove RuntimeDirectory
+# and replace both instances of %t/Arvados with your desired path.
+RuntimeDirectory=Arvados
+# The arv-mount path must be the absolute path where you installed the command.
+# If you installed from a distribution package, make this /usr/bin/arv-mount.
+# If you installed from pip, replace ... with the path to your virtualenv.
+# You can add options to select what gets mounted, access permissions,
+# cache size, log level, etc.
+ExecStart=<span class="userinput">...</span>/bin/arv-mount --foreground --disk-cache-dir %C/arvados/keep %t/Arvados
+ExecStop=/usr/bin/fusermount -u %t/Arvados
+
+# This unit assumes the running user has a ~/.config/arvados/settings.conf
+# with ARVADOS_API_HOST and ARVADOS_API_TOKEN defined.
+# If not, you can write those in a separate file
+# and set its path as EnvironmentFile.
+# Make sure that file is owned and only readable by the running user (mode 0600).
+#EnvironmentFile=...
+</code></pre>
+</notextile>
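+
+After installing the unit file (for example as a user service named @arvados-fuse.service@, a name chosen here for illustration), reload systemd and start it:
+
+<notextile>
+<pre><code>$ <span class="userinput">systemctl --user daemon-reload</span>
+$ <span class="userinput">systemctl --user enable --now arvados-fuse.service</span>
+</code></pre>
+</notextile>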
index b733d03bfc37d5152afdb3a3d515a9e66e4e4d23..9abfa9789f381c395fe4bf1760b38ef643a96ebb 100644 (file)
@@ -9,13 +9,18 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-This section documents language bindings for the "Arvados API":{{site.baseurl}}/api/index.html and Keep that are available for various programming languages.  Not all features are available in every SDK.  The most complete SDK is the Python SDK.  Note that this section only gives a high level overview of each SDK.  Consult the "Arvados API":{{site.baseurl}}/api/index.html section for detailed documentation about Arvados API calls available on each resource.
+This section documents client tools and language bindings for the "Arvados API":{{site.baseurl}}/api/index.html and Keep that are available for various programming languages. The most mature, popular packages are:
+
+* "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html (also includes essential command line tools such as @arv-put@ and @arv-get@)
+* "Command line SDK":{{site.baseurl}}/sdk/cli/install.html (includes the @arv@ tool)
+
+Many Arvados Workbench pages provide examples of using the Python SDK and command line tools to access a given resource. Open "API details" from the action menu, then select the tab with the example you're interested in.
+
+We provide API bindings for several other languages, but these SDKs may be missing some features or documentation:
 
-* "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html (also includes essential command line tools such as "arv-put" and "arv-get")
-* "Command line SDK":{{site.baseurl}}/sdk/cli/install.html ("arv")
 * "Go SDK":{{site.baseurl}}/sdk/go/index.html
+* "Java SDK":{{site.baseurl}}/sdk/java-v2/index.html
 * "R SDK":{{site.baseurl}}/sdk/R/index.html
 * "Ruby SDK":{{site.baseurl}}/sdk/ruby/index.html
-* "Java SDK v2":{{site.baseurl}}/sdk/java-v2/index.html
 
-Many Arvados Workbench pages, under the *Advanced* tab, provide examples of API and SDK use for accessing the current resource .
+Consult the "Arvados API":{{site.baseurl}}/api/index.html section for detailed documentation about Arvados API calls available on each resource.
index 8d2fc2f4af086db6072c282ee59fc027fb11e9b3..a0841ec432faf5cb206513d444f4dc29ba877521 100644 (file)
@@ -1,7 +1,7 @@
 ---
 layout: default
 navsection: sdk
-navmenu: Java SDK v2
+navmenu: Java
 title: Examples
 ...
 {% comment %}
index ad9f0e1a9d1a7e3679f8409a478f47e67faa5d0e..aca9c4807856b3ae0e7b0d5f4ecaa062bd3521f9 100644 (file)
@@ -1,7 +1,7 @@
 ---
 layout: default
 navsection: sdk
-navmenu: Java SDK v2
+navmenu: Java
 title: "Installation"
 ...
 {% comment %}
index 872150f62518956d15584c7a89c57a0d4350f90b..686cd2440f039e212d2c7937f048b656d05a1c0b 100644 (file)
@@ -1,7 +1,7 @@
 ---
 layout: default
 navsection: sdk
-navmenu: Java v2
+navmenu: Java
 title: "Javadoc Reference"
 
 no_nav_left: true
diff --git a/doc/sdk/python/arvados-fuse.html.textile.liquid b/doc/sdk/python/arvados-fuse.html.textile.liquid
deleted file mode 100644 (file)
index 254294a..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
----
-layout: default
-navsection: sdk
-navmenu: Python
-title: Arvados FUSE driver
-...
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-The Arvados FUSE driver is a Python utility that allows you to see the Keep service as a normal filesystem, so that data can be accessed using standard tools. This driver requires the Python SDK installed in order to access Arvados services.
-
-h2. Installation
-
-If you are logged in to a managed Arvados VM, the @arv-mount@ utility should already be installed.
-
-To use the FUSE driver elsewhere, you can install from a distribution package, or PyPI.
-
-h2. Option 1: Install from distribution packages
-
-First, "add the appropriate package repository for your distribution":{{ site.baseurl }}/install/packages.html
-
-{% assign arvados_component = 'python-arvados-fuse' %}
-
-{% include 'install_packages' %}
-
-h2. Option 2: Install with pip
-
-Run @pip install arvados_fuse@ in an appropriate installation environment, such as a virtualenv.
-
-Note:
-
-The FUSE driver uses @pycurl@ which depends on the @libcurl@ C library.  To build the module you may have to first install additional packages.  On Debian-based distributions you can install them by running:
-
-<notextile>
-<pre><code># <span class="userinput">apt install git build-essential python3-dev libcurl4-openssl-dev libssl-dev</span>
-</code></pre>
-</notextile>
-
-h3. Usage
-
-Please refer to the "Accessing Keep from GNU/Linux":{{site.baseurl}}/user/tutorials/tutorial-keep-mount-gnu-linux.html tutorial for more information.
index fd1ff36c5ed9e2ea89443c9d9f21973f181b3f80..4a6ba029feeca0bcdb53628a2caaecfc4bfafc5c 100644 (file)
@@ -19,7 +19,7 @@ If you are logged in to an Arvados VM, the Python SDK should be installed.
 To use the Python SDK elsewhere, you can install it "from an Arvados distribution package":#package-install or "from PyPI using pip":#pip-install.
 
 {% include 'notebox_begin_warning' %}
-As of Arvados 2.2, the Python SDK requires Python 3.6+.  The last version to support Python 2.7 is Arvados 2.0.4.
+As of Arvados 3.0, the Python SDK requires Python 3.8+.
 {% include 'notebox_end' %}
 
 h2(#package-install). Install from a distribution package
@@ -32,14 +32,10 @@ First, configure the "Arvados package repositories":../../install/packages.html
 
 {% include 'install_packages' %}
 
-{% include 'notebox_begin_warning' %}
-If you are on Ubuntu 18.04, please note that the Arvados packages that use Python depend on the python-3.8 package. This means they are installed under @/usr/share/python3.8@, not @/usr/share/python3@. You will need to update the commands below accordingly.
-{% include 'notebox_end' %}
-
 The package includes a virtualenv, which means the correct Python environment needs to be loaded before the Arvados SDK can be imported. You can test the installation by doing that, then creating a client object. Ensure your "@ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ credentials are set up correctly":{{site.baseurl}}/user/reference/api-tokens.html. Then you should be able to run the following without any errors:
 
 <notextile>
-<pre>~$ <code class="userinput">source /usr/share/python3/dist/python3-arvados-python-client/bin/activate</code>
+<pre>~$ <code class="userinput">source /usr/lib/python3-arvados-python-client/bin/activate</code>
 (python-arvados-python-client) ~$ <code class="userinput">python</code>
 Python 3.7.3 (default, Jul 25 2020, 13:03:44)
 [GCC 8.3.0] on linux
@@ -53,7 +49,7 @@ Type "help", "copyright", "credits" or "license" for more information.
 Alternatively, you can run the Python executable inside the @virtualenv@ directly:
 
 <notextile>
-<pre>~$ <code class="userinput">/usr/share/python3/dist/python3-arvados-python-client/bin/python</code>
+<pre>~$ <code class="userinput">/usr/lib/python3-arvados-python-client/bin/python</code>
 Python 3.7.3 (default, Jul 25 2020, 13:03:44)
 [GCC 8.3.0] on linux
 Type "help", "copyright", "credits" or "license" for more information.
index 55bc702b6a9db151b8ecff018f0594a70be14255..a28acd56ec5621a4631cfcbdeb11fb580958d01a 100644 (file)
@@ -11,6 +11,8 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 {% include 'tutorial_expectations' %}
 
+*Note:* Starting from Arvados 2.7.2, these reports are generated automatically by @arvados-cwl-runner@ and can be found as @usage_report.html@ in a container request's log collection.
+
 The @crunchstat-summary@ tool can be used to analyze workflow and container performance. It can be installed from packages (@apt install python3-crunchstat-summary@ or @yum install rh-python36-python-crunchstat-summary@), or in a Python virtualenv (@pip install crunchstat_summary@). @crunchstat-summary@ analyzes the crunchstat lines from the logs of a container or workflow and generates a report in text or html format.
 
 h2(#syntax). Syntax
@@ -48,105 +50,110 @@ optional arguments:
 </code></pre>
 </notextile>
 
+When @crunchstat-summary@ is given a container or container request UUID for a top-level workflow runner container, it generates a report for the whole workflow. If the workflow is big, it can take a long time to generate the report.
+
 h2(#examples). Examples
 
 @crunchstat-summary@ prints to stdout. The html report, in particular, should be redirected to a file and then loaded in a browser.
 
-An example text report for a single workflow step:
+The html report can be generated as follows:
 
 <notextile>
-<pre><code>~$ <span class="userinput">crunchstat-summary --container-request pirca-xvhdp-rs0ef250emtmbj8 --format text</span>
-category  metric  task_max  task_max_rate job_total
-blkio:0:0 read  63067755822 53687091.20 63067755822
-blkio:0:0 write 64484253320 16376234.80 64484253320
-cpu cpus  16  - -
-cpu sys 2147.29 0.60  2147.29
-cpu user  549046.22 15.99 549046.22
-cpu user+sys  551193.51 16.00 551193.51
-fuseop:create count 1 0.10  1
-fuseop:create time  0.01  0.00  0.01
-fuseop:destroy  count 0 0 0
-fuseop:destroy  time  0 0 0.00
-fuseop:flush  count 12  0.70  12
-fuseop:flush  time  0.00  0.00  0.00
-fuseop:forget count 0 0 0
-fuseop:forget time  0 0 0.00
-fuseop:getattr  count 40  2.70  40
-fuseop:getattr  time  0.00  0.00  0.00
-fuseop:lookup count 36  2.90  36
-fuseop:lookup time  0.67  0.07  0.67
-fuseop:mkdir  count 0 0 0
-fuseop:mkdir  time  0 0 0.00
-fuseop:on_event count 0 0 0
-fuseop:on_event time  0 0 0.00
-fuseop:open count 9 0.30  9
-fuseop:open time  0.00  0.00  0.00
-fuseop:opendir  count 0 0 0
-fuseop:opendir  time  0 0 0.00
-fuseop:read count 481185  409.60  481185
-fuseop:read time  370.11  2.14  370.11
-fuseop:readdir  count 0 0 0
-fuseop:readdir  time  0 0 0.00
-fuseop:release  count 7 0.30  7
-fuseop:release  time  0.00  0.00  0.00
-fuseop:rename count 0 0 0
-fuseop:rename time  0 0 0.00
-fuseop:rmdir  count 0 0 0
-fuseop:rmdir  time  0 0 0.00
-fuseop:setattr  count 0 0 0
-fuseop:setattr  time  0 0 0.00
-fuseop:statfs count 0 0 0
-fuseop:statfs time  0 0 0.00
-fuseop:unlink count 0 0 0
-fuseop:unlink time  0 0 0.00
-fuseop:write  count 5414406 1123.00 5414406
-fuseop:write  time  475.04  0.11  475.04
-fuseops read  481185  409.60  481185
-fuseops write 5414406 1123.00 5414406
-keepcache hit 961402  819.20  961402
-keepcache miss  946 0.90  946
-keepcalls get 962348  820.00  962348
-keepcalls put 961 0.30  961
-mem cache 22748987392 - -
-mem pgmajfault  0 - 0
-mem rss 27185491968 - -
-net:docker0 rx  0 - 0
-net:docker0 tx  0 - 0
-net:docker0 tx+rx 0 - 0
-net:ens5  rx  1100398604  - 1100398604
-net:ens5  tx  1445464 - 1445464
-net:ens5  tx+rx 1101844068  - 1101844068
-net:keep0 rx  63086467386 53687091.20 63086467386
-net:keep0 tx  64482237590 20131128.60 64482237590
-net:keep0 tx+rx 127568704976  53687091.20 127568704976
-statfs  available 398721179648  - 398721179648
-statfs  total 400289181696  - 400289181696
-statfs  used  1568198656  0 1568002048
-time  elapsed 34820 - 34820
-# Number of tasks: 1
-# Max CPU time spent by a single task: 551193.51s
-# Max CPU usage in a single interval: 1599.52%
-# Overall CPU usage: 1582.98%
-# Max memory used by a single task: 27.19GB
-# Max network traffic in a single task: 127.57GB
-# Max network speed in a single interval: 53.69MB/s
-# Keep cache miss rate 0.10%
-# Keep cache utilization 99.97%
-# Temp disk utilization 0.39%
-#!! bwamem-samtools-view max RSS was 25927 MiB -- try reducing runtime_constraints to "ram":27541477785
-#!! bwamem-samtools-view max temp disk utilization was 0% of 381746 MiB -- consider reducing "tmpdirMin" and/or "outdirMin"
+<pre><code>~$ <span class="userinput">crunchstat-summary --container-request pirca-xvhdp-rs0ef250emtmbj8 --format html > report.html</span>
 </code></pre>
 </notextile>
 
-When @crunchstat-summary@ is given a container or container request uuid for a toplevel workflow runner container, it will generate a report for the whole workflow. If the workflow is big, it can take a long time to generate the report.
+When loaded in a browser:
+
+!(full-width)images/crunchstat-summary-html.png!
+
+<br>
 
-The equivalent html report can be generated as follows:
+Using @--format text@ prints detailed usage metrics and a summary:
 
 <notextile>
-<pre><code>~$ <span class="userinput">crunchstat-summary --container-request pirca-xvhdp-rs0ef250emtmbj8 --format html > report.html</span>
+<pre><code>~$ <span class="userinput">crunchstat-summary --container-request pirca-xvhdp-rs0ef250emtmbj8 --format text</span>
+category       metric  task_max        task_max_rate   job_total
+blkio:0:0      read    63067755822     53687091.20     63067755822
+blkio:0:0      write   64484253320     16376234.80     64484253320
+cpu    cpus    16      -       -
+cpu    sys     2147.29 0.60    2147.29
+cpu    user    549046.22       15.99   549046.22
+cpu    user+sys        551193.51       16.00   551193.51
+fuseop:create  count   1       0.10    1
+fuseop:create  time    0.01    0.00    0.01
+fuseop:destroy count   0       0       0
+fuseop:destroy time    0       0       0.00
+fuseop:flush   count   12      0.70    12
+fuseop:flush   time    0.00    0.00    0.00
+fuseop:forget  count   0       0       0
+fuseop:forget  time    0       0       0.00
+fuseop:getattr count   40      2.70    40
+fuseop:getattr time    0.00    0.00    0.00
+fuseop:lookup  count   36      2.90    36
+fuseop:lookup  time    0.67    0.07    0.67
+fuseop:mkdir   count   0       0       0
+fuseop:mkdir   time    0       0       0.00
+fuseop:on_event        count   0       0       0
+fuseop:on_event        time    0       0       0.00
+fuseop:open    count   9       0.30    9
+fuseop:open    time    0.00    0.00    0.00
+fuseop:opendir count   0       0       0
+fuseop:opendir time    0       0       0.00
+fuseop:read    count   481185  409.60  481185
+fuseop:read    time    370.11  2.14    370.11
+fuseop:readdir count   0       0       0
+fuseop:readdir time    0       0       0.00
+fuseop:release count   7       0.30    7
+fuseop:release time    0.00    0.00    0.00
+fuseop:rename  count   0       0       0
+fuseop:rename  time    0       0       0.00
+fuseop:rmdir   count   0       0       0
+fuseop:rmdir   time    0       0       0.00
+fuseop:setattr count   0       0       0
+fuseop:setattr time    0       0       0.00
+fuseop:statfs  count   0       0       0
+fuseop:statfs  time    0       0       0.00
+fuseop:unlink  count   0       0       0
+fuseop:unlink  time    0       0       0.00
+fuseop:write   count   5414406 1123.00 5414406
+fuseop:write   time    475.04  0.11    475.04
+fuseops        read    481185  409.60  481185
+fuseops        write   5414406 1123.00 5414406
+keepcache      hit     961402  819.20  961402
+keepcache      miss    946     0.90    946
+keepcalls      get     962348  820.00  962348
+keepcalls      put     961     0.30    961
+mem    cache   22748987392     -       -
+mem    pgmajfault      0       -       0
+mem    rss     27185491968     -       -
+net:docker0    rx      0       -       0
+net:docker0    tx      0       -       0
+net:docker0    tx+rx   0       -       0
+net:ens5       rx      1100398604      -       1100398604
+net:ens5       tx      1445464 -       1445464
+net:ens5       tx+rx   1101844068      -       1101844068
+net:keep0      rx      63086467386     53687091.20     63086467386
+net:keep0      tx      64482237590     20131128.60     64482237590
+net:keep0      tx+rx   127568704976    53687091.20     127568704976
+statfs available       398721179648    -       398721179648
+statfs total   400289181696    -       400289181696
+statfs used    1568198656      0       1568002048
+time   elapsed 34820   -       34820
+# Elapsed time: 9h 40m 20s
+# Assigned instance type: m5.4xlarge
+# Instance hourly price: $0.768
+# Max CPU usage in a single interval: 1599.52%
+# Overall CPU usage: 1582.98%
+# Requested CPU cores: 16
+# Instance VCPUs: 16
+# Max memory used: 25926.11MB
+# Requested RAM: 50000.00MB
+# Maximum RAM request for this instance type: 61736.70MB
+# Max network traffic: 127.57GB
+# Max network speed in a single interval: 53.69MB/s
+# Keep cache miss rate: 0.10%
+# Keep cache utilization: 99.97%
+# Temp disk utilization: 0.39%
 </code></pre>
 </notextile>
-
-When loaded in a browser:
-
-!(full-width)images/crunchstat-summary-html.png!
index e05072ddf6843a9a01599d4854317a49da62b4a1..3c8366721d86da6aafed9071729ad5f9d720034a 100644 (file)
@@ -73,7 +73,7 @@ hints:
     usePreemptible: true
 
   arv:OutOfMemoryRetry:
-    memoryRetryMultipler: 2
+    memoryRetryMultiplier: 2
     memoryErrorRegex: "custom memory error"
 {% endcodeblock %}
 
@@ -195,7 +195,7 @@ table(table table-bordered table-condensed).
 
 h2(#OutOfMemoryRetry). arv:OutOfMemoryRetry
 
-Specify that when a workflow step appears to have failed because it did not request enough RAM, it should be re-submitted with more RAM.  Out of memory conditions are detected either by the container being unexpectedly killed (exit code 137) or by matching a pattern in the container's output (see @memoryErrorRegex@).  Retrying will increase the base RAM request by the value of @memoryRetryMultipler@.  For example, if the original RAM request was 10 GiB and the multiplier is 1.5, then it will re-submit with 15 GiB.
+Specify that when a workflow step appears to have failed because it did not request enough RAM, it should be re-submitted with more RAM.  Out of memory conditions are detected either by the container being unexpectedly killed (exit code 137) or by matching a pattern in the container's output (see @memoryErrorRegex@).  Retrying will increase the base RAM request by the value of @memoryRetryMultiplier@.  For example, if the original RAM request was 10 GiB and the multiplier is 1.5, then it will re-submit with 15 GiB.
 
 Containers are only re-submitted once.  If it fails a second time after increasing RAM, then the workflow step will still fail.
 
@@ -203,7 +203,7 @@ Also note that expressions that use @$(runtime.ram)@ (such as dynamic command li
 
 table(table table-bordered table-condensed).
 |_. Field |_. Type |_. Description |
-|memoryRetryMultipler|float|Required, the retry will multiply the base memory request by this factor to get the retry memory request.|
+|memoryRetryMultiplier|float|Optional, default value is 2.  The retry will multiply the base memory request by this factor to get the retry memory request.|
 |memoryErrorRegex|string|Optional, a custom regex that, if found in the stdout, stderr or crunch-run logging of a program, will trigger a retry with greater RAM.  If not provided, the default pattern matches "out of memory" (with or without spaces), "memory error" (with or without spaces), "bad_alloc" and "container using over 90% of memory".|
 
 h2. arv:dockerCollectionPDH
index 703ec89139baf45afcafc8cf84c93b304ecee2b1..27db90fbd359aed61c5bbb323950777c1aeed6cd 100644 (file)
@@ -74,7 +74,8 @@ table(table table-bordered table-condensed).
 |==--skip-schemas==|      Skip loading of schemas|
 |==--trash-intermediate==|Immediately trash intermediate outputs on workflow success.|
 |==--no-trash-intermediate==|Do not trash intermediate outputs (default).|
-
+|==--enable-usage-report==|Create usage_report.html with a summary of each step's resource usage.|
+|==--disable-usage-report==|Disable usage report.|
 
 h3(#names). Specify workflow and output names
 
index 488541b3a745cd2f9458605e68b323775725d1fc..3832734e708de4b2424673279c8d5695a1901755 100644 (file)
Binary files a/doc/user/cwl/images/crunchstat-summary-html.png and b/doc/user/cwl/images/crunchstat-summary-html.png differ
index 999f848c13c5b188fd4330f8bc70128967378183..18f675d04e17c9b69973b9fa6d832b3352892320 100644 (file)
@@ -35,7 +35,7 @@ Here are the client packages you can install on your system. You can skip any yo
 
 * "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html: This provides an Arvados API client in Python, as well as low-level command line tools.
 * "Command-line SDK":{{site.baseurl}}/sdk/cli/install.html: This provides the high-level @arv@ command and user interface to the Arvados API.
-* "FUSE Driver":{{site.baseurl}}/sdk/python/arvados-fuse.html: This provides the @arv-mount@ command and FUSE driver that lets you access Keep using standard Linux filesystem tools.
+* "FUSE Driver":{{site.baseurl}}/sdk/fuse/install.html: This provides the @arv-mount@ command and FUSE driver that lets you access Keep using standard Linux filesystem tools.
 * "CWL Runner":{{site.baseurl}}/sdk/python/arvados-cwl-runner.html: This provides the @arvados-cwl-runner@ command to register and run workflows in Crunch.
 * "crunchstat-summary":{{site.baseurl}}/user/cwl/crunchstat-summary.html: This tool provides performance reports for Crunch containers.
 * "arvados-client":{{site.baseurl}}/user/debugging/container-shell-access.html: This tool provides subcommands for inspecting Crunch containers, both interactively while they're running and after they've finished.
index e3e4310cbb644791655f34b29d5abc2b36e55386..b64dc828bd2fc53e5ff1ed125df588ee9be454ad 100644 (file)
@@ -222,7 +222,7 @@ Once your workflow has finished, you can see how long it took the workflow to ru
 
 If we click on the outputs of the workflow, we will see the output collection. It contains the GVCF, tabix index file, and HTML ClinVar report for each analyzed sample (e.g., set of FASTQs). You can open a report in the browser by selecting it from the listing. You can also download a file to your local machine by right-clicking a file and selecting "Download" from the context menu, or from the action menu available from the far right of each listing.
 
-Logs for the main process can be found back on the workflow process page. Selecting the "LOGS" button at the top navigates down to the logs. You can view the logs directly through that panel, or in the upper right-hand corner select the button with hover-over text "Go to Log collection". 
+Logs for the main process can be found back on the workflow process page. Selecting the "LOGS" button at the top navigates down to the logs. You can view the logs directly through that panel, or in the upper right-hand corner select the button with hover-over text "Go to Log collection".
 
 There are several logs available, so here is a basic summary of what some of the more commonly used logs contain.  Let's first define a few terms that will help us understand what the logs are tracking.
 
@@ -238,6 +238,7 @@ node.json gives a high level overview about the instance such as name, price, an
 * @crunch-run.txt@ and @crunchstat.txt@
 ** @crunch-run.txt@ has info about how the container's execution environment was set up (e.g., time spent loading the docker image) and timing/results of copying output data to Keep (if applicable)
 ** @crunchstat.txt@ has info about resource consumption (RAM, cpu, disk, network) by the container while it was running.
+* @usage_report.html@ can be viewed directly in the browser by clicking on it.  It provides a summary and chart of the resource consumption derived from the raw data in @crunchstat.txt@.  (Available starting with @arvados-cwl-runner@ 2.7.2).
 * @container.json@
 ** Describes the container (unit of work to be done), contains CWL code, runtime constraints (RAM, vcpus) amongst other details
 * @arv-mount.txt@
index 371b9cc984ce224ed4d0f9e78296ed607057c8f4..05d8547c52a5126f36ecc79ae85e3a9f4658ac44 100644 (file)
@@ -20,4 +20,4 @@ RUN /usr/sbin/adduser --disabled-password \
     /usr/bin/install --directory --owner=crunch --group=crunch --mode=0700 /keep /tmp/crunch-src /tmp/crunch-job
 
 USER crunch
-ENV PATH=/usr/share/python3.9/dist/python3-arvados-cwl-runner/bin:/usr/local/bin:/usr/bin:/bin
+ENV PATH=/usr/lib/python3-arvados-cwl-runner/bin:/usr/local/bin:/usr/bin:/bin
index 07a146d99f080ab8a5294626062d47fc22a803f4..9a3f784b51e77d9d90d214183fb071b1190a39c6 100644 (file)
@@ -251,6 +251,12 @@ func (instanceSet *ec2InstanceSet) Create(
                                ResourceType: aws.String("instance"),
                                Tags:         ec2tags,
                        }},
+               MetadataOptions: &ec2.InstanceMetadataOptionsRequest{
+                       // Require IMDSv2, as described at
+                       // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-IMDS-new-instances.html
+                       HttpEndpoint: aws.String(ec2.InstanceMetadataEndpointStateEnabled),
+                       HttpTokens:   aws.String(ec2.HttpTokensStateRequired),
+               },
                UserData: aws.String(base64.StdEncoding.EncodeToString([]byte("#!/bin/sh\n" + initCommand + "\n"))),
        }
 
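Requiring IMDSv2 means that anything reading instance metadata from inside these VMs must first obtain a session token and present it on every subsequent request. A minimal Go sketch of that handshake, using only the standard library (the endpoint and header names are the documented IMDSv2 protocol; the helper itself is illustrative and not part of this change):

package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
	"time"
)

// fetchInstanceID shows the IMDSv2 flow: first PUT to the token
// endpoint, then pass the token on the metadata GET.
func fetchInstanceID() (string, error) {
	client := &http.Client{Timeout: 2 * time.Second}

	// Step 1: request a short-lived session token.
	req, err := http.NewRequest("PUT", "http://169.254.169.254/latest/api/token", nil)
	if err != nil {
		return "", err
	}
	req.Header.Set("X-aws-ec2-metadata-token-ttl-seconds", "60")
	resp, err := client.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	tok, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}

	// Step 2: present the token when reading metadata.
	req, err = http.NewRequest("GET", "http://169.254.169.254/latest/meta-data/instance-id", nil)
	if err != nil {
		return "", err
	}
	req.Header.Set("X-aws-ec2-metadata-token", strings.TrimSpace(string(tok)))
	resp, err = client.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	id, err := io.ReadAll(resp.Body)
	return string(id), err
}

func main() {
	fmt.Println(fetchInstanceID())
}

With HttpTokens set to "required", the legacy token-less GET of the same metadata URL is rejected, which is the point of the new MetadataOptions above.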
index 4b830058963b93cfc508ee1795e65d22d3d70af9..d342f0fb3010ee59e0a12363f8339f76d22b2b91 100644 (file)
@@ -277,6 +277,12 @@ func (*EC2InstanceSetSuite) TestCreate(c *check.C) {
        if *live == "" {
                c.Check(ap.client.(*ec2stub).describeKeyPairsCalls, check.HasLen, 1)
                c.Check(ap.client.(*ec2stub).importKeyPairCalls, check.HasLen, 1)
+
+               runcalls := ap.client.(*ec2stub).runInstancesCalls
+               if c.Check(runcalls, check.HasLen, 1) {
+                       c.Check(runcalls[0].MetadataOptions.HttpEndpoint, check.DeepEquals, aws.String("enabled"))
+                       c.Check(runcalls[0].MetadataOptions.HttpTokens, check.DeepEquals, aws.String("required"))
+               }
        }
 }
 
index c8e854b7e26854a7cd24526cef82c66504957efa..e3b67f725932a8f899e95354064a3b10e6977fe0 100644 (file)
@@ -365,34 +365,59 @@ Clusters:
       # false.
       ActivatedUsersAreVisibleToOthers: true
 
-      # The e-mail address of the user you would like to become marked as an admin
-      # user on their first login.
+      # If a user creates an account with this email address, they
+      # will be automatically set to admin.
       AutoAdminUserWithEmail: ""
 
       # If AutoAdminFirstUser is set to true, the first user to log in when no
       # other admin users exist will automatically become an admin user.
       AutoAdminFirstUser: false
 
-      # Email address to notify whenever a user creates a profile for the
-      # first time
+      # Recipient for notification email sent out when a user sets a
+      # profile on their account.
       UserProfileNotificationAddress: ""
+
+      # When sending a NewUser, NewInactiveUser, or UserProfile
+      # notification, this is the 'From' address to use
       AdminNotifierEmailFrom: arvados@example.com
+
+      # Prefix for email subjects for NewUser and NewInactiveUser emails
       EmailSubjectPrefix: "[ARVADOS] "
+
+      # When sending a welcome email to the user, the 'From' address to use
       UserNotifierEmailFrom: arvados@example.com
-      UserNotifierEmailBcc: {}
-      NewUserNotificationRecipients: {}
-      NewInactiveUserNotificationRecipients: {}
+
+      # The welcome email sent to new users will be blind copied to
+      # these addresses.
+      UserNotifierEmailBcc:
+        SAMPLE: {}
+
+      # Recipients for notification email sent out when a user account
+      # is created and already set up to be able to log in
+      NewUserNotificationRecipients:
+        SAMPLE: {}
+
+      # Recipients for notification email sent out when a user account
+      # has been created but the user cannot log in until they are
+      # set up by an admin.
+      NewInactiveUserNotificationRecipients:
+        SAMPLE: {}
 
       # Set AnonymousUserToken to enable anonymous user access. Populate this
       # field with a random string at least 50 characters long.
       AnonymousUserToken: ""
 
-      # If a new user has an alternate email address (local@domain)
-      # with the domain given here, its local part becomes the new
-      # user's default username. Otherwise, the user's primary email
-      # address is used.
+      # The login provider for a user may supply a primary email
+      # address and one or more alternate email addresses.  If a new
+      # user has an alternate email address with the domain given
+      # here, use the username from the alternate email to generate
+      # the user's Arvados username. Otherwise, the username from
+      # the user's primary email address is used for the Arvados username.
+      # Currently implemented for OpenID Connect only.
       PreferDomainForUsername: ""
 
+      # Ruby ERB template used for the email sent out to users when
+      # they have been set up.
       UserSetupMailText: |
         <% if not @user.full_name.empty? -%>
         <%= @user.full_name %>,
@@ -1800,8 +1825,18 @@ Clusters:
           Serialize: false
 
     Mail:
-      MailchimpAPIKey: ""
-      MailchimpListID: ""
+      # In order to send mail, Arvados expects a default SMTP server
+      # on localhost:25.  That server must not require authentication on
+      # connections from localhost, and it should be configured
+      # to relay mail to a "real" SMTP server that is able to send
+      # email on behalf of your domain.
+
+      # See also the "Users" configuration section for additional
+      # email-related options.
+
+      # When a user has been set up (meaning they are able to log in)
+      # they will receive an email using the template specified
+      # earlier in Users.UserSetupMailText
       SendUserSetupNotificationEmail: true
 
       # Bug/issue report notification to and from addresses
@@ -1811,6 +1846,10 @@ Clusters:
 
       # Generic issue email from
       EmailFrom: "arvados@example.com"
+
+      # No longer supported, to be removed.
+      MailchimpAPIKey: ""
+      MailchimpListID: ""
     RemoteClusters:
       "*":
         Host: ""
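To make the Mail comments above concrete, this is roughly what a delivery through such an unauthenticated local relay looks like from Go's standard library. The addresses are placeholders and this is a sketch of the expected mail setup, not code from Arvados:

package main

import (
	"log"
	"net/smtp"
)

func main() {
	msg := []byte("To: user@example.com\r\n" +
		"Subject: [ARVADOS] Welcome\r\n" +
		"\r\n" +
		"Your account has been set up.\r\n")
	// No smtp.Auth is passed: the relay on localhost:25 must accept
	// unauthenticated connections from localhost and forward the
	// message to a "real" SMTP server for the domain.
	err := smtp.SendMail("localhost:25", nil, "arvados@example.com",
		[]string{"user@example.com"}, msg)
	if err != nil {
		log.Fatal(err)
	}
}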
index fc1f705175479d0e42fa3523617592cebafff3d5..4bf7a03447980ccd4d92637baf0ce8c1ca514a6c 100644 (file)
@@ -971,8 +971,8 @@ func (s *IntegrationSuite) TestSetupUserWithVM(c *check.C) {
                        "hostname": "example",
                },
                })
+       c.Assert(err, check.IsNil)
        c.Check(outVM.UUID[0:5], check.Equals, "z3333")
-       c.Check(err, check.IsNil)
 
        // Make sure z3333 user list is up to date
        _, err = conn3.UserList(rootctx3, arvados.ListOptions{Limit: 1000})
index 6fc6dd9444bf0f44d30f220a9381a5a994684b0f..c539e0e60b124c75c3dd14792b4402e84f6c8756 100755 (executable)
@@ -208,7 +208,7 @@ docker run --detach --rm --name=${ctrlctr} \
        -v "${tmpdir}/arvados-server":/bin/arvados-server:ro \
        -v "${tmpdir}/zzzzz.yml":/etc/arvados/config.yml:ro \
        -v $(realpath "${PWD}/../../.."):/arvados:ro \
-       debian:10 \
+       debian:11 \
        bash -c "${setup_pam_ldap:-true} && arvados-server controller"
 docker logs --follow ${ctrlctr} 2>$debug >$debug &
 ctrlhostports=$(docker port ${ctrlctr} 9999/tcp)
index d569020824c22373d5098e0afd4c14d6156dd773..4f0100b2677f956b1af9dadcbd5b6082a1be0ab0 100644 (file)
@@ -20,7 +20,6 @@ import (
        "git.arvados.org/arvados.git/sdk/go/arvadostest"
        "git.arvados.org/arvados.git/sdk/go/ctxlog"
        "git.arvados.org/arvados.git/sdk/go/keepclient"
-       "git.arvados.org/arvados.git/services/keepstore"
        . "gopkg.in/check.v1"
 )
 
@@ -195,7 +194,9 @@ func (s *integrationSuite) TestRunTrivialContainerWithLocalKeepstore(c *C) {
                        volume.Replication = 2
                        cluster.Volumes[uuid] = volume
 
-                       var v keepstore.UnixVolume
+                       var v struct {
+                               Root string
+                       }
                        err = json.Unmarshal(volume.DriverParameters, &v)
                        c.Assert(err, IsNil)
                        err = os.Mkdir(v.Root, 0777)
index 3b68f31e9fac07208c8b4dcff46ee58f4e99deda..f536001f77ca9abdb3b1883ad8dd9051ab202961 100644 (file)
@@ -7,8 +7,6 @@ Description=Arvados server
 Documentation=https://doc.arvados.org/
 After=network.target
 AssertPathExists=/etc/arvados/config.yml
-
-# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
 StartLimitIntervalSec=0
 
 [Service]
@@ -21,8 +19,5 @@ Restart=always
 RestartSec=1
 LimitNOFILE=65536
 
-# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
-StartLimitInterval=0
-
 [Install]
 WantedBy=multi-user.target
index e0defa888a2cec5c0d2c3d20f320f7ebad0fb3a4..3f0245293e5f1a39475613e2afe7c7f0ca6d980a 100755 (executable)
@@ -1,4 +1,8 @@
 #!/bin/bash
+#
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
 
 set -ex -o pipefail
 
@@ -7,7 +11,7 @@ SRC=$(realpath $(dirname ${BASH_SOURCE[0]})/../..)
 ctrname=arvadostest
 ctrbase=${ctrname}
 if [[ "${1}" != "--update" ]] || ! docker images --format={{.Repository}} | grep -x ${ctrbase}; then
-    ctrbase=debian:10
+    ctrbase=debian:11
 fi
 
 if docker ps -a --format={{.Names}} | grep -x ${ctrname}; then
index 439289b58887efef1874c7adcb71c103b4ec0862..f9795cf8728da376d3013f93d6acdebb83ed4c26 100644 (file)
@@ -40,7 +40,7 @@ const (
        pjsversion                = "1.9.8"
        geckoversion              = "0.24.0"
        gradleversion             = "5.3.1"
-       defaultNodejsVersion      = "12.22.12"
+       defaultNodejsVersion      = "14.21.3"
        devtestDatabasePassword   = "insecure_arvados_test"
 )
 
@@ -249,7 +249,7 @@ func (inst *installCommand) RunCommand(prog string, args []string, stdin io.Read
                        pkgs = append(pkgs, "g++", "libcurl4", "libcurl4-openssl-dev")
                case osv.Debian || osv.Ubuntu:
                        pkgs = append(pkgs, "g++", "libcurl3", "libcurl3-openssl-dev")
-               case osv.Centos:
+               case osv.RedHat:
                        pkgs = append(pkgs, "gcc", "gcc-c++", "libcurl-devel", "postgresql-devel")
                }
                cmd := exec.CommandContext(ctx, "apt-get")
@@ -273,8 +273,6 @@ func (inst *installCommand) RunCommand(prog string, args []string, stdin io.Read
                } else if osv.Debian {
                        var codename string
                        switch osv.Major {
-                       case 10:
-                               codename = "buster"
                        case 11:
                                codename = "bullseye"
                        case 12:
@@ -286,7 +284,7 @@ func (inst *installCommand) RunCommand(prog string, args []string, stdin io.Read
                        err = inst.runBash(`
 rm -f /usr/share/keyrings/docker-archive-keyring.gpg
 curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
-echo 'deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian/ `+codename+` stable' | \
+echo 'deb [arch=`+runtime.GOARCH+` signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian/ `+codename+` stable' | \
     tee /etc/apt/sources.list.d/docker.list
 apt-get update
 DEBIAN_FRONTEND=noninteractive apt-get --yes --no-install-recommends install docker-ce
@@ -363,7 +361,7 @@ fi
                        err = inst.runBash(`
 cd /tmp
 rm -rf /var/lib/arvados/go/
-wget --progress=dot:giga -O- https://storage.googleapis.com/golang/go`+goversion+`.linux-amd64.tar.gz | tar -C /var/lib/arvados -xzf -
+wget --progress=dot:giga -O- https://storage.googleapis.com/golang/go`+goversion+`.linux-`+runtime.GOARCH+`.tar.gz | tar -C /var/lib/arvados -xzf -
 ln -sfv /var/lib/arvados/go/bin/* /usr/local/bin/
 `, stdout, stderr)
                        if err != nil {
@@ -539,15 +537,23 @@ setcap "cap_sys_admin+pei cap_sys_chroot+pei" /var/lib/arvados/bin/nsenter
                }
        }
 
+       var njsArch string
+       switch runtime.GOARCH {
+       case "amd64":
+               njsArch = "x64"
+       default:
+               njsArch = runtime.GOARCH
+       }
+
        if !prod {
                if havenodejsversion, err := exec.Command("/usr/local/bin/node", "--version").CombinedOutput(); err == nil && string(havenodejsversion) == "v"+inst.NodejsVersion+"\n" {
                        logger.Print("nodejs " + inst.NodejsVersion + " already installed")
                } else {
                        err = inst.runBash(`
 NJS=v`+inst.NodejsVersion+`
-rm -rf /var/lib/arvados/node-*-linux-x64
-wget --progress=dot:giga -O- https://nodejs.org/dist/${NJS}/node-${NJS}-linux-x64.tar.xz | sudo tar -C /var/lib/arvados -xJf -
-ln -sfv /var/lib/arvados/node-${NJS}-linux-x64/bin/{node,npm} /usr/local/bin/
+rm -rf /var/lib/arvados/node-*-linux-`+njsArch+`
+wget --progress=dot:giga -O- https://nodejs.org/dist/${NJS}/node-${NJS}-linux-`+njsArch+`.tar.xz | sudo tar -C /var/lib/arvados -xJf -
+ln -sfv /var/lib/arvados/node-${NJS}-linux-`+njsArch+`/bin/{node,npm} /usr/local/bin/
 `, stdout, stderr)
                        if err != nil {
                                return 1
@@ -559,7 +565,7 @@ ln -sfv /var/lib/arvados/node-${NJS}-linux-x64/bin/{node,npm} /usr/local/bin/
                } else {
                        err = inst.runBash(`
 npm install -g yarn
-ln -sfv /var/lib/arvados/node-v`+inst.NodejsVersion+`-linux-x64/bin/{yarn,yarnpkg} /usr/local/bin/
+ln -sfv /var/lib/arvados/node-v`+inst.NodejsVersion+`-linux-`+njsArch+`/bin/{yarn,yarnpkg} /usr/local/bin/
 `, stdout, stderr)
                        if err != nil {
                                return 1
@@ -793,7 +799,7 @@ rsync -a --delete-after "$tmp/build/" "$dst/"
 type osversion struct {
        Debian bool
        Ubuntu bool
-       Centos bool
+       RedHat bool
        Major  int
 }
 
@@ -831,10 +837,24 @@ func identifyOS() (osversion, error) {
                osv.Ubuntu = true
        case "debian":
                osv.Debian = true
-       case "centos":
-               osv.Centos = true
        default:
-               return osv, fmt.Errorf("unsupported ID in /etc/os-release: %q", kv["ID"])
+               idLikeMatched := false
+               for _, idLike := range strings.Split(kv["ID_LIKE"], " ") {
+                       switch idLike {
+                       case "debian":
+                               osv.Debian = true
+                               idLikeMatched = true
+                       case "rhel":
+                               osv.RedHat = true
+                               idLikeMatched = true
+                       }
+                       if idLikeMatched {
+                               break
+                       }
+               }
+               if !idLikeMatched {
+                       return osv, fmt.Errorf("no supported ID found in /etc/os-release")
+               }
        }
        vstr := kv["VERSION_ID"]
        if i := strings.Index(vstr, "."); i > 0 {
@@ -895,7 +915,7 @@ func prodpkgs(osv osversion) []string {
                return append(pkgs,
                        "mime-support", // keep-web
                )
-       } else if osv.Centos {
+       } else if osv.RedHat {
                return append(pkgs,
                        "fuse-libs", // services/fuse
                        "mailcap",   // keep-web
index 03d9b7f63b4a031666b7eb5cd37f624122d50b7a..182e1bfeb55657021135fe320d830b99cb8deec0 100644 (file)
@@ -1,17 +1,19 @@
 #!/bin/bash
+#
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
 
 set -e -o pipefail
 
-# Starting with a base debian buster system, like "docker run -it
-# debian:10"...
+# Starting with a base debian bullseye system, like "docker run -it
+# debian:11"...
 
 apt update
 apt upgrade
 apt install --no-install-recommends build-essential ca-certificates git golang
 git clone https://git.arvados.org/arvados.git
-cd arvados
-[[ -e lib/install ]] || git checkout origin/16053-install-deps
-cd cmd/arvados-server
+cd arvados/cmd/arvados-server
 go run ./cmd/arvados-server install -type test
-pg_isready || pg_ctlcluster 11 main start # only needed if there's no init process (as in docker)
+pg_isready || pg_ctlcluster 13 main start # only needed if there's no init process (as in docker)
 build/run-tests.sh
index fa16b313beaed8b66b9f1544d148a30c05b78f83..196cb97174952a41fa3daceeb7e5b3665416200a 100644 (file)
@@ -114,7 +114,7 @@ func (s *DockerSuite) runTestClient(c *check.C, args ...string) (stdout, stderr
                "-v", s.tmpdir + "/pam_arvados.so:/usr/lib/pam_arvados.so:ro",
                "-v", s.tmpdir + "/conffile:/usr/share/pam-configs/arvados:ro",
                "-v", s.tmpdir + "/testclient:/testclient:ro",
-               "debian:buster",
+               "debian:bullseye",
                "/testclient"}, args...)...)
        stdout = &bytes.Buffer{}
        stderr = &bytes.Buffer{}
index 6c33f97913f83aaeaebf392fec740ba9f9d0d98a..4e6c5c88f4af70c290b0663ad343c0d75bfd30fd 100644 (file)
@@ -2,7 +2,7 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
-options(repos=structure(c(CRAN="http://cran.wustl.edu/")))
+options(repos=structure(c(CRAN="https://cloud.r-project.org/")))
 if (!requireNamespace("devtools")) {
   install.packages("devtools")
 }
@@ -16,10 +16,7 @@ if (!requireNamespace("markdown")) {
   install.packages("markdown")
 }
 if (!requireNamespace("XML")) {
-  # XML 3.99-0.4 depends on R >= 4.0.0, but we run tests on debian
-  # stable (10) with R 3.5.2 so we install an older version from
-  # source.
-  install.packages("https://cran.r-project.org/src/contrib/Archive/XML/XML_3.99-0.3.tar.gz", repos=NULL, type="source")
+  install.packages("XML")
 }
 
 devtools::install_dev_deps()
index 3a856c4f5147a5acddf32bc34b02bbecc8844a01..67f93c19c318ab276204b0d6d883c47024ad9242 100644 (file)
@@ -38,7 +38,7 @@ Gem::Specification.new do |s|
   s.files       = ["bin/arv", "bin/arv-tag", "LICENSE-2.0.txt"]
   s.executables << "arv"
   s.executables << "arv-tag"
-  s.required_ruby_version = '>= 2.5.0'
+  s.required_ruby_version = '>= 2.7.0'
   s.add_runtime_dependency 'arvados', '~> 2.8.a'
   # arvados fork of google-api-client gem with old API and new
   # compatibility fixes, built from ../ruby-google-api-client/
index 9fc00c00171ba4d435a237c4bbb68482421aafaf..7e13488758b10f5ec9f2ac5a61ec31dfaa1ba4f8 100644 (file)
@@ -258,6 +258,10 @@ def arg_parser():  # type: () -> argparse.ArgumentParser
                         default=False, dest="trash_intermediate",
                         help="Do not trash intermediate outputs (default).")
 
+    exgroup = parser.add_mutually_exclusive_group()
+    exgroup.add_argument("--enable-usage-report", dest="enable_usage_report", default=None, action="store_true", help="Create usage_report.html with a summary of each step's resource usage.")
+    exgroup.add_argument("--disable-usage-report", dest="enable_usage_report", default=None, action="store_false", help="Disable usage report.")
+
     parser.add_argument("workflow", default=None, help="The workflow to execute")
     parser.add_argument("job_order", nargs=argparse.REMAINDER, help="The input object to the workflow.")
 
index 91a05e125439952b8023dd197dd620211052d9eb..aeb41db568722e35ef045a407117f6864f213a95 100644 (file)
@@ -478,8 +478,13 @@ $graph:
         and stderr produced by the tool to determine if a failed job
         should be retried with more RAM.  By default, searches for the
         substrings 'bad_alloc' and 'OutOfMemory'.
-    - name: memoryRetryMultipler
-      type: float
+    - name: memoryRetryMultiplier
+      type: float?
       doc: |
         If the container failed on its first run, re-submit the
         container with the RAM request multiplied by this factor.
+    - name: memoryRetryMultipler
+      type: float?
+      doc: |
+        Deprecated misspelling of "memoryRetryMultiplier".  Kept only
+        for backwards compatibility, don't use this.
index 458d5a37a7b0339bfd4bc21dd43fa5ac09d0fe86..0e51d50080ce032897132322635a7d2a0941aaf9 100644 (file)
@@ -421,8 +421,13 @@ $graph:
         and stderr produced by the tool to determine if a failed job
         should be retried with more RAM.  By default, searches for the
         substrings 'bad_alloc' and 'OutOfMemory'.
-    - name: memoryRetryMultipler
-      type: float
+    - name: memoryRetryMultiplier
+      type: float?
       doc: |
         If the container failed on its first run, re-submit the
         container with the RAM request multiplied by this factor.
+    - name: memoryRetryMultipler
+      type: float?
+      doc: |
+        Deprecated misspelling of "memoryRetryMultiplier".  Kept only
+        for backwards compatibility, don't use this.
index 389add41047b7118d1701fcd43cbe85461ac0618..a753579c9aa7bbd7c945b813e1be9d689350d084 100644 (file)
@@ -424,11 +424,17 @@ $graph:
         and stderr produced by the tool to determine if a failed job
         should be retried with more RAM.  By default, searches for the
         substrings 'bad_alloc' and 'OutOfMemory'.
-    - name: memoryRetryMultipler
-      type: float
+    - name: memoryRetryMultiplier
+      type: float?
       doc: |
         If the container failed on its first run, re-submit the
         container with the RAM request multiplied by this factor.
+    - name: memoryRetryMultipler
+      type: float?
+      doc: |
+        Deprecated misspelling of "memoryRetryMultiplier".  Kept only
+        for backwards compatibility, don't use this.
+
 
 - name: SeparateRunner
   type: record
index 84b98378f4196a19714657e66629475c86bdf2bb..c3b914ba996a795623c5c9a1f155a2b11098b4d9 100644 (file)
@@ -27,6 +27,9 @@ from cwltool.job import JobBase
 
 import arvados.collection
 
+import crunchstat_summary.summarizer
+import crunchstat_summary.reader
+
 from .arvdocker import arv_docker_get_image
 from . import done
 from .runner import Runner, arvados_jobs_image, packed_workflow, trim_anonymous_location, remove_redundant_fields, make_builder
@@ -370,8 +373,13 @@ class ArvadosContainer(JobBase):
         ram_multiplier = [1]
 
         oom_retry_req, _ = self.get_requirement("http://arvados.org/cwl#OutOfMemoryRetry")
-        if oom_retry_req and oom_retry_req.get('memoryRetryMultipler'):
-            ram_multiplier.append(oom_retry_req.get('memoryRetryMultipler'))
+        if oom_retry_req:
+            if oom_retry_req.get('memoryRetryMultiplier'):
+                ram_multiplier.append(oom_retry_req.get('memoryRetryMultiplier'))
+            elif oom_retry_req.get('memoryRetryMultipler'):
+                ram_multiplier.append(oom_retry_req.get('memoryRetryMultipler'))
+            else:
+                ram_multiplier.append(2)
 
         if runtimeContext.runnerjob.startswith("arvwf:"):
             wfuuid = runtimeContext.runnerjob[6:runtimeContext.runnerjob.index("#")]
@@ -492,11 +500,14 @@ class ArvadosContainer(JobBase):
             else:
                 processStatus = "permanentFail"
 
-            if processStatus == "permanentFail" and record["log_uuid"]:
-                logc = arvados.collection.CollectionReader(record["log_uuid"],
-                                                           api_client=self.arvrunner.api,
-                                                           keep_client=self.arvrunner.keep_client,
-                                                           num_retries=self.arvrunner.num_retries)
+            logc = None
+            if record["log_uuid"]:
+                logc = arvados.collection.Collection(record["log_uuid"],
+                                                     api_client=self.arvrunner.api,
+                                                     keep_client=self.arvrunner.keep_client,
+                                                     num_retries=self.arvrunner.num_retries)
+
+            if processStatus == "permanentFail" and logc is not None:
                 label = self.arvrunner.label(self)
                 done.logtail(
                     logc, logger.error,
@@ -522,6 +533,28 @@ class ArvadosContainer(JobBase):
                 uuid=self.uuid,
                 body={"container_request": {"properties": properties}}
             ).execute(num_retries=self.arvrunner.num_retries)
+
+            if logc is not None and self.job_runtime.enable_usage_report is not False:
+                try:
+                    summarizer = crunchstat_summary.summarizer.ContainerRequestSummarizer(
+                        record,
+                        collection_object=logc,
+                        label=self.name,
+                        arv=self.arvrunner.api)
+                    summarizer.run()
+                    with logc.open("usage_report.html", "wt") as mr:
+                        mr.write(summarizer.html_report())
+                    logc.save()
+
+                    # Post warnings about nodes that are under-utilized.
+                    for rc in summarizer._recommend_gen(lambda x: x):
+                        self.job_runtime.usage_report_notes.append(rc)
+
+                except Exception as e:
+                    logger.warning("%s unable to generate resource usage report",
+                                 self.arvrunner.label(self),
+                                 exc_info=(e if self.arvrunner.debug else False))
+
         except WorkflowException as e:
             # Only include a stack trace if in debug mode.
             # A stack trace may obfuscate more useful output about the workflow.
@@ -699,6 +732,12 @@ class RunnerContainer(Runner):
         if runtimeContext.prefer_cached_downloads:
             command.append("--prefer-cached-downloads")
 
+        if runtimeContext.enable_usage_report is True:
+            command.append("--enable-usage-report")
+
+        if runtimeContext.enable_usage_report is False:
+            command.append("--disable-usage-report")
+
         if self.fast_parser:
             command.append("--fast-parser")
 
index 0439cb5b15cb64d1c449e39114358d564dc21b86..60ea9bdff50b5c6e46bcfa6381edc662952e15a3 100644 (file)
@@ -46,6 +46,8 @@ class ArvRuntimeContext(RuntimeContext):
         self.cached_docker_lookups = {}
         self.print_keep_deps = False
         self.git_info = {}
+        self.enable_usage_report = None
+        self.usage_report_notes = []
 
         super(ArvRuntimeContext, self).__init__(kwargs)
 
index 2db6a9bfe2a3de1c6f4036a4795ce88924954036..432b380aabcd90c4c91ff3d7d72a9af29ab52823 100644 (file)
@@ -70,7 +70,7 @@ class RuntimeStatusLoggingHandler(logging.Handler):
             kind = 'error'
         elif record.levelno >= logging.WARNING:
             kind = 'warning'
-        if kind == 'warning' and record.name == "salad":
+        if kind == 'warning' and record.name in ("salad", "crunchstat_summary"):
             # Don't send validation warnings to runtime status,
             # they're noisy and unhelpful.
             return
@@ -146,6 +146,7 @@ class ArvCwlExecutor(object):
         self.stdout = stdout
         self.fast_submit = False
         self.git_info = arvargs.git_info
+        self.debug = False
 
         if keep_client is not None:
             self.keep_client = keep_client
@@ -369,7 +370,8 @@ The 'jobs' API is no longer supported.
                     page = keys[:pageSize]
                     try:
                         proc_states = table.list(filters=[["uuid", "in", page]], select=["uuid", "container_uuid", "state", "log_uuid",
-                                                                                         "output_uuid", "modified_at", "properties"]).execute(num_retries=self.num_retries)
+                                                                                         "output_uuid", "modified_at", "properties",
+                                                                                         "runtime_constraints"]).execute(num_retries=self.num_retries)
                     except Exception as e:
                         logger.warning("Temporary error checking states on API server: %s", e)
                         remain_wait = self.poll_interval
@@ -927,6 +929,11 @@ The 'jobs' API is no longer supported.
         if self.final_output is None:
             raise WorkflowException("Workflow did not return a result.")
 
+        if runtimeContext.usage_report_notes:
+            logger.info("Steps with low resource utilization (possible optimization opportunities):")
+            for x in runtimeContext.usage_report_notes:
+                logger.info("  %s", x)
+
         if runtimeContext.submit and isinstance(tool, Runner):
             logger.info("Final output collection %s", tool.final_output)
             if workbench2 or workbench1:
index c3936617f09aa46e11a6822aa2cb868608d20c53..a78dbfcf2b23c1eb89b17bdd6812d8f42e078a2b 100644 (file)
@@ -12,6 +12,7 @@ SETUP_DIR = os.path.dirname(os.path.abspath(__file__))
 VERSION_PATHS = {
         SETUP_DIR,
         os.path.abspath(os.path.join(SETUP_DIR, "../python")),
+        os.path.abspath(os.path.join(SETUP_DIR, "../../tools/crunchstat-summary")),
         os.path.abspath(os.path.join(SETUP_DIR, "../../build/version-at-commit.sh"))
         }
 
index 1da0d53ce8813d29bb788ce86fb35b96a94646c8..043b52cb814067f573423044a88d34b823f72d20 100644 (file)
@@ -39,19 +39,17 @@ setup(name='arvados-cwl-runner',
           'cwltool==3.1.20230601100705',
           'schema-salad==8.4.20230601112322',
           'arvados-python-client{}'.format(pysdk_dep),
+          'crunchstat-summary{}'.format(pysdk_dep),
           'ciso8601 >= 2.0.0',
           'networkx < 2.6',
           'msgpack==1.0.3',
           'importlib-metadata<5',
           'setuptools>=40.3.0',
-
-          # zipp 3.16 dropped support for Python 3.7
-          'zipp<3.16.0; python_version<"3.8"'
       ],
       data_files=[
           ('share/doc/arvados-cwl-runner', ['LICENSE-2.0.txt', 'README.rst']),
       ],
-      python_requires=">=3.5, <4",
+      python_requires="~=3.8",
       classifiers=[
           'Programming Language :: Python :: 3',
       ],
index 55099afdf766c33cad9a6fee3503fc7173524491..51d64b3f84c8dc4c36e2dd637be9b622a8afaa3c 100755 (executable)
@@ -5,8 +5,10 @@
 
 set -x
 
+cwldir=$(readlink -f $(dirname $0))
+
 if ! which arvbox >/dev/null ; then
-    export PATH=$PATH:$(readlink -f $(dirname $0)/../../tools/arvbox/bin)
+    export PATH=$PATH:$cwldir/../../tools/arvbox/bin
 fi
 
 reset_container=1
@@ -14,7 +16,6 @@ leave_running=0
 config=dev
 devcwl=0
 tag="latest"
-pythoncmd=python3
 suite=conformance
 runapi=containers
 reinstall=0
@@ -51,7 +52,7 @@ while test -n "$1" ; do
             shift
             ;;
         --pythoncmd)
-            pythoncmd=$2
+            echo "warning: --pythoncmd option is no longer supported; ignored" >&2
             shift ; shift
             ;;
         --suite)
@@ -63,7 +64,7 @@ while test -n "$1" ; do
             shift ; shift
             ;;
         -h|--help)
-            echo "$0 [--no-reset-container] [--leave-running] [--config dev|localdemo] [--tag docker_tag] [--build] [--pythoncmd python(2|3)] [--suite (integration|conformance-v1.0|conformance-*)]"
+            echo "$0 [--no-reset-container] [--leave-running] [--config dev|localdemo] [--tag docker_tag] [--build] [--suite (integration|conformance-v1.0|conformance-*)]"
             exit
             ;;
         *)
@@ -92,23 +93,15 @@ arvbox start $config $tag
 # of using the one inside the container, so we can make changes to the
 # integration tests without necessarily having to rebuilding the
 # container image.
-docker cp -L $(readlink -f $(dirname $0)/tests) $ARVBOX_CONTAINER:/usr/src/arvados/sdk/cwl
+docker cp -L $cwldir/tests $ARVBOX_CONTAINER:/usr/src/arvados/sdk/cwl
 
 arvbox pipe <<EOF
 set -eu -o pipefail
 
 . /usr/local/lib/arvbox/common.sh
 
-export PYCMD=$pythoncmd
-
 if test $config = dev -o $reinstall = 1; then
-  cd /usr/src/arvados/sdk/python
-  \$PYCMD setup.py sdist
-  pip_install \$(ls -r dist/arvados-python-client-*.tar.gz | head -n1)
-
-  cd /usr/src/arvados/sdk/cwl
-  \$PYCMD setup.py sdist
-  pip_install \$(ls -r dist/arvados-cwl-runner-*.tar.gz | head -n1)
+  pip_install_sdist sdk/python sdk/cwl
 fi
 
 set -x
@@ -117,11 +110,7 @@ set -x
 # our files are in Keep, all the tests fail.
 # We should add [optional] Arvados support to cwltest so it can access
 # Keep but for the time being just install the last working version.
-if [ "\$PYCMD" = "python3" ]; then
-    pip3 install 'cwltest<2.3.20230527113600'
-else
-    pip install 'cwltest<2.3.20230527113600'
-fi
+/opt/arvados-py/bin/pip install 'cwltest<2.3.20230527113600'
 
 mkdir -p /tmp/cwltest
 cd /tmp/cwltest
@@ -148,7 +137,7 @@ if [[ "$suite" = "conformance-v1.1" ]] ; then
 fi
 
 if [[ "$suite" = "conformance-v1.2" ]] ; then
-   git checkout 1.2.1_proposed
+   git checkout v1.2.1
 fi
 
 #if [[ "$suite" != "integration" ]] ; then
@@ -183,24 +172,23 @@ cwltest --version
 # Skip test 199 in the v1.1 suite because it has different output
 # depending on whether there is a pty associated with stdout (fixed in
 # the v1.2 suite)
-#
-# Skip test 307 in the v1.2 suite because the test relied on
-# secondary file behavior of cwltool that wasn't actually correct to specification
 
 if [[ "$suite" = "integration" ]] ; then
    cd /usr/src/arvados/sdk/cwl/tests
    exec ./arvados-tests.sh $@
 elif [[ "$suite" = "conformance-v1.2" ]] ; then
-   exec cwltest --tool arvados-cwl-runner --test conformance_tests.yaml -Sdocker_entrypoint,timelimit_invalid_wf -N307 $@ -- \$EXTRA
+   exec cwltest --tool arvados-cwl-runner --test conformance_tests.yaml -Sdocker_entrypoint --badgedir /tmp/badges $@ -- \$EXTRA
 elif [[ "$suite" = "conformance-v1.1" ]] ; then
-   exec cwltest --tool arvados-cwl-runner --test conformance_tests.yaml -Sdocker_entrypoint,timelimit_invalid_wf -N199 $@ -- \$EXTRA
+   exec cwltest --tool arvados-cwl-runner --test conformance_tests.yaml -Sdocker_entrypoint,timelimit_invalid_wf -N199 --badgedir /tmp/badges $@ -- \$EXTRA
 elif [[ "$suite" = "conformance-v1.0" ]] ; then
-   exec cwltest --tool arvados-cwl-runner --test v1.0/conformance_test_v1.0.yaml -Sdocker_entrypoint $@ -- \$EXTRA
+   exec cwltest --tool arvados-cwl-runner --test v1.0/conformance_test_v1.0.yaml -Sdocker_entrypoint --badgedir /tmp/badges $@ -- \$EXTRA
 fi
 EOF
 
 CODE=$?
 
+docker cp -L $ARVBOX_CONTAINER:/tmp/badges $cwldir/badges
+
 if test $leave_running = 0 ; then
     arvbox stop
 fi
index 9d6646e875e9c9dc3717bb6d666dc9c05ddb99bd..0cf43405ec0fd09523110be2cee861b26df28ec8 100755 (executable)
@@ -24,7 +24,7 @@ if ! arv-get 20850f01122e860fb878758ac1320877+71 > /dev/null ; then
 fi
 
 # Use the python executor associated with the installed OS package, if present.
-python=$(((ls /usr/share/python3*/dist/python3-arvados-cwl-runner/bin/python || echo python3) | head -n1) 2>/dev/null)
+python="$(PATH="/usr/lib/python3-arvados-cwl-runner/bin:/opt/arvados-py/bin:$PATH" command -v python3)"
 
 # Test for #18888
 # This is a standalone test because the bug was observed with this
index e0bdd8a5a3f6a484e1513071b395206b601a3b9d..cb4a151f0eda9792855ffadc5c4956f8afc7b38e 100644 (file)
   tool: oom/19975-oom.cwl
   doc: "Test feature 19975 - retry on exit 137"
 
+- job: oom/fakeoom.yml
+  output: {}
+  tool: oom/19975-oom-mispelled.cwl
+  doc: "Test feature 19975 - retry on exit 137, old misspelled version"
+
 - job: oom/fakeoom2.yml
   output: {}
   tool: oom/19975-oom.cwl
diff --git a/sdk/cwl/tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-arv-mount.txt b/sdk/cwl/tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-arv-mount.txt
new file mode 100644 (file)
index 0000000..e8e79cc
--- /dev/null
@@ -0,0 +1,10 @@
+2018-10-03T18:21:16.944508412Z crunchstat: keepcalls 0 put 0 get -- interval 10.0000 seconds 0 put 0 get
+2018-10-03T18:21:16.944508412Z crunchstat: net:keep0 0 tx 0 rx -- interval 10.0000 seconds 0 tx 0 rx
+2018-10-03T18:21:16.944508412Z crunchstat: keepcache 0 hit 0 miss -- interval 10.0000 seconds 0 hit 0 miss
+2018-10-03T18:21:16.944508412Z crunchstat: fuseops 0 write 0 read -- interval 10.0000 seconds 0 write 0 read
+2018-10-03T18:21:16.944508412Z crunchstat: blkio:0:0 0 write 0 read -- interval 10.0000 seconds 0 write 0 read
+2018-10-03T18:21:26.954764471Z crunchstat: keepcalls 0 put 0 get -- interval 10.0000 seconds 0 put 0 get
+2018-10-03T18:21:26.954764471Z crunchstat: net:keep0 0 tx 0 rx -- interval 10.0000 seconds 0 tx 0 rx
+2018-10-03T18:21:26.954764471Z crunchstat: keepcache 0 hit 0 miss -- interval 10.0000 seconds 0 hit 0 miss
+2018-10-03T18:21:26.954764471Z crunchstat: fuseops 0 write 0 read -- interval 10.0000 seconds 0 write 0 read
+2018-10-03T18:21:26.954764471Z crunchstat: blkio:0:0 0 write 0 read -- interval 10.0000 seconds 0 write 0 read
diff --git a/sdk/cwl/tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-crunchstat.txt b/sdk/cwl/tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-crunchstat.txt
new file mode 100644 (file)
index 0000000..6580843
--- /dev/null
@@ -0,0 +1,17 @@
+2018-10-03T18:21:07.823780191Z notice: reading stats from /sys/fs/cgroup/cpuacct//slurm_compute0/uid_0/job_6478/step_batch/c1df52c9940aae3f0fd586cacd7c0d7cb81b33aec973a67c9a7519bfe38ea914/cgroup.procs
+2018-10-03T18:21:07.823841282Z notice: monitoring temp dir /tmp/crunch-run.9tee4-dz642-lymtndkpy39eibk.438029160
+2018-10-03T18:21:07.823917514Z notice: reading stats from /sys/fs/cgroup/memory//slurm_compute0/uid_0/job_6478/step_batch/c1df52c9940aae3f0fd586cacd7c0d7cb81b33aec973a67c9a7519bfe38ea914/memory.stat
+2018-10-03T18:21:07.824136521Z mem 0 cache 0 swap 0 pgmajfault 1187840 rss
+2018-10-03T18:21:07.824187182Z notice: reading stats from /sys/fs/cgroup/cpuacct//slurm_compute0/uid_0/job_6478/step_batch/c1df52c9940aae3f0fd586cacd7c0d7cb81b33aec973a67c9a7519bfe38ea914/cpuacct.stat
+2018-10-03T18:21:07.824253726Z notice: reading stats from /sys/fs/cgroup/cpuset//slurm_compute0/uid_0/job_6478/step_batch/c1df52c9940aae3f0fd586cacd7c0d7cb81b33aec973a67c9a7519bfe38ea914/cpuset.cpus
+2018-10-03T18:21:07.824296720Z cpu 0.0000 user 0.0100 sys 20.00 cpus
+2018-10-03T18:21:07.824361476Z notice: reading stats from /sys/fs/cgroup/blkio//slurm_compute0/uid_0/job_6478/step_batch/c1df52c9940aae3f0fd586cacd7c0d7cb81b33aec973a67c9a7519bfe38ea914/blkio.io_service_bytes
+2018-10-03T18:21:07.824551021Z statfs 397741461504 available 4869779456 used 402611240960 total
+2018-10-03T18:21:17.824503045Z mem 172032 cache 0 swap 0 pgmajfault 68247552 rss
+2018-10-03T18:21:17.824702097Z cpu 2.0000 user 0.3800 sys 20.00 cpus -- interval 10.0004 seconds 2.0000 user 0.3700 sys
+2018-10-03T18:21:17.824984621Z net:eth0 51930 tx 844687 rx
+2018-10-03T18:21:17.825021992Z statfs 397740937216 available 4870303744 used 402611240960 total -- interval 10.0005 seconds 524288 used
+2018-10-03T18:21:27.824480114Z mem 172032 cache 0 swap 0 pgmajfault 69525504 rss
+2018-10-03T18:21:27.826909728Z cpu 2.0600 user 0.3900 sys 20.00 cpus -- interval 10.0022 seconds 0.0600 user 0.0100 sys
+2018-10-03T18:21:27.827141860Z net:eth0 55888 tx 859480 rx -- interval 10.0022 seconds 3958 tx 14793 rx
+2018-10-03T18:21:27.827177703Z statfs 397744787456 available 4866453504 used 402611240960 total -- interval 10.0022 seconds -3850240 used
diff --git a/sdk/cwl/tests/oom/19975-oom-mispelled.cwl b/sdk/cwl/tests/oom/19975-oom-mispelled.cwl
new file mode 100644 (file)
index 0000000..bbd26b9
--- /dev/null
@@ -0,0 +1,19 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+cwlVersion: v1.2
+class: CommandLineTool
+$namespaces:
+  arv: "http://arvados.org/cwl#"
+hints:
+  arv:OutOfMemoryRetry:
+    # legacy misspelled name, should behave exactly the same
+    memoryRetryMultipler: 2
+  ResourceRequirement:
+    ramMin: 256
+  arv:APIRequirement: {}
+inputs:
+  fakeoom: File
+outputs: []
+arguments: [python3, $(inputs.fakeoom)]
index ec806487161539522193c04e01eb59f73ab5bfc1..bf3e5cc389172b07bca62658d95943cb55cc755e 100644 (file)
@@ -8,7 +8,7 @@ $namespaces:
   arv: "http://arvados.org/cwl#"
 hints:
   arv:OutOfMemoryRetry:
-    memoryRetryMultipler: 2
+    memoryRetryMultiplier: 2
   ResourceRequirement:
     ramMin: 256
   arv:APIRequirement: {}
index af3271b847cec74c095e764608d1aa3c6a96de07..bbca110b6f59cfc0ecc21f77d5e6b92577a82855 100644 (file)
@@ -8,7 +8,7 @@ $namespaces:
   arv: "http://arvados.org/cwl#"
 hints:
   arv:OutOfMemoryRetry:
-    memoryRetryMultipler: 2
+    memoryRetryMultiplier: 2
     memoryErrorRegex: Whoops
   ResourceRequirement:
     ramMin: 256
index 8e3a8ab85e66e70ff76f4a8357e262ec543b084c..b95b8eb67bbc4d83b357fcb209bccaf6ebf7dca4 100644 (file)
@@ -23,6 +23,7 @@ import cwltool.load_tool
 from cwltool.update import INTERNAL_VERSION
 from schema_salad.ref_resolver import Loader
 from schema_salad.sourceline import cmap
+import io
 
 from .matcher import JsonDiffMatcher, StripYAMLComments
 from .mock_discovery import get_rootDesc
@@ -518,11 +519,47 @@ class TestContainer(unittest.TestCase):
         runner.intermediate_output_ttl = 0
         runner.secret_store = cwltool.secrets.SecretStore()
 
+        runner.api.container_requests().get().execute.return_value = {"container_uuid":"zzzzz-xvhdp-zzzzzzzzzzzzzzz"}
+
         runner.api.containers().get().execute.return_value = {"state":"Complete",
                                                               "output": "abc+123",
                                                               "exit_code": 0}
 
-        col().open.return_value = []
+        # Need to noop-out the close method otherwise it gets
+        # discarded when closed and we can't call getvalue() to check
+        # it.
+        class NoopCloseStringIO(io.StringIO):
+            def close(self):
+                pass
+
+        usage_report = NoopCloseStringIO()
+        def colreader_action(name, mode):
+            nonlocal usage_report
+            if name == "node.json":
+                return io.StringIO("""{
+    "ProviderType": "c5.large",
+    "VCPUs": 2,
+    "RAM": 4294967296,
+    "IncludedScratch": 8000000000000,
+    "AddedScratch": 0,
+    "Price": 0.085,
+    "Preemptible": false,
+    "CUDA": {
+        "DriverVersion": "",
+        "HardwareCapability": "",
+        "DeviceCount": 0
+    }
+}""")
+            if name == 'crunchstat.txt':
+                return open("tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-arv-mount.txt", "rt")
+            if name == 'arv-mount.txt':
+                return open("tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-crunchstat.txt", "rt")
+            if name == 'usage_report.html':
+                return usage_report
+            return None
+
+        col().open.side_effect = colreader_action
+        col().__iter__.return_value = ['node.json', 'crunchstat.txt', 'arv-mount.txt']
 
         loadingContext, runtimeContext = self.helper(runner)
 
@@ -550,12 +587,16 @@ class TestContainer(unittest.TestCase):
             "uuid": "zzzzz-xvhdp-zzzzzzzzzzzzzzz",
             "container_uuid": "zzzzz-8i9sb-zzzzzzzzzzzzzzz",
             "modified_at": "2017-05-26T12:01:22Z",
-            "properties": {}
+            "properties": {},
+            "name": "testjob"
         })
 
         self.assertFalse(api.collections().create.called)
         self.assertFalse(runner.runtime_status_error.called)
 
+        # Assert that something was written to the usage report
+        self.assertTrue(len(usage_report.getvalue()) > 0)
+
         arvjob.collect_outputs.assert_called_with("keep:abc+123", 0)
         arvjob.output_callback.assert_called_with({"out": "stuff"}, "success")
         runner.add_intermediate_output.assert_called_with("zzzzz-4zz18-zzzzzzzzzzzzzz2")
@@ -651,11 +692,14 @@ class TestContainer(unittest.TestCase):
             "properties": {}
         })
 
-        rts_mock.assert_called_with(
-            'error',
-            'arvados.cwl-runner: [container testjob] (zzzzz-xvhdp-zzzzzzzzzzzzzzz) error log:',
-            '  ** log is empty **'
-        )
+        rts_mock.assert_has_calls([
+            mock.call('error',
+                      'arvados.cwl-runner: [container testjob] (zzzzz-xvhdp-zzzzzzzzzzzzzzz) error log:',
+                      '  ** log is empty **'
+                      ),
+            mock.call('warning',
+                      'arvados.cwl-runner: [container testjob] unable to generate resource usage report'
+        )])
         arvjob.output_callback.assert_called_with({"out": "stuff"}, "permanentFail")
 
     # The test passes no builder.resources
index 0169b947066df7a8e37f2224c5c3e5f5548621f3..f66f670d815d1936d88ee079e842ebadf8094a0f 100644 (file)
@@ -24,7 +24,7 @@ RUN python3 -m pip install --no-cache-dir setuptools wheel
 # packages to install.
 COPY . /usr/local/src/
 # Run a-c-r afterward to check for a successful install.
-RUN python3 -m pip install --no-cache-dir /usr/local/src/* && arvados-cwl-runner --version
+RUN python3 -m pip install --no-cache-dir /usr/local/src/* && arvados-cwl-runner --version && crunchstat-summary --version
 
 RUN /usr/sbin/adduser --disabled-password \
       --gecos 'Crunch execution user' crunch && \
index a6b240e2149d5892d3fc2a2dfdbb3d3b22feb857..e7310818f7d745b55fda3ceed59bf2438bfab9e1 100644 (file)
@@ -242,8 +242,9 @@ type LogoutOptions struct {
 }
 
 type BlockReadOptions struct {
-       Locator string
-       WriteTo io.Writer
+       Locator      string
+       WriteTo      io.Writer
+       LocalLocator func(string)
 }
 
 type BlockWriteOptions struct {
@@ -258,8 +259,9 @@ type BlockWriteOptions struct {
 }
 
 type BlockWriteResponse struct {
-       Locator  string
-       Replicas int
+       Locator        string
+       Replicas       int
+       StorageClasses map[string]int
 }
 
 type WebDAVOptions struct {
index 991de1caa90c4e2e98b96015415ebecb44525af8..7bc3d5bc420404559939247b04cb4b7849c620d6 100644 (file)
@@ -307,6 +307,11 @@ func (c *Client) Do(req *http.Request) (*http.Response, error) {
                if c.Timeout == 0 {
                        return false, nil
                }
+               // This check can be removed when
+               // https://github.com/hashicorp/go-retryablehttp/pull/210
+               // (or equivalent) is merged and we update go.mod.
+               // Until then, it is needed to pass
+               // TestNonRetryableStdlibError.
                if respErr != nil && reqErrorRe.MatchString(respErr.Error()) {
                        return false, nil
                }
index 2bd7996b59c0260caf1d61560316c3bc42e09357..d97a2d1fcd2096a7f44983bbc7349ce11c24d307 100644 (file)
@@ -44,6 +44,9 @@ var (
        DefaultProxyTLSHandshakeTimeout = 10 * time.Second
        DefaultProxyKeepAlive           = 120 * time.Second
 
+       DefaultRetryDelay = 2 * time.Second // see KeepClient.RetryDelay
+       MinimumRetryDelay = time.Millisecond
+
        rootCacheDir = "/var/cache/arvados/keep"
        userCacheDir = ".cache/arvados/keep" // relative to HOME
 )
@@ -75,6 +78,8 @@ type ErrNotFound struct {
        multipleResponseError
 }
 
+func (*ErrNotFound) HTTPStatus() int { return http.StatusNotFound }
+
 type InsufficientReplicasError struct{ error }
 
 type OversizeBlockError struct{ error }
@@ -105,14 +110,25 @@ const DiskCacheDisabled = arvados.ByteSizeOrPercent(1)
 
 // KeepClient holds information about Arvados and Keep servers.
 type KeepClient struct {
-       Arvados               *arvadosclient.ArvadosClient
-       Want_replicas         int
-       localRoots            map[string]string
-       writableLocalRoots    map[string]string
-       gatewayRoots          map[string]string
-       lock                  sync.RWMutex
-       HTTPClient            HTTPClient
-       Retries               int
+       Arvados            *arvadosclient.ArvadosClient
+       Want_replicas      int
+       localRoots         map[string]string
+       writableLocalRoots map[string]string
+       gatewayRoots       map[string]string
+       lock               sync.RWMutex
+       HTTPClient         HTTPClient
+
+       // Number of times to automatically retry a read/write
+       // operation after a transient failure.
+       Retries int
+
+       // Initial maximum delay for automatic retry. If zero,
+       // DefaultRetryDelay is used.  The delay after attempt N
+       // (0-based) will be a random duration between
+       // MinimumRetryDelay and RetryDelay * 2^N, not to exceed a cap
+       // of RetryDelay * 10.
+       RetryDelay time.Duration
+
        RequestID             string
        StorageClasses        []string
        DefaultStorageClasses []string                  // Set by cluster's exported config
@@ -141,6 +157,7 @@ func (kc *KeepClient) Clone() *KeepClient {
                gatewayRoots:          kc.gatewayRoots,
                HTTPClient:            kc.HTTPClient,
                Retries:               kc.Retries,
+               RetryDelay:            kc.RetryDelay,
                RequestID:             kc.RequestID,
                StorageClasses:        kc.StorageClasses,
                DefaultStorageClasses: kc.DefaultStorageClasses,
@@ -269,6 +286,7 @@ func (kc *KeepClient) getOrHead(method string, locator string, header http.Heade
 
        var errs []string
 
+       delay := delayCalculator{InitialMaxDelay: kc.RetryDelay}
        triesRemaining := 1 + kc.Retries
 
        serversToTry := kc.getSortedRoots(locator)
@@ -348,6 +366,9 @@ func (kc *KeepClient) getOrHead(method string, locator string, header http.Heade
                        return nil, expectLength, url, resp.Header, nil
                }
                serversToTry = retryList
+               if len(serversToTry) > 0 && triesRemaining > 0 {
+                       time.Sleep(delay.Next())
+               }
        }
        DebugPrintf("DEBUG: %s %s failed: %v", method, locator, errs)
 
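The new RetryDelay comment above describes the schedule: after attempt N (0-based) the client sleeps a random duration between MinimumRetryDelay and RetryDelay * 2^N, capped at RetryDelay * 10. A Go sketch of a calculator with those semantics (the real delayCalculator lives elsewhere in the SDK and may differ in detail; this version only illustrates the documented behavior):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

const (
	MinimumRetryDelay = time.Millisecond
	DefaultRetryDelay = 2 * time.Second
)

// delayCalculator returns successive sleep intervals. The Nth call
// (0-based) yields a random duration in
// [MinimumRetryDelay, InitialMaxDelay*2^N], never exceeding
// InitialMaxDelay*10.
type delayCalculator struct {
	InitialMaxDelay time.Duration
	n               int
}

func (dc *delayCalculator) Next() time.Duration {
	base := dc.InitialMaxDelay
	if base <= 0 {
		base = DefaultRetryDelay
	}
	ceiling := base * 10
	hi := base
	for i := 0; i < dc.n && hi < ceiling; i++ {
		hi *= 2
	}
	if hi > ceiling {
		hi = ceiling
	}
	if hi < MinimumRetryDelay {
		hi = MinimumRetryDelay
	}
	dc.n++
	return MinimumRetryDelay + time.Duration(rand.Int63n(int64(hi-MinimumRetryDelay)+1))
}

func main() {
	dc := delayCalculator{InitialMaxDelay: DefaultRetryDelay}
	for i := 0; i < 5; i++ {
		fmt.Println(dc.Next())
	}
}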
index fe133fe2cbd08f69abdf74efcc93838943d2f2f5..531db31b25cf6cb3d0e20b938358f155922b7433 100644 (file)
@@ -17,6 +17,7 @@ import (
        "os"
        "strings"
        "sync"
+       "sync/atomic"
        "testing"
        "time"
 
@@ -26,8 +27,8 @@ import (
        . "gopkg.in/check.v1"
 )
 
-// Gocheck boilerplate
 func Test(t *testing.T) {
+       DefaultRetryDelay = 50 * time.Millisecond
        TestingT(t)
 }
 
@@ -39,7 +40,10 @@ var _ = Suite(&StandaloneSuite{})
 type ServerRequiredSuite struct{}
 
 // Standalone tests
-type StandaloneSuite struct{}
+type StandaloneSuite struct {
+       origDefaultRetryDelay time.Duration
+       origMinimumRetryDelay time.Duration
+}
 
 var origHOME = os.Getenv("HOME")
 
@@ -47,10 +51,14 @@ func (s *StandaloneSuite) SetUpTest(c *C) {
        RefreshServiceDiscovery()
        // Prevent cache state from leaking between test cases
        os.Setenv("HOME", c.MkDir())
+       s.origDefaultRetryDelay = DefaultRetryDelay
+       s.origMinimumRetryDelay = MinimumRetryDelay
 }
 
 func (s *StandaloneSuite) TearDownTest(c *C) {
        os.Setenv("HOME", origHOME)
+       DefaultRetryDelay = s.origDefaultRetryDelay
+       MinimumRetryDelay = s.origMinimumRetryDelay
 }
 
 func pythonDir() string {
@@ -421,17 +429,17 @@ func (fh FailHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
 }
 
 type FailThenSucceedHandler struct {
+       morefails      int // fail 1 + this many times before succeeding
        handled        chan string
-       count          int
+       count          atomic.Int64
        successhandler http.Handler
        reqIDs         []string
 }
 
 func (fh *FailThenSucceedHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
        fh.reqIDs = append(fh.reqIDs, req.Header.Get("X-Request-Id"))
-       if fh.count == 0 {
+       if int(fh.count.Add(1)) <= fh.morefails+1 {
                resp.WriteHeader(500)
-               fh.count++
                fh.handled <- fmt.Sprintf("http://%s", req.Host)
        } else {
                fh.successhandler.ServeHTTP(resp, req)
@@ -530,7 +538,7 @@ func (s *StandaloneSuite) TestPutB(c *C) {
 }
 
 func (s *StandaloneSuite) TestPutHR(c *C) {
-       hash := fmt.Sprintf("%x+3", md5.Sum([]byte("foo")))
+       hash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
 
        st := &StubPutHandler{
                c:                    c,
@@ -560,14 +568,7 @@ func (s *StandaloneSuite) TestPutHR(c *C) {
 
        kc.SetServiceRoots(localRoots, writableLocalRoots, nil)
 
-       reader, writer := io.Pipe()
-
-       go func() {
-               writer.Write([]byte("foo"))
-               writer.Close()
-       }()
-
-       kc.PutHR(hash, reader, 3)
+       kc.PutHR(hash, bytes.NewBuffer([]byte("foo")), 3)
 
        shuff := NewRootSorter(kc.LocalRoots(), hash).GetSortedRoots()
 
@@ -804,40 +805,68 @@ func (s *StandaloneSuite) TestGetFail(c *C) {
 }
 
 func (s *StandaloneSuite) TestGetFailRetry(c *C) {
+       defer func(origDefault, origMinimum time.Duration) {
+               DefaultRetryDelay = origDefault
+               MinimumRetryDelay = origMinimum
+       }(DefaultRetryDelay, MinimumRetryDelay)
+       DefaultRetryDelay = time.Second / 8
+       MinimumRetryDelay = time.Millisecond
+
        hash := fmt.Sprintf("%x+3", md5.Sum([]byte("foo")))
 
-       st := &FailThenSucceedHandler{
-               handled: make(chan string, 1),
-               successhandler: StubGetHandler{
-                       c,
-                       hash,
-                       "abc123",
-                       http.StatusOK,
-                       []byte("foo")}}
+       for _, delay := range []time.Duration{0, time.Nanosecond, time.Second / 8, time.Second / 16} {
+               c.Logf("=== initial delay %v", delay)
 
-       ks := RunFakeKeepServer(st)
-       defer ks.listener.Close()
+               st := &FailThenSucceedHandler{
+                       morefails: 2,
+                       handled:   make(chan string, 4),
+                       successhandler: StubGetHandler{
+                               c,
+                               hash,
+                               "abc123",
+                               http.StatusOK,
+                               []byte("foo")}}
 
-       arv, err := arvadosclient.MakeArvadosClient()
-       c.Check(err, IsNil)
-       kc, _ := MakeKeepClient(arv)
-       arv.ApiToken = "abc123"
-       kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
+               ks := RunFakeKeepServer(st)
+               defer ks.listener.Close()
 
-       r, n, _, err := kc.Get(hash)
-       c.Assert(err, IsNil)
-       c.Check(n, Equals, int64(3))
+               arv, err := arvadosclient.MakeArvadosClient()
+               c.Check(err, IsNil)
+               kc, _ := MakeKeepClient(arv)
+               arv.ApiToken = "abc123"
+               kc.SetServiceRoots(map[string]string{"x": ks.url}, nil, nil)
+               kc.Retries = 3
+               kc.RetryDelay = delay
+               kc.DiskCacheSize = DiskCacheDisabled
 
-       content, err := ioutil.ReadAll(r)
-       c.Check(err, IsNil)
-       c.Check(content, DeepEquals, []byte("foo"))
-       c.Check(r.Close(), IsNil)
+               t0 := time.Now()
+               r, n, _, err := kc.Get(hash)
+               c.Assert(err, IsNil)
+               c.Check(n, Equals, int64(3))
+               elapsed := time.Since(t0)
 
-       c.Logf("%q", st.reqIDs)
-       c.Assert(len(st.reqIDs) > 1, Equals, true)
-       for _, reqid := range st.reqIDs {
-               c.Check(reqid, Not(Equals), "")
-               c.Check(reqid, Equals, st.reqIDs[0])
+               nonsleeptime := time.Second / 10
+               expect := kc.RetryDelay
+               if expect == 0 {
+                       expect = DefaultRetryDelay
+               }
+               min := MinimumRetryDelay * 3
+               max := expect + expect*2 + expect*2*2 + nonsleeptime
+               c.Check(elapsed >= min, Equals, true, Commentf("elapsed %v / expect min %v", elapsed, min))
+               c.Check(elapsed <= max, Equals, true, Commentf("elapsed %v / expect max %v", elapsed, max))
+
+               content, err := ioutil.ReadAll(r)
+               c.Check(err, IsNil)
+               c.Check(content, DeepEquals, []byte("foo"))
+               c.Check(r.Close(), IsNil)
+
+               c.Logf("%q", st.reqIDs)
+               if c.Check(st.reqIDs, Not(HasLen), 0) {
+                       for _, reqid := range st.reqIDs {
+                               c.Check(reqid, Not(Equals), "")
+                               c.Check(reqid, Equals, st.reqIDs[0])
+                       }
+               }
        }
 }
 
@@ -1484,42 +1513,65 @@ func (s *StandaloneSuite) TestGetIndexWithNoSuchPrefix(c *C) {
 }
 
 func (s *StandaloneSuite) TestPutBRetry(c *C) {
-       st := &FailThenSucceedHandler{
-               handled: make(chan string, 1),
-               successhandler: &StubPutHandler{
-                       c:                    c,
-                       expectPath:           Md5String("foo"),
-                       expectAPIToken:       "abc123",
-                       expectBody:           "foo",
-                       expectStorageClass:   "default",
-                       returnStorageClasses: "",
-                       handled:              make(chan string, 5),
-               },
-       }
+       DefaultRetryDelay = time.Second / 8
+       MinimumRetryDelay = time.Millisecond
+
+       for _, delay := range []time.Duration{0, time.Nanosecond, time.Second / 8, time.Second / 16} {
+               c.Logf("=== initial delay %v", delay)
+
+               st := &FailThenSucceedHandler{
+                       morefails: 5, // handler will fail 6x in total, 3 for each server
+                       handled:   make(chan string, 10),
+                       successhandler: &StubPutHandler{
+                               c:                    c,
+                               expectPath:           Md5String("foo"),
+                               expectAPIToken:       "abc123",
+                               expectBody:           "foo",
+                               expectStorageClass:   "default",
+                               returnStorageClasses: "",
+                               handled:              make(chan string, 5),
+                       },
+               }
 
-       arv, _ := arvadosclient.MakeArvadosClient()
-       kc, _ := MakeKeepClient(arv)
+               arv, _ := arvadosclient.MakeArvadosClient()
+               kc, _ := MakeKeepClient(arv)
+               kc.Retries = 3
+               kc.RetryDelay = delay
+               kc.DiskCacheSize = DiskCacheDisabled
+               kc.Want_replicas = 2
 
-       kc.Want_replicas = 2
-       arv.ApiToken = "abc123"
-       localRoots := make(map[string]string)
-       writableLocalRoots := make(map[string]string)
+               arv.ApiToken = "abc123"
+               localRoots := make(map[string]string)
+               writableLocalRoots := make(map[string]string)
 
-       ks := RunSomeFakeKeepServers(st, 2)
+               ks := RunSomeFakeKeepServers(st, 2)
 
-       for i, k := range ks {
-               localRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
-               writableLocalRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
-               defer k.listener.Close()
-       }
+               for i, k := range ks {
+                       localRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+                       writableLocalRoots[fmt.Sprintf("zzzzz-bi6l4-fakefakefake%03d", i)] = k.url
+                       defer k.listener.Close()
+               }
 
-       kc.SetServiceRoots(localRoots, writableLocalRoots, nil)
+               kc.SetServiceRoots(localRoots, writableLocalRoots, nil)
 
-       hash, replicas, err := kc.PutB([]byte("foo"))
+               t0 := time.Now()
+               hash, replicas, err := kc.PutB([]byte("foo"))
 
-       c.Check(err, IsNil)
-       c.Check(hash, Equals, "")
-       c.Check(replicas, Equals, 2)
+               c.Check(err, IsNil)
+               c.Check(hash, Equals, "")
+               c.Check(replicas, Equals, 2)
+               elapsed := time.Since(t0)
+
+               nonsleeptime := time.Second / 10
+               expect := kc.RetryDelay
+               if expect == 0 {
+                       expect = DefaultRetryDelay
+               }
+               min := MinimumRetryDelay * 3
+               max := expect + expect*2 + expect*2*2
+               max += nonsleeptime
+               checkInterval(c, elapsed, min, max)
+       }
 }
 
 func (s *ServerRequiredSuite) TestMakeKeepClientWithNonDiskTypeService(c *C) {
@@ -1567,3 +1619,60 @@ func (s *ServerRequiredSuite) TestMakeKeepClientWithNonDiskTypeService(c *C) {
        c.Assert(kc.foundNonDiskSvc, Equals, true)
        c.Assert(kc.httpClient().(*http.Client).Timeout, Equals, 300*time.Second)
 }
+
+func (s *StandaloneSuite) TestDelayCalculator_Default(c *C) {
+       MinimumRetryDelay = time.Second / 2
+       DefaultRetryDelay = time.Second
+
+       dc := delayCalculator{InitialMaxDelay: 0}
+       checkInterval(c, dc.Next(), time.Second/2, time.Second)
+       checkInterval(c, dc.Next(), time.Second/2, time.Second*2)
+       checkInterval(c, dc.Next(), time.Second/2, time.Second*4)
+       checkInterval(c, dc.Next(), time.Second/2, time.Second*8)
+       checkInterval(c, dc.Next(), time.Second/2, time.Second*10)
+       checkInterval(c, dc.Next(), time.Second/2, time.Second*10)
+}
+
+func (s *StandaloneSuite) TestDelayCalculator_SetInitial(c *C) {
+       MinimumRetryDelay = time.Second / 2
+       DefaultRetryDelay = time.Second
+
+       dc := delayCalculator{InitialMaxDelay: time.Second * 2}
+       checkInterval(c, dc.Next(), time.Second/2, time.Second*2)
+       checkInterval(c, dc.Next(), time.Second/2, time.Second*4)
+       checkInterval(c, dc.Next(), time.Second/2, time.Second*8)
+       checkInterval(c, dc.Next(), time.Second/2, time.Second*16)
+       checkInterval(c, dc.Next(), time.Second/2, time.Second*20)
+       checkInterval(c, dc.Next(), time.Second/2, time.Second*20)
+       checkInterval(c, dc.Next(), time.Second/2, time.Second*20)
+}
+
+func (s *StandaloneSuite) TestDelayCalculator_EnsureSomeLongDelays(c *C) {
+       dc := delayCalculator{InitialMaxDelay: time.Second * 5}
+       var d time.Duration
+       n := 4000
+       for i := 0; i < n; i++ {
+               if i < 20 || i%10 == 0 {
+                       c.Logf("i=%d, delay=%v", i, d)
+               }
+               if d = dc.Next(); d > dc.InitialMaxDelay*9 {
+                       return
+               }
+       }
+       c.Errorf("after %d trials, never got a delay more than 90%% of expected max %d; last was %v", n, dc.InitialMaxDelay*10, d)
+}
+
+// If InitialMaxDelay is less than MinimumRetryDelay/10, then delay is
+// always MinimumRetryDelay.
+func (s *StandaloneSuite) TestDelayCalculator_InitialLessThanMinimum(c *C) {
+       MinimumRetryDelay = time.Second / 2
+       dc := delayCalculator{InitialMaxDelay: time.Millisecond}
+       for i := 0; i < 20; i++ {
+               c.Check(dc.Next(), Equals, time.Second/2)
+       }
+}
+
+func checkInterval(c *C, t, min, max time.Duration) {
+       c.Check(t >= min, Equals, true, Commentf("got %v which is below expected min %v", t, min))
+       c.Check(t <= max, Equals, true, Commentf("got %v which is above expected max %v", t, max))
+}
index 6acaf64baa34caa265eb10bafcbf36a70d308d9b..d3d799dc5dc2c229d8303d215328fea577c61c10 100644 (file)
@@ -13,10 +13,12 @@ import (
        "io"
        "io/ioutil"
        "log"
+       "math/rand"
        "net/http"
        "os"
        "strconv"
        "strings"
+       "time"
 
        "git.arvados.org/arvados.git/sdk/go/arvados"
        "git.arvados.org/arvados.git/sdk/go/arvadosclient"
@@ -218,6 +220,7 @@ func (kc *KeepClient) httpBlockWrite(ctx context.Context, req arvados.BlockWrite
                replicasPerThread = req.Replicas
        }
 
+       delay := delayCalculator{InitialMaxDelay: kc.RetryDelay}
        retriesRemaining := req.Attempts
        var retryServers []string
 
@@ -306,14 +309,17 @@ func (kc *KeepClient) httpBlockWrite(ctx context.Context, req arvados.BlockWrite
                        }
 
                        if status.statusCode == 0 || status.statusCode == 408 || status.statusCode == 429 ||
-                               (status.statusCode >= 500 && status.statusCode != 503) {
+                               (status.statusCode >= 500 && status.statusCode != http.StatusInsufficientStorage) {
                                // Timeout, too many requests, or other server side failure
-                               // Do not retry when status code is 503, which means the keep server is full
+                               // (do not auto-retry status 507 "full")
                                retryServers = append(retryServers, status.url[0:strings.LastIndex(status.url, "/")])
                        }
                }
 
                sv = retryServers
+               if len(sv) > 0 {
+                       time.Sleep(delay.Next())
+               }
        }
 
        return resp, nil
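
The hunk above also changes which write failures are eligible for retry: the "server is full" signal that is never retried is now 507 Insufficient Storage rather than 503. A standalone restatement of that condition (the helper name retriable is illustrative, not part of the package):

    package main

    import (
            "fmt"
            "net/http"
    )

    // retriable mirrors the condition used above: retry on network errors
    // (status 0), 408, 429, and any 5xx except 507 Insufficient Storage,
    // which indicates the keep server is full.
    func retriable(statusCode int) bool {
            return statusCode == 0 ||
                    statusCode == http.StatusRequestTimeout ||
                    statusCode == http.StatusTooManyRequests ||
                    (statusCode >= 500 && statusCode != http.StatusInsufficientStorage)
    }

    func main() {
            for _, code := range []int{0, 408, 429, 500, 502, 503, 507} {
                    fmt.Printf("%d -> retry: %v\n", code, retriable(code))
            }
    }
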
@@ -345,3 +351,37 @@ func parseStorageClassesConfirmedHeader(hdr string) (map[string]int, error) {
        }
        return classesStored, nil
 }
+
+// delayCalculator calculates a series of delays for implementing
+// exponential backoff with jitter.  The first call to Next() returns
+// a random duration between MinimumRetryDelay and the specified
+// InitialMaxDelay (or DefaultRetryDelay if 0).  The max delay is
+// doubled on each subsequent call to Next(), up to 10x the initial
+// max delay.
+type delayCalculator struct {
+       InitialMaxDelay time.Duration
+       n               int // number of delays returned so far
+       nextmax         time.Duration
+       limit           time.Duration
+}
+
+func (dc *delayCalculator) Next() time.Duration {
+       if dc.nextmax <= MinimumRetryDelay {
+               // initialize
+               if dc.InitialMaxDelay > 0 {
+                       dc.nextmax = dc.InitialMaxDelay
+               } else {
+                       dc.nextmax = DefaultRetryDelay
+               }
+               dc.limit = 10 * dc.nextmax
+       }
+       d := time.Duration(rand.Float64() * float64(dc.nextmax))
+       if d < MinimumRetryDelay {
+               d = MinimumRetryDelay
+       }
+       dc.nextmax *= 2
+       if dc.nextmax > dc.limit {
+               dc.nextmax = dc.limit
+       }
+       return d
+}
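
Since delayCalculator is unexported, here is a self-contained sketch of the schedule it produces, following the same rules as the code above: each delay is uniform-random below the current cap, never less than the minimum, and the cap doubles per call up to 10x the initial value. The minimum (1s) and initial cap (2s) below are illustrative values, not the package defaults.

    package main

    import (
            "fmt"
            "math/rand"
            "time"
    )

    func main() {
            const (
                    minimum    = time.Second     // stands in for MinimumRetryDelay
                    initialMax = 2 * time.Second // stands in for InitialMaxDelay
            )
            maxDelay := initialMax
            limit := 10 * initialMax
            for attempt := 1; attempt <= 8; attempt++ {
                    d := time.Duration(rand.Float64() * float64(maxDelay))
                    if d < minimum {
                            d = minimum
                    }
                    fmt.Printf("retry %d: sleep %v (cap %v)\n", attempt, d, maxDelay)
                    maxDelay *= 2
                    if maxDelay > limit {
                            maxDelay = limit
                    }
            }
    }
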
index ab03d34f19b1e0d1e8714edf3dfca186b2efbc12..4bd59a75d7e0084e4429af13d054bb13d37db67a 100644 (file)
@@ -27,7 +27,7 @@ import java.util.Map;
 
 public abstract class BaseStandardApiClient<T extends Item, L extends ItemList> extends BaseApiClient {
 
-    private static final MediaType JSON = MediaType.parse(com.google.common.net.MediaType.JSON_UTF_8.toString());
+    protected static final MediaType JSON = MediaType.parse(com.google.common.net.MediaType.JSON_UTF_8.toString());
     private final Logger log = org.slf4j.LoggerFactory.getLogger(BaseStandardApiClient.class);
 
     BaseStandardApiClient(ConfigProvider config) {
@@ -107,7 +107,7 @@ public abstract class BaseStandardApiClient<T extends Item, L extends ItemList>
         return MAPPER.readValue(content, cls);
     }
 
-    private <TL> String mapToJson(TL type) {
+    protected <TL> String mapToJson(TL type) {
         ObjectWriter writer = MAPPER.writer().withDefaultPrettyPrinter();
         try {
             return writer.writeValueAsString(type);
index 141f02deba38e6227e0c6b24ef881fd5cdae422a..581253f53cd2cf1fbb2d6b96aa9f7e616cac1fcb 100644 (file)
@@ -9,12 +9,18 @@ package org.arvados.client.api.client;
 
 import org.arvados.client.api.model.Collection;
 import org.arvados.client.api.model.CollectionList;
+import org.arvados.client.api.model.CollectionReplaceFiles;
 import org.arvados.client.config.ConfigProvider;
 import org.slf4j.Logger;
 
+import okhttp3.HttpUrl;
+import okhttp3.Request;
+import okhttp3.RequestBody;
+
 public class CollectionsApiClient extends BaseStandardApiClient<Collection, CollectionList> {
 
     private static final String RESOURCE = "collections";
+
     private final Logger log = org.slf4j.LoggerFactory.getLogger(CollectionsApiClient.class);
 
     public CollectionsApiClient(ConfigProvider config) {
@@ -28,6 +34,14 @@ public class CollectionsApiClient extends BaseStandardApiClient<Collection, Coll
         return newCollection;
     }
 
+    public Collection update(String collectionUUID, CollectionReplaceFiles replaceFilesRequest) {
+        String json = mapToJson(replaceFilesRequest);
+        RequestBody body = RequestBody.create(JSON, json);
+        HttpUrl url = getUrlBuilder().addPathSegment(collectionUUID).build();
+        Request request = getRequestBuilder().put(body).url(url).build();
+        return callForType(request);
+    }
+
     @Override
     String getResource() {
         return RESOURCE;
index 2c3168649ff70b734eccb57b627a83967f3cf94e..ad37dad2bbda5e88296c52d5905701d4bd34cbff 100644 (file)
@@ -10,9 +10,13 @@ package org.arvados.client.api.client;
 import okhttp3.HttpUrl;
 import okhttp3.Request;
 import okhttp3.RequestBody;
+import okhttp3.Response;
+import okhttp3.ResponseBody;
+
 import org.arvados.client.config.ConfigProvider;
 
 import java.io.File;
+import java.io.IOException;
 import java.io.InputStream;
 
 public class KeepWebApiClient extends BaseApiClient {
@@ -30,6 +34,27 @@ public class KeepWebApiClient extends BaseApiClient {
         return newFileCall(request);
     }
 
+    public InputStream get(String collectionUuid, String filePathName, long start, Long end) throws IOException {
+        Request.Builder builder = this.getRequestBuilder();
+        String rangeValue = "bytes=" + start + "-";
+        if (end != null) {
+            rangeValue += end;
+        }
+        builder.addHeader("Range", rangeValue);
+        Request request = builder.url(this.getUrlBuilder(collectionUuid, filePathName).build()).get().build();
+        Response response = client.newCall(request).execute();
+        if (!response.isSuccessful()) {
+            response.close();
+            throw new IOException("Failed to download file: " + response);
+        }
+        ResponseBody body = response.body();
+        if (body == null) {
+            response.close();
+            throw new IOException("Response body is null for request: " + request);
+        }
+        return body.byteStream();
+    }
+
     public String delete(String collectionUuid, String filePathName) {
         Request request = getRequestBuilder()
                 .url(getUrlBuilder(collectionUuid, filePathName).build())
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/model/CollectionReplaceFiles.java b/sdk/java-v2/src/main/java/org/arvados/client/api/model/CollectionReplaceFiles.java
new file mode 100644 (file)
index 0000000..2ef19ce
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.model;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import java.util.HashMap;
+import java.util.Map;
+
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class CollectionReplaceFiles {
+
+    @JsonProperty("collection")
+    private CollectionOptions collectionOptions;
+
+    @JsonProperty("replace_files")
+    private Map<String, String> replaceFiles;
+
+    public CollectionReplaceFiles() {
+        this.collectionOptions = new CollectionOptions();
+        this.replaceFiles = new HashMap<>();
+    }
+
+    public void addFileReplacement(String targetPath, String sourcePath) {
+        this.replaceFiles.put(targetPath, sourcePath);
+    }
+
+    @JsonInclude(JsonInclude.Include.NON_NULL)
+    @JsonIgnoreProperties(ignoreUnknown = true)
+    public static class CollectionOptions {
+        @JsonProperty("preserve_version")
+        private boolean preserveVersion;
+
+        public CollectionOptions() {
+            this.preserveVersion = true;
+        }
+
+        public boolean isPreserveVersion() {
+            return preserveVersion;
+        }
+
+        public void setPreserveVersion(boolean preserveVersion) {
+            this.preserveVersion = preserveVersion;
+        }
+    }
+
+    public CollectionOptions getCollectionOptions() {
+        return collectionOptions;
+    }
+
+    public void setCollectionOptions(CollectionOptions collectionOptions) {
+        this.collectionOptions = collectionOptions;
+    }
+
+    public Map<String, String> getReplaceFiles() {
+        return replaceFiles;
+    }
+
+    public void setReplaceFiles(Map<String, String> replaceFiles) {
+        this.replaceFiles = replaceFiles;
+    }
+}
\ No newline at end of file
index 571cb2590906f9d041a342dbf26d95724184e3b0..8b65cebc59a0d20d0c9ba1b6add34aaf65e2a584 100644 (file)
@@ -28,6 +28,7 @@ import java.io.File;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
+import java.util.Map;
 
 public class ArvadosFacade {
 
@@ -201,6 +202,21 @@ public class ArvadosFacade {
         return collectionsApiClient.create(collection);
     }
 
+    /**
+     * Uploads multiple files to an existing collection.
+     *
+     * @param collectionUUID UUID of the collection to which the files are to be copied
+     * @param files          map of target file paths to source paths describing the files
+     *                       to copy into the existing collection; a source path is a file
+     *                       path qualified with its portable data hash
+     * @return collection object mapped from the JSON returned by the server after a successful copy
+     */
+    public Collection updateWithReplaceFiles(String collectionUUID, Map<String, String> files) {
+        CollectionReplaceFiles replaceFilesRequest = new CollectionReplaceFiles();
+        replaceFilesRequest.getReplaceFiles().putAll(files);
+        return collectionsApiClient.update(collectionUUID, replaceFilesRequest);
+    }
+
     /**
      * Returns current user information based on Api Token provided via configuration
      *
index c1e8849e39f625128133bea1d8376e01e005ca54..5bfcabc10984bdb55a20bf130a2be0c88d819254 100644 (file)
@@ -23,6 +23,8 @@ import org.slf4j.Logger;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.InputStream;
+import java.io.RandomAccessFile;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.CompletableFuture;
@@ -70,6 +72,37 @@ public class FileDownloader {
         return downloadedFile;
     }
 
+    public File downloadFileWithResume(String collectionUuid, String fileName, String pathToDownloadFolder, long start, Long end) throws IOException {
+        if (end != null && end < start) {
+            throw new IllegalArgumentException("End index must be greater than or equal to the start index");
+        }
+
+        File destinationFile = new File(pathToDownloadFolder, fileName);
+
+        if (!destinationFile.exists()) {
+            boolean isCreated = destinationFile.createNewFile();
+            if (!isCreated) {
+                throw new IOException("Failed to create new file: " + destinationFile.getAbsolutePath());
+            }
+        }
+
+        try (RandomAccessFile outputFile = new RandomAccessFile(destinationFile, "rw");
+             InputStream inputStream = keepWebApiClient.get(collectionUuid, fileName, start, end)) {
+            outputFile.seek(start);
+
+            long remaining = (end == null) ? Long.MAX_VALUE : end - start + 1;
+            byte[] buffer = new byte[4096];
+            int bytesRead;
+            while ((bytesRead = inputStream.read(buffer)) != -1 && remaining > 0) {
+                int bytesToWrite = (int) Math.min(bytesRead, remaining);
+                outputFile.write(buffer, 0, bytesToWrite);
+                remaining -= bytesToWrite;
+            }
+        }
+
+        return destinationFile;
+    }
+
     public List<File> downloadFilesFromCollectionUsingKeepWeb(String collectionUuid, String pathToDownloadFolder) {
         String collectionTargetDir = setTargetDirectory(collectionUuid, pathToDownloadFolder).getAbsolutePath();
         List<FileToken> fileTokens = listFileInfoFromCollection(collectionUuid);
index 8da3bfbf514b04c6f188bb0f5e1185d42c8002d9..94a79041a0f135bb997ec9b3704d29eb58811bfd 100644 (file)
@@ -7,21 +7,39 @@
 
 package org.arvados.client.api.client;
 
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.SerializationFeature;
 import okhttp3.mockwebserver.RecordedRequest;
 import org.arvados.client.api.model.Collection;
 import org.arvados.client.api.model.CollectionList;
+import org.arvados.client.api.model.CollectionReplaceFiles;
 import org.arvados.client.test.utils.RequestMethod;
 import org.arvados.client.test.utils.ArvadosClientMockedWebServerTest;
+import org.junit.Before;
 import org.junit.Test;
 
 import static org.arvados.client.test.utils.ApiClientTestUtils.*;
 import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.Assert.assertEquals;
 
 public class CollectionsApiClientTest extends ArvadosClientMockedWebServerTest {
 
     private static final String RESOURCE = "collections";
-
-    private CollectionsApiClient client = new CollectionsApiClient(CONFIG);
+    private static final String TEST_COLLECTION_NAME = "Super Collection";
+    private static final String TEST_COLLECTION_UUID = "test-collection-uuid";
+    private ObjectMapper objectMapper;
+    private CollectionsApiClient client;
+
+    @Before
+    public void setUp() {
+        objectMapper = new ObjectMapper();
+        objectMapper.configure(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS, true);
+        client = new CollectionsApiClient(CONFIG);
+    }
 
     @Test
     public void listCollections() throws Exception {
@@ -66,7 +84,7 @@ public class CollectionsApiClientTest extends ArvadosClientMockedWebServerTest {
         // given
         server.enqueue(getResponse("collections-create-simple"));
 
-        String name = "Super Collection";
+        String name = TEST_COLLECTION_NAME;
         
         Collection collection = new Collection();
         collection.setName(name);
@@ -90,7 +108,7 @@ public class CollectionsApiClientTest extends ArvadosClientMockedWebServerTest {
         // given
         server.enqueue(getResponse("collections-create-manifest"));
 
-        String name = "Super Collection";
+        String name = TEST_COLLECTION_NAME;
         String manifestText = ". 7df44272090cee6c0732382bba415ee9+70+Aa5ece4560e3329315165b36c239b8ab79c888f8a@5a1d5708 0:70:README.md\n";
         
         Collection collection = new Collection();
@@ -109,4 +127,45 @@ public class CollectionsApiClientTest extends ArvadosClientMockedWebServerTest {
         assertThat(actual.getPortableDataHash()).isEqualTo("d41d8cd98f00b204e9800998ecf8427e+0");
         assertThat(actual.getManifestText()).isEqualTo(manifestText);
     }
+
+    @Test
+    public void testUpdateWithReplaceFiles() throws IOException, InterruptedException {
+        // given
+        server.enqueue(getResponse("collections-create-manifest"));
+
+        Map<String, String> files = new HashMap<>();
+        files.put("targetPath1", "sourcePath1");
+        files.put("targetPath2", "sourcePath2");
+
+        CollectionReplaceFiles replaceFilesRequest = new CollectionReplaceFiles();
+        replaceFilesRequest.setReplaceFiles(files);
+
+        // when
+        Collection actual = client.update(TEST_COLLECTION_UUID, replaceFilesRequest);
+
+        // then
+        RecordedRequest request = server.takeRequest();
+        assertAuthorizationHeader(request);
+        assertRequestPath(request, "collections/test-collection-uuid");
+        assertRequestMethod(request, RequestMethod.PUT);
+        assertThat(actual.getPortableDataHash()).isEqualTo("d41d8cd98f00b204e9800998ecf8427e+0");
+
+        String actualRequestBody = request.getBody().readUtf8();
+        Map<String, Object> actualRequestMap = objectMapper.readValue(actualRequestBody, Map.class);
+
+        Map<String, Object> expectedRequestMap = new HashMap<>();
+        Map<String, Object> collectionOptionsMap = new HashMap<>();
+        collectionOptionsMap.put("preserve_version", true);
+
+        Map<String, String> replaceFilesMap = new HashMap<>();
+        replaceFilesMap.put("targetPath1", "sourcePath1");
+        replaceFilesMap.put("targetPath2", "sourcePath2");
+
+        expectedRequestMap.put("collection", collectionOptionsMap);
+        expectedRequestMap.put("replace_files", replaceFilesMap);
+
+        String expectedJson = objectMapper.writeValueAsString(expectedRequestMap);
+        String actualJson = objectMapper.writeValueAsString(actualRequestMap);
+        assertEquals(expectedJson, actualJson);
+    }
 }
index 07b7b2533991a1a32e3c0b7a5c6587b9ed07dec2..9b6b4fa17fe094f55935004414bcf9ddbb5d75d7 100644 (file)
@@ -10,15 +10,23 @@ package org.arvados.client.api.client;
 import org.arvados.client.test.utils.ArvadosClientMockedWebServerTest;
 import org.junit.Test;
 
+import java.io.ByteArrayOutputStream;
 import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
 import java.nio.file.Files;
 
+import okhttp3.mockwebserver.MockResponse;
+import okio.Buffer;
+
 import static org.arvados.client.test.utils.ApiClientTestUtils.getResponse;
 import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertNotNull;
 
 public class KeepWebApiClientTest extends ArvadosClientMockedWebServerTest {
 
-    private KeepWebApiClient client = new KeepWebApiClient(CONFIG);
+    private final KeepWebApiClient client = new KeepWebApiClient(CONFIG);
 
     @Test
     public void uploadFile() throws Exception {
@@ -36,4 +44,38 @@ public class KeepWebApiClientTest extends ArvadosClientMockedWebServerTest {
         assertThat(uploadResponse).isEqualTo("Created");
     }
 
+    @Test
+    public void downloadPartialIsPerformedSuccessfully() throws Exception {
+        // given
+        String collectionUuid = "some-collection-uuid";
+        String filePathName = "sample-file-path";
+        long start = 1024;
+        Long end = null;
+
+        byte[] expectedData = "test data".getBytes();
+
+        try (Buffer buffer = new Buffer().write(expectedData)) {
+            server.enqueue(new MockResponse().setBody(buffer));
+
+            // when
+            InputStream inputStream = client.get(collectionUuid, filePathName, start, end);
+            byte[] actualData = inputStreamToByteArray(inputStream);
+
+            // then
+            assertNotNull(actualData);
+            assertArrayEquals(expectedData, actualData);
+        }
+    }
+
+    private byte[] inputStreamToByteArray(InputStream inputStream) throws IOException {
+        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
+        int nRead;
+        byte[] data = new byte[1024];
+        while ((nRead = inputStream.read(data, 0, data.length)) != -1) {
+            buffer.write(data, 0, nRead);
+        }
+        buffer.flush();
+        return buffer.toByteArray();
+    }
+
 }
index 0fb1f0206c5afad8aa6717e193568fc25a1453ea..741f80f7c99bee94e996470d62d7d09585076eb7 100644 (file)
@@ -19,7 +19,6 @@ import org.arvados.client.test.utils.FileTestUtils;
 import org.arvados.client.utils.FileMerge;
 import org.apache.commons.io.FileUtils;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -27,8 +26,11 @@ import org.mockito.InjectMocks;
 import org.mockito.Mock;
 import org.mockito.junit.MockitoJUnitRunner;
 
+import java.io.ByteArrayInputStream;
 import java.io.File;
 import java.io.IOException;
+import java.io.InputStream;
+import java.nio.file.Files;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
@@ -36,6 +38,10 @@ import java.util.UUID;
 
 import static org.arvados.client.test.utils.FileTestUtils.*;
 import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.when;
 
 @RunWith(MockitoJUnitRunner.class)
@@ -80,17 +86,17 @@ public class FileDownloaderTest {
         List<File> downloadedFiles = fileDownloader.downloadFilesFromCollection(collectionToDownload.getUuid(), FILE_DOWNLOAD_TEST_DIR);
 
         //then
-        Assert.assertEquals(3, downloadedFiles.size()); // 3 files downloaded
+        assertEquals(3, downloadedFiles.size()); // 3 files downloaded
 
         File collectionDir = new File(FILE_DOWNLOAD_TEST_DIR + Characters.SLASH + collectionToDownload.getUuid());
-        Assert.assertTrue(collectionDir.exists()); // collection directory created
+        assertTrue(collectionDir.exists()); // collection directory created
 
         // 3 files correctly saved
         assertThat(downloadedFiles).allMatch(File::exists);
 
         for(int i = 0; i < downloadedFiles.size(); i ++) {
             File downloaded = new File(collectionDir + Characters.SLASH + files.get(i).getName());
-            Assert.assertArrayEquals(FileUtils.readFileToByteArray(downloaded), FileUtils.readFileToByteArray(files.get(i)));
+            assertArrayEquals(FileUtils.readFileToByteArray(downloaded), FileUtils.readFileToByteArray(files.get(i)));
         }
     }
 
@@ -108,9 +114,32 @@ public class FileDownloaderTest {
         File downloadedFile = fileDownloader.downloadSingleFileUsingKeepWeb(file.getName(), collectionToDownload.getUuid(), FILE_DOWNLOAD_TEST_DIR);
 
         //then
-        Assert.assertTrue(downloadedFile.exists());
-        Assert.assertEquals(file.getName(), downloadedFile.getName());
-        Assert.assertArrayEquals(FileUtils.readFileToByteArray(downloadedFile), FileUtils.readFileToByteArray(file));
+        assertTrue(downloadedFile.exists());
+        assertEquals(file.getName(), downloadedFile.getName());
+        assertArrayEquals(FileUtils.readFileToByteArray(downloadedFile), FileUtils.readFileToByteArray(file));
+    }
+
+    @Test
+    public void testDownloadFileWithResume() throws Exception {
+        //given
+        String collectionUuid = "some-collection-uuid";
+        String expectedDataString = "testData";
+        String fileName = "sample-file-name";
+        long start = 0;
+        Long end = null;
+
+        InputStream inputStream = new ByteArrayInputStream(expectedDataString.getBytes());
+
+        when(keepWebApiClient.get(collectionUuid, fileName, start, end)).thenReturn(inputStream);
+
+        //when
+        File downloadedFile = fileDownloader.downloadFileWithResume(collectionUuid, fileName, FILE_DOWNLOAD_TEST_DIR, start, end);
+
+        //then
+        assertNotNull(downloadedFile);
+        assertTrue(downloadedFile.exists());
+        String actualDataString = Files.readString(downloadedFile.toPath());
+        assertEquals("The content of the file does not match the expected data.", expectedDataString, actualDataString);
     }
 
     @After
index 17454b7d17394dbdc5a7dce36ac82c585fee2d54..6c792b2e0d54d7f1e25ffa9850723b4dc9289cc0 100644 (file)
@@ -4,12 +4,21 @@
 
 import argparse
 import errno
-import os
+import json
 import logging
+import os
+import re
 import signal
-from future.utils import listitems, listvalues
 import sys
 
+FILTER_STR_RE = re.compile(r'''
+^\(
+\ *(\w+)
+\ *(<|<=|=|>=|>)
+\ *(\w+)
+\ *\)$
+''', re.ASCII | re.VERBOSE)
+
 def _pos_int(s):
     num = int(s)
     if num < 0:
@@ -61,5 +70,89 @@ def install_signal_handlers():
                             for sigcode in CAUGHT_SIGNALS}
 
 def restore_signal_handlers():
-    for sigcode, orig_handler in listitems(orig_signal_handlers):
+    for sigcode, orig_handler in orig_signal_handlers.items():
         signal.signal(sigcode, orig_handler)
+
+def validate_filters(filters):
+    """Validate user-provided filters
+
+    This function validates that a user-defined object represents valid
+    Arvados filters that can be passed to an API client: that it's a list of
+    3-element lists with the field name and operator given as strings. If any
+    of these conditions are not true, it raises a ValueError with details about
+    the problem.
+
+    It returns validated filters. Currently the provided filters are returned
+    unmodified. Future versions of this function may clean up the filters with
+    "obvious" type conversions, so callers SHOULD use the returned value for
+    Arvados API calls.
+    """
+    if not isinstance(filters, list):
+        raise ValueError(f"filters are not a list: {filters!r}")
+    for index, f in enumerate(filters):
+        if isinstance(f, str):
+            match = FILTER_STR_RE.fullmatch(f)
+            if match is None:
+                raise ValueError(f"filter at index {index} has invalid syntax: {f!r}")
+            s, op, o = match.groups()
+            if s[0].isdigit():
+                raise ValueError(f"filter at index {index} has invalid syntax: bad field name {s!r}")
+            if o[0].isdigit():
+                raise ValueError(f"filter at index {index} has invalid syntax: bad field name {o!r}")
+            continue
+        elif not isinstance(f, list):
+            raise ValueError(f"filter at index {index} is not a string or list: {f!r}")
+        try:
+            s, op, o = f
+        except ValueError:
+            raise ValueError(
+                f"filter at index {index} does not have three items (field name, operator, operand): {f!r}",
+            ) from None
+        if not isinstance(s, str):
+            raise ValueError(f"filter at index {index} field name is not a string: {s!r}")
+        if not isinstance(op, str):
+            raise ValueError(f"filter at index {index} operator is not a string: {op!r}")
+    return filters
+
+
+class JSONArgument:
+    """Parse a JSON file from a command line argument string or path
+
+    JSONArgument objects can be called with a string and return an arbitrary
+    object. First it will try to decode the string as JSON. If that fails, it
+    will try to open a file at the path named by the string, and decode it as
+    JSON. If that fails, it raises ValueError with more detail.
+
+    This is designed to be used as an argparse argument type.
+    Typical usage looks like:
+
+        parser = argparse.ArgumentParser()
+        parser.add_argument('--object', type=JSONArgument(), ...)
+
+    You can construct JSONArgument with an optional validation function. If
+    given, it is called with the object decoded from user input, and its
+    return value replaces it. It should raise ValueError if there is a problem
+    with the input. (argparse turns ValueError into a useful error message.)
+
+        filters_type = JSONArgument(validate_filters)
+        parser.add_argument('--filters', type=filters_type, ...)
+    """
+    def __init__(self, validator=None):
+        self.validator = validator
+
+    def __call__(self, value):
+        try:
+            retval = json.loads(value)
+        except json.JSONDecodeError:
+            try:
+                with open(value, 'rb') as json_file:
+                    retval = json.load(json_file)
+            except json.JSONDecodeError as error:
+                raise ValueError(f"error decoding JSON from file {value!r}: {error}") from None
+            except (FileNotFoundError, ValueError):
+                raise ValueError(f"not a valid JSON string or file path: {value!r}") from None
+            except OSError as error:
+                raise ValueError(f"error reading JSON file path {value!r}: {error.strerror}") from None
+        if self.validator is not None:
+            retval = self.validator(retval)
+        return retval
index 7b7367080fdd0780ddcf23c03cd197ea949ceeb8..6823ee1beada080526c9a9aa901d752e7b5aefd9 100644 (file)
@@ -2,34 +2,29 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
-from builtins import next
 import argparse
 import collections
 import datetime
 import errno
+import fcntl
 import json
+import logging
 import os
 import re
+import subprocess
 import sys
 import tarfile
 import tempfile
-import shutil
-import _strptime
-import fcntl
+
+import ciso8601
 from operator import itemgetter
 from stat import *
 
-import subprocess
-
 import arvados
+import arvados.config
 import arvados.util
 import arvados.commands._util as arv_cmd
 import arvados.commands.put as arv_put
-from arvados.collection import CollectionReader
-import ciso8601
-import logging
-import arvados.config
-
 from arvados._version import __version__
 
 logger = logging.getLogger('arvados.keepdocker')
@@ -356,6 +351,25 @@ def _uuid2pdh(api, uuid):
         select=['portable_data_hash'],
     ).execute()['items'][0]['portable_data_hash']
 
+def load_image_metadata(image_file):
+    """Load an image manifest and config from an archive
+
+    Given an image archive as an open binary file object, this function loads
+    the image manifest and configuration, deserializing each from JSON and
+    returning them in a 2-tuple of dicts.
+    """
+    image_file.seek(0)
+    with tarfile.open(fileobj=image_file) as image_tar:
+        with image_tar.extractfile('manifest.json') as manifest_file:
+            image_manifest_list = json.load(manifest_file)
+        # Because arv-keepdocker only saves one image, there should only be
+        # one manifest.  This extracts that from the list and raises
+        # ValueError if there's not exactly one.
+        image_manifest, = image_manifest_list
+        with image_tar.extractfile(image_manifest['Config']) as config_file:
+            image_config = json.load(config_file)
+    return image_manifest, image_config
+
 def main(arguments=None, stdout=sys.stdout, install_sig_handlers=True, api=None):
     args = arg_parser.parse_args(arguments)
     if api is None:
@@ -532,21 +546,9 @@ def main(arguments=None, stdout=sys.stdout, install_sig_handlers=True, api=None)
         # Managed properties could be already set
         coll_properties = api.collections().get(uuid=coll_uuid).execute(num_retries=args.retries).get('properties', {})
         coll_properties.update({"docker-image-repo-tag": image_repo_tag})
-
         api.collections().update(uuid=coll_uuid, body={"properties": coll_properties}).execute(num_retries=args.retries)
 
-        # Read the image metadata and make Arvados links from it.
-        image_file.seek(0)
-        image_tar = tarfile.open(fileobj=image_file)
-        image_hash_type, _, raw_image_hash = image_hash.rpartition(':')
-        if image_hash_type:
-            json_filename = raw_image_hash + '.json'
-        else:
-            json_filename = raw_image_hash + '/json'
-        json_file = image_tar.extractfile(image_tar.getmember(json_filename))
-        image_metadata = json.loads(json_file.read().decode('utf-8'))
-        json_file.close()
-        image_tar.close()
+        _, image_metadata = load_image_metadata(image_file)
         link_base = {'head_uuid': coll_uuid, 'properties': {}}
         if 'created' in image_metadata:
             link_base['properties']['image_timestamp'] = image_metadata['created']
index 1da8cf4946c652bfca3208ca632821144ccab1f5..f247afeaffb13071dde5b750bfd16417417ce8ed 100644 (file)
@@ -2,23 +2,20 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
-from __future__ import division
-from future import standard_library
-standard_library.install_aliases()
-
-import email.utils
-import time
+import calendar
+import dataclasses
 import datetime
-import re
-import arvados
-import arvados.collection
-import urllib.parse
+import email.utils
 import logging
-import calendar
+import re
+import time
+import typing
 import urllib.parse
+
 import pycurl
-import dataclasses
-import typing
+
+import arvados
+import arvados.collection
 from arvados._pycurlhelper import PyCurlHelper
 
 logger = logging.getLogger('arvados.http_import')
index 284a460f1a175e7e081ebb862a276fb4e8e45e3e..e3d66aa472e655e4dbbd92d755c8adf28f9b8f0b 100644 (file)
@@ -116,7 +116,6 @@ setup(name='arvados-python-client',
       ],
       install_requires=[
           'ciso8601 >=2.0.0',
-          'dataclasses; python_version<"3.7"',
           'future',
           'google-api-core <2.11.0', # 2.11.0rc1 is incompatible with google-auth<2
           'google-api-python-client >=2.1.0',
@@ -127,11 +126,9 @@ setup(name='arvados-python-client',
           'pyparsing <3',
           'ruamel.yaml >=0.15.54, <0.17.22',
           'setuptools >=40.3.0',
-          # As of 4.8.0rc1, typing_extensions does not parse in Python 3.7
-          'typing_extensions >=3.7.4, <4.8; python_version<"3.8"',
           'websockets >=11.0',
-          'websockets ~=11.0; python_version<"3.8"',
       ],
+      python_requires="~=3.8",
       classifiers=[
           'Programming Language :: Python :: 3',
       ],
diff --git a/sdk/python/tests/data/hello-world-ManifestV2-OCILayout.tar b/sdk/python/tests/data/hello-world-ManifestV2-OCILayout.tar
new file mode 100644 (file)
index 0000000..a4b3d86
Binary files /dev/null and b/sdk/python/tests/data/hello-world-ManifestV2-OCILayout.tar differ
diff --git a/sdk/python/tests/data/hello-world-ManifestV2.tar b/sdk/python/tests/data/hello-world-ManifestV2.tar
new file mode 100644 (file)
index 0000000..b98e7c7
Binary files /dev/null and b/sdk/python/tests/data/hello-world-ManifestV2.tar differ
diff --git a/sdk/python/tests/data/hello-world-README.txt b/sdk/python/tests/data/hello-world-README.txt
new file mode 100644 (file)
index 0000000..8c6a7de
--- /dev/null
@@ -0,0 +1,25 @@
+The hello-world-*.tar files are archived from the official Docker
+hello-world:latest image available on 2024-02-01,
+sha256:d2c94e258dcb3c5ac2798d32e1249e42ef01cba4841c2234249495f87264ac5a.
+<https://github.com/docker-library/hello-world/tree/a2269bdb107d086851a5e3d448cf47770b50bff7>
+
+Copyright (c) 2014 Docker, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
index e5dd8aa913892db7c7413d1820cc659e5d38c7fd..46981e5016a31892739f7d03ae1ac0ba16490536 100755 (executable)
@@ -1,4 +1,7 @@
 #!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
 
 if test -z "$WORKSPACE" ; then
     echo "WORKSPACE unset"
@@ -11,20 +14,13 @@ docker rm fedbox1-data fedbox2-data fedbox3-data
 
 set -ex
 
-mkdir -p $WORKSPACE/tmp
-cd $WORKSPACE/tmp
-virtualenv --python python3 venv3
-. venv3/bin/activate
-
-cd $WORKSPACE/sdk/python
-pip install -e .
-
-cd $WORKSPACE/sdk/cwl
-pip install -e .
+mkdir -p "$WORKSPACE/tmp/arvbox"
+python3 -m venv "$WORKSPACE/tmp/venv3"
+"$WORKSPACE/tmp/venv3/bin/pip" install -e "$WORKSPACE/sdk/python" "$WORKSPACE/sdk/cwl"
+alias cwltool='"$WORKSPACE/tmp/venv3/bin/cwltool"'
 
 export PATH=$PATH:$WORKSPACE/tools/arvbox/bin
 
-mkdir -p $WORKSPACE/tmp/arvbox
 cd $WORKSPACE/sdk/python/tests/fed-migrate
 cwltool arvbox-make-federation.cwl \
        --arvbox_base $WORKSPACE/tmp/arvbox \
index 526fd68727bb3833761b84c08d4eb5ae28a7ea44..9aebc0350424e0b4051d14687cf7c4376135c18c 100644 (file)
@@ -2,23 +2,24 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
-from __future__ import absolute_import
 import arvados
 import collections
+import collections.abc
 import copy
 import hashlib
+import logging
 import mock
 import os
 import subprocess
 import sys
 import tempfile
 import unittest
-import logging
+from pathlib import Path
+
+import parameterized
 
 import arvados.commands.keepdocker as arv_keepdocker
 from . import arvados_testutil as tutil
-from . import run_test_server
-
 
 class StopTest(Exception):
     pass
@@ -226,3 +227,30 @@ class ArvKeepdockerTestCase(unittest.TestCase, tutil.VersionChecker):
         api().collections().update.assert_called_with(
             uuid=mocked_collection['uuid'],
             body={'properties': updated_properties})
+
+
+@parameterized.parameterized_class(('filename',), [
+    ('hello-world-ManifestV2.tar',),
+    ('hello-world-ManifestV2-OCILayout.tar',),
+])
+class ImageMetadataTestCase(unittest.TestCase):
+    DATA_PATH = Path(__file__).parent / 'data'
+
+    @classmethod
+    def setUpClass(cls):
+        cls.image_file = (cls.DATA_PATH / cls.filename).open('rb')
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.image_file.close()
+
+    def setUp(self):
+        self.manifest, self.config = arv_keepdocker.load_image_metadata(self.image_file)
+
+    def test_image_manifest(self):
+        self.assertIsInstance(self.manifest, collections.abc.Mapping)
+        self.assertEqual(self.manifest.get('RepoTags'), ['hello-world:latest'])
+
+    def test_image_config(self):
+        self.assertIsInstance(self.config, collections.abc.Mapping)
+        self.assertEqual(self.config.get('created'), '2023-05-02T16:49:27Z')
diff --git a/sdk/python/tests/test_cmd_util.py b/sdk/python/tests/test_cmd_util.py
new file mode 100644 (file)
index 0000000..ffd45aa
--- /dev/null
@@ -0,0 +1,194 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import contextlib
+import copy
+import itertools
+import json
+import os
+import tempfile
+import unittest
+
+from pathlib import Path
+
+from parameterized import parameterized
+
+import arvados.commands._util as cmd_util
+
+FILE_PATH = Path(__file__)
+
+class ValidateFiltersTestCase(unittest.TestCase):
+    NON_FIELD_TYPES = [
+        None,
+        123,
+        ('name', '=', 'tuple'),
+        {'filters': ['name', '=', 'object']},
+    ]
+    NON_FILTER_TYPES = NON_FIELD_TYPES + ['string']
+    VALID_FILTERS = [
+        ['owner_uuid', '=', 'zzzzz-tpzed-12345abcde67890'],
+        ['name', 'in', ['foo', 'bar']],
+        '(replication_desired > replication_confirmed)',
+        '(replication_confirmed>=replication_desired)',
+    ]
+
+    @parameterized.expand(itertools.combinations(VALID_FILTERS, 2))
+    def test_valid_filters(self, f1, f2):
+        expected = [f1, f2]
+        actual = cmd_util.validate_filters(copy.deepcopy(expected))
+        self.assertEqual(actual, expected)
+
+    @parameterized.expand([(t,) for t in NON_FILTER_TYPES])
+    def test_filters_wrong_type(self, value):
+        with self.assertRaisesRegex(ValueError, r'^filters are not a list\b'):
+            cmd_util.validate_filters(value)
+
+    @parameterized.expand([(t,) for t in NON_FIELD_TYPES])
+    def test_single_filter_wrong_type(self, value):
+        with self.assertRaisesRegex(ValueError, r'^filter at index 0 is not a string or list\b'):
+            cmd_util.validate_filters([value])
+
+    @parameterized.expand([
+        ([],),
+        (['owner_uuid'],),
+        (['owner_uuid', 'zzzzz-tpzed-12345abcde67890'],),
+        (['name', 'not in', 'foo', 'bar'],),
+        (['name', 'in', 'foo', 'bar', 'baz'],),
+    ])
+    def test_filters_wrong_arity(self, value):
+        with self.assertRaisesRegex(ValueError, r'^filter at index 0 does not have three items\b'):
+            cmd_util.validate_filters([value])
+
+    @parameterized.expand(itertools.product(
+        [0, 1],
+        NON_FIELD_TYPES,
+    ))
+    def test_filter_definition_wrong_type(self, index, bad_value):
+        value = ['owner_uuid', '=', 'zzzzz-tpzed-12345abcde67890']
+        value[index] = bad_value
+        name = ('field name', 'operator')[index]
+        with self.assertRaisesRegex(ValueError, rf'^filter at index 0 {name} is not a string\b'):
+            cmd_util.validate_filters([value])
+
+    @parameterized.expand([
+        # Not enclosed in parentheses
+        'foo = bar',
+        '(foo) < bar',
+        'foo > (bar)',
+        # Not exactly one operator
+        '(a >= b >= c)',
+        '(foo)',
+        '(file_count version)',
+        # Invalid field identifiers
+        '(version = 1)',
+        '(2 = file_count)',
+        '(replication.desired <= replication.confirmed)',
+        # Invalid whitespace
+        '(file_count\t=\tversion)',
+        '(file_count >= version\n)',
+    ])
+    def test_invalid_string_filter(self, value):
+        with self.assertRaisesRegex(ValueError, r'^filter at index 0 has invalid syntax\b'):
+            cmd_util.validate_filters([value])
+
+
+class JSONArgumentTestCase(unittest.TestCase):
+    JSON_OBJECTS = [
+        None,
+        123,
+        456.789,
+        'string',
+        ['list', 1],
+        {'object': True, 'yaml': False},
+    ]
+
+    @classmethod
+    def setUpClass(cls):
+        cls.json_file = tempfile.NamedTemporaryFile(
+            'w+',
+            encoding='utf-8',
+            prefix='argtest',
+            suffix='.json',
+        )
+        cls.parser = cmd_util.JSONArgument()
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.json_file.close()
+
+    def setUp(self):
+        self.json_file.seek(0)
+        self.json_file.truncate()
+
+    @parameterized.expand((obj,) for obj in JSON_OBJECTS)
+    def test_valid_argument_string(self, obj):
+        actual = self.parser(json.dumps(obj))
+        self.assertEqual(actual, obj)
+
+    @parameterized.expand((obj,) for obj in JSON_OBJECTS)
+    def test_valid_argument_path(self, obj):
+        json.dump(obj, self.json_file)
+        self.json_file.flush()
+        actual = self.parser(self.json_file.name)
+        self.assertEqual(actual, obj)
+
+    @parameterized.expand([
+        '',
+        '\0',
+        None,
+    ])
+    def test_argument_not_json_or_path(self, value):
+        if value is None:
+            with tempfile.NamedTemporaryFile() as gone_file:
+                value = gone_file.name
+        with self.assertRaisesRegex(ValueError, r'\bnot a valid JSON string or file path\b'):
+            self.parser(value)
+
+    @parameterized.expand([
+        FILE_PATH.parent,
+        FILE_PATH / 'nonexistent.json',
+        None,
+    ])
+    def test_argument_path_unreadable(self, path):
+        if path is None:
+            bad_file = tempfile.NamedTemporaryFile()
+            os.chmod(bad_file.fileno(), 0o000)
+            path = bad_file.name
+            @contextlib.contextmanager
+            def ctx():
+                try:
+                    yield
+                finally:
+                    os.chmod(bad_file.fileno(), 0o600)
+        else:
+            ctx = contextlib.nullcontext
+        with self.assertRaisesRegex(ValueError, rf'^error reading JSON file path {str(path)!r}: '), ctx():
+            self.parser(str(path))
+
+    @parameterized.expand([
+        FILE_PATH,
+        None,
+    ])
+    def test_argument_path_not_json(self, path):
+        if path is None:
+            path = self.json_file.name
+        with self.assertRaisesRegex(ValueError, rf'^error decoding JSON from file {str(path)!r}'):
+            self.parser(str(path))
+
+
+class JSONArgumentValidationTestCase(unittest.TestCase):
+    @parameterized.expand((obj,) for obj in JSONArgumentTestCase.JSON_OBJECTS)
+    def test_object_returned_from_validator(self, value):
+        parser = cmd_util.JSONArgument(lambda _: copy.deepcopy(value))
+        self.assertEqual(parser('{}'), value)
+
+    @parameterized.expand((obj,) for obj in JSONArgumentTestCase.JSON_OBJECTS)
+    def test_exception_raised_from_validator(self, value):
+        json_value = json.dumps(value)
+        def raise_func(_):
+            raise ValueError(json_value)
+        parser = cmd_util.JSONArgument(raise_func)
+        with self.assertRaises(ValueError) as exc_check:
+            parser(json_value)
+        self.assertEqual(exc_check.exception.args, (json_value,))
index f472c0830e65b02d9394c50e45a361c67ab48289..6b1ebf56c0826ee4e23523168b729855f6368bf8 100644 (file)
@@ -167,30 +167,30 @@ class KeepPermissionTestCase(run_test_server.TestCaseWithServers, DiskCacheBase)
                          b'foo',
                          'wrong content from Keep.get(md5("foo"))')
 
-        # GET with an unsigned locator => NotFound
+        # GET with an unsigned locator => bad request
         bar_locator = keep_client.put('bar')
         unsigned_bar_locator = "37b51d194a7513e45b56f6524f2d51f2+3"
         self.assertRegex(
             bar_locator,
             r'^37b51d194a7513e45b56f6524f2d51f2\+3\+A[a-f0-9]+@[a-f0-9]+$',
             'invalid locator from Keep.put("bar"): ' + bar_locator)
-        self.assertRaises(arvados.errors.NotFoundError,
+        self.assertRaises(arvados.errors.KeepReadError,
                           keep_client.get,
                           unsigned_bar_locator)
 
-        # GET from a different user => NotFound
+        # GET from a different user => bad request
         run_test_server.authorize_with('spectator')
-        self.assertRaises(arvados.errors.NotFoundError,
+        self.assertRaises(arvados.errors.KeepReadError,
                           arvados.Keep.get,
                           bar_locator)
 
-        # Unauthenticated GET for a signed locator => NotFound
-        # Unauthenticated GET for an unsigned locator => NotFound
+        # Unauthenticated GET for a signed locator => bad request
+        # Unauthenticated GET for an unsigned locator => bad request
         keep_client.api_token = ''
-        self.assertRaises(arvados.errors.NotFoundError,
+        self.assertRaises(arvados.errors.KeepReadError,
                           keep_client.get,
                           bar_locator)
-        self.assertRaises(arvados.errors.NotFoundError,
+        self.assertRaises(arvados.errors.KeepReadError,
                           keep_client.get,
                           unsigned_bar_locator)
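With this change, reading a block with an unsigned locator, or without the right credentials, surfaces as a KeepReadError instead of NotFoundError. A minimal sketch of how calling code might handle the new exception, reusing the unsigned locator from the test above:

    import arvados
    import arvados.errors

    api = arvados.api('v1')
    keep_client = arvados.KeepClient(api_client=api)

    unsigned_bar_locator = "37b51d194a7513e45b56f6524f2d51f2+3"
    try:
        data = keep_client.get(unsigned_bar_locator)
    except arvados.errors.KeepReadError as err:
        # Keepstore now rejects unsigned/unauthorized reads as bad requests,
        # which the client reports as a read error rather than "not found".
        print(f"cannot read block: {err}")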
 
index f0117f87eec547f1ef8af6d25c643e841a2f100e..123180ae1c1fae026019105b915b00965a3c7c30 100644 (file)
@@ -5,7 +5,7 @@ Gem::Specification.new do |s|
   s.name = "arvados-google-api-client"
   s.version = Google::APIClient::VERSION::STRING
 
-  s.required_ruby_version = '>= 2.5.0'
+  s.required_ruby_version = '>= 2.7.0'
   s.required_rubygems_version = ">= 1.3.5"
   s.require_paths = ["lib"]
   s.authors = ["Bob Aman", "Steven Bazyl"]
@@ -26,7 +26,12 @@ Gem::Specification.new do |s|
 
   s.add_runtime_dependency 'addressable', '~> 2.3'
   s.add_runtime_dependency 'signet', '~> 0.16.0'
-  s.add_runtime_dependency 'faraday', '~> 2.0'
+  # faraday requires Ruby 3.0 starting with 2.9.0. If you install this gem
+  # on Ruby 2.7, the dependency resolver asks you to resolve the conflict
+  # manually. Instead of teaching all our tooling to do that, we prefer to
+  # require the latest version that supports Ruby 2.7 here. This requirement
+  # can be relaxed to '~> 2.0' when we drop support for Ruby 2.7.
+  s.add_runtime_dependency 'faraday', '~> 2.8.0'
   s.add_runtime_dependency 'faraday-multipart', '~> 1.0'
   s.add_runtime_dependency 'faraday-gzip', '~> 2.0'
   s.add_runtime_dependency 'googleauth', '~> 1.0'
index acfde2754068f61c390ac0bb8733770e583eef31..3f78e4ae37722970ed6cc5e34a5153677bc28a2d 100644 (file)
@@ -19,7 +19,7 @@ module Google
       MAJOR = 0
       MINOR = 8
       TINY  = 7
-      PATCH = 5
+      PATCH = 6
       STRING = [MAJOR, MINOR, TINY, PATCH].compact.join('.')
     end
   end
index 711b51daafbf30d8ab0a7c65e2159a3d4c8abd42..ea5ff8c7c5caec9456e4501998a7412c2054539c 100644 (file)
@@ -37,7 +37,7 @@ Gem::Specification.new do |s|
   s.files       = ["lib/arvados.rb", "lib/arvados/google_api_client.rb",
                    "lib/arvados/collection.rb", "lib/arvados/keep.rb",
                    "README", "LICENSE-2.0.txt"]
-  s.required_ruby_version = '>= 2.5.0'
+  s.required_ruby_version = '>= 2.7.0'
   s.add_dependency('activesupport', '>= 3')
   s.add_dependency('andand', '~> 1.3', '>= 1.3.3')
   # arvados fork of google-api-client gem with old API and new
index cfab90127c5246f79b851b5fdd1c5518d5d6d388..003b886cee3f357a71ee5bd9d08e5707eecae50f 100644 (file)
@@ -77,7 +77,7 @@ GEM
       activemodel (>= 3.0.0)
       activesupport (>= 3.0.0)
       rack (>= 1.1.0)
-    addressable (2.8.5)
+    addressable (2.8.6)
       public_suffix (>= 2.0.2, < 6.0)
     andand (1.3.3)
     arvados (2.7.0.rc2)
@@ -87,12 +87,12 @@ GEM
       i18n
       json (>= 1.7.7, < 3)
       jwt (>= 0.1.5, < 2)
-    arvados-google-api-client (0.8.7.5)
+    arvados-google-api-client (0.8.7.6)
       activesupport (>= 3.2, < 8.0)
       addressable (~> 2.3)
       autoparse (~> 0.3)
       extlib (~> 0.9)
-      faraday (~> 2.0)
+      faraday (~> 2.8.0)
       faraday-gzip (~> 2.0)
       faraday-multipart (~> 1.0)
       googleauth (~> 1.0)
@@ -104,9 +104,10 @@ GEM
       addressable (>= 2.3.1)
       extlib (>= 0.9.15)
       multi_json (>= 1.0.0)
+    base64 (0.2.0)
     builder (3.2.4)
     byebug (11.1.3)
-    concurrent-ruby (1.2.2)
+    concurrent-ruby (1.2.3)
     crass (1.0.6)
     date (3.3.3)
     docile (1.4.0)
@@ -117,10 +118,11 @@ GEM
     factory_bot_rails (6.2.0)
       factory_bot (~> 6.2.0)
       railties (>= 5.0.0)
-    faraday (2.7.10)
+    faraday (2.8.1)
+      base64
       faraday-net_http (>= 2.0, < 3.1)
       ruby2_keywords (>= 0.0.4)
-    faraday-gzip (2.0.0)
+    faraday-gzip (2.0.1)
       faraday (>= 1.0)
       zlib (~> 3.0)
     faraday-multipart (1.0.4)
@@ -129,10 +131,12 @@ GEM
     ffi (1.15.5)
     globalid (1.2.1)
       activesupport (>= 6.1)
-    googleauth (1.7.0)
-      faraday (>= 0.17.3, < 3.a)
+    google-cloud-env (2.1.1)
+      faraday (>= 1.0, < 3.a)
+    googleauth (1.9.2)
+      faraday (>= 1.0, < 3.a)
+      google-cloud-env (~> 2.1)
       jwt (>= 1.4, < 3.0)
-      memoist (~> 0.16)
       multi_json (~> 1.11)
       os (>= 0.9, < 2.0)
       signet (>= 0.16, < 2.a)
@@ -165,7 +169,6 @@ GEM
       net-pop
       net-smtp
     marcel (1.0.2)
-    memoist (0.16.2)
     method_source (1.0.0)
     mini_mime (1.1.5)
     mini_portile2 (2.8.4)
@@ -173,7 +176,7 @@ GEM
     mocha (2.1.0)
       ruby2_keywords (>= 0.0.5)
     multi_json (1.15.0)
-    multipart-post (2.3.0)
+    multipart-post (2.4.0)
     net-imap (0.3.7)
       date
       net-protocol
@@ -195,7 +198,7 @@ GEM
       rake (>= 0.8.1)
     pg (1.5.4)
     power_assert (2.0.3)
-    public_suffix (5.0.3)
+    public_suffix (5.0.4)
     racc (1.7.1)
     rack (2.2.8)
     rack-test (2.1.0)
@@ -278,7 +281,7 @@ GEM
       websocket-extensions (>= 0.1.0)
     websocket-extensions (0.1.5)
     zeitwerk (2.6.11)
-    zlib (3.0.0)
+    zlib (3.1.0)
 
 PLATFORMS
   ruby
index 4d7b2bbaeb2d7d42a79227cff1f2f09142f19179..5a95fb0b88e41efc593495bc50efeb5bd13b51b9 100644 (file)
@@ -656,13 +656,16 @@ SELECT target_uuid, perm_level
 
     remote_should_be_active = should_activate && remote_user[:is_invited] != false && remote_user[:is_active] == true
 
+    # Make sure blank username is nil
+    remote_user[:username] = nil if remote_user[:username] == ""
+
     begin
       user = User.create_with(email: remote_user[:email],
                               username: remote_user[:username],
                               first_name: remote_user[:first_name],
                               last_name: remote_user[:last_name],
-                              is_active: remote_should_be_active
-      ).find_or_create_by(uuid: remote_user[:uuid])
+                              is_active: remote_should_be_active,
+                             ).find_or_create_by(uuid: remote_user[:uuid])
     rescue ActiveRecord::RecordNotUnique
       retry
     end
@@ -680,8 +683,9 @@ SELECT target_uuid, perm_level
 
       loginCluster = Rails.configuration.Login.LoginCluster
       if user.username.nil? || user.username == ""
-        # Don't have a username yet, set one
-        needupdate[:username] = user.set_initial_username(requested: remote_user[:username])
+        # Don't have a username yet, try to set one
+        initial_username = user.set_initial_username(requested: remote_user[:username])
+        needupdate[:username] = initial_username if !initial_username.nil?
       elsif remote_user_prefix != loginCluster
         # Upstream is not login cluster, don't try to change the
         # username once set.
@@ -710,6 +714,14 @@ SELECT target_uuid, perm_level
           end
           raise # Not the issue we're handling above
         end
+      elsif user.new_record?
+        begin
+          user.save!
+        rescue => e
+          Rails.logger.debug "Error saving user record: #{$!}"
+          Rails.logger.debug "Backtrace:\n\t#{e.backtrace.join("\n\t")}"
+          raise
+        end
       end
 
       if remote_should_be_unsetup
index 69c9474703c95e270221bc1f598f769efa589cca..cccbc1b56b19639a903eec1d238d12174b5cb0e9 100644 (file)
@@ -8,10 +8,6 @@ case "$TARGET" in
     centos*|rocky*)
         fpm_depends+=(libcurl-devel postgresql-devel bison make automake gcc gcc-c++ postgresql shared-mime-info)
         ;;
-    ubuntu1804)
-        fpm_depends+=(libcurl-ssl-dev libpq-dev g++ bison zlib1g-dev make postgresql-client shared-mime-info)
-        fpm_conflicts+=(ruby-bundler)
-        ;;
     debian* | ubuntu*)
         fpm_depends+=(libcurl-ssl-dev libpq-dev g++ bison zlib1g-dev make postgresql-client shared-mime-info)
         ;;
index 5a3242e4ffce028bb95df3e9beaaa36f9bf5059f..72aad1d68ee3f28e7fe853cf911ac4841cf896e0 100644 (file)
@@ -220,6 +220,51 @@ foo_collection_in_aproject:
   manifest_text: ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
   name: "zzzzz-4zz18-fy296fx3hot09f7 added sometime"
 
+fuse_filters_test_foo:
+  uuid: zzzzz-4zz18-4e2kjqv891jl3p3
+  current_version_uuid: zzzzz-4zz18-4e2kjqv891jl3p3
+  portable_data_hash: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-000000000000000
+  owner_uuid: zzzzz-tpzed-fusefiltertest1
+  created_at: 2024-02-09T12:01:00Z
+  modified_at: 2024-02-09T12:01:01Z
+  updated_at: 2024-02-09T12:01:01Z
+  manifest_text: ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
+  name: foo
+  properties:
+    MainFile: foo
+
+fuse_filters_test_bar:
+  uuid: zzzzz-4zz18-qpxqtq2wbjnu630
+  current_version_uuid: zzzzz-4zz18-qpxqtq2wbjnu630
+  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  owner_uuid: zzzzz-tpzed-fusefiltertest1
+  created_at: 2024-02-09T12:02:00Z
+  modified_at: 2024-02-09T12:02:01Z
+  updated_at: 2024-02-09T12:02:01Z
+  manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+  name: bar
+  properties:
+    MainFile: bar
+
+fuse_filters_test_baz:
+  uuid: zzzzz-4zz18-ls97ezovrkkpfxz
+  current_version_uuid: zzzzz-4zz18-ls97ezovrkkpfxz
+  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+  owner_uuid: zzzzz-tpzed-fusefiltertest1
+  created_at: 2024-02-09T12:03:00Z
+  modified_at: 2024-02-09T12:03:01Z
+  updated_at: 2024-02-09T12:03:01Z
+  manifest_text: ". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\n"
+  name: baz
+  properties:
+    MainFile: baz
+
 user_agreement_in_anonymously_accessible_project:
   uuid: zzzzz-4zz18-uukreo9rbgwsujr
   current_version_uuid: zzzzz-4zz18-uukreo9rbgwsujr
index 9a2dc169b63aec6ff8d624bf4128c69483e0ce3b..9034ac6ee7d2dd72928388b51b4461bff2814af8 100644 (file)
@@ -172,6 +172,17 @@ afiltergroup5:
   properties:
     filters: [["collections.properties.listprop","contains","elem1"],["uuid", "is_a", "arvados#collection"]]
 
+fuse_filters_test_project:
+  uuid: zzzzz-j7d0g-fusefiltertest1
+  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+  created_at: 2024-02-09T12:00:00Z
+  modified_at: 2024-02-09T12:00:01Z
+  updated_at: 2024-02-09T12:00:01Z
+  name: FUSE Filters Test Project 1
+  group_class: project
+
 future_project_viewing_group:
   uuid: zzzzz-j7d0g-futrprojviewgrp
   owner_uuid: zzzzz-tpzed-000000000000000
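These fixtures add a dedicated project and three collections whose MainFile property mirrors the collection name, giving the new FUSE --filters tests something to select on. As a rough illustration only (not part of the fixtures or tests), a property filter that matches exactly one of them might be used like this:

    import arvados

    api = arvados.api('v1')
    # Select only the fixture collection whose MainFile property is "foo".
    matches = api.collections().list(
        filters=[
            ['owner_uuid', '=', 'zzzzz-tpzed-fusefiltertest1'],
            ['properties.MainFile', '=', 'foo'],
        ],
    ).execute()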
index 07e0b71d86098877da212a2d7eefc0087204d25e..cc0b5e1320988b1098f698528fb6e892f4b11ea1 100644 (file)
@@ -1101,6 +1101,37 @@ The Arvados team.
     assert_equal(1, Log.where(object_uuid: unchanginguuid).count)
   end
 
+  test 'batch update does not produce spurious log events' do
+    # test for bug #21304
+
+    existinguuid = 'remot-tpzed-foobarbazwazqux'
+    act_as_system_user do
+      User.create!(uuid: existinguuid,
+                   first_name: 'root',
+                   is_active: true,
+                  )
+    end
+    assert_equal(1, Log.where(object_uuid: existinguuid).count)
+
+    Rails.configuration.Login.LoginCluster = 'remot'
+
+    authorize_with(:admin)
+    patch(:batch_update,
+          params: {
+            updates: {
+              existinguuid => {
+                'first_name' => 'root',
+                'email' => '',
+                'username' => '',
+                'is_active' => true,
+                'is_invited' => true
+              },
+            }})
+    assert_response(:success)
+
+    assert_equal(1, Log.where(object_uuid: existinguuid).count)
+  end
+
   NON_ADMIN_USER_DATA = ["uuid", "kind", "is_active", "is_admin", "is_invited", "email", "first_name",
                          "last_name", "username", "can_write", "can_manage"].sort
 
index e3dd113c710cd8cf5b1cede361d794ad7fd67839..b4fc10f83ee02232b411cb101d1e7ea6b938f7cf 100644 (file)
@@ -5,8 +5,6 @@
 Description=Arvados Crunch Dispatcher for LOCAL service
 Documentation=https://doc.arvados.org/
 After=network.target
-
-# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
 StartLimitIntervalSec=0
 
 [Service]
@@ -19,8 +17,5 @@ Restart=always
 RestartSec=1
 LimitNOFILE=1000000
 
-# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
-StartLimitInterval=0
-
 [Install]
 WantedBy=multi-user.target
index 2aab42b2a37c7c4be9a6ff6907a6b6c38c3373cf..819c920ff2a307a7e1c86ddf62fb9009501082f0 100644 (file)
@@ -6,8 +6,6 @@
 Description=Arvados Docker Image Cleaner
 Documentation=https://doc.arvados.org/
 After=network.target
-
-# systemd>=230 (debian:9) obeys StartLimitIntervalSec in the [Unit] section
 StartLimitIntervalSec=0
 
 [Service]
@@ -15,14 +13,7 @@ Type=simple
 Restart=always
 RestartSec=10s
 RestartPreventExitStatus=2
-#
-# This unwieldy ExecStart command detects at runtime whether
-# arvados-docker-cleaner is installed with the Python 3.3 Software
-# Collection, and if so, invokes it with the "scl" wrapper.
-ExecStart=/bin/sh -c 'if [ -e /opt/rh/rh-python36/root/bin/arvados-docker-cleaner ]; then exec scl enable rh-python36 arvados-docker-cleaner; else exec arvados-docker-cleaner; fi'
-
-# systemd<=219 (centos:7, debian:8, ubuntu:trusty) obeys StartLimitInterval in the [Service] section
-StartLimitInterval=0
+ExecStart=/usr/bin/arvados-docker-cleaner
 
 [Install]
 WantedBy=multi-user.target
index d5c572f2ed46885231bde9edbb31b980f0b7f5c2..2b386c70b47aa2c925b87aedff14838297b88315 100644 (file)
@@ -37,18 +37,10 @@ setup(name="arvados-docker-cleaner",
           ('share/doc/arvados-docker-cleaner', ['agpl-3.0.txt', 'arvados-docker-cleaner.service']),
       ],
       install_requires=[
-          # The requirements for the docker library broke when requests started
-          # supporting urllib3 2.0.
-          # See <https://github.com/docker/docker-py/issues/3113>.
-          # Make sure we get a version with the bugfix, assuming Python is
-          # recent enough.
-          'docker>=6.1.0; python_version>"3.6"',
-          # If Python is too old, install the latest version we can and pin
-          # urllib3 ourselves.
-          'docker~=5.0; python_version<"3.7"',
-          'urllib3~=1.26; python_version<"3.7"',
+          'docker>=6.1.0',
           'setuptools',
       ],
+      python_requires="~=3.8",
       test_suite='tests',
       zip_safe=False
 )
index 9c607c7f0c3dbf97ffdfa0f98a76e4489731c71f..719ec7ee959701fde58bfef0dfb8b3c46dc4b895 100644 (file)
@@ -28,99 +28,327 @@ class ArgumentParser(argparse.ArgumentParser):
     def __init__(self):
         super(ArgumentParser, self).__init__(
             parents=[arv_cmd.retry_opt],
-            description='''Mount Keep data under the local filesystem.  Default mode is --home''',
-            epilog="""
-    Note: When using the --exec feature, you must either specify the
-    mountpoint before --exec, or mark the end of your --exec arguments
-    with "--".
-            """)
-        self.add_argument('--version', action='version',
-                          version=u"%s %s" % (sys.argv[0], __version__),
-                          help='Print version and exit.')
-        self.add_argument('mountpoint', type=str, help="""Mount point.""")
-        self.add_argument('--allow-other', action='store_true',
-                            help="""Let other users read the mount""")
-        self.add_argument('--subtype', type=str, metavar='STRING',
-                            help="""Report mounted filesystem type as "fuse.STRING", instead of just "fuse".""")
-
-        mode = self.add_mutually_exclusive_group()
-
-        mode.add_argument('--all', action='store_const', const='all', dest='mode',
-                                help="""Mount a subdirectory for each mode: home, shared, by_tag, by_id (default if no --mount-* arguments are given).""")
-        mode.add_argument('--custom', action='store_const', const=None, dest='mode',
-                                help="""Mount a top level meta-directory with subdirectories as specified by additional --mount-* arguments (default if any --mount-* arguments are given).""")
-        mode.add_argument('--home', action='store_const', const='home', dest='mode',
-                                help="""Mount only the user's home project.""")
-        mode.add_argument('--shared', action='store_const', const='shared', dest='mode',
-                                help="""Mount only list of projects shared with the user.""")
-        mode.add_argument('--by-tag', action='store_const', const='by_tag', dest='mode',
-                                help="""Mount subdirectories listed by tag.""")
-        mode.add_argument('--by-id', action='store_const', const='by_id', dest='mode',
-                                help="""Mount subdirectories listed by portable data hash or uuid.""")
-        mode.add_argument('--by-pdh', action='store_const', const='by_pdh', dest='mode',
-                                help="""Mount subdirectories listed by portable data hash.""")
-        mode.add_argument('--project', type=str, metavar='UUID',
-                                help="""Mount the specified project.""")
-        mode.add_argument('--collection', type=str, metavar='UUID_or_PDH',
-                                help="""Mount only the specified collection.""")
-
-        mounts = self.add_argument_group('Custom mount options')
-        mounts.add_argument('--mount-by-pdh',
-                            type=str, metavar='PATH', action='append', default=[],
-                            help="Mount each readable collection at mountpoint/PATH/P where P is the collection's portable data hash.")
-        mounts.add_argument('--mount-by-id',
-                            type=str, metavar='PATH', action='append', default=[],
-                            help="Mount each readable collection at mountpoint/PATH/UUID and mountpoint/PATH/PDH where PDH is the collection's portable data hash and UUID is its UUID.")
-        mounts.add_argument('--mount-by-tag',
-                            type=str, metavar='PATH', action='append', default=[],
-                            help="Mount all collections with tag TAG at mountpoint/PATH/TAG/UUID.")
-        mounts.add_argument('--mount-home',
-                            type=str, metavar='PATH', action='append', default=[],
-                            help="Mount the current user's home project at mountpoint/PATH.")
-        mounts.add_argument('--mount-shared',
-                            type=str, metavar='PATH', action='append', default=[],
-                            help="Mount projects shared with the current user at mountpoint/PATH.")
-        mounts.add_argument('--mount-tmp',
-                            type=str, metavar='PATH', action='append', default=[],
-                            help="Create a new collection, mount it in read/write mode at mountpoint/PATH, and delete it when unmounting.")
-
-
-        self.add_argument('--debug', action='store_true', help="""Debug mode""")
-        self.add_argument('--logfile', help="""Write debug logs and errors to the specified file (default stderr).""")
-        self.add_argument('--foreground', action='store_true', help="""Run in foreground (default is to daemonize unless --exec specified)""", default=False)
-        self.add_argument('--encoding', type=str, help="Character encoding to use for filesystem, default is utf-8 (see Python codec registry for list of available encodings)", default="utf-8")
-
-        self.add_argument('--file-cache', type=int, help="File data cache size, in bytes (default 8 GiB for disk-based cache or 256 MiB with RAM-only cache)", default=0)
-        self.add_argument('--directory-cache', type=int, help="Directory data cache size, in bytes (default 128 MiB)", default=128*1024*1024)
-
-        cachetype = self.add_mutually_exclusive_group()
-        cachetype.add_argument('--ram-cache', action='store_false', dest='disk_cache', help="Use in-memory caching only", default=True)
-        cachetype.add_argument('--disk-cache', action='store_true', dest='disk_cache', help="Use disk based caching (default)", default=True)
-
-        self.add_argument('--disk-cache-dir', type=str, help="Disk cache location (default ~/.cache/arvados/keep)", default=None)
-
-        self.add_argument('--disable-event-listening', action='store_true', help="Don't subscribe to events on the API server", dest="disable_event_listening", default=False)
-
-        self.add_argument('--read-only', action='store_false', help="Mount will be read only (default)", dest="enable_write", default=False)
-        self.add_argument('--read-write', action='store_true', help="Mount will be read-write", dest="enable_write", default=False)
-        self.add_argument('--storage-classes', type=str, metavar='CLASSES', help="Specify comma separated list of storage classes to be used when saving data of new collections", default=None)
-
-        self.add_argument('--crunchstat-interval', type=float, help="Write stats to stderr every N seconds (default disabled)", default=0)
-
-        unmount = self.add_mutually_exclusive_group()
-        unmount.add_argument('--unmount', action='store_true', default=False,
-                             help="Forcefully unmount the specified mountpoint (if it's a fuse mount) and exit. If --subtype is given, unmount only if the mount has the specified subtype. WARNING: This command can affect any kind of fuse mount, not just arv-mount.")
-        unmount.add_argument('--unmount-all', action='store_true', default=False,
-                             help="Forcefully unmount every fuse mount at or below the specified path and exit. If --subtype is given, unmount only mounts that have the specified subtype. Exit non-zero if any other types of mounts are found at or below the given path. WARNING: This command can affect any kind of fuse mount, not just arv-mount.")
-        unmount.add_argument('--replace', action='store_true', default=False,
-                             help="If a fuse mount is already present at mountpoint, forcefully unmount it before mounting")
-        self.add_argument('--unmount-timeout',
-                          type=float, default=2.0,
-                          help="Time to wait for graceful shutdown after --exec program exits and filesystem is unmounted")
-
-        self.add_argument('--exec', type=str, nargs=argparse.REMAINDER,
-                            dest="exec_args", metavar=('command', 'args', '...', '--'),
-                            help="""Mount, run a command, then unmount and exit""")
+            description="Interact with Arvados data through a local filesystem",
+        )
+        self.add_argument(
+            '--version',
+            action='version',
+            version=u"%s %s" % (sys.argv[0], __version__),
+            help="Print version and exit",
+        )
+        self.add_argument(
+            'mountpoint',
+            metavar='MOUNT_DIR',
+            help="Directory path to mount data",
+        )
+
+        mode_group = self.add_argument_group("Mount contents")
+        mode = mode_group.add_mutually_exclusive_group()
+        mode.add_argument(
+            '--all',
+            action='store_const',
+            const='all',
+            dest='mode',
+            help="""
+Mount a subdirectory for each mode: `home`, `shared`, `by_id`, and `by_tag`
+(default if no `--mount-*` options are given)
+""",
+        )
+        mode.add_argument(
+            '--custom',
+            action='store_const',
+            const=None,
+            dest='mode',
+            help="""
+Mount a subdirectory for each mode specified by a `--mount-*` option
+(default if any `--mount-*` options are given;
+see "Mount custom layout and filtering" section)
+""",
+        )
+        mode.add_argument(
+            '--collection',
+            metavar='UUID_OR_PDH',
+            help="Mount the specified collection",
+        )
+        mode.add_argument(
+            '--home',
+            action='store_const',
+            const='home',
+            dest='mode',
+            help="Mount your home project",
+        )
+        mode.add_argument(
+            '--project',
+            metavar='UUID',
+            help="Mount the specified project",
+        )
+        mode.add_argument(
+            '--shared',
+            action='store_const',
+            const='shared',
+            dest='mode',
+            help="Mount a subdirectory for each project shared with you",
+        )
+        mode.add_argument(
+            '--by-id',
+            action='store_const',
+            const='by_id',
+            dest='mode',
+            help="""
+Mount a magic directory where collections and projects are accessible through
+subdirectories named after their UUID or portable data hash
+""",
+        )
+        mode.add_argument(
+            '--by-pdh',
+            action='store_const',
+            const='by_pdh',
+            dest='mode',
+            help="""
+Mount a magic directory where collections are accessible through
+subdirectories named after their portable data hash
+""",
+        )
+        mode.add_argument(
+            '--by-tag',
+            action='store_const',
+            const='by_tag',
+            dest='mode',
+            help="Mount a subdirectory for each tag attached to a collection or project",
+        )
+
+        mounts = self.add_argument_group("Mount custom layout and filtering")
+        mounts.add_argument(
+            '--filters',
+            type=arv_cmd.JSONArgument(arv_cmd.validate_filters),
+            help="""
+Filters to apply to all project, shared, and tag directory contents.
+Pass filters as either a JSON string or a path to a JSON file.
+The JSON value should be a list of filters in the Arvados API list filter syntax.
+""",
+        )
+        mounts.add_argument(
+            '--mount-home',
+            metavar='PATH',
+            action='append',
+            default=[],
+            help="Make your home project available under the mount at `PATH`",
+        )
+        mounts.add_argument(
+            '--mount-shared',
+            metavar='PATH',
+            action='append',
+            default=[],
+            help="Make projects shared with you available under the mount at `PATH`",
+        )
+        mounts.add_argument(
+            '--mount-tmp',
+            metavar='PATH',
+            action='append',
+            default=[],
+            help="""
+Make a new temporary writable collection available under the mount at `PATH`.
+This collection is deleted when the mount is unmounted.
+""",
+        )
+        mounts.add_argument(
+            '--mount-by-id',
+            metavar='PATH',
+            action='append',
+            default=[],
+            help="""
+Make a magic directory available under the mount at `PATH` where collections and
+projects are accessible through subdirectories named after their UUID or
+portable data hash
+""",
+        )
+        mounts.add_argument(
+            '--mount-by-pdh',
+            metavar='PATH',
+            action='append',
+            default=[],
+            help="""
+Make a magic directory available under the mount at `PATH` where collections
+are accessible through subdirectories named after their portable data hash
+""",
+        )
+        mounts.add_argument(
+            '--mount-by-tag',
+            metavar='PATH',
+            action='append',
+            default=[],
+            help="""
+Make a subdirectory for each tag attached to a collection or project available
+under the mount at `PATH`
+""" ,
+        )
+
+        perms = self.add_argument_group("Mount access and permissions")
+        perms.add_argument(
+            '--allow-other',
+            action='store_true',
+            help="Let other users on this system read mounted data (default false)",
+        )
+        perms.add_argument(
+            '--read-only',
+            action='store_false',
+            default=False,
+            dest='enable_write',
+            help="Mounted data cannot be modified from the mount (default)",
+        )
+        perms.add_argument(
+            '--read-write',
+            action='store_true',
+            default=False,
+            dest='enable_write',
+            help="Mounted data can be modified from the mount",
+        )
+
+        lifecycle = self.add_argument_group("Mount lifecycle management")
+        lifecycle.add_argument(
+            '--exec',
+            nargs=argparse.REMAINDER,
+            dest="exec_args",
+            help="""
+Mount data, run the specified command, then unmount and exit.
+`--exec` reads all remaining options as the command to run,
+so it must be the last option you specify.
+Either end your command arguments (and other options) with a `--` argument,
+or specify `--exec` after your mount point.
+""",
+        )
+        lifecycle.add_argument(
+            '--foreground',
+            action='store_true',
+            default=False,
+            help="Run mount process in the foreground instead of daemonizing (default false)",
+        )
+        lifecycle.add_argument(
+            '--subtype',
+            help="Set mounted filesystem type to `fuse.SUBTYPE` (default is just `fuse`)",
+        )
+        unmount = lifecycle.add_mutually_exclusive_group()
+        unmount.add_argument(
+            '--replace',
+            action='store_true',
+            default=False,
+            help="""
+If a FUSE mount is already mounted at the given directory,
+unmount it before mounting the requested data.
+If `--subtype` is specified, unmount only if the mount has that subtype.
+WARNING: This command can affect any kind of FUSE mount, not just arv-mount.
+""",
+        )
+        unmount.add_argument(
+            '--unmount',
+            action='store_true',
+            default=False,
+            help="""
+If a FUSE mount is already mounted at the given directory, unmount it and exit.
+If `--subtype` is specified, unmount only if the mount has that subtype.
+WARNING: This command can affect any kind of FUSE mount, not just arv-mount.
+""",
+        )
+        unmount.add_argument(
+            '--unmount-all',
+            action='store_true',
+            default=False,
+            help="""
+Unmount all FUSE mounts at or below the given directory, then exit.
+If `--subtype` is specified, unmount only mounts that have that subtype.
+WARNING: This command can affect any kind of FUSE mount, not just arv-mount.
+""",
+        )
+        lifecycle.add_argument(
+            '--unmount-timeout',
+            type=float,
+            default=2.0,
+            metavar='SECONDS',
+            help="""
+The number of seconds to wait for a clean unmount after an `--exec` command has
+exited (default %(default).01f).
+After this time, the mount will be forcefully unmounted.
+""",
+        )
+
+        reporting = self.add_argument_group("Mount logging and statistics")
+        reporting.add_argument(
+            '--crunchstat-interval',
+            type=float,
+            default=0.0,
+            metavar='SECONDS',
+            help="Write stats to stderr every N seconds (default disabled)",
+        )
+        reporting.add_argument(
+            '--debug',
+            action='store_true',
+            help="Log debug information",
+        )
+        reporting.add_argument(
+            '--logfile',
+            help="Write debug logs and errors to the specified file (default stderr)",
+        )
+
+        cache = self.add_argument_group("Mount local cache setup")
+        cachetype = cache.add_mutually_exclusive_group()
+        cachetype.add_argument(
+            '--disk-cache',
+            action='store_true',
+            default=True,
+            dest='disk_cache',
+            help="Cache data on the local filesystem (default)",
+        )
+        cachetype.add_argument(
+            '--ram-cache',
+            action='store_false',
+            default=True,
+            dest='disk_cache',
+            help="Cache data in memory",
+        )
+        cache.add_argument(
+            '--disk-cache-dir',
+            metavar="DIRECTORY",
+            help="Filesystem cache location (default `~/.cache/arvados/keep`)",
+        )
+        cache.add_argument(
+            '--directory-cache',
+            type=int,
+            default=128*1024*1024,
+            metavar='BYTES',
+            help="Size of directory data cache in bytes (default 128 MiB)",
+        )
+        cache.add_argument(
+            '--file-cache',
+            type=int,
+            default=0,
+            metavar='BYTES',
+            help="""
+Size of file data cache in bytes
+(default 8 GiB for filesystem cache, 256 MiB for memory cache)
+""",
+        )
+
+        plumbing = self.add_argument_group("Mount interactions with Arvados and Linux")
+        plumbing.add_argument(
+            '--disable-event-listening',
+            action='store_true',
+            dest='disable_event_listening',
+            default=False,
+            help="Don't subscribe to events on the API server to update mount contents",
+        )
+        plumbing.add_argument(
+            '--encoding',
+            default="utf-8",
+            help="""
+Filesystem character encoding
+(default %(default)r; specify a name from the Python codec registry)
+""",
+        )
+        plumbing.add_argument(
+            '--storage-classes',
+            metavar='CLASSES',
+            help="Comma-separated list of storage classes to request for new collections",
+        )
 
 
 class Mount(object):
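The new --filters option takes an Arvados filter list, supplied either inline as JSON or as a path to a JSON file. A small illustrative sketch of preparing such a file (the property name, file name, and mount path are examples, not anything required by arv-mount):

    import json

    # Limit directory listings to collections whose MainFile property is
    # "foo". A filter without a prefix applies to every object type; the
    # "collections." prefix scopes this one to collections only.
    with open('mount-filters.json', 'w') as f:
        json.dump([['collections.properties.MainFile', '=', 'foo']], f)

    # The mount could then be started along these lines:
    #   arv-mount --filters mount-filters.json --all /mnt/arvados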
@@ -300,7 +528,14 @@ class Mount(object):
         usr = self.api.users().current().execute(num_retries=self.args.retries)
         now = time.time()
         dir_class = None
-        dir_args = [llfuse.ROOT_INODE, self.operations.inodes, self.api, self.args.retries, self.args.enable_write]
+        dir_args = [
+            llfuse.ROOT_INODE,
+            self.operations.inodes,
+            self.api,
+            self.args.retries,
+            self.args.enable_write,
+            self.args.filters,
+        ]
         mount_readme = False
 
         storage_classes = None
@@ -366,7 +601,12 @@ class Mount(object):
             return
 
         e = self.operations.inodes.add_entry(Directory(
-            llfuse.ROOT_INODE, self.operations.inodes, self.api.config, self.args.enable_write))
+            llfuse.ROOT_INODE,
+            self.operations.inodes,
+            self.api.config,
+            self.args.enable_write,
+            self.args.filters,
+        ))
         dir_args[0] = e.inode
 
         for name in self.args.mount_by_id:
index 8faf01cb6c4a4ddc58c31ddbe224360870d7026b..e3b8dd4c2cca29616626dab55f6d440c22b58f51 100644 (file)
@@ -36,7 +36,7 @@ class Directory(FreshBase):
     and the value referencing a File or Directory object.
     """
 
-    def __init__(self, parent_inode, inodes, apiconfig, enable_write):
+    def __init__(self, parent_inode, inodes, apiconfig, enable_write, filters):
         """parent_inode is the integer inode number"""
 
         super(Directory, self).__init__()
@@ -50,6 +50,19 @@ class Directory(FreshBase):
         self._entries = {}
         self._mtime = time.time()
         self._enable_write = enable_write
+        self._filters = filters or []
+
+    def _filters_for(self, subtype, *, qualified):
+        for f in self._filters:
+            f_type, _, f_name = f[0].partition('.')
+            if not f_name:
+                yield f
+            elif f_type != subtype:
+                pass
+            elif qualified:
+                yield f
+            else:
+                yield [f_name, *f[1:]]
 
     def forward_slash_subst(self):
         if not hasattr(self, '_fsns'):
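The _filters_for helper above routes user-supplied filters to the right API calls: filters with no type prefix are yielded for every query, while "subtype."-prefixed filters are yielded only for that subtype, either with the prefix kept (qualified=True, as used for the groups().contents calls below) or stripped (qualified=False, as used for the per-type list calls). A standalone sketch of the same logic on sample data, with hypothetical filter values:

    filters = [
        ['name', 'like', 'fuse-test-%'],
        ['collections.properties.MainFile', '=', 'foo'],
        ['groups.group_class', '=', 'project'],
    ]

    def filters_for(filters, subtype, *, qualified):
        # Mirrors Directory._filters_for: pass unprefixed filters through,
        # keep filters aimed at this subtype (optionally stripping the
        # prefix), and drop filters aimed at other object types.
        for f in filters:
            f_type, _, f_name = f[0].partition('.')
            if not f_name:
                yield f
            elif f_type != subtype:
                continue
            elif qualified:
                yield f
            else:
                yield [f_name, *f[1:]]

    print(list(filters_for(filters, 'collections', qualified=False)))
    # [['name', 'like', 'fuse-test-%'], ['properties.MainFile', '=', 'foo']]
    print(list(filters_for(filters, 'groups', qualified=True)))
    # [['name', 'like', 'fuse-test-%'], ['groups.group_class', '=', 'project']]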
@@ -270,8 +283,8 @@ class CollectionDirectoryBase(Directory):
 
     """
 
-    def __init__(self, parent_inode, inodes, apiconfig, enable_write, collection, collection_root):
-        super(CollectionDirectoryBase, self).__init__(parent_inode, inodes, apiconfig, enable_write)
+    def __init__(self, parent_inode, inodes, apiconfig, enable_write, filters, collection, collection_root):
+        super(CollectionDirectoryBase, self).__init__(parent_inode, inodes, apiconfig, enable_write, filters)
         self.apiconfig = apiconfig
         self.collection = collection
         self.collection_root = collection_root
@@ -287,7 +300,15 @@ class CollectionDirectoryBase(Directory):
             item.fuse_entry.dead = False
             self._entries[name] = item.fuse_entry
         elif isinstance(item, arvados.collection.RichCollectionBase):
-            self._entries[name] = self.inodes.add_entry(CollectionDirectoryBase(self.inode, self.inodes, self.apiconfig, self._enable_write, item, self.collection_root))
+            self._entries[name] = self.inodes.add_entry(CollectionDirectoryBase(
+                self.inode,
+                self.inodes,
+                self.apiconfig,
+                self._enable_write,
+                self._filters,
+                item,
+                self.collection_root,
+            ))
             self._entries[name].populate(mtime)
         else:
             self._entries[name] = self.inodes.add_entry(FuseArvadosFile(self.inode, item, mtime, self._enable_write))
@@ -434,8 +455,8 @@ class CollectionDirectoryBase(Directory):
 class CollectionDirectory(CollectionDirectoryBase):
     """Represents the root of a directory tree representing a collection."""
 
-    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, collection_record=None, explicit_collection=None):
-        super(CollectionDirectory, self).__init__(parent_inode, inodes, api.config, enable_write, None, self)
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters=None, collection_record=None, explicit_collection=None):
+        super(CollectionDirectory, self).__init__(parent_inode, inodes, api.config, enable_write, filters, None, self)
         self.api = api
         self.num_retries = num_retries
         self._poll = True
@@ -637,7 +658,7 @@ class TmpCollectionDirectory(CollectionDirectoryBase):
         def save_new(self):
             pass
 
-    def __init__(self, parent_inode, inodes, api_client, num_retries, enable_write, storage_classes=None):
+    def __init__(self, parent_inode, inodes, api_client, num_retries, enable_write, filters=None, storage_classes=None):
         collection = self.UnsaveableCollection(
             api_client=api_client,
             keep_client=api_client.keep,
@@ -646,7 +667,7 @@ class TmpCollectionDirectory(CollectionDirectoryBase):
         # This is always enable_write=True because it never tries to
         # save to the backend
         super(TmpCollectionDirectory, self).__init__(
-            parent_inode, inodes, api_client.config, True, collection, self)
+            parent_inode, inodes, api_client.config, True, filters, collection, self)
         self.populate(self.mtime())
 
     def on_event(self, *args, **kwargs):
@@ -742,8 +763,8 @@ and the directory will appear if it exists.
 
 """.lstrip()
 
-    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, pdh_only=False, storage_classes=None):
-        super(MagicDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters, pdh_only=False, storage_classes=None):
+        super(MagicDirectory, self).__init__(parent_inode, inodes, api.config, enable_write, filters)
         self.api = api
         self.num_retries = num_retries
         self.pdh_only = pdh_only
@@ -759,8 +780,14 @@ and the directory will appear if it exists.
             # If we're the root directory, add an identical by_id subdirectory.
             if self.inode == llfuse.ROOT_INODE:
                 self._entries['by_id'] = self.inodes.add_entry(MagicDirectory(
-                    self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
-                    self.pdh_only))
+                    self.inode,
+                    self.inodes,
+                    self.api,
+                    self.num_retries,
+                    self._enable_write,
+                    self._filters,
+                    self.pdh_only,
+                ))
 
     def __contains__(self, k):
         if k in self._entries:
@@ -774,15 +801,34 @@ and the directory will appear if it exists.
 
             if group_uuid_pattern.match(k):
                 project = self.api.groups().list(
-                    filters=[['group_class', 'in', ['project','filter']], ["uuid", "=", k]]).execute(num_retries=self.num_retries)
+                    filters=[
+                        ['group_class', 'in', ['project','filter']],
+                        ["uuid", "=", k],
+                        *self._filters_for('groups', qualified=False),
+                    ],
+                ).execute(num_retries=self.num_retries)
                 if project[u'items_available'] == 0:
                     return False
                 e = self.inodes.add_entry(ProjectDirectory(
-                    self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
-                    project[u'items'][0], storage_classes=self.storage_classes))
+                    self.inode,
+                    self.inodes,
+                    self.api,
+                    self.num_retries,
+                    self._enable_write,
+                    self._filters,
+                    project[u'items'][0],
+                    storage_classes=self.storage_classes,
+                ))
             else:
                 e = self.inodes.add_entry(CollectionDirectory(
-                        self.inode, self.inodes, self.api, self.num_retries, self._enable_write, k))
+                    self.inode,
+                    self.inodes,
+                    self.api,
+                    self.num_retries,
+                    self._enable_write,
+                    self._filters,
+                    k,
+                ))
 
             if e.update():
                 if k not in self._entries:
@@ -816,8 +862,8 @@ and the directory will appear if it exists.
 class TagsDirectory(Directory):
     """A special directory that contains as subdirectories all tags visible to the user."""
 
-    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, poll_time=60):
-        super(TagsDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters, poll_time=60):
+        super(TagsDirectory, self).__init__(parent_inode, inodes, api.config, enable_write, filters)
         self.api = api
         self.num_retries = num_retries
         self._poll = True
@@ -831,15 +877,32 @@ class TagsDirectory(Directory):
     def update(self):
         with llfuse.lock_released:
             tags = self.api.links().list(
-                filters=[['link_class', '=', 'tag'], ["name", "!=", ""]],
-                select=['name'], distinct=True, limit=1000
-                ).execute(num_retries=self.num_retries)
+                filters=[
+                    ['link_class', '=', 'tag'],
+                    ['name', '!=', ''],
+                    *self._filters_for('links', qualified=False),
+                ],
+                select=['name'],
+                distinct=True,
+                limit=1000,
+            ).execute(num_retries=self.num_retries)
         if "items" in tags:
-            self.merge(tags['items']+[{"name": n} for n in self._extra],
-                       lambda i: i['name'],
-                       lambda a, i: a.tag == i['name'],
-                       lambda i: TagDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
-                                              i['name'], poll=self._poll, poll_time=self._poll_time))
+            self.merge(
+                tags['items']+[{"name": n} for n in self._extra],
+                lambda i: i['name'],
+                lambda a, i: a.tag == i['name'],
+                lambda i: TagDirectory(
+                    self.inode,
+                    self.inodes,
+                    self.api,
+                    self.num_retries,
+                    self._enable_write,
+                    self._filters,
+                    i['name'],
+                    poll=self._poll,
+                    poll_time=self._poll_time,
+                ),
+            )
 
     @use_counter
     @check_update
@@ -848,7 +911,12 @@ class TagsDirectory(Directory):
             return super(TagsDirectory, self).__getitem__(item)
         with llfuse.lock_released:
             tags = self.api.links().list(
-                filters=[['link_class', '=', 'tag'], ['name', '=', item]], limit=1
+                filters=[
+                    ['link_class', '=', 'tag'],
+                    ['name', '=', item],
+                    *self._filters_for('links', qualified=False),
+                ],
+                limit=1,
             ).execute(num_retries=self.num_retries)
         if tags["items"]:
             self._extra.add(item)
@@ -873,9 +941,9 @@ class TagDirectory(Directory):
     to the user that are tagged with a particular tag.
     """
 
-    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, tag,
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters, tag,
                  poll=False, poll_time=60):
-        super(TagDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
+        super(TagDirectory, self).__init__(parent_inode, inodes, api.config, enable_write, filters)
         self.api = api
         self.num_retries = num_retries
         self.tag = tag
@@ -889,23 +957,36 @@ class TagDirectory(Directory):
     def update(self):
         with llfuse.lock_released:
             taggedcollections = self.api.links().list(
-                filters=[['link_class', '=', 'tag'],
-                         ['name', '=', self.tag],
-                         ['head_uuid', 'is_a', 'arvados#collection']],
-                select=['head_uuid']
-                ).execute(num_retries=self.num_retries)
-        self.merge(taggedcollections['items'],
-                   lambda i: i['head_uuid'],
-                   lambda a, i: a.collection_locator == i['head_uuid'],
-                   lambda i: CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write, i['head_uuid']))
+                filters=[
+                    ['link_class', '=', 'tag'],
+                    ['name', '=', self.tag],
+                    ['head_uuid', 'is_a', 'arvados#collection'],
+                    *self._filters_for('links', qualified=False),
+                ],
+                select=['head_uuid'],
+            ).execute(num_retries=self.num_retries)
+        self.merge(
+            taggedcollections['items'],
+            lambda i: i['head_uuid'],
+            lambda a, i: a.collection_locator == i['head_uuid'],
+            lambda i: CollectionDirectory(
+                self.inode,
+                self.inodes,
+                self.api,
+                self.num_retries,
+                self._enable_write,
+                self._filters,
+                i['head_uuid'],
+            ),
+        )
 
 
 class ProjectDirectory(Directory):
     """A special directory that contains the contents of a project."""
 
-    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, project_object,
-                 poll=True, poll_time=3, storage_classes=None):
-        super(ProjectDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters,
+                 project_object, poll=True, poll_time=3, storage_classes=None):
+        super(ProjectDirectory, self).__init__(parent_inode, inodes, api.config, enable_write, filters)
         self.api = api
         self.num_retries = num_retries
         self.project_object = project_object
@@ -922,14 +1003,14 @@ class ProjectDirectory(Directory):
         return True
 
     def createDirectory(self, i):
+        common_args = (self.inode, self.inodes, self.api, self.num_retries, self._enable_write, self._filters)
         if collection_uuid_pattern.match(i['uuid']):
-            return CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write, i)
+            return CollectionDirectory(*common_args, i)
         elif group_uuid_pattern.match(i['uuid']):
-            return ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
-                                    i, self._poll, self._poll_time, self.storage_classes)
+            return ProjectDirectory(*common_args, i, self._poll, self._poll_time, self.storage_classes)
         elif link_uuid_pattern.match(i['uuid']):
             if i['head_kind'] == 'arvados#collection' or portable_data_hash_pattern.match(i['head_uuid']):
-                return CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write, i['head_uuid'])
+                return CollectionDirectory(*common_args, i['head_uuid'])
             else:
                 return None
         elif uuid_pattern.match(i['uuid']):
@@ -990,19 +1071,27 @@ class ProjectDirectory(Directory):
                     self.project_object = self.api.users().get(
                         uuid=self.project_uuid).execute(num_retries=self.num_retries)
                 # do this in 2 steps until #17424 is fixed
-                contents = list(arvados.util.keyset_list_all(self.api.groups().contents,
-                                                        order_key="uuid",
-                                                        num_retries=self.num_retries,
-                                                        uuid=self.project_uuid,
-                                                        filters=[["uuid", "is_a", "arvados#group"],
-                                                                 ["groups.group_class", "in", ["project","filter"]]]))
-                contents.extend(filter(lambda i: i["current_version_uuid"] == i["uuid"],
-                                       arvados.util.keyset_list_all(self.api.groups().contents,
-                                                             order_key="uuid",
-                                                             num_retries=self.num_retries,
-                                                             uuid=self.project_uuid,
-                                                             filters=[["uuid", "is_a", "arvados#collection"]])))
-
+                contents = list(arvados.util.keyset_list_all(
+                    self.api.groups().contents,
+                    order_key='uuid',
+                    num_retries=self.num_retries,
+                    uuid=self.project_uuid,
+                    filters=[
+                        ['uuid', 'is_a', 'arvados#group'],
+                        ['groups.group_class', 'in', ['project', 'filter']],
+                        *self._filters_for('groups', qualified=True),
+                    ],
+                ))
+                contents.extend(obj for obj in arvados.util.keyset_list_all(
+                    self.api.groups().contents,
+                    order_key='uuid',
+                    num_retries=self.num_retries,
+                    uuid=self.project_uuid,
+                    filters=[
+                        ['uuid', 'is_a', 'arvados#collection'],
+                        *self._filters_for('collections', qualified=True),
+                    ],
+                ) if obj['current_version_uuid'] == obj['uuid'])
 
             # end with llfuse.lock_released, re-acquire lock
 
@@ -1032,14 +1121,24 @@ class ProjectDirectory(Directory):
                 namefilter = ["name", "=", k]
             else:
                 namefilter = ["name", "in", [k, k2]]
-            contents = self.api.groups().list(filters=[["owner_uuid", "=", self.project_uuid],
-                                                       ["group_class", "in", ["project","filter"]],
-                                                       namefilter],
-                                              limit=2).execute(num_retries=self.num_retries)["items"]
+            contents = self.api.groups().list(
+                filters=[
+                    ["owner_uuid", "=", self.project_uuid],
+                    ["group_class", "in", ["project","filter"]],
+                    namefilter,
+                    *self._filters_for('groups', qualified=False),
+                ],
+                limit=2,
+            ).execute(num_retries=self.num_retries)["items"]
             if not contents:
-                contents = self.api.collections().list(filters=[["owner_uuid", "=", self.project_uuid],
-                                                                namefilter],
-                                                       limit=2).execute(num_retries=self.num_retries)["items"]
+                contents = self.api.collections().list(
+                    filters=[
+                        ["owner_uuid", "=", self.project_uuid],
+                        namefilter,
+                        *self._filters_for('collections', qualified=False),
+                    ],
+                    limit=2,
+                ).execute(num_retries=self.num_retries)["items"]
         if contents:
             if len(contents) > 1 and contents[1]['name'] == k:
                 # If "foo/bar" and "foo[SUBST]bar" both exist, use
@@ -1193,9 +1292,9 @@ class ProjectDirectory(Directory):
 class SharedDirectory(Directory):
     """A special directory that represents users or groups who have shared projects with me."""
 
-    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, exclude,
-                 poll=False, poll_time=60, storage_classes=None):
-        super(SharedDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters,
+                 exclude, poll=False, poll_time=60, storage_classes=None):
+        super(SharedDirectory, self).__init__(parent_inode, inodes, api.config, enable_write, filters)
         self.api = api
         self.num_retries = num_retries
         self.current_user = api.users().current().execute(num_retries=num_retries)
@@ -1221,11 +1320,17 @@ class SharedDirectory(Directory):
                 if 'httpMethod' in methods.get('shared', {}):
                     page = []
                     while True:
-                        resp = self.api.groups().shared(filters=[['group_class', 'in', ['project','filter']]]+page,
-                                                        order="uuid",
-                                                        limit=10000,
-                                                        count="none",
-                                                        include="owner_uuid").execute()
+                        resp = self.api.groups().shared(
+                            filters=[
+                                ['group_class', 'in', ['project','filter']],
+                                *page,
+                                *self._filters_for('groups', qualified=False),
+                            ],
+                            order="uuid",
+                            limit=10000,
+                            count="none",
+                            include="owner_uuid",
+                        ).execute()
                         if not resp["items"]:
                             break
                         page = [["uuid", ">", resp["items"][len(resp["items"])-1]["uuid"]]]
@@ -1240,8 +1345,12 @@ class SharedDirectory(Directory):
                         self.api.groups().list,
                         order_key="uuid",
                         num_retries=self.num_retries,
-                        filters=[['group_class','in',['project','filter']]],
-                        select=["uuid", "owner_uuid"]))
+                        filters=[
+                            ['group_class', 'in', ['project','filter']],
+                            *self._filters_for('groups', qualified=False),
+                        ],
+                        select=["uuid", "owner_uuid"],
+                    ))
                     for ob in all_projects:
                         objects[ob['uuid']] = ob
 
@@ -1255,13 +1364,20 @@ class SharedDirectory(Directory):
                         self.api.users().list,
                         order_key="uuid",
                         num_retries=self.num_retries,
-                        filters=[['uuid','in', list(root_owners)]])
+                        filters=[
+                            ['uuid', 'in', list(root_owners)],
+                            *self._filters_for('users', qualified=False),
+                        ],
+                    )
                     lgroups = arvados.util.keyset_list_all(
                         self.api.groups().list,
                         order_key="uuid",
                         num_retries=self.num_retries,
-                        filters=[['uuid','in', list(root_owners)+roots]])
-
+                        filters=[
+                            ['uuid', 'in', list(root_owners)+roots],
+                            *self._filters_for('groups', qualified=False),
+                        ],
+                    )
                     for l in lusers:
                         objects[l["uuid"]] = l
                     for l in lgroups:
@@ -1283,11 +1399,23 @@ class SharedDirectory(Directory):
 
             # end with llfuse.lock_released, re-acquire lock
 
-            self.merge(contents.items(),
-                       lambda i: i[0],
-                       lambda a, i: a.uuid() == i[1]['uuid'],
-                       lambda i: ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
-                                                  i[1], poll=self._poll, poll_time=self._poll_time, storage_classes=self.storage_classes))
+            self.merge(
+                contents.items(),
+                lambda i: i[0],
+                lambda a, i: a.uuid() == i[1]['uuid'],
+                lambda i: ProjectDirectory(
+                    self.inode,
+                    self.inodes,
+                    self.api,
+                    self.num_retries,
+                    self._enable_write,
+                    self._filters,
+                    i[1],
+                    poll=self._poll,
+                    poll_time=self._poll_time,
+                    storage_classes=self.storage_classes,
+                ),
+            )
         except Exception:
             _logger.exception("arv-mount shared dir error")
         finally:
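
Note on the filters plumbing above: each directory class now receives the user-supplied filter list, stores it as self._filters, and splats the relevant subset into its API calls via self._filters_for(...). The helper itself is defined elsewhere in fusedir.py and is not shown in these hunks; a minimal sketch of the idea, assuming the user filters are qualified with a resource-type prefix such as "collections." or "groups.", might look like:

    def _filters_for(self, subject, *, qualified):
        # Illustrative sketch only -- the real helper may differ.
        # Keep only the filters that target `subject`, and strip the
        # "subject." prefix when the API call expects unqualified names.
        for attr, op, operand in (self._filters or []):
            prefix, _, unqualified = attr.partition('.')
            if prefix != subject:
                continue
            yield [attr, op, operand] if qualified else [unqualified, op, operand]

The call sites above unpack the result with *, e.g. *self._filters_for('collections', qualified=False), so either a generator or a list works.
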
index d0c46f132040aa400645473ddf347c53be135d23..b04829652e948b4de22c3c433620287c4fb51ef1 100644 (file)
@@ -51,11 +51,8 @@ setup(name='arvados_fuse',
         'setuptools',
         "prometheus_client"
         ],
-      extras_require={
-          ':python_version<"3"': ['pytz'],
-      },
+      python_requires="~=3.8",
       classifiers=[
-          'Programming Language :: Python :: 2',
           'Programming Language :: Python :: 3',
       ],
       test_suite='tests',
index c316010f6c48b17b5d7aa35b4fe96d1021bfb49d..8a3522e0cb0df7e11aec61279ab530d3d2395e44 100644 (file)
@@ -72,15 +72,22 @@ class MountTestBase(unittest.TestCase):
         llfuse.close()
 
     def make_mount(self, root_class, **root_kwargs):
-        enable_write = True
-        if 'enable_write' in root_kwargs:
-            enable_write = root_kwargs.pop('enable_write')
+        enable_write = root_kwargs.pop('enable_write', True)
         self.operations = fuse.Operations(
-            os.getuid(), os.getgid(),
+            os.getuid(),
+            os.getgid(),
             api_client=self.api,
-            enable_write=enable_write)
+            enable_write=enable_write,
+        )
         self.operations.inodes.add_entry(root_class(
-            llfuse.ROOT_INODE, self.operations.inodes, self.api, 0, enable_write, **root_kwargs))
+            llfuse.ROOT_INODE,
+            self.operations.inodes,
+            self.api,
+            0,
+            enable_write,
+            root_kwargs.pop('filters', None),
+            **root_kwargs,
+        ))
         llfuse.init(self.operations, self.mounttmp, [])
         self.llfuse_thread = threading.Thread(None, lambda: self._llfuse_main())
         self.llfuse_thread.daemon = True
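
With the signature change above, a test case can pass a filters list through root_kwargs and make_mount forwards it positionally to the root directory class. A usage sketch (mirroring the new test file below; the filter value and the project record are placeholders, not fixtures from this change):

    self.make_mount(
        fusedir.ProjectDirectory,
        enable_write=False,
        filters=[['collections.name', 'like', 'zzzzz-4zz18-%']],
        project_object=project,  # assumed: a project record fetched earlier
    )
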
index f4e5138e2ce0fd5d1559046754f2c50a4f1c2ddb..ef9c25bcf588f0fa7589ce0f06b4f8e1b9263927 100644 (file)
@@ -1126,7 +1126,10 @@ class MagicDirApiError(FuseMagicTest):
 
 class SanitizeFilenameTest(MountTestBase):
     def test_sanitize_filename(self):
-        pdir = fuse.ProjectDirectory(1, {}, self.api, 0, False, project_object=self.api.users().current().execute())
+        pdir = fuse.ProjectDirectory(
+            1, {}, self.api, 0, False, None,
+            project_object=self.api.users().current().execute(),
+        )
         acceptable = [
             "foo.txt",
             ".foo",
diff --git a/services/fuse/tests/test_mount_filters.py b/services/fuse/tests/test_mount_filters.py
new file mode 100644 (file)
index 0000000..5f32453
--- /dev/null
@@ -0,0 +1,223 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import collections
+import itertools
+import json
+import re
+import unittest
+
+from pathlib import Path
+
+from parameterized import parameterized
+
+from arvados_fuse import fusedir
+
+from .integration_test import IntegrationTest
+from .mount_test_base import MountTestBase
+from .run_test_server import fixture
+
+_COLLECTIONS = fixture('collections')
+_GROUPS = fixture('groups')
+_LINKS = fixture('links')
+_USERS = fixture('users')
+
+class DirectoryFiltersTestCase(MountTestBase):
+    DEFAULT_ROOT_KWARGS = {
+        'enable_write': False,
+        'filters': [
+            ['collections.name', 'like', 'zzzzz-4zz18-%'],
+            # This matches both "A Project" (which we use as the test root)
+            # and "A Subproject" (which we assert is found under it).
+            ['groups.name', 'like', 'A %roject'],
+        ],
+    }
+    EXPECTED_PATHS = frozenset([
+        _COLLECTIONS['foo_collection_in_aproject']['name'],
+        _GROUPS['asubproject']['name'],
+    ])
+    CHECKED_PATHS = EXPECTED_PATHS.union([
+        _COLLECTIONS['collection_to_move_around_in_aproject']['name'],
+        _GROUPS['subproject_in_active_user_home_project_to_test_unique_key_violation']['name'],
+    ])
+
+    @parameterized.expand([
+        (fusedir.MagicDirectory, {}, _GROUPS['aproject']['uuid']),
+        (fusedir.ProjectDirectory, {'project_object': _GROUPS['aproject']}, '.'),
+        (fusedir.SharedDirectory, {'exclude': None}, Path(
+            '{first_name} {last_name}'.format_map(_USERS['active']),
+            _GROUPS['aproject']['name'],
+        )),
+    ])
+    def test_filtered_path_exists(self, root_class, root_kwargs, subdir):
+        root_kwargs = collections.ChainMap(root_kwargs, self.DEFAULT_ROOT_KWARGS)
+        self.make_mount(root_class, **root_kwargs)
+        dir_path = Path(self.mounttmp, subdir)
+        actual = frozenset(
+            basename
+            for basename in self.CHECKED_PATHS
+            if (dir_path / basename).exists()
+        )
+        self.assertEqual(
+            actual,
+            self.EXPECTED_PATHS,
+            "mount existence checks did not match expected results",
+        )
+
+    @parameterized.expand([
+        (fusedir.MagicDirectory, {}, _GROUPS['aproject']['uuid']),
+        (fusedir.ProjectDirectory, {'project_object': _GROUPS['aproject']}, '.'),
+        (fusedir.SharedDirectory, {'exclude': None}, Path(
+            '{first_name} {last_name}'.format_map(_USERS['active']),
+            _GROUPS['aproject']['name'],
+        )),
+    ])
+    def test_filtered_path_listing(self, root_class, root_kwargs, subdir):
+        root_kwargs = collections.ChainMap(root_kwargs, self.DEFAULT_ROOT_KWARGS)
+        self.make_mount(root_class, **root_kwargs)
+        actual = frozenset(path.name for path in Path(self.mounttmp, subdir).iterdir())
+        self.assertEqual(
+            actual & self.EXPECTED_PATHS,
+            self.EXPECTED_PATHS,
+            "mount listing did not include minimum matches",
+        )
+        extra = frozenset(
+            name
+            for name in actual
+            if not (name.startswith('zzzzz-4zz18-') or name.endswith('roject'))
+        )
+        self.assertFalse(
+            extra,
+            "mount listing included results outside filters",
+        )
+
+
+class TagFiltersTestCase(MountTestBase):
+    COLL_UUID = _COLLECTIONS['foo_collection_in_aproject']['uuid']
+    TAG_NAME = _LINKS['foo_collection_tag']['name']
+
+    @parameterized.expand([
+        '=',
+        '!=',
+    ])
+    def test_tag_directory_filters(self, op):
+        self.make_mount(
+            fusedir.TagDirectory,
+            enable_write=False,
+            filters=[
+                ['links.head_uuid', op, self.COLL_UUID],
+            ],
+            tag=self.TAG_NAME,
+        )
+        checked_path = Path(self.mounttmp, self.COLL_UUID)
+        self.assertEqual(checked_path.exists(), op == '=')
+
+    @parameterized.expand(itertools.product(
+        ['in', 'not in'],
+        ['=', '!='],
+    ))
+    def test_tags_directory_filters(self, coll_op, link_op):
+        self.make_mount(
+            fusedir.TagsDirectory,
+            enable_write=False,
+            filters=[
+                ['links.head_uuid', coll_op, [self.COLL_UUID]],
+                ['links.name', link_op, self.TAG_NAME],
+            ],
+        )
+        if link_op == '!=':
+            filtered_path = Path(self.mounttmp, self.TAG_NAME)
+        elif coll_op == 'not in':
+            # As of 2024-02-09, the foo tag applies to only this one
+            # collection. Filtering it out via head_uuid makes it disappear
+            # from the TagsDirectory entirely, so we use the tag directory
+            # itself as filtered_path. If that fixture changes in the future,
+            # it would be fine to append self.COLL_UUID to filtered_path here.
+            filtered_path = Path(self.mounttmp, self.TAG_NAME)
+        else:
+            filtered_path = Path(self.mounttmp, self.TAG_NAME, self.COLL_UUID, 'foo', 'nonexistent')
+        expect_path = filtered_path.parent
+        self.assertTrue(
+            expect_path.exists(),
+            f"path not found but should exist: {expect_path}",
+        )
+        self.assertFalse(
+            filtered_path.exists(),
+            f"path was found but should be filtered out: {filtered_path}",
+        )
+
+
+class FiltersIntegrationTest(IntegrationTest):
+    COLLECTIONS_BY_PROP = {
+        coll['properties']['MainFile']: coll
+        for coll in _COLLECTIONS.values()
+        if coll['owner_uuid'] == _GROUPS['fuse_filters_test_project']['uuid']
+    }
+    PROP_VALUES = list(COLLECTIONS_BY_PROP)
+
+    for test_n, query in enumerate(['foo', 'ba?']):
+        @IntegrationTest.mount([
+            '--filters', json.dumps([
+                ['collections.properties.MainFile', 'like', query],
+            ]),
+            '--mount-by-pdh', 'by_pdh',
+            '--mount-by-id', 'by_id',
+            '--mount-home', 'home',
+        ])
+        def _test_func(self, query=query):
+            pdh_path = Path(self.mnt, 'by_pdh')
+            id_path = Path(self.mnt, 'by_id')
+            home_path = Path(self.mnt, 'home')
+            query_re = re.compile(query.replace('?', '.'))
+            for prop_val, coll in self.COLLECTIONS_BY_PROP.items():
+                should_exist = query_re.fullmatch(prop_val) is not None
+                for path in [
+                        pdh_path / coll['portable_data_hash'],
+                        id_path / coll['portable_data_hash'],
+                        id_path / coll['uuid'],
+                        home_path / coll['name'],
+                ]:
+                    self.assertEqual(
+                        path.exists(),
+                        should_exist,
+                        f"{path} from MainFile={prop_val} exists!={should_exist}",
+                    )
+        exec(f"test_collection_properties_filters_{test_n} = _test_func")
+
+    for test_n, mount_opts in enumerate([
+            ['--home'],
+            ['--project', _GROUPS['aproject']['uuid']],
+    ]):
+        @IntegrationTest.mount([
+            '--filters', json.dumps([
+                ['collections.name', 'like', 'zzzzz-4zz18-%'],
+                ['groups.name', 'like', 'A %roject'],
+            ]),
+            *mount_opts,
+        ])
+        def _test_func(self, mount_opts=mount_opts):
+            root_path = Path(self.mnt)
+            root_depth = len(root_path.parts)
+            max_depth = 0
+            name_re = re.compile(r'(zzzzz-4zz18-.*|A .*roject)')
+            dir_queue = [root_path]
+            while dir_queue:
+                root_path = dir_queue.pop()
+                max_depth = max(max_depth, len(root_path.parts))
+                for child in root_path.iterdir():
+                    if not child.is_dir():
+                        continue
+                    match = name_re.fullmatch(child.name)
+                    self.assertIsNotNone(
+                        match,
+                        "found directory with name that should've been filtered",
+                    )
+                    if not match.group(1).startswith('zzzzz-4zz18-'):
+                        dir_queue.append(child)
+            self.assertGreaterEqual(
+                max_depth,
+                root_depth + (2 if mount_opts[0] == '--home' else 1),
+                "test descended fewer subdirectories than expected",
+            )
+        exec(f"test_multiple_name_filters_{test_n} = _test_func")
index c3579556bb5f174781753676f0208d794a5ee620..771e277d60a4befe5367bfc2299ec80da145b2bb 100644 (file)
@@ -10,6 +10,7 @@ import (
        "sync"
 
        "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/services/keepstore"
 )
 
 // Pull is a request to retrieve a block from a remote server, and
@@ -23,13 +24,8 @@ type Pull struct {
 // MarshalJSON formats a pull request the way keepstore wants to see
 // it.
 func (p Pull) MarshalJSON() ([]byte, error) {
-       type KeepstorePullRequest struct {
-               Locator   string   `json:"locator"`
-               Servers   []string `json:"servers"`
-               MountUUID string   `json:"mount_uuid"`
-       }
-       return json.Marshal(KeepstorePullRequest{
-               Locator:   string(p.SizedDigest[:32]),
+       return json.Marshal(keepstore.PullListItem{
+               Locator:   string(p.SizedDigest),
                Servers:   []string{p.From.URLBase()},
                MountUUID: p.To.KeepMount.UUID,
        })
@@ -45,13 +41,8 @@ type Trash struct {
 // MarshalJSON formats a trash request the way keepstore wants to see
 // it, i.e., as a bare locator with no +size hint.
 func (t Trash) MarshalJSON() ([]byte, error) {
-       type KeepstoreTrashRequest struct {
-               Locator    string `json:"locator"`
-               BlockMtime int64  `json:"block_mtime"`
-               MountUUID  string `json:"mount_uuid"`
-       }
-       return json.Marshal(KeepstoreTrashRequest{
-               Locator:    string(t.SizedDigest[:32]),
+       return json.Marshal(keepstore.TrashListItem{
+               Locator:    string(t.SizedDigest),
                BlockMtime: t.Mtime,
                MountUUID:  t.From.KeepMount.UUID,
        })
index 5474d29fb57e2d64a67286382b1d53907afe3ae7..f2b9429017cf52a4b21398a4d8106b70a3e34757 100644 (file)
@@ -33,12 +33,12 @@ func (s *changeSetSuite) TestJSONFormat(c *check.C) {
                To:          mnt,
                From:        srv}})
        c.Check(err, check.IsNil)
-       c.Check(string(buf), check.Equals, `[{"locator":"acbd18db4cc2f85cedef654fccc4a4d8","servers":["http://keep1.zzzzz.arvadosapi.com:25107"],"mount_uuid":"zzzzz-mount-abcdefghijklmno"}]`)
+       c.Check(string(buf), check.Equals, `[{"locator":"acbd18db4cc2f85cedef654fccc4a4d8+3","servers":["http://keep1.zzzzz.arvadosapi.com:25107"],"mount_uuid":"zzzzz-mount-abcdefghijklmno"}]`)
 
        buf, err = json.Marshal([]Trash{{
                SizedDigest: arvados.SizedDigest("acbd18db4cc2f85cedef654fccc4a4d8+3"),
                From:        mnt,
                Mtime:       123456789}})
        c.Check(err, check.IsNil)
-       c.Check(string(buf), check.Equals, `[{"locator":"acbd18db4cc2f85cedef654fccc4a4d8","block_mtime":123456789,"mount_uuid":"zzzzz-mount-abcdefghijklmno"}]`)
+       c.Check(string(buf), check.Equals, `[{"locator":"acbd18db4cc2f85cedef654fccc4a4d8+3","block_mtime":123456789,"mount_uuid":"zzzzz-mount-abcdefghijklmno"}]`)
 }
index 7efba2348b6593a1232edb6c719253ef388674dd..2c73e2d1040d1b37df4e77375b5a859d3187565e 100644 (file)
@@ -32,8 +32,8 @@ import (
        . "gopkg.in/check.v1"
 )
 
-// Gocheck boilerplate
 func Test(t *testing.T) {
+       keepclient.DefaultRetryDelay = time.Millisecond
        TestingT(t)
 }
 
@@ -346,7 +346,7 @@ func (s *ServerRequiredSuite) TestPutAskGet(c *C) {
        }
 
        {
-               reader, _, _, err := kc.Get(hash)
+               reader, _, _, err := kc.Get(hash + "+3")
                c.Check(reader, Equals, nil)
                c.Check(err, Equals, keepclient.BlockNotFound)
                c.Log("Finished Get (expected BlockNotFound)")
index 56a52c913a196149d3d4bbe03ec1f8f382018b72..2c8a79350c86b02e08eea2007c58a8f2e632ca47 100644 (file)
@@ -5,13 +5,11 @@
 package keepstore
 
 import (
-       "bytes"
        "context"
        "encoding/json"
        "errors"
        "fmt"
        "io"
-       "io/ioutil"
        "net/http"
        "os"
        "regexp"
@@ -32,17 +30,18 @@ func init() {
        driver["Azure"] = newAzureBlobVolume
 }
 
-func newAzureBlobVolume(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (Volume, error) {
-       v := &AzureBlobVolume{
+func newAzureBlobVolume(params newVolumeParams) (volume, error) {
+       v := &azureBlobVolume{
                RequestTimeout:    azureDefaultRequestTimeout,
                WriteRaceInterval: azureDefaultWriteRaceInterval,
                WriteRacePollTime: azureDefaultWriteRacePollTime,
-               cluster:           cluster,
-               volume:            volume,
-               logger:            logger,
-               metrics:           metrics,
+               cluster:           params.Cluster,
+               volume:            params.ConfigVolume,
+               logger:            params.Logger,
+               metrics:           params.MetricsVecs,
+               bufferPool:        params.BufferPool,
        }
-       err := json.Unmarshal(volume.DriverParameters, &v)
+       err := json.Unmarshal(params.ConfigVolume.DriverParameters, &v)
        if err != nil {
                return nil, err
        }
@@ -80,8 +79,8 @@ func newAzureBlobVolume(cluster *arvados.Cluster, volume arvados.Volume, logger
        return v, v.check()
 }
 
-func (v *AzureBlobVolume) check() error {
-       lbls := prometheus.Labels{"device_id": v.GetDeviceID()}
+func (v *azureBlobVolume) check() error {
+       lbls := prometheus.Labels{"device_id": v.DeviceID()}
        v.container.stats.opsCounters, v.container.stats.errCounters, v.container.stats.ioBytes = v.metrics.getCounterVecsFor(lbls)
        return nil
 }
@@ -94,9 +93,9 @@ const (
        azureDefaultWriteRacePollTime    = arvados.Duration(time.Second)
 )
 
-// An AzureBlobVolume stores and retrieves blocks in an Azure Blob
+// An azureBlobVolume stores and retrieves blocks in an Azure Blob
 // container.
-type AzureBlobVolume struct {
+type azureBlobVolume struct {
        StorageAccountName   string
        StorageAccountKey    string
        StorageBaseURL       string // "" means default, "core.windows.net"
@@ -108,12 +107,13 @@ type AzureBlobVolume struct {
        WriteRaceInterval    arvados.Duration
        WriteRacePollTime    arvados.Duration
 
-       cluster   *arvados.Cluster
-       volume    arvados.Volume
-       logger    logrus.FieldLogger
-       metrics   *volumeMetricsVecs
-       azClient  storage.Client
-       container *azureContainer
+       cluster    *arvados.Cluster
+       volume     arvados.Volume
+       logger     logrus.FieldLogger
+       metrics    *volumeMetricsVecs
+       bufferPool *bufferPool
+       azClient   storage.Client
+       container  *azureContainer
 }
 
 // singleSender is a single-attempt storage.Sender.
@@ -124,18 +124,13 @@ func (*singleSender) Send(c *storage.Client, req *http.Request) (resp *http.Resp
        return c.HTTPClient.Do(req)
 }
 
-// Type implements Volume.
-func (v *AzureBlobVolume) Type() string {
-       return "Azure"
-}
-
-// GetDeviceID returns a globally unique ID for the storage container.
-func (v *AzureBlobVolume) GetDeviceID() string {
+// DeviceID returns a globally unique ID for the storage container.
+func (v *azureBlobVolume) DeviceID() string {
        return "azure://" + v.StorageBaseURL + "/" + v.StorageAccountName + "/" + v.ContainerName
 }
 
 // Return true if expires_at metadata attribute is found on the block
-func (v *AzureBlobVolume) checkTrashed(loc string) (bool, map[string]string, error) {
+func (v *azureBlobVolume) checkTrashed(loc string) (bool, map[string]string, error) {
        metadata, err := v.container.GetBlobMetadata(loc)
        if err != nil {
                return false, metadata, v.translateError(err)
@@ -146,30 +141,34 @@ func (v *AzureBlobVolume) checkTrashed(loc string) (bool, map[string]string, err
        return false, metadata, nil
 }
 
-// Get reads a Keep block that has been stored as a block blob in the
-// container.
+// BlockRead reads a Keep block that has been stored as a block blob
+// in the container.
 //
 // If the block is younger than azureWriteRaceInterval and is
-// unexpectedly empty, assume a PutBlob operation is in progress, and
-// wait for it to finish writing.
-func (v *AzureBlobVolume) Get(ctx context.Context, loc string, buf []byte) (int, error) {
-       trashed, _, err := v.checkTrashed(loc)
+// unexpectedly empty, assume a BlockWrite operation is in progress,
+// and wait for it to finish writing.
+func (v *azureBlobVolume) BlockRead(ctx context.Context, hash string, w io.WriterAt) error {
+       trashed, _, err := v.checkTrashed(hash)
        if err != nil {
-               return 0, err
+               return err
        }
        if trashed {
-               return 0, os.ErrNotExist
+               return os.ErrNotExist
+       }
+       buf, err := v.bufferPool.GetContext(ctx)
+       if err != nil {
+               return err
        }
+       defer v.bufferPool.Put(buf)
        var deadline time.Time
-       haveDeadline := false
-       size, err := v.get(ctx, loc, buf)
-       for err == nil && size == 0 && loc != "d41d8cd98f00b204e9800998ecf8427e" {
+       wrote, err := v.get(ctx, hash, w)
+       for err == nil && wrote == 0 && hash != "d41d8cd98f00b204e9800998ecf8427e" {
                // Seeing a brand new empty block probably means we're
                // in a race with CreateBlob, which under the hood
                // (apparently) does "CreateEmpty" and "CommitData"
                // with no additional transaction locking.
-               if !haveDeadline {
-                       t, err := v.Mtime(loc)
+               if deadline.IsZero() {
+                       t, err := v.Mtime(hash)
                        if err != nil {
                                ctxlog.FromContext(ctx).Print("Got empty block (possible race) but Mtime failed: ", err)
                                break
@@ -178,25 +177,24 @@ func (v *AzureBlobVolume) Get(ctx context.Context, loc string, buf []byte) (int,
                        if time.Now().After(deadline) {
                                break
                        }
-                       ctxlog.FromContext(ctx).Printf("Race? Block %s is 0 bytes, %s old. Polling until %s", loc, time.Since(t), deadline)
-                       haveDeadline = true
+                       ctxlog.FromContext(ctx).Printf("Race? Block %s is 0 bytes, %s old. Polling until %s", hash, time.Since(t), deadline)
                } else if time.Now().After(deadline) {
                        break
                }
                select {
                case <-ctx.Done():
-                       return 0, ctx.Err()
+                       return ctx.Err()
                case <-time.After(v.WriteRacePollTime.Duration()):
                }
-               size, err = v.get(ctx, loc, buf)
+               wrote, err = v.get(ctx, hash, w)
        }
-       if haveDeadline {
-               ctxlog.FromContext(ctx).Printf("Race ended with size==%d", size)
+       if !deadline.IsZero() {
+               ctxlog.FromContext(ctx).Printf("Race ended with size==%d", wrote)
        }
-       return size, err
+       return err
 }
 
-func (v *AzureBlobVolume) get(ctx context.Context, loc string, buf []byte) (int, error) {
+func (v *azureBlobVolume) get(ctx context.Context, hash string, dst io.WriterAt) (int, error) {
        ctx, cancel := context.WithCancel(ctx)
        defer cancel()
 
@@ -206,28 +204,30 @@ func (v *AzureBlobVolume) get(ctx context.Context, loc string, buf []byte) (int,
        }
 
        pieces := 1
-       expectSize := len(buf)
+       expectSize := BlockSize
+       sizeKnown := false
        if pieceSize < BlockSize {
-               // Unfortunately the handler doesn't tell us how long the blob
-               // is expected to be, so we have to ask Azure.
-               props, err := v.container.GetBlobProperties(loc)
+               // Unfortunately the handler doesn't tell us how long
+               // the blob is expected to be, so we have to ask
+               // Azure.
+               props, err := v.container.GetBlobProperties(hash)
                if err != nil {
                        return 0, v.translateError(err)
                }
                if props.ContentLength > int64(BlockSize) || props.ContentLength < 0 {
-                       return 0, fmt.Errorf("block %s invalid size %d (max %d)", loc, props.ContentLength, BlockSize)
+                       return 0, fmt.Errorf("block %s invalid size %d (max %d)", hash, props.ContentLength, BlockSize)
                }
                expectSize = int(props.ContentLength)
                pieces = (expectSize + pieceSize - 1) / pieceSize
+               sizeKnown = true
        }
 
        if expectSize == 0 {
                return 0, nil
        }
 
-       // We'll update this actualSize if/when we get the last piece.
-       actualSize := -1
        errors := make(chan error, pieces)
+       var wrote atomic.Int64
        var wg sync.WaitGroup
        wg.Add(pieces)
        for p := 0; p < pieces; p++ {
@@ -252,9 +252,9 @@ func (v *AzureBlobVolume) get(ctx context.Context, loc string, buf []byte) (int,
                        go func() {
                                defer close(gotRdr)
                                if startPos == 0 && endPos == expectSize {
-                                       rdr, err = v.container.GetBlob(loc)
+                                       rdr, err = v.container.GetBlob(hash)
                                } else {
-                                       rdr, err = v.container.GetBlobRange(loc, startPos, endPos-1, nil)
+                                       rdr, err = v.container.GetBlobRange(hash, startPos, endPos-1, nil)
                                }
                        }()
                        select {
@@ -282,86 +282,44 @@ func (v *AzureBlobVolume) get(ctx context.Context, loc string, buf []byte) (int,
                                <-ctx.Done()
                                rdr.Close()
                        }()
-                       n, err := io.ReadFull(rdr, buf[startPos:endPos])
-                       if pieces == 1 && (err == io.ErrUnexpectedEOF || err == io.EOF) {
+                       n, err := io.CopyN(io.NewOffsetWriter(dst, int64(startPos)), rdr, int64(endPos-startPos))
+                       wrote.Add(n)
+                       if pieces == 1 && !sizeKnown && (err == io.ErrUnexpectedEOF || err == io.EOF) {
                                // If we don't know the actual size,
                                // and just tried reading 64 MiB, it's
                                // normal to encounter EOF.
                        } else if err != nil {
-                               if ctx.Err() == nil {
-                                       errors <- err
-                               }
+                               errors <- err
                                cancel()
                                return
                        }
-                       if p == pieces-1 {
-                               actualSize = startPos + n
-                       }
                }(p)
        }
        wg.Wait()
        close(errors)
        if len(errors) > 0 {
-               return 0, v.translateError(<-errors)
+               return int(wrote.Load()), v.translateError(<-errors)
        }
-       if ctx.Err() != nil {
-               return 0, ctx.Err()
-       }
-       return actualSize, nil
+       return int(wrote.Load()), ctx.Err()
 }
 
-// Compare the given data with existing stored data.
-func (v *AzureBlobVolume) Compare(ctx context.Context, loc string, expect []byte) error {
-       trashed, _, err := v.checkTrashed(loc)
-       if err != nil {
-               return err
-       }
-       if trashed {
-               return os.ErrNotExist
-       }
-       var rdr io.ReadCloser
-       gotRdr := make(chan struct{})
-       go func() {
-               defer close(gotRdr)
-               rdr, err = v.container.GetBlob(loc)
-       }()
-       select {
-       case <-ctx.Done():
-               go func() {
-                       <-gotRdr
-                       if err == nil {
-                               rdr.Close()
-                       }
-               }()
-               return ctx.Err()
-       case <-gotRdr:
-       }
-       if err != nil {
-               return v.translateError(err)
-       }
-       defer rdr.Close()
-       return compareReaderWithBuf(ctx, rdr, expect, loc[:32])
-}
-
-// Put stores a Keep block as a block blob in the container.
-func (v *AzureBlobVolume) Put(ctx context.Context, loc string, block []byte) error {
-       if v.volume.ReadOnly {
-               return MethodDisabledError
-       }
+// BlockWrite stores a block on the volume. If it already exists, its
+// timestamp is updated.
+func (v *azureBlobVolume) BlockWrite(ctx context.Context, hash string, data []byte) error {
        // Send the block data through a pipe, so that (if we need to)
        // we can close the pipe early and abandon our
        // CreateBlockBlobFromReader() goroutine, without worrying
-       // about CreateBlockBlobFromReader() accessing our block
+       // about CreateBlockBlobFromReader() accessing our data
        // buffer after we release it.
        bufr, bufw := io.Pipe()
        go func() {
-               io.Copy(bufw, bytes.NewReader(block))
+               bufw.Write(data)
                bufw.Close()
        }()
-       errChan := make(chan error)
+       errChan := make(chan error, 1)
        go func() {
                var body io.Reader = bufr
-               if len(block) == 0 {
+               if len(data) == 0 {
                        // We must send a "Content-Length: 0" header,
                        // but the http client interprets
                        // ContentLength==0 as "unknown" unless it can
@@ -370,18 +328,15 @@ func (v *AzureBlobVolume) Put(ctx context.Context, loc string, block []byte) err
                        body = http.NoBody
                        bufr.Close()
                }
-               errChan <- v.container.CreateBlockBlobFromReader(loc, len(block), body, nil)
+               errChan <- v.container.CreateBlockBlobFromReader(hash, len(data), body, nil)
        }()
        select {
        case <-ctx.Done():
                ctxlog.FromContext(ctx).Debugf("%s: taking CreateBlockBlobFromReader's input away: %s", v, ctx.Err())
-               // Our pipe might be stuck in Write(), waiting for
-               // io.Copy() to read. If so, un-stick it. This means
-               // CreateBlockBlobFromReader will get corrupt data,
-               // but that's OK: the size won't match, so the write
-               // will fail.
-               go io.Copy(ioutil.Discard, bufr)
-               // CloseWithError() will return once pending I/O is done.
+               // bufw.CloseWithError() interrupts bufw.Write() if
+               // necessary, ensuring CreateBlockBlobFromReader can't
+               // read any more of our data slice via bufr after we
+               // return.
                bufw.CloseWithError(ctx.Err())
                ctxlog.FromContext(ctx).Debugf("%s: abandoning CreateBlockBlobFromReader goroutine", v)
                return ctx.Err()
@@ -390,12 +345,9 @@ func (v *AzureBlobVolume) Put(ctx context.Context, loc string, block []byte) err
        }
 }
 
-// Touch updates the last-modified property of a block blob.
-func (v *AzureBlobVolume) Touch(loc string) error {
-       if v.volume.ReadOnly {
-               return MethodDisabledError
-       }
-       trashed, metadata, err := v.checkTrashed(loc)
+// BlockTouch updates the last-modified property of a block blob.
+func (v *azureBlobVolume) BlockTouch(hash string) error {
+       trashed, metadata, err := v.checkTrashed(hash)
        if err != nil {
                return err
        }
@@ -404,12 +356,12 @@ func (v *AzureBlobVolume) Touch(loc string) error {
        }
 
        metadata["touch"] = fmt.Sprintf("%d", time.Now().Unix())
-       return v.container.SetBlobMetadata(loc, metadata, nil)
+       return v.container.SetBlobMetadata(hash, metadata, nil)
 }
 
 // Mtime returns the last-modified property of a block blob.
-func (v *AzureBlobVolume) Mtime(loc string) (time.Time, error) {
-       trashed, _, err := v.checkTrashed(loc)
+func (v *azureBlobVolume) Mtime(hash string) (time.Time, error) {
+       trashed, _, err := v.checkTrashed(hash)
        if err != nil {
                return time.Time{}, err
        }
@@ -417,21 +369,25 @@ func (v *AzureBlobVolume) Mtime(loc string) (time.Time, error) {
                return time.Time{}, os.ErrNotExist
        }
 
-       props, err := v.container.GetBlobProperties(loc)
+       props, err := v.container.GetBlobProperties(hash)
        if err != nil {
                return time.Time{}, err
        }
        return time.Time(props.LastModified), nil
 }
 
-// IndexTo writes a list of Keep blocks that are stored in the
+// Index writes a list of Keep blocks that are stored in the
 // container.
-func (v *AzureBlobVolume) IndexTo(prefix string, writer io.Writer) error {
+func (v *azureBlobVolume) Index(ctx context.Context, prefix string, writer io.Writer) error {
        params := storage.ListBlobsParameters{
                Prefix:  prefix,
                Include: &storage.IncludeBlobDataset{Metadata: true},
        }
        for page := 1; ; page++ {
+               err := ctx.Err()
+               if err != nil {
+                       return err
+               }
                resp, err := v.listBlobs(page, params)
                if err != nil {
                        return err
@@ -463,11 +419,11 @@ func (v *AzureBlobVolume) IndexTo(prefix string, writer io.Writer) error {
 }
 
 // call v.container.ListBlobs, retrying if needed.
-func (v *AzureBlobVolume) listBlobs(page int, params storage.ListBlobsParameters) (resp storage.BlobListResponse, err error) {
+func (v *azureBlobVolume) listBlobs(page int, params storage.ListBlobsParameters) (resp storage.BlobListResponse, err error) {
        for i := 0; i < v.ListBlobsMaxAttempts; i++ {
                resp, err = v.container.ListBlobs(params)
                err = v.translateError(err)
-               if err == VolumeBusyError {
+               if err == errVolumeUnavailable {
                        v.logger.Printf("ListBlobs: will retry page %d in %s after error: %s", page, v.ListBlobsRetryDelay, err)
                        time.Sleep(time.Duration(v.ListBlobsRetryDelay))
                        continue
@@ -479,10 +435,7 @@ func (v *AzureBlobVolume) listBlobs(page int, params storage.ListBlobsParameters
 }
 
 // Trash a Keep block.
-func (v *AzureBlobVolume) Trash(loc string) error {
-       if v.volume.ReadOnly && !v.volume.AllowTrashWhenReadOnly {
-               return MethodDisabledError
-       }
+func (v *azureBlobVolume) BlockTrash(loc string) error {
        // Ideally we would use If-Unmodified-Since, but that
        // particular condition seems to be ignored by Azure. Instead,
        // we get the Etag before checking Mtime, and use If-Match to
@@ -513,11 +466,11 @@ func (v *AzureBlobVolume) Trash(loc string) error {
        })
 }
 
-// Untrash a Keep block.
-// Delete the expires_at metadata attribute
-func (v *AzureBlobVolume) Untrash(loc string) error {
+// BlockUntrash deletes the expires_at metadata attribute for the
+// specified block blob.
+func (v *azureBlobVolume) BlockUntrash(hash string) error {
        // if expires_at does not exist, return NotFoundError
-       metadata, err := v.container.GetBlobMetadata(loc)
+       metadata, err := v.container.GetBlobMetadata(hash)
        if err != nil {
                return v.translateError(err)
        }
@@ -527,33 +480,19 @@ func (v *AzureBlobVolume) Untrash(loc string) error {
 
        // reset expires_at metadata attribute
        metadata["expires_at"] = ""
-       err = v.container.SetBlobMetadata(loc, metadata, nil)
+       err = v.container.SetBlobMetadata(hash, metadata, nil)
        return v.translateError(err)
 }
 
-// Status returns a VolumeStatus struct with placeholder data.
-func (v *AzureBlobVolume) Status() *VolumeStatus {
-       return &VolumeStatus{
-               DeviceNum: 1,
-               BytesFree: BlockSize * 1000,
-               BytesUsed: 1,
-       }
-}
-
-// String returns a volume label, including the container name.
-func (v *AzureBlobVolume) String() string {
-       return fmt.Sprintf("azure-storage-container:%+q", v.ContainerName)
-}
-
 // If possible, translate an Azure SDK error to a recognizable error
 // like os.ErrNotExist.
-func (v *AzureBlobVolume) translateError(err error) error {
+func (v *azureBlobVolume) translateError(err error) error {
        switch {
        case err == nil:
                return err
        case strings.Contains(err.Error(), "StatusCode=503"):
                // "storage: service returned error: StatusCode=503, ErrorCode=ServerBusy, ErrorMessage=The server is busy" (See #14804)
-               return VolumeBusyError
+               return errVolumeUnavailable
        case strings.Contains(err.Error(), "Not Found"):
                // "storage: service returned without a response body (404 Not Found)"
                return os.ErrNotExist
@@ -567,13 +506,13 @@ func (v *AzureBlobVolume) translateError(err error) error {
 
 var keepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`)
 
-func (v *AzureBlobVolume) isKeepBlock(s string) bool {
+func (v *azureBlobVolume) isKeepBlock(s string) bool {
        return keepBlockRegexp.MatchString(s)
 }
 
 // EmptyTrash looks for trashed blocks that exceeded BlobTrashLifetime
 // and deletes them from the volume.
-func (v *AzureBlobVolume) EmptyTrash() {
+func (v *azureBlobVolume) EmptyTrash() {
        var bytesDeleted, bytesInTrash int64
        var blocksDeleted, blocksInTrash int64
 
@@ -637,11 +576,11 @@ func (v *AzureBlobVolume) EmptyTrash() {
        close(todo)
        wg.Wait()
 
-       v.logger.Printf("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
+       v.logger.Printf("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.DeviceID(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
 }
 
 // InternalStats returns bucket I/O and API call counters.
-func (v *AzureBlobVolume) InternalStats() interface{} {
+func (v *azureBlobVolume) InternalStats() interface{} {
        return &v.container.stats
 }
 
@@ -708,7 +647,7 @@ func (c *azureContainer) GetBlob(bname string) (io.ReadCloser, error) {
        b := c.ctr.GetBlobReference(bname)
        rdr, err := b.Get(nil)
        c.stats.TickErr(err)
-       return NewCountingReader(rdr, c.stats.TickInBytes), err
+       return newCountingReader(rdr, c.stats.TickInBytes), err
 }
 
 func (c *azureContainer) GetBlobRange(bname string, start, end int, opts *storage.GetBlobOptions) (io.ReadCloser, error) {
@@ -723,7 +662,7 @@ func (c *azureContainer) GetBlobRange(bname string, start, end int, opts *storag
                GetBlobOptions: opts,
        })
        c.stats.TickErr(err)
-       return NewCountingReader(rdr, c.stats.TickInBytes), err
+       return newCountingReader(rdr, c.stats.TickInBytes), err
 }
 
 // If we give it an io.Reader that doesn't also have a Len() int
@@ -744,7 +683,7 @@ func (c *azureContainer) CreateBlockBlobFromReader(bname string, size int, rdr i
        c.stats.Tick(&c.stats.Ops, &c.stats.CreateOps)
        if size != 0 {
                rdr = &readerWithAzureLen{
-                       Reader: NewCountingReader(rdr, c.stats.TickOutBytes),
+                       Reader: newCountingReader(rdr, c.stats.TickOutBytes),
                        len:    size,
                }
        }
index 48d58ee9bfc454e5b2972e6d36867a578c29e6bb..b8acd980a1c6a57c8537ded3f2d4a90bb51ef331 100644 (file)
@@ -87,7 +87,7 @@ func (h *azStubHandler) TouchWithDate(container, hash string, t time.Time) {
        blob.Mtime = t
 }
 
-func (h *azStubHandler) PutRaw(container, hash string, data []byte) {
+func (h *azStubHandler) BlockWriteRaw(container, hash string, data []byte) {
        h.Lock()
        defer h.Unlock()
        h.blobs[container+"|"+hash] = &azBlob{
@@ -221,7 +221,7 @@ func (h *azStubHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
                rw.WriteHeader(http.StatusCreated)
        case r.Method == "PUT" && r.Form.Get("comp") == "metadata":
                // "Set Metadata Headers" API. We don't bother
-               // stubbing "Get Metadata Headers": AzureBlobVolume
+               // stubbing "Get Metadata Headers": azureBlobVolume
                // sets metadata headers only as a way to bump Etag
                // and Last-Modified.
                if !blobExists {
@@ -365,14 +365,14 @@ func (d *azStubDialer) Dial(network, address string) (net.Conn, error) {
        return d.Dialer.Dial(network, address)
 }
 
-type TestableAzureBlobVolume struct {
-       *AzureBlobVolume
+type testableAzureBlobVolume struct {
+       *azureBlobVolume
        azHandler *azStubHandler
        azStub    *httptest.Server
        t         TB
 }
 
-func (s *StubbedAzureBlobSuite) newTestableAzureBlobVolume(t TB, cluster *arvados.Cluster, volume arvados.Volume, metrics *volumeMetricsVecs) *TestableAzureBlobVolume {
+func (s *stubbedAzureBlobSuite) newTestableAzureBlobVolume(t TB, params newVolumeParams) *testableAzureBlobVolume {
        azHandler := newAzStubHandler(t.(*check.C))
        azStub := httptest.NewServer(azHandler)
 
@@ -396,7 +396,7 @@ func (s *StubbedAzureBlobSuite) newTestableAzureBlobVolume(t TB, cluster *arvado
        azClient.Sender = &singleSender{}
 
        bs := azClient.GetBlobService()
-       v := &AzureBlobVolume{
+       v := &azureBlobVolume{
                ContainerName:        container,
                WriteRaceInterval:    arvados.Duration(time.Millisecond),
                WriteRacePollTime:    arvados.Duration(time.Nanosecond),
@@ -404,65 +404,72 @@ func (s *StubbedAzureBlobSuite) newTestableAzureBlobVolume(t TB, cluster *arvado
                ListBlobsRetryDelay:  arvados.Duration(time.Millisecond),
                azClient:             azClient,
                container:            &azureContainer{ctr: bs.GetContainerReference(container)},
-               cluster:              cluster,
-               volume:               volume,
+               cluster:              params.Cluster,
+               volume:               params.ConfigVolume,
                logger:               ctxlog.TestLogger(t),
-               metrics:              metrics,
+               metrics:              params.MetricsVecs,
+               bufferPool:           params.BufferPool,
        }
        if err = v.check(); err != nil {
                t.Fatal(err)
        }
 
-       return &TestableAzureBlobVolume{
-               AzureBlobVolume: v,
+       return &testableAzureBlobVolume{
+               azureBlobVolume: v,
                azHandler:       azHandler,
                azStub:          azStub,
                t:               t,
        }
 }
 
-var _ = check.Suite(&StubbedAzureBlobSuite{})
+var _ = check.Suite(&stubbedAzureBlobSuite{})
 
-type StubbedAzureBlobSuite struct {
+type stubbedAzureBlobSuite struct {
        origHTTPTransport http.RoundTripper
 }
 
-func (s *StubbedAzureBlobSuite) SetUpTest(c *check.C) {
+func (s *stubbedAzureBlobSuite) SetUpSuite(c *check.C) {
        s.origHTTPTransport = http.DefaultTransport
        http.DefaultTransport = &http.Transport{
                Dial: (&azStubDialer{logger: ctxlog.TestLogger(c)}).Dial,
        }
 }
 
-func (s *StubbedAzureBlobSuite) TearDownTest(c *check.C) {
+func (s *stubbedAzureBlobSuite) TearDownSuite(c *check.C) {
        http.DefaultTransport = s.origHTTPTransport
 }
 
-func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeWithGeneric(c *check.C) {
-       DoGenericVolumeTests(c, false, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
-               return s.newTestableAzureBlobVolume(t, cluster, volume, metrics)
+func (s *stubbedAzureBlobSuite) TestAzureBlobVolumeWithGeneric(c *check.C) {
+       DoGenericVolumeTests(c, false, func(t TB, params newVolumeParams) TestableVolume {
+               return s.newTestableAzureBlobVolume(t, params)
        })
 }
 
-func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeConcurrentRanges(c *check.C) {
+func (s *stubbedAzureBlobSuite) TestAzureBlobVolumeConcurrentRanges(c *check.C) {
        // Test (BlockSize mod azureMaxGetBytes)==0 and !=0 cases
-       for _, b := range []int{2 << 22, 2<<22 - 1} {
-               DoGenericVolumeTests(c, false, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
-                       v := s.newTestableAzureBlobVolume(t, cluster, volume, metrics)
+       for _, b := range []int{2 << 22, 2<<22 - 1} {
+               c.Logf("=== MaxGetBytes=%d", b)
+               DoGenericVolumeTests(c, false, func(t TB, params newVolumeParams) TestableVolume {
+                       v := s.newTestableAzureBlobVolume(t, params)
                        v.MaxGetBytes = b
                        return v
                })
        }
 }
 
-func (s *StubbedAzureBlobSuite) TestReadonlyAzureBlobVolumeWithGeneric(c *check.C) {
-       DoGenericVolumeTests(c, false, func(c TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
-               return s.newTestableAzureBlobVolume(c, cluster, volume, metrics)
+func (s *stubbedAzureBlobSuite) TestReadonlyAzureBlobVolumeWithGeneric(c *check.C) {
+       DoGenericVolumeTests(c, false, func(c TB, params newVolumeParams) TestableVolume {
+               return s.newTestableAzureBlobVolume(c, params)
        })
 }
 
-func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeRangeFenceposts(c *check.C) {
-       v := s.newTestableAzureBlobVolume(c, testCluster(c), arvados.Volume{Replication: 3}, newVolumeMetricsVecs(prometheus.NewRegistry()))
+func (s *stubbedAzureBlobSuite) TestAzureBlobVolumeRangeFenceposts(c *check.C) {
+       v := s.newTestableAzureBlobVolume(c, newVolumeParams{
+               Cluster:      testCluster(c),
+               ConfigVolume: arvados.Volume{Replication: 3},
+               MetricsVecs:  newVolumeMetricsVecs(prometheus.NewRegistry()),
+               BufferPool:   newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
+       })
        defer v.Teardown()
 
        for _, size := range []int{
@@ -478,27 +485,30 @@ func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeRangeFenceposts(c *check.C) {
                        data[i] = byte((i + 7) & 0xff)
                }
                hash := fmt.Sprintf("%x", md5.Sum(data))
-               err := v.Put(context.Background(), hash, data)
+               err := v.BlockWrite(context.Background(), hash, data)
                if err != nil {
                        c.Error(err)
                }
-               gotData := make([]byte, len(data))
-               gotLen, err := v.Get(context.Background(), hash, gotData)
+               gotData := &brbuffer{}
+               err = v.BlockRead(context.Background(), hash, gotData)
                if err != nil {
                        c.Error(err)
                }
-               gotHash := fmt.Sprintf("%x", md5.Sum(gotData))
-               if gotLen != size {
-                       c.Errorf("length mismatch: got %d != %d", gotLen, size)
-               }
+               gotHash := fmt.Sprintf("%x", md5.Sum(gotData.Bytes()))
+               c.Check(gotData.Len(), check.Equals, size)
                if gotHash != hash {
                        c.Errorf("hash mismatch: got %s != %s", gotHash, hash)
                }
        }
 }
 
-func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeCreateBlobRace(c *check.C) {
-       v := s.newTestableAzureBlobVolume(c, testCluster(c), arvados.Volume{Replication: 3}, newVolumeMetricsVecs(prometheus.NewRegistry()))
+func (s *stubbedAzureBlobSuite) TestAzureBlobVolumeCreateBlobRace(c *check.C) {
+       v := s.newTestableAzureBlobVolume(c, newVolumeParams{
+               Cluster:      testCluster(c),
+               ConfigVolume: arvados.Volume{Replication: 3},
+               MetricsVecs:  newVolumeMetricsVecs(prometheus.NewRegistry()),
+               BufferPool:   newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
+       })
        defer v.Teardown()
 
        var wg sync.WaitGroup
@@ -508,42 +518,46 @@ func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeCreateBlobRace(c *check.C) {
        wg.Add(1)
        go func() {
                defer wg.Done()
-               err := v.Put(context.Background(), TestHash, TestBlock)
+               err := v.BlockWrite(context.Background(), TestHash, TestBlock)
                if err != nil {
                        c.Error(err)
                }
        }()
-       continuePut := make(chan struct{})
-       // Wait for the stub's Put to create the empty blob
-       v.azHandler.race <- continuePut
+       continueBlockWrite := make(chan struct{})
+       // Wait for the stub's BlockWrite to create the empty blob
+       v.azHandler.race <- continueBlockWrite
        wg.Add(1)
        go func() {
                defer wg.Done()
-               buf := make([]byte, len(TestBlock))
-               _, err := v.Get(context.Background(), TestHash, buf)
+               err := v.BlockRead(context.Background(), TestHash, brdiscard)
                if err != nil {
                        c.Error(err)
                }
        }()
-       // Wait for the stub's Get to get the empty blob
+       // Wait for the stub's BlockRead to get the empty blob
        close(v.azHandler.race)
-       // Allow stub's Put to continue, so the real data is ready
-       // when the volume's Get retries
-       <-continuePut
-       // Wait for Get() and Put() to finish
+       // Allow stub's BlockWrite to continue, so the real data is ready
+       // when the volume's BlockRead retries
+       <-continueBlockWrite
+       // Wait for BlockRead() and BlockWrite() to finish
        wg.Wait()
 }
 
-func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeCreateBlobRaceDeadline(c *check.C) {
-       v := s.newTestableAzureBlobVolume(c, testCluster(c), arvados.Volume{Replication: 3}, newVolumeMetricsVecs(prometheus.NewRegistry()))
-       v.AzureBlobVolume.WriteRaceInterval.Set("2s")
-       v.AzureBlobVolume.WriteRacePollTime.Set("5ms")
+func (s *stubbedAzureBlobSuite) TestAzureBlobVolumeCreateBlobRaceDeadline(c *check.C) {
+       v := s.newTestableAzureBlobVolume(c, newVolumeParams{
+               Cluster:      testCluster(c),
+               ConfigVolume: arvados.Volume{Replication: 3},
+               MetricsVecs:  newVolumeMetricsVecs(prometheus.NewRegistry()),
+               BufferPool:   newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
+       })
+       v.azureBlobVolume.WriteRaceInterval.Set("2s")
+       v.azureBlobVolume.WriteRacePollTime.Set("5ms")
        defer v.Teardown()
 
-       v.PutRaw(TestHash, nil)
+       v.BlockWriteRaw(TestHash, nil)
 
        buf := new(bytes.Buffer)
-       v.IndexTo("", buf)
+       v.Index(context.Background(), "", buf)
        if buf.Len() != 0 {
                c.Errorf("Index %+q should be empty", buf.Bytes())
        }
@@ -553,52 +567,47 @@ func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeCreateBlobRaceDeadline(c *che
        allDone := make(chan struct{})
        go func() {
                defer close(allDone)
-               buf := make([]byte, BlockSize)
-               n, err := v.Get(context.Background(), TestHash, buf)
+               buf := &brbuffer{}
+               err := v.BlockRead(context.Background(), TestHash, buf)
                if err != nil {
                        c.Error(err)
                        return
                }
-               if n != 0 {
-                       c.Errorf("Got %+q, expected empty buf", buf[:n])
-               }
+               c.Check(buf.String(), check.Equals, "")
        }()
        select {
        case <-allDone:
        case <-time.After(time.Second):
-               c.Error("Get should have stopped waiting for race when block was 2s old")
+               c.Error("BlockRead should have stopped waiting for race when block was 2s old")
        }
 
        buf.Reset()
-       v.IndexTo("", buf)
+       v.Index(context.Background(), "", buf)
        if !bytes.HasPrefix(buf.Bytes(), []byte(TestHash+"+0")) {
                c.Errorf("Index %+q should have %+q", buf.Bytes(), TestHash+"+0")
        }
 }
 
-func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeContextCancelGet(c *check.C) {
-       s.testAzureBlobVolumeContextCancel(c, func(ctx context.Context, v *TestableAzureBlobVolume) error {
-               v.PutRaw(TestHash, TestBlock)
-               _, err := v.Get(ctx, TestHash, make([]byte, BlockSize))
-               return err
+func (s *stubbedAzureBlobSuite) TestAzureBlobVolumeContextCancelBlockRead(c *check.C) {
+       s.testAzureBlobVolumeContextCancel(c, func(ctx context.Context, v *testableAzureBlobVolume) error {
+               v.BlockWriteRaw(TestHash, TestBlock)
+               return v.BlockRead(ctx, TestHash, brdiscard)
        })
 }
 
-func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeContextCancelPut(c *check.C) {
-       s.testAzureBlobVolumeContextCancel(c, func(ctx context.Context, v *TestableAzureBlobVolume) error {
-               return v.Put(ctx, TestHash, make([]byte, BlockSize))
+func (s *stubbedAzureBlobSuite) TestAzureBlobVolumeContextCancelBlockWrite(c *check.C) {
+       s.testAzureBlobVolumeContextCancel(c, func(ctx context.Context, v *testableAzureBlobVolume) error {
+               return v.BlockWrite(ctx, TestHash, make([]byte, BlockSize))
        })
 }
 
-func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeContextCancelCompare(c *check.C) {
-       s.testAzureBlobVolumeContextCancel(c, func(ctx context.Context, v *TestableAzureBlobVolume) error {
-               v.PutRaw(TestHash, TestBlock)
-               return v.Compare(ctx, TestHash, TestBlock2)
+func (s *stubbedAzureBlobSuite) testAzureBlobVolumeContextCancel(c *check.C, testFunc func(context.Context, *testableAzureBlobVolume) error) {
+       v := s.newTestableAzureBlobVolume(c, newVolumeParams{
+               Cluster:      testCluster(c),
+               ConfigVolume: arvados.Volume{Replication: 3},
+               MetricsVecs:  newVolumeMetricsVecs(prometheus.NewRegistry()),
+               BufferPool:   newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
        })
-}
-
-func (s *StubbedAzureBlobSuite) testAzureBlobVolumeContextCancel(c *check.C, testFunc func(context.Context, *TestableAzureBlobVolume) error) {
-       v := s.newTestableAzureBlobVolume(c, testCluster(c), arvados.Volume{Replication: 3}, newVolumeMetricsVecs(prometheus.NewRegistry()))
        defer v.Teardown()
        v.azHandler.race = make(chan chan struct{})
 
@@ -633,8 +642,13 @@ func (s *StubbedAzureBlobSuite) testAzureBlobVolumeContextCancel(c *check.C, tes
        }()
 }
 
-func (s *StubbedAzureBlobSuite) TestStats(c *check.C) {
-       volume := s.newTestableAzureBlobVolume(c, testCluster(c), arvados.Volume{Replication: 3}, newVolumeMetricsVecs(prometheus.NewRegistry()))
+func (s *stubbedAzureBlobSuite) TestStats(c *check.C) {
+       volume := s.newTestableAzureBlobVolume(c, newVolumeParams{
+               Cluster:      testCluster(c),
+               ConfigVolume: arvados.Volume{Replication: 3},
+               MetricsVecs:  newVolumeMetricsVecs(prometheus.NewRegistry()),
+               BufferPool:   newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
+       })
        defer volume.Teardown()
 
        stats := func() string {
@@ -647,38 +661,38 @@ func (s *StubbedAzureBlobSuite) TestStats(c *check.C) {
        c.Check(stats(), check.Matches, `.*"Errors":0,.*`)
 
        loc := "acbd18db4cc2f85cedef654fccc4a4d8"
-       _, err := volume.Get(context.Background(), loc, make([]byte, 3))
+       err := volume.BlockRead(context.Background(), loc, brdiscard)
        c.Check(err, check.NotNil)
        c.Check(stats(), check.Matches, `.*"Ops":[^0],.*`)
        c.Check(stats(), check.Matches, `.*"Errors":[^0],.*`)
        c.Check(stats(), check.Matches, `.*"storage\.AzureStorageServiceError 404 \(404 Not Found\)":[^0].*`)
        c.Check(stats(), check.Matches, `.*"InBytes":0,.*`)
 
-       err = volume.Put(context.Background(), loc, []byte("foo"))
+       err = volume.BlockWrite(context.Background(), loc, []byte("foo"))
        c.Check(err, check.IsNil)
        c.Check(stats(), check.Matches, `.*"OutBytes":3,.*`)
        c.Check(stats(), check.Matches, `.*"CreateOps":1,.*`)
 
-       _, err = volume.Get(context.Background(), loc, make([]byte, 3))
+       err = volume.BlockRead(context.Background(), loc, brdiscard)
        c.Check(err, check.IsNil)
-       _, err = volume.Get(context.Background(), loc, make([]byte, 3))
+       err = volume.BlockRead(context.Background(), loc, brdiscard)
        c.Check(err, check.IsNil)
        c.Check(stats(), check.Matches, `.*"InBytes":6,.*`)
 }
 
-func (v *TestableAzureBlobVolume) PutRaw(locator string, data []byte) {
-       v.azHandler.PutRaw(v.ContainerName, locator, data)
+func (v *testableAzureBlobVolume) BlockWriteRaw(locator string, data []byte) {
+       v.azHandler.BlockWriteRaw(v.ContainerName, locator, data)
 }
 
-func (v *TestableAzureBlobVolume) TouchWithDate(locator string, lastPut time.Time) {
-       v.azHandler.TouchWithDate(v.ContainerName, locator, lastPut)
+func (v *testableAzureBlobVolume) TouchWithDate(locator string, lastBlockWrite time.Time) {
+       v.azHandler.TouchWithDate(v.ContainerName, locator, lastBlockWrite)
 }
 
-func (v *TestableAzureBlobVolume) Teardown() {
+func (v *testableAzureBlobVolume) Teardown() {
        v.azStub.Close()
 }
 
-func (v *TestableAzureBlobVolume) ReadWriteOperationLabelValues() (r, w string) {
+func (v *testableAzureBlobVolume) ReadWriteOperationLabelValues() (r, w string) {
        return "get", "create"
 }
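
The race tests above work by gating the stub's write on a channel: the volume's BlockRead is forced onto its retry path, and only after the test signals continueBlockWrite does the stub finish writing, so the retry can see real data. A minimal self-contained sketch of that gating pattern follows; gatedStore and its methods are illustrative stand-ins for the test's stub, not Arvados code.

    package main

    import (
        "fmt"
        "sync"
    )

    type gatedStore struct {
        mu      sync.Mutex
        data    map[string][]byte
        release chan struct{} // the test closes this to let writes finish
    }

    func (s *gatedStore) Write(key string, b []byte) {
        <-s.release // hold the write until the test releases it
        s.mu.Lock()
        defer s.mu.Unlock()
        s.data[key] = append([]byte(nil), b...)
    }

    func (s *gatedStore) Read(key string) ([]byte, bool) {
        s.mu.Lock()
        defer s.mu.Unlock()
        b, ok := s.data[key]
        return b, ok
    }

    func main() {
        s := &gatedStore{data: map[string][]byte{}, release: make(chan struct{})}
        var wg sync.WaitGroup
        wg.Add(1)
        go func() {
            defer wg.Done()
            s.Write("key", []byte("value"))
        }()
        if _, ok := s.Read("key"); !ok {
            fmt.Println("first read: not yet visible (write still gated)")
        }
        close(s.release) // let the write complete, like sending on continueBlockWrite
        wg.Wait()
        if b, ok := s.Read("key"); ok {
            fmt.Println("after release:", string(b))
        }
    }
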
 
index b4cc5d38e1670034212816bd96b95cdc838a2cfb..811715b191c7384cfe904744221cbffcd3ac1b46 100644 (file)
@@ -5,13 +5,17 @@
 package keepstore
 
 import (
+       "context"
        "sync"
        "sync/atomic"
        "time"
 
+       "github.com/prometheus/client_golang/prometheus"
        "github.com/sirupsen/logrus"
 )
 
+var bufferPoolBlockSize = BlockSize // modified by tests
+
 type bufferPool struct {
        log logrus.FieldLogger
        // limiter has a "true" placeholder for each in-use buffer.
@@ -22,17 +26,67 @@ type bufferPool struct {
        sync.Pool
 }
 
-func newBufferPool(log logrus.FieldLogger, count int, bufSize int) *bufferPool {
+func newBufferPool(log logrus.FieldLogger, count int, reg *prometheus.Registry) *bufferPool {
        p := bufferPool{log: log}
        p.Pool.New = func() interface{} {
-               atomic.AddUint64(&p.allocated, uint64(bufSize))
-               return make([]byte, bufSize)
+               atomic.AddUint64(&p.allocated, uint64(bufferPoolBlockSize))
+               return make([]byte, bufferPoolBlockSize)
        }
        p.limiter = make(chan bool, count)
+       if reg != nil {
+               reg.MustRegister(prometheus.NewGaugeFunc(
+                       prometheus.GaugeOpts{
+                               Namespace: "arvados",
+                               Subsystem: "keepstore",
+                               Name:      "bufferpool_allocated_bytes",
+                               Help:      "Number of bytes allocated to buffers",
+                       },
+                       func() float64 { return float64(p.Alloc()) },
+               ))
+               reg.MustRegister(prometheus.NewGaugeFunc(
+                       prometheus.GaugeOpts{
+                               Namespace: "arvados",
+                               Subsystem: "keepstore",
+                               Name:      "bufferpool_max_buffers",
+                               Help:      "Maximum number of buffers allowed",
+                       },
+                       func() float64 { return float64(p.Cap()) },
+               ))
+               reg.MustRegister(prometheus.NewGaugeFunc(
+                       prometheus.GaugeOpts{
+                               Namespace: "arvados",
+                               Subsystem: "keepstore",
+                               Name:      "bufferpool_inuse_buffers",
+                               Help:      "Number of buffers in use",
+                       },
+                       func() float64 { return float64(p.Len()) },
+               ))
+       }
        return &p
 }
 
-func (p *bufferPool) Get(size int) []byte {
+// GetContext gets a buffer from the pool -- but gives up and returns
+// ctx.Err() if ctx ends before a buffer is available.
+func (p *bufferPool) GetContext(ctx context.Context) ([]byte, error) {
+       bufReady := make(chan []byte)
+       go func() {
+               bufReady <- p.Get()
+       }()
+       select {
+       case buf := <-bufReady:
+               return buf, nil
+       case <-ctx.Done():
+               go func() {
+                       // Even if ctx was canceled first, we
+                       // need to keep waiting for our buf so we can
+                       // return it to the pool.
+                       p.Put(<-bufReady)
+               }()
+               return nil, ctx.Err()
+       }
+}
+
+func (p *bufferPool) Get() []byte {
        select {
        case p.limiter <- true:
        default:
@@ -42,14 +96,14 @@ func (p *bufferPool) Get(size int) []byte {
                p.log.Printf("waited %v for a buffer", time.Since(t0))
        }
        buf := p.Pool.Get().([]byte)
-       if cap(buf) < size {
-               p.log.Fatalf("bufferPool Get(size=%d) but max=%d", size, cap(buf))
+       if len(buf) < bufferPoolBlockSize {
+               p.log.Fatalf("bufferPoolBlockSize=%d but len(buf)=%d", bufferPoolBlockSize, len(buf))
        }
-       return buf[:size]
+       return buf
 }
 
 func (p *bufferPool) Put(buf []byte) {
-       p.Pool.Put(buf)
+       p.Pool.Put(buf[:cap(buf)])
        <-p.limiter
 }
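
GetContext above makes the pool's blocking Get cancelable: the acquire runs in a goroutine, the caller selects on the context, and if the context wins, a second goroutine still drains the eventually-acquired buffer back into the pool so no capacity leaks. A minimal self-contained sketch of the same pattern, assuming a simple channel-backed pool rather than keepstore's sync.Pool-plus-limiter implementation:

    package main

    import (
        "context"
        "errors"
        "fmt"
        "time"
    )

    type pool struct {
        slots chan []byte
    }

    func newPool(n, size int) *pool {
        p := &pool{slots: make(chan []byte, n)}
        for i := 0; i < n; i++ {
            p.slots <- make([]byte, size)
        }
        return p
    }

    func (p *pool) get() []byte  { return <-p.slots } // blocks until a buffer is free
    func (p *pool) put(b []byte) { p.slots <- b }

    // getContext gives up with ctx.Err() if ctx ends first, but keeps a
    // goroutine around to return the eventually-acquired buffer to the pool.
    func (p *pool) getContext(ctx context.Context) ([]byte, error) {
        ready := make(chan []byte, 1)
        go func() { ready <- p.get() }()
        select {
        case buf := <-ready:
            return buf, nil
        case <-ctx.Done():
            go func() { p.put(<-ready) }()
            return nil, ctx.Err()
        }
    }

    func main() {
        p := newPool(1, 8)
        busy, _ := p.getContext(context.Background()) // take the only buffer
        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
        defer cancel()
        if _, err := p.getContext(ctx); errors.Is(err, context.DeadlineExceeded) {
            fmt.Println("gave up waiting, as expected")
        }
        p.put(busy)
    }
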
 
index 13e1cb4f332ba180857aef747b3086e9251466ee..8ecc833228f5b07218a6b0ea8ba58f2f44616c7e 100644 (file)
@@ -5,55 +5,54 @@
 package keepstore
 
 import (
-       "context"
        "time"
 
        "git.arvados.org/arvados.git/sdk/go/ctxlog"
+       "github.com/prometheus/client_golang/prometheus"
        . "gopkg.in/check.v1"
 )
 
 var _ = Suite(&BufferPoolSuite{})
 
+var bufferPoolTestSize = 10
+
 type BufferPoolSuite struct{}
 
-// Initialize a default-sized buffer pool for the benefit of test
-// suites that don't run main().
-func init() {
-       bufs = newBufferPool(ctxlog.FromContext(context.Background()), 12, BlockSize)
+func (s *BufferPoolSuite) SetUpTest(c *C) {
+       bufferPoolBlockSize = bufferPoolTestSize
 }
 
-// Restore sane default after bufferpool's own tests
 func (s *BufferPoolSuite) TearDownTest(c *C) {
-       bufs = newBufferPool(ctxlog.FromContext(context.Background()), 12, BlockSize)
+       bufferPoolBlockSize = BlockSize
 }
 
 func (s *BufferPoolSuite) TestBufferPoolBufSize(c *C) {
-       bufs := newBufferPool(ctxlog.TestLogger(c), 2, 10)
-       b1 := bufs.Get(1)
-       bufs.Get(2)
+       bufs := newBufferPool(ctxlog.TestLogger(c), 2, prometheus.NewRegistry())
+       b1 := bufs.Get()
+       bufs.Get()
        bufs.Put(b1)
-       b3 := bufs.Get(3)
-       c.Check(len(b3), Equals, 3)
+       b3 := bufs.Get()
+       c.Check(len(b3), Equals, bufferPoolTestSize)
 }
 
 func (s *BufferPoolSuite) TestBufferPoolUnderLimit(c *C) {
-       bufs := newBufferPool(ctxlog.TestLogger(c), 3, 10)
-       b1 := bufs.Get(10)
-       bufs.Get(10)
+       bufs := newBufferPool(ctxlog.TestLogger(c), 3, prometheus.NewRegistry())
+       b1 := bufs.Get()
+       bufs.Get()
        testBufferPoolRace(c, bufs, b1, "Get")
 }
 
 func (s *BufferPoolSuite) TestBufferPoolAtLimit(c *C) {
-       bufs := newBufferPool(ctxlog.TestLogger(c), 2, 10)
-       b1 := bufs.Get(10)
-       bufs.Get(10)
+       bufs := newBufferPool(ctxlog.TestLogger(c), 2, prometheus.NewRegistry())
+       b1 := bufs.Get()
+       bufs.Get()
        testBufferPoolRace(c, bufs, b1, "Put")
 }
 
 func testBufferPoolRace(c *C, bufs *bufferPool, unused []byte, expectWin string) {
        race := make(chan string)
        go func() {
-               bufs.Get(10)
+               bufs.Get()
                time.Sleep(time.Millisecond)
                race <- "Get"
        }()
@@ -68,9 +67,9 @@ func testBufferPoolRace(c *C, bufs *bufferPool, unused []byte, expectWin string)
 }
 
 func (s *BufferPoolSuite) TestBufferPoolReuse(c *C) {
-       bufs := newBufferPool(ctxlog.TestLogger(c), 2, 10)
-       bufs.Get(10)
-       last := bufs.Get(10)
+       bufs := newBufferPool(ctxlog.TestLogger(c), 2, prometheus.NewRegistry())
+       bufs.Get()
+       last := bufs.Get()
        // The buffer pool is allowed to throw away unused buffers
        // (e.g., during sync.Pool's garbage collection hook, in
        // the current implementation). However, if unused buffers are
@@ -81,7 +80,7 @@ func (s *BufferPoolSuite) TestBufferPoolReuse(c *C) {
        reuses := 0
        for i := 0; i < allocs; i++ {
                bufs.Put(last)
-               next := bufs.Get(10)
+               next := bufs.Get()
                copy(last, []byte("last"))
                copy(next, []byte("next"))
                if last[0] == 'n' {
diff --git a/services/keepstore/collision.go b/services/keepstore/collision.go
deleted file mode 100644 (file)
index 16f2d09..0000000
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-import (
-       "bytes"
-       "context"
-       "crypto/md5"
-       "fmt"
-       "io"
-)
-
-// Compute the MD5 digest of a data block (consisting of buf1 + buf2 +
-// all bytes readable from rdr). If all data is read successfully,
-// return DiskHashError or CollisionError depending on whether it
-// matches expectMD5. If an error occurs while reading, return that
-// error.
-//
-// "content has expected MD5" is called a collision because this
-// function is used in cases where we have another block in hand with
-// the given MD5 but different content.
-func collisionOrCorrupt(expectMD5 string, buf1, buf2 []byte, rdr io.Reader) error {
-       outcome := make(chan error)
-       data := make(chan []byte, 1)
-       go func() {
-               h := md5.New()
-               for b := range data {
-                       h.Write(b)
-               }
-               if fmt.Sprintf("%x", h.Sum(nil)) == expectMD5 {
-                       outcome <- CollisionError
-               } else {
-                       outcome <- DiskHashError
-               }
-       }()
-       data <- buf1
-       if buf2 != nil {
-               data <- buf2
-       }
-       var err error
-       for rdr != nil && err == nil {
-               buf := make([]byte, 1<<18)
-               var n int
-               n, err = rdr.Read(buf)
-               data <- buf[:n]
-       }
-       close(data)
-       if rdr != nil && err != io.EOF {
-               <-outcome
-               return err
-       }
-       return <-outcome
-}
-
-func compareReaderWithBuf(ctx context.Context, rdr io.Reader, expect []byte, hash string) error {
-       bufLen := 1 << 20
-       if bufLen > len(expect) && len(expect) > 0 {
-               // No need for bufLen to be longer than
-               // expect, except that len(buf)==0 would
-               // prevent us from handling empty readers the
-               // same way as non-empty readers: reading 0
-               // bytes at a time never reaches EOF.
-               bufLen = len(expect)
-       }
-       buf := make([]byte, bufLen)
-       cmp := expect
-
-       // Loop invariants: all data read so far matched what
-       // we expected, and the first N bytes of cmp are
-       // expected to equal the next N bytes read from
-       // rdr.
-       for {
-               ready := make(chan bool)
-               var n int
-               var err error
-               go func() {
-                       n, err = rdr.Read(buf)
-                       close(ready)
-               }()
-               select {
-               case <-ready:
-               case <-ctx.Done():
-                       return ctx.Err()
-               }
-               if n > len(cmp) || bytes.Compare(cmp[:n], buf[:n]) != 0 {
-                       return collisionOrCorrupt(hash, expect[:len(expect)-len(cmp)], buf[:n], rdr)
-               }
-               cmp = cmp[n:]
-               if err == io.EOF {
-                       if len(cmp) != 0 {
-                               return collisionOrCorrupt(hash, expect[:len(expect)-len(cmp)], nil, nil)
-                       }
-                       return nil
-               } else if err != nil {
-                       return err
-               }
-       }
-}
diff --git a/services/keepstore/collision_test.go b/services/keepstore/collision_test.go
deleted file mode 100644 (file)
index aa8f0cb..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-import (
-       "bytes"
-       "testing/iotest"
-
-       check "gopkg.in/check.v1"
-)
-
-var _ = check.Suite(&CollisionSuite{})
-
-type CollisionSuite struct{}
-
-func (s *CollisionSuite) TestCollisionOrCorrupt(c *check.C) {
-       fooMD5 := "acbd18db4cc2f85cedef654fccc4a4d8"
-
-       c.Check(collisionOrCorrupt(fooMD5, []byte{'f'}, []byte{'o'}, bytes.NewBufferString("o")),
-               check.Equals, CollisionError)
-       c.Check(collisionOrCorrupt(fooMD5, []byte{'f'}, nil, bytes.NewBufferString("oo")),
-               check.Equals, CollisionError)
-       c.Check(collisionOrCorrupt(fooMD5, []byte{'f'}, []byte{'o', 'o'}, nil),
-               check.Equals, CollisionError)
-       c.Check(collisionOrCorrupt(fooMD5, nil, []byte{}, bytes.NewBufferString("foo")),
-               check.Equals, CollisionError)
-       c.Check(collisionOrCorrupt(fooMD5, []byte{'f', 'o', 'o'}, nil, bytes.NewBufferString("")),
-               check.Equals, CollisionError)
-       c.Check(collisionOrCorrupt(fooMD5, nil, nil, iotest.NewReadLogger("foo: ", iotest.DataErrReader(iotest.OneByteReader(bytes.NewBufferString("foo"))))),
-               check.Equals, CollisionError)
-
-       c.Check(collisionOrCorrupt(fooMD5, []byte{'f', 'o', 'o'}, nil, bytes.NewBufferString("bar")),
-               check.Equals, DiskHashError)
-       c.Check(collisionOrCorrupt(fooMD5, []byte{'f', 'o'}, nil, nil),
-               check.Equals, DiskHashError)
-       c.Check(collisionOrCorrupt(fooMD5, []byte{}, nil, bytes.NewBufferString("")),
-               check.Equals, DiskHashError)
-       c.Check(collisionOrCorrupt(fooMD5, []byte{'f', 'O'}, nil, bytes.NewBufferString("o")),
-               check.Equals, DiskHashError)
-       c.Check(collisionOrCorrupt(fooMD5, []byte{'f', 'O', 'o'}, nil, nil),
-               check.Equals, DiskHashError)
-       c.Check(collisionOrCorrupt(fooMD5, []byte{'f', 'o'}, []byte{'O'}, nil),
-               check.Equals, DiskHashError)
-       c.Check(collisionOrCorrupt(fooMD5, []byte{'f', 'o'}, nil, bytes.NewBufferString("O")),
-               check.Equals, DiskHashError)
-
-       c.Check(collisionOrCorrupt(fooMD5, []byte{}, nil, iotest.TimeoutReader(iotest.OneByteReader(bytes.NewBufferString("foo")))),
-               check.Equals, iotest.ErrTimeout)
-}
index 48c8256a3ca1e22e524b60fe89ebc7de26be54e7..d01b30c907fcee9da5215bc442e0d8462954ef3e 100644 (file)
@@ -8,20 +8,13 @@ import (
        "context"
        "errors"
        "flag"
-       "fmt"
        "io"
-       "math/rand"
-       "net/http"
-       "os"
-       "sync"
 
        "git.arvados.org/arvados.git/lib/cmd"
        "git.arvados.org/arvados.git/lib/config"
        "git.arvados.org/arvados.git/lib/service"
        "git.arvados.org/arvados.git/sdk/go/arvados"
-       "git.arvados.org/arvados.git/sdk/go/arvadosclient"
        "git.arvados.org/arvados.git/sdk/go/ctxlog"
-       "git.arvados.org/arvados.git/sdk/go/keepclient"
        "github.com/prometheus/client_golang/prometheus"
        "github.com/sirupsen/logrus"
 )
@@ -108,112 +101,17 @@ func convertKeepstoreFlagsToServiceFlags(prog string, args []string, lgr logrus.
        return loader.MungeLegacyConfigArgs(lgr, args, "-legacy-keepstore-config"), true, 0
 }
 
-type handler struct {
-       http.Handler
-       Cluster *arvados.Cluster
-       Logger  logrus.FieldLogger
-
-       pullq      *WorkQueue
-       trashq     *WorkQueue
-       volmgr     *RRVolumeManager
-       keepClient *keepclient.KeepClient
-
-       err       error
-       setupOnce sync.Once
-}
-
-func (h *handler) CheckHealth() error {
-       return h.err
-}
-
-func (h *handler) Done() <-chan struct{} {
-       return nil
-}
-
 func newHandlerOrErrorHandler(ctx context.Context, cluster *arvados.Cluster, token string, reg *prometheus.Registry) service.Handler {
-       var h handler
        serviceURL, ok := service.URLFromContext(ctx)
        if !ok {
                return service.ErrorHandler(ctx, cluster, errors.New("BUG: no URL from service.URLFromContext"))
        }
-       err := h.setup(ctx, cluster, token, reg, serviceURL)
+       ks, err := newKeepstore(ctx, cluster, token, reg, serviceURL)
        if err != nil {
                return service.ErrorHandler(ctx, cluster, err)
        }
-       return &h
-}
-
-func (h *handler) setup(ctx context.Context, cluster *arvados.Cluster, token string, reg *prometheus.Registry, serviceURL arvados.URL) error {
-       h.Cluster = cluster
-       h.Logger = ctxlog.FromContext(ctx)
-       if h.Cluster.API.MaxKeepBlobBuffers <= 0 {
-               return fmt.Errorf("API.MaxKeepBlobBuffers must be greater than zero")
-       }
-       bufs = newBufferPool(h.Logger, h.Cluster.API.MaxKeepBlobBuffers, BlockSize)
-
-       if h.Cluster.API.MaxConcurrentRequests > 0 && h.Cluster.API.MaxConcurrentRequests < h.Cluster.API.MaxKeepBlobBuffers {
-               h.Logger.Warnf("Possible configuration mistake: not useful to set API.MaxKeepBlobBuffers (%d) higher than API.MaxConcurrentRequests (%d)", h.Cluster.API.MaxKeepBlobBuffers, h.Cluster.API.MaxConcurrentRequests)
-       }
-
-       if h.Cluster.Collections.BlobSigningKey != "" {
-       } else if h.Cluster.Collections.BlobSigning {
-               return errors.New("cannot enable Collections.BlobSigning with no Collections.BlobSigningKey")
-       } else {
-               h.Logger.Warn("Running without a blob signing key. Block locators returned by this server will not be signed, and will be rejected by a server that enforces permissions. To fix this, configure Collections.BlobSigning and Collections.BlobSigningKey.")
-       }
-
-       if len(h.Cluster.Volumes) == 0 {
-               return errors.New("no volumes configured")
-       }
-
-       h.Logger.Printf("keepstore %s starting, pid %d", cmd.Version.String(), os.Getpid())
-
-       // Start a round-robin VolumeManager with the configured volumes.
-       vm, err := makeRRVolumeManager(h.Logger, h.Cluster, serviceURL, newVolumeMetricsVecs(reg))
-       if err != nil {
-               return err
-       }
-       if len(vm.readables) == 0 {
-               return fmt.Errorf("no volumes configured for %s", serviceURL)
-       }
-       h.volmgr = vm
-
-       // Initialize the pullq and workers
-       h.pullq = NewWorkQueue()
-       for i := 0; i < 1 || i < h.Cluster.Collections.BlobReplicateConcurrency; i++ {
-               go h.runPullWorker(h.pullq)
-       }
-
-       // Initialize the trashq and workers
-       h.trashq = NewWorkQueue()
-       for i := 0; i < h.Cluster.Collections.BlobTrashConcurrency; i++ {
-               go RunTrashWorker(h.volmgr, h.Logger, h.Cluster, h.trashq)
-       }
-
-       // Set up routes and metrics
-       h.Handler = MakeRESTRouter(ctx, cluster, reg, vm, h.pullq, h.trashq)
-
-       // Initialize keepclient for pull workers
-       c, err := arvados.NewClientFromConfig(cluster)
-       if err != nil {
-               return err
-       }
-       ac, err := arvadosclient.New(c)
-       if err != nil {
-               return err
-       }
-       h.keepClient = &keepclient.KeepClient{
-               Arvados:       ac,
-               Want_replicas: 1,
-               DiskCacheSize: keepclient.DiskCacheDisabled,
-       }
-       h.keepClient.Arvados.ApiToken = fmt.Sprintf("%x", rand.Int63())
-
-       if d := h.Cluster.Collections.BlobTrashCheckInterval.Duration(); d > 0 &&
-               h.Cluster.Collections.BlobTrash &&
-               h.Cluster.Collections.BlobDeleteConcurrency > 0 {
-               go emptyTrash(h.volmgr.mounts, d)
-       }
-
-       return nil
+       puller := newPuller(ctx, ks, reg)
+       trasher := newTrasher(ctx, ks, reg)
+       _ = newTrashEmptier(ctx, ks, reg)
+       return newRouter(ks, puller, trasher)
 }
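
newHandlerOrErrorHandler above keeps the service answering requests even when startup fails, by returning service.ErrorHandler instead of a working router. A hedged, self-contained sketch of that constructor-or-error-handler shape, using illustrative names (newServer, errorHandler, handlerOrError) rather than the Arvados service package's API:

    package main

    import (
        "errors"
        "fmt"
        "net/http"
    )

    type server struct{ greeting string }

    func (s *server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
        fmt.Fprintln(w, s.greeting)
    }

    // errorHandler answers every request with the startup error instead of
    // serving broken state.
    func errorHandler(err error) http.Handler {
        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            http.Error(w, err.Error(), http.StatusInternalServerError)
        })
    }

    func newServer(greeting string) (*server, error) {
        if greeting == "" {
            return nil, errors.New("greeting must not be empty")
        }
        return &server{greeting: greeting}, nil
    }

    // handlerOrError mirrors the shape used above: build the real handler,
    // or fall back to an error-reporting handler.
    func handlerOrError(greeting string) http.Handler {
        srv, err := newServer(greeting)
        if err != nil {
            return errorHandler(err)
        }
        return srv
    }

    func main() {
        mux := http.NewServeMux()
        mux.Handle("/", handlerOrError("hello"))
        // http.ListenAndServe(":8080", mux) // not started in this sketch
    }
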
index 700ca19dec958cbf978bc875e2b1f8e71f24bf49..51434a803e681d1fc2b5d9276dcb9ddbcddd6cca 100644 (file)
@@ -8,21 +8,21 @@ import (
        "io"
 )
 
-func NewCountingWriter(w io.Writer, f func(uint64)) io.WriteCloser {
+func newCountingWriter(w io.Writer, f func(uint64)) io.WriteCloser {
        return &countingReadWriter{
                writer:  w,
                counter: f,
        }
 }
 
-func NewCountingReader(r io.Reader, f func(uint64)) io.ReadCloser {
+func newCountingReader(r io.Reader, f func(uint64)) io.ReadCloser {
        return &countingReadWriter{
                reader:  r,
                counter: f,
        }
 }
 
-func NewCountingReaderAtSeeker(r readerAtSeeker, f func(uint64)) *countingReaderAtSeeker {
+func newCountingReaderAtSeeker(r readerAtSeeker, f func(uint64)) *countingReaderAtSeeker {
        return &countingReaderAtSeeker{readerAtSeeker: r, counter: f}
 }
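
The renamed newCountingWriter/newCountingReader helpers wrap an io.Writer or io.Reader and report each operation's byte count to a callback, which is how counters such as the "OutBytes" value checked in TestStats get updated. A small self-contained sketch of the writer side, with illustrative types rather than keepstore's countingReadWriter:

    package main

    import (
        "fmt"
        "io"
        "strings"
        "sync/atomic"
    )

    type countingWriter struct {
        w       io.Writer
        counter func(uint64)
    }

    // Write forwards to the wrapped writer and reports the number of bytes
    // actually written to the counter callback.
    func (cw *countingWriter) Write(p []byte) (int, error) {
        n, err := cw.w.Write(p)
        cw.counter(uint64(n))
        return n, err
    }

    func main() {
        var outBytes uint64
        var sb strings.Builder
        w := &countingWriter{w: &sb, counter: func(n uint64) { atomic.AddUint64(&outBytes, n) }}
        fmt.Fprint(w, "foo")
        fmt.Printf("wrote %d bytes: %q\n", atomic.LoadUint64(&outBytes), sb.String())
    }
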
 
diff --git a/services/keepstore/gocheck_test.go b/services/keepstore/gocheck_test.go
deleted file mode 100644 (file)
index 90076db..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-import (
-       "gopkg.in/check.v1"
-       "testing"
-)
-
-func TestGocheck(t *testing.T) {
-       check.TestingT(t)
-}
diff --git a/services/keepstore/handler_test.go b/services/keepstore/handler_test.go
deleted file mode 100644 (file)
index 5bdafb7..0000000
+++ /dev/null
@@ -1,1405 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-// Tests for Keep HTTP handlers:
-//
-// - GetBlockHandler
-// - PutBlockHandler
-// - IndexHandler
-//
-// The HTTP handlers are responsible for enforcing permission policy,
-// so these tests must exercise all possible permission permutations.
-
-package keepstore
-
-import (
-       "bytes"
-       "context"
-       "encoding/json"
-       "fmt"
-       "net/http"
-       "net/http/httptest"
-       "os"
-       "sort"
-       "strings"
-       "sync/atomic"
-       "time"
-
-       "git.arvados.org/arvados.git/lib/config"
-       "git.arvados.org/arvados.git/sdk/go/arvados"
-       "git.arvados.org/arvados.git/sdk/go/arvadostest"
-       "git.arvados.org/arvados.git/sdk/go/ctxlog"
-       "github.com/prometheus/client_golang/prometheus"
-       check "gopkg.in/check.v1"
-)
-
-var testServiceURL = func() arvados.URL {
-       return arvados.URL{Host: "localhost:12345", Scheme: "http"}
-}()
-
-func testCluster(t TB) *arvados.Cluster {
-       cfg, err := config.NewLoader(bytes.NewBufferString("Clusters: {zzzzz: {}}"), ctxlog.TestLogger(t)).Load()
-       if err != nil {
-               t.Fatal(err)
-       }
-       cluster, err := cfg.GetCluster("")
-       if err != nil {
-               t.Fatal(err)
-       }
-       cluster.SystemRootToken = arvadostest.SystemRootToken
-       cluster.ManagementToken = arvadostest.ManagementToken
-       cluster.Collections.BlobSigning = false
-       return cluster
-}
-
-var _ = check.Suite(&HandlerSuite{})
-
-type HandlerSuite struct {
-       cluster *arvados.Cluster
-       handler *handler
-}
-
-func (s *HandlerSuite) SetUpTest(c *check.C) {
-       s.cluster = testCluster(c)
-       s.cluster.Volumes = map[string]arvados.Volume{
-               "zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"},
-               "zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock"},
-       }
-       s.handler = &handler{}
-}
-
-// A RequestTester represents the parameters for an HTTP request to
-// be issued on behalf of a unit test.
-type RequestTester struct {
-       uri            string
-       apiToken       string
-       method         string
-       requestBody    []byte
-       storageClasses string
-}
-
-// Test GetBlockHandler on the following situations:
-//   - permissions off, unauthenticated request, unsigned locator
-//   - permissions on, authenticated request, signed locator
-//   - permissions on, authenticated request, unsigned locator
-//   - permissions on, unauthenticated request, signed locator
-//   - permissions on, authenticated request, expired locator
-//   - permissions on, authenticated request, signed locator, transient error from backend
-func (s *HandlerSuite) TestGetHandler(c *check.C) {
-       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-       vols := s.handler.volmgr.AllWritable()
-       err := vols[0].Put(context.Background(), TestHash, TestBlock)
-       c.Check(err, check.IsNil)
-
-       // Create locators for testing.
-       // Turn on permission settings so we can generate signed locators.
-       s.cluster.Collections.BlobSigning = true
-       s.cluster.Collections.BlobSigningKey = knownKey
-       s.cluster.Collections.BlobSigningTTL.Set("5m")
-
-       var (
-               unsignedLocator  = "/" + TestHash
-               validTimestamp   = time.Now().Add(s.cluster.Collections.BlobSigningTTL.Duration())
-               expiredTimestamp = time.Now().Add(-time.Hour)
-               signedLocator    = "/" + SignLocator(s.cluster, TestHash, knownToken, validTimestamp)
-               expiredLocator   = "/" + SignLocator(s.cluster, TestHash, knownToken, expiredTimestamp)
-       )
-
-       // -----------------
-       // Test unauthenticated request with permissions off.
-       s.cluster.Collections.BlobSigning = false
-
-       // Unauthenticated request, unsigned locator
-       // => OK
-       response := IssueRequest(s.handler,
-               &RequestTester{
-                       method: "GET",
-                       uri:    unsignedLocator,
-               })
-       ExpectStatusCode(c,
-               "Unauthenticated request, unsigned locator", http.StatusOK, response)
-       ExpectBody(c,
-               "Unauthenticated request, unsigned locator",
-               string(TestBlock),
-               response)
-
-       receivedLen := response.Header().Get("Content-Length")
-       expectedLen := fmt.Sprintf("%d", len(TestBlock))
-       if receivedLen != expectedLen {
-               c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
-       }
-
-       // ----------------
-       // Permissions: on.
-       s.cluster.Collections.BlobSigning = true
-
-       // Authenticated request, signed locator
-       // => OK
-       response = IssueRequest(s.handler, &RequestTester{
-               method:   "GET",
-               uri:      signedLocator,
-               apiToken: knownToken,
-       })
-       ExpectStatusCode(c,
-               "Authenticated request, signed locator", http.StatusOK, response)
-       ExpectBody(c,
-               "Authenticated request, signed locator", string(TestBlock), response)
-
-       receivedLen = response.Header().Get("Content-Length")
-       expectedLen = fmt.Sprintf("%d", len(TestBlock))
-       if receivedLen != expectedLen {
-               c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
-       }
-
-       // Authenticated request, unsigned locator
-       // => PermissionError
-       response = IssueRequest(s.handler, &RequestTester{
-               method:   "GET",
-               uri:      unsignedLocator,
-               apiToken: knownToken,
-       })
-       ExpectStatusCode(c, "unsigned locator", PermissionError.HTTPCode, response)
-
-       // Unauthenticated request, signed locator
-       // => PermissionError
-       response = IssueRequest(s.handler, &RequestTester{
-               method: "GET",
-               uri:    signedLocator,
-       })
-       ExpectStatusCode(c,
-               "Unauthenticated request, signed locator",
-               PermissionError.HTTPCode, response)
-
-       // Authenticated request, expired locator
-       // => ExpiredError
-       response = IssueRequest(s.handler, &RequestTester{
-               method:   "GET",
-               uri:      expiredLocator,
-               apiToken: knownToken,
-       })
-       ExpectStatusCode(c,
-               "Authenticated request, expired locator",
-               ExpiredError.HTTPCode, response)
-
-       // Authenticated request, signed locator
-       // => 503 Server busy (transient error)
-
-       // Set up the block owning volume to respond with errors
-       vols[0].Volume.(*MockVolume).Bad = true
-       vols[0].Volume.(*MockVolume).BadVolumeError = VolumeBusyError
-       response = IssueRequest(s.handler, &RequestTester{
-               method:   "GET",
-               uri:      signedLocator,
-               apiToken: knownToken,
-       })
-       // A transient error from one volume while the other doesn't find the block
-       // should make the service return a 503 so that clients can retry.
-       ExpectStatusCode(c,
-               "Volume backend busy",
-               503, response)
-}
-
-// Test PutBlockHandler on the following situations:
-//   - no server key
-//   - with server key, authenticated request, unsigned locator
-//   - with server key, unauthenticated request, unsigned locator
-func (s *HandlerSuite) TestPutHandler(c *check.C) {
-       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-       // --------------
-       // No server key.
-
-       s.cluster.Collections.BlobSigningKey = ""
-
-       // Unauthenticated request, no server key
-       // => OK (unsigned response)
-       unsignedLocator := "/" + TestHash
-       response := IssueRequest(s.handler,
-               &RequestTester{
-                       method:      "PUT",
-                       uri:         unsignedLocator,
-                       requestBody: TestBlock,
-               })
-
-       ExpectStatusCode(c,
-               "Unauthenticated request, no server key", http.StatusOK, response)
-       ExpectBody(c,
-               "Unauthenticated request, no server key",
-               TestHashPutResp, response)
-
-       // ------------------
-       // With a server key.
-
-       s.cluster.Collections.BlobSigningKey = knownKey
-       s.cluster.Collections.BlobSigningTTL.Set("5m")
-
-       // When a permission key is available, the locator returned
-       // from an authenticated PUT request will be signed.
-
-       // Authenticated PUT, signed locator
-       // => OK (signed response)
-       response = IssueRequest(s.handler,
-               &RequestTester{
-                       method:      "PUT",
-                       uri:         unsignedLocator,
-                       requestBody: TestBlock,
-                       apiToken:    knownToken,
-               })
-
-       ExpectStatusCode(c,
-               "Authenticated PUT, signed locator, with server key",
-               http.StatusOK, response)
-       responseLocator := strings.TrimSpace(response.Body.String())
-       if VerifySignature(s.cluster, responseLocator, knownToken) != nil {
-               c.Errorf("Authenticated PUT, signed locator, with server key:\n"+
-                       "response '%s' does not contain a valid signature",
-                       responseLocator)
-       }
-
-       // Unauthenticated PUT, unsigned locator
-       // => OK
-       response = IssueRequest(s.handler,
-               &RequestTester{
-                       method:      "PUT",
-                       uri:         unsignedLocator,
-                       requestBody: TestBlock,
-               })
-
-       ExpectStatusCode(c,
-               "Unauthenticated PUT, unsigned locator, with server key",
-               http.StatusOK, response)
-       ExpectBody(c,
-               "Unauthenticated PUT, unsigned locator, with server key",
-               TestHashPutResp, response)
-}
-
-func (s *HandlerSuite) TestPutAndDeleteSkipReadonlyVolumes(c *check.C) {
-       s.cluster.Volumes["zzzzz-nyw5e-000000000000000"] = arvados.Volume{Driver: "mock", ReadOnly: true}
-       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-       s.cluster.SystemRootToken = "fake-data-manager-token"
-       IssueRequest(s.handler,
-               &RequestTester{
-                       method:      "PUT",
-                       uri:         "/" + TestHash,
-                       requestBody: TestBlock,
-               })
-
-       s.cluster.Collections.BlobTrash = true
-       IssueRequest(s.handler,
-               &RequestTester{
-                       method:      "DELETE",
-                       uri:         "/" + TestHash,
-                       requestBody: TestBlock,
-                       apiToken:    s.cluster.SystemRootToken,
-               })
-       type expect struct {
-               volid     string
-               method    string
-               callcount int
-       }
-       for _, e := range []expect{
-               {"zzzzz-nyw5e-000000000000000", "Get", 0},
-               {"zzzzz-nyw5e-000000000000000", "Compare", 0},
-               {"zzzzz-nyw5e-000000000000000", "Touch", 0},
-               {"zzzzz-nyw5e-000000000000000", "Put", 0},
-               {"zzzzz-nyw5e-000000000000000", "Delete", 0},
-               {"zzzzz-nyw5e-111111111111111", "Get", 0},
-               {"zzzzz-nyw5e-111111111111111", "Compare", 1},
-               {"zzzzz-nyw5e-111111111111111", "Touch", 1},
-               {"zzzzz-nyw5e-111111111111111", "Put", 1},
-               {"zzzzz-nyw5e-111111111111111", "Delete", 1},
-       } {
-               if calls := s.handler.volmgr.mountMap[e.volid].Volume.(*MockVolume).CallCount(e.method); calls != e.callcount {
-                       c.Errorf("Got %d %s() on vol %s, expect %d", calls, e.method, e.volid, e.callcount)
-               }
-       }
-}
-
-func (s *HandlerSuite) TestReadsOrderedByStorageClassPriority(c *check.C) {
-       s.cluster.Volumes = map[string]arvados.Volume{
-               "zzzzz-nyw5e-111111111111111": {
-                       Driver:         "mock",
-                       Replication:    1,
-                       StorageClasses: map[string]bool{"class1": true}},
-               "zzzzz-nyw5e-222222222222222": {
-                       Driver:         "mock",
-                       Replication:    1,
-                       StorageClasses: map[string]bool{"class2": true, "class3": true}},
-       }
-
-       for _, trial := range []struct {
-               priority1 int // priority of class1, thus vol1
-               priority2 int // priority of class2
-               priority3 int // priority of class3 (vol2 priority will be max(priority2, priority3))
-               get1      int // expected number of "get" ops on vol1
-               get2      int // expected number of "get" ops on vol2
-       }{
-               {100, 50, 50, 1, 0},   // class1 has higher priority => try vol1 first, no need to try vol2
-               {100, 100, 100, 1, 0}, // same priority, vol1 is first lexicographically => try vol1 first and succeed
-               {66, 99, 33, 1, 1},    // class2 has higher priority => try vol2 first, then try vol1
-               {66, 33, 99, 1, 1},    // class3 has highest priority => vol2 has highest => try vol2 first, then try vol1
-       } {
-               c.Logf("%+v", trial)
-               s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
-                       "class1": {Priority: trial.priority1},
-                       "class2": {Priority: trial.priority2},
-                       "class3": {Priority: trial.priority3},
-               }
-               c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-               IssueRequest(s.handler,
-                       &RequestTester{
-                               method:         "PUT",
-                               uri:            "/" + TestHash,
-                               requestBody:    TestBlock,
-                               storageClasses: "class1",
-                       })
-               IssueRequest(s.handler,
-                       &RequestTester{
-                               method: "GET",
-                               uri:    "/" + TestHash,
-                       })
-               c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get1)
-               c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get2)
-       }
-}
-
-func (s *HandlerSuite) TestPutWithNoWritableVolumes(c *check.C) {
-       s.cluster.Volumes = map[string]arvados.Volume{
-               "zzzzz-nyw5e-111111111111111": {
-                       Driver:         "mock",
-                       Replication:    1,
-                       ReadOnly:       true,
-                       StorageClasses: map[string]bool{"class1": true}},
-       }
-       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-       resp := IssueRequest(s.handler,
-               &RequestTester{
-                       method:         "PUT",
-                       uri:            "/" + TestHash,
-                       requestBody:    TestBlock,
-                       storageClasses: "class1",
-               })
-       c.Check(resp.Code, check.Equals, FullError.HTTPCode)
-       c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Put"), check.Equals, 0)
-}
-
-func (s *HandlerSuite) TestConcurrentWritesToMultipleStorageClasses(c *check.C) {
-       s.cluster.Volumes = map[string]arvados.Volume{
-               "zzzzz-nyw5e-111111111111111": {
-                       Driver:         "mock",
-                       Replication:    1,
-                       StorageClasses: map[string]bool{"class1": true}},
-               "zzzzz-nyw5e-121212121212121": {
-                       Driver:         "mock",
-                       Replication:    1,
-                       StorageClasses: map[string]bool{"class1": true, "class2": true}},
-               "zzzzz-nyw5e-222222222222222": {
-                       Driver:         "mock",
-                       Replication:    1,
-                       StorageClasses: map[string]bool{"class2": true}},
-       }
-
-       for _, trial := range []struct {
-               setCounter uint32 // value to stuff vm.counter, to control offset
-               classes    string // desired classes
-               put111     int    // expected number of "put" ops on 11111... after 2x put reqs
-               put121     int    // expected number of "put" ops on 12121...
-               put222     int    // expected number of "put" ops on 22222...
-               cmp111     int    // expected number of "compare" ops on 11111... after 2x put reqs
-               cmp121     int    // expected number of "compare" ops on 12121...
-               cmp222     int    // expected number of "compare" ops on 22222...
-       }{
-               {0, "class1",
-                       1, 0, 0,
-                       2, 1, 0}, // first put compares on all vols with class2; second put succeeds after checking 121
-               {0, "class2",
-                       0, 1, 0,
-                       0, 2, 1}, // first put compares on all vols with class2; second put succeeds after checking 121
-               {0, "class1,class2",
-                       1, 1, 0,
-                       2, 2, 1}, // first put compares on all vols; second put succeeds after checking 111 and 121
-               {1, "class1,class2",
-                       0, 1, 0, // vm.counter offset is 1 so the first volume attempted is 121
-                       2, 2, 1}, // first put compares on all vols; second put succeeds after checking 111 and 121
-               {0, "class1,class2,class404",
-                       1, 1, 0,
-                       2, 2, 1}, // first put compares on all vols; second put doesn't compare on 222 because it already satisfied class2 on 121
-       } {
-               c.Logf("%+v", trial)
-               s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
-                       "class1": {},
-                       "class2": {},
-                       "class3": {},
-               }
-               c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-               atomic.StoreUint32(&s.handler.volmgr.counter, trial.setCounter)
-               for i := 0; i < 2; i++ {
-                       IssueRequest(s.handler,
-                               &RequestTester{
-                                       method:         "PUT",
-                                       uri:            "/" + TestHash,
-                                       requestBody:    TestBlock,
-                                       storageClasses: trial.classes,
-                               })
-               }
-               c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put111)
-               c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-121212121212121"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put121)
-               c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put222)
-               c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp111)
-               c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-121212121212121"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp121)
-               c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp222)
-       }
-}
-
-// Test TOUCH requests.
-func (s *HandlerSuite) TestTouchHandler(c *check.C) {
-       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-       vols := s.handler.volmgr.AllWritable()
-       vols[0].Put(context.Background(), TestHash, TestBlock)
-       vols[0].Volume.(*MockVolume).TouchWithDate(TestHash, time.Now().Add(-time.Hour))
-       afterPut := time.Now()
-       t, err := vols[0].Mtime(TestHash)
-       c.Assert(err, check.IsNil)
-       c.Assert(t.Before(afterPut), check.Equals, true)
-
-       ExpectStatusCode(c,
-               "touch with no credentials",
-               http.StatusUnauthorized,
-               IssueRequest(s.handler, &RequestTester{
-                       method: "TOUCH",
-                       uri:    "/" + TestHash,
-               }))
-
-       ExpectStatusCode(c,
-               "touch with non-root credentials",
-               http.StatusUnauthorized,
-               IssueRequest(s.handler, &RequestTester{
-                       method:   "TOUCH",
-                       uri:      "/" + TestHash,
-                       apiToken: arvadostest.ActiveTokenV2,
-               }))
-
-       ExpectStatusCode(c,
-               "touch non-existent block",
-               http.StatusNotFound,
-               IssueRequest(s.handler, &RequestTester{
-                       method:   "TOUCH",
-                       uri:      "/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
-                       apiToken: s.cluster.SystemRootToken,
-               }))
-
-       beforeTouch := time.Now()
-       ExpectStatusCode(c,
-               "touch block",
-               http.StatusOK,
-               IssueRequest(s.handler, &RequestTester{
-                       method:   "TOUCH",
-                       uri:      "/" + TestHash,
-                       apiToken: s.cluster.SystemRootToken,
-               }))
-       t, err = vols[0].Mtime(TestHash)
-       c.Assert(err, check.IsNil)
-       c.Assert(t.After(beforeTouch), check.Equals, true)
-}
-
-// Test /index requests:
-//   - unauthenticated /index request
-//   - unauthenticated /index/prefix request
-//   - authenticated   /index request        | non-superuser
-//   - authenticated   /index/prefix request | non-superuser
-//   - authenticated   /index request        | superuser
-//   - authenticated   /index/prefix request | superuser
-//
-// The only /index requests that should succeed are those issued by the
-// superuser. They should pass regardless of the value of BlobSigning.
-func (s *HandlerSuite) TestIndexHandler(c *check.C) {
-       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-       // Include multiple blocks on different volumes, and
-       // some metadata files (which should be omitted from index listings)
-       vols := s.handler.volmgr.AllWritable()
-       vols[0].Put(context.Background(), TestHash, TestBlock)
-       vols[1].Put(context.Background(), TestHash2, TestBlock2)
-       vols[0].Put(context.Background(), TestHash+".meta", []byte("metadata"))
-       vols[1].Put(context.Background(), TestHash2+".meta", []byte("metadata"))
-
-       s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
-
-       unauthenticatedReq := &RequestTester{
-               method: "GET",
-               uri:    "/index",
-       }
-       authenticatedReq := &RequestTester{
-               method:   "GET",
-               uri:      "/index",
-               apiToken: knownToken,
-       }
-       superuserReq := &RequestTester{
-               method:   "GET",
-               uri:      "/index",
-               apiToken: s.cluster.SystemRootToken,
-       }
-       unauthPrefixReq := &RequestTester{
-               method: "GET",
-               uri:    "/index/" + TestHash[0:3],
-       }
-       authPrefixReq := &RequestTester{
-               method:   "GET",
-               uri:      "/index/" + TestHash[0:3],
-               apiToken: knownToken,
-       }
-       superuserPrefixReq := &RequestTester{
-               method:   "GET",
-               uri:      "/index/" + TestHash[0:3],
-               apiToken: s.cluster.SystemRootToken,
-       }
-       superuserNoSuchPrefixReq := &RequestTester{
-               method:   "GET",
-               uri:      "/index/abcd",
-               apiToken: s.cluster.SystemRootToken,
-       }
-       superuserInvalidPrefixReq := &RequestTester{
-               method:   "GET",
-               uri:      "/index/xyz",
-               apiToken: s.cluster.SystemRootToken,
-       }
-
-       // -------------------------------------------------------------
-       // Only the superuser should be allowed to issue /index requests.
-
-       // ---------------------------
-       // BlobSigning enabled
-       // This setting should not affect tests passing.
-       s.cluster.Collections.BlobSigning = true
-
-       // unauthenticated /index request
-       // => UnauthorizedError
-       response := IssueRequest(s.handler, unauthenticatedReq)
-       ExpectStatusCode(c,
-               "permissions on, unauthenticated request",
-               UnauthorizedError.HTTPCode,
-               response)
-
-       // unauthenticated /index/prefix request
-       // => UnauthorizedError
-       response = IssueRequest(s.handler, unauthPrefixReq)
-       ExpectStatusCode(c,
-               "permissions on, unauthenticated /index/prefix request",
-               UnauthorizedError.HTTPCode,
-               response)
-
-       // authenticated /index request, non-superuser
-       // => UnauthorizedError
-       response = IssueRequest(s.handler, authenticatedReq)
-       ExpectStatusCode(c,
-               "permissions on, authenticated request, non-superuser",
-               UnauthorizedError.HTTPCode,
-               response)
-
-       // authenticated /index/prefix request, non-superuser
-       // => UnauthorizedError
-       response = IssueRequest(s.handler, authPrefixReq)
-       ExpectStatusCode(c,
-               "permissions on, authenticated /index/prefix request, non-superuser",
-               UnauthorizedError.HTTPCode,
-               response)
-
-       // superuser /index request
-       // => OK
-       response = IssueRequest(s.handler, superuserReq)
-       ExpectStatusCode(c,
-               "permissions on, superuser request",
-               http.StatusOK,
-               response)
-
-       // ----------------------------
-       // BlobSigning disabled
-       // Valid Request should still pass.
-       s.cluster.Collections.BlobSigning = false
-
-       // superuser /index request
-       // => OK
-       response = IssueRequest(s.handler, superuserReq)
-       ExpectStatusCode(c,
-               "permissions on, superuser request",
-               http.StatusOK,
-               response)
-
-       expected := `^` + TestHash + `\+\d+ \d+\n` +
-               TestHash2 + `\+\d+ \d+\n\n$`
-       c.Check(response.Body.String(), check.Matches, expected, check.Commentf(
-               "permissions on, superuser request"))
-
-       // superuser /index/prefix request
-       // => OK
-       response = IssueRequest(s.handler, superuserPrefixReq)
-       ExpectStatusCode(c,
-               "permissions on, superuser request",
-               http.StatusOK,
-               response)
-
-       expected = `^` + TestHash + `\+\d+ \d+\n\n$`
-       c.Check(response.Body.String(), check.Matches, expected, check.Commentf(
-               "permissions on, superuser /index/prefix request"))
-
-       // superuser /index/{no-such-prefix} request
-       // => OK
-       response = IssueRequest(s.handler, superuserNoSuchPrefixReq)
-       ExpectStatusCode(c,
-               "permissions on, superuser request",
-               http.StatusOK,
-               response)
-
-       if "\n" != response.Body.String() {
-               c.Errorf("Expected empty response for %s. Found %s", superuserNoSuchPrefixReq.uri, response.Body.String())
-       }
-
-       // superuser /index/{invalid-prefix} request
-       // => StatusBadRequest
-       response = IssueRequest(s.handler, superuserInvalidPrefixReq)
-       ExpectStatusCode(c,
-               "permissions on, superuser request",
-               http.StatusBadRequest,
-               response)
-}
-
-// TestDeleteHandler
-//
-// Cases tested:
-//
-//     With no token and with a non-data-manager token:
-//     * Delete existing block
-//       (test for 403 Forbidden, confirm block not deleted)
-//
-//     With data manager token:
-//
-//     * Delete existing block
-//       (test for 200 OK, response counts, confirm block deleted)
-//
-//     * Delete nonexistent block
-//       (test for 200 OK, response counts)
-//
-//     TODO(twp):
-//
-//     * Delete block on read-only and read-write volume
-//       (test for 200 OK, response with copies_deleted=1,
-//       copies_failed=1, confirm block deleted only on r/w volume)
-//
-//     * Delete block on read-only volume only
-//       (test for 200 OK, response with copies_deleted=0, copies_failed=1,
-//       confirm block not deleted)
-func (s *HandlerSuite) TestDeleteHandler(c *check.C) {
-       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-       vols := s.handler.volmgr.AllWritable()
-       vols[0].Put(context.Background(), TestHash, TestBlock)
-
-       // Explicitly set the BlobSigningTTL to 0 for these
-       // tests, to ensure the MockVolume deletes the blocks
-       // even though they have just been created.
-       s.cluster.Collections.BlobSigningTTL = arvados.Duration(0)
-
-       var userToken = "NOT DATA MANAGER TOKEN"
-       s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
-
-       s.cluster.Collections.BlobTrash = true
-
-       unauthReq := &RequestTester{
-               method: "DELETE",
-               uri:    "/" + TestHash,
-       }
-
-       userReq := &RequestTester{
-               method:   "DELETE",
-               uri:      "/" + TestHash,
-               apiToken: userToken,
-       }
-
-       superuserExistingBlockReq := &RequestTester{
-               method:   "DELETE",
-               uri:      "/" + TestHash,
-               apiToken: s.cluster.SystemRootToken,
-       }
-
-       superuserNonexistentBlockReq := &RequestTester{
-               method:   "DELETE",
-               uri:      "/" + TestHash2,
-               apiToken: s.cluster.SystemRootToken,
-       }
-
-       // Unauthenticated request returns PermissionError.
-       var response *httptest.ResponseRecorder
-       response = IssueRequest(s.handler, unauthReq)
-       ExpectStatusCode(c,
-               "unauthenticated request",
-               PermissionError.HTTPCode,
-               response)
-
-       // Authenticated non-admin request returns PermissionError.
-       response = IssueRequest(s.handler, userReq)
-       ExpectStatusCode(c,
-               "authenticated non-admin request",
-               PermissionError.HTTPCode,
-               response)
-
-       // Authenticated admin request for nonexistent block.
-       type deletecounter struct {
-               Deleted int `json:"copies_deleted"`
-               Failed  int `json:"copies_failed"`
-       }
-       var responseDc, expectedDc deletecounter
-
-       response = IssueRequest(s.handler, superuserNonexistentBlockReq)
-       ExpectStatusCode(c,
-               "data manager request, nonexistent block",
-               http.StatusNotFound,
-               response)
-
-       // Authenticated admin request for existing block while BlobTrash is false.
-       s.cluster.Collections.BlobTrash = false
-       response = IssueRequest(s.handler, superuserExistingBlockReq)
-       ExpectStatusCode(c,
-               "authenticated request, existing block, method disabled",
-               MethodDisabledError.HTTPCode,
-               response)
-       s.cluster.Collections.BlobTrash = true
-
-       // Authenticated admin request for existing block.
-       response = IssueRequest(s.handler, superuserExistingBlockReq)
-       ExpectStatusCode(c,
-               "data manager request, existing block",
-               http.StatusOK,
-               response)
-       // Expect response {"copies_deleted":1,"copies_failed":0}
-       expectedDc = deletecounter{1, 0}
-       json.NewDecoder(response.Body).Decode(&responseDc)
-       if responseDc != expectedDc {
-               c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
-                       expectedDc, responseDc)
-       }
-       // Confirm the block has been deleted
-       buf := make([]byte, BlockSize)
-       _, err := vols[0].Get(context.Background(), TestHash, buf)
-       var blockDeleted = os.IsNotExist(err)
-       if !blockDeleted {
-               c.Error("superuserExistingBlockReq: block not deleted")
-       }
-
-       // A DELETE request on a block newer than BlobSigningTTL
-       // should return success but leave the block on the volume.
-       vols[0].Put(context.Background(), TestHash, TestBlock)
-       s.cluster.Collections.BlobSigningTTL = arvados.Duration(time.Hour)
-
-       response = IssueRequest(s.handler, superuserExistingBlockReq)
-       ExpectStatusCode(c,
-               "data manager request, existing block",
-               http.StatusOK,
-               response)
-       // Expect response {"copies_deleted":1,"copies_failed":0}
-       expectedDc = deletecounter{1, 0}
-       json.NewDecoder(response.Body).Decode(&responseDc)
-       if responseDc != expectedDc {
-               c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
-                       expectedDc, responseDc)
-       }
-       // Confirm the block has NOT been deleted.
-       _, err = vols[0].Get(context.Background(), TestHash, buf)
-       if err != nil {
-               c.Errorf("testing delete on new block: %s\n", err)
-       }
-}
-
-// TestPullHandler
-//
-// Test handling of the PUT /pull request.
-//
-// Cases tested: syntactically valid and invalid pull lists, from the
-// data manager and from unprivileged users:
-//
-//  1. Valid pull list from an ordinary user
-//     (expected result: 401 Unauthorized)
-//
-//  2. Invalid pull request from an ordinary user
-//     (expected result: 401 Unauthorized)
-//
-//  3. Valid pull request from the data manager
-//     (expected result: 200 OK with request body "Received 3 pull
-//     requests")
-//
-//  4. Invalid pull request from the data manager
-//     (expected result: 400 Bad Request)
-//
-// Test that in the end, the pull manager received a good pull list with
-// the expected number of requests.
-//
-// TODO(twp): test concurrency: launch 100 goroutines to update the
-// pull list simultaneously.  Make sure that none of them return 400
-// Bad Request and that pullq.GetList() returns a valid list.
-func (s *HandlerSuite) TestPullHandler(c *check.C) {
-       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-       // Replace the router's pullq -- which the worker goroutines
-       // started by setup() are now receiving from -- with a new
-       // one, so we can see what the handler sends to it.
-       pullq := NewWorkQueue()
-       s.handler.Handler.(*router).pullq = pullq
-
-       var userToken = "USER TOKEN"
-       s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
-
-       goodJSON := []byte(`[
-               {
-                       "locator":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+12345",
-                       "servers":[
-                               "http://server1",
-                               "http://server2"
-                       ]
-               },
-               {
-                       "locator":"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+12345",
-                       "servers":[]
-               },
-               {
-                       "locator":"cccccccccccccccccccccccccccccccc+12345",
-                       "servers":["http://server1"]
-               }
-       ]`)
-
-       badJSON := []byte(`{ "key":"I'm a little teapot" }`)
-
-       type pullTest struct {
-               name         string
-               req          RequestTester
-               responseCode int
-               responseBody string
-       }
-       var testcases = []pullTest{
-               {
-                       "Valid pull list from an ordinary user",
-                       RequestTester{"/pull", userToken, "PUT", goodJSON, ""},
-                       http.StatusUnauthorized,
-                       "Unauthorized\n",
-               },
-               {
-                       "Invalid pull request from an ordinary user",
-                       RequestTester{"/pull", userToken, "PUT", badJSON, ""},
-                       http.StatusUnauthorized,
-                       "Unauthorized\n",
-               },
-               {
-                       "Valid pull request from the data manager",
-                       RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", goodJSON, ""},
-                       http.StatusOK,
-                       "Received 3 pull requests\n",
-               },
-               {
-                       "Invalid pull request from the data manager",
-                       RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", badJSON, ""},
-                       http.StatusBadRequest,
-                       "",
-               },
-       }
-
-       for _, tst := range testcases {
-               response := IssueRequest(s.handler, &tst.req)
-               ExpectStatusCode(c, tst.name, tst.responseCode, response)
-               ExpectBody(c, tst.name, tst.responseBody, response)
-       }
-
-       // The Keep pull manager should have received one good list with 3
-       // requests on it.
-       for i := 0; i < 3; i++ {
-               var item interface{}
-               select {
-               case item = <-pullq.NextItem:
-               case <-time.After(time.Second):
-                       c.Error("timed out")
-               }
-               if _, ok := item.(PullRequest); !ok {
-                       c.Errorf("item %v could not be parsed as a PullRequest", item)
-               }
-       }
-
-       expectChannelEmpty(c, pullq.NextItem)
-}
-
-// TestTrashHandler
-//
-// Cases tested: syntactically valid and invalid trash lists, from the
-// data manager and from unprivileged users:
-//
-//  1. Valid trash list from an ordinary user
-//     (expected result: 401 Unauthorized)
-//
-//  2. Invalid trash list from an ordinary user
-//     (expected result: 401 Unauthorized)
-//
-//  3. Valid trash list from the data manager
-//     (expected result: 200 OK with request body "Received 3 trash
-//     requests")
-//
-//  4. Invalid trash list from the data manager
-//     (expected result: 400 Bad Request)
-//
-// Test that in the end, the trash collector received a good trash
-// list with the expected number of requests.
-//
-// TODO(twp): test concurrency: launch 100 goroutines to update the
-// trash list simultaneously.  Make sure that none of them return 400
-// Bad Request and that replica.Dump() returns a valid list.
-func (s *HandlerSuite) TestTrashHandler(c *check.C) {
-       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-       // Replace the router's trashq -- which the worker goroutines
-       // started by setup() are now receiving from -- with a new
-       // one, so we can see what the handler sends to it.
-       trashq := NewWorkQueue()
-       s.handler.Handler.(*router).trashq = trashq
-
-       var userToken = "USER TOKEN"
-       s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
-
-       goodJSON := []byte(`[
-               {
-                       "locator":"block1",
-                       "block_mtime":1409082153
-               },
-               {
-                       "locator":"block2",
-                       "block_mtime":1409082153
-               },
-               {
-                       "locator":"block3",
-                       "block_mtime":1409082153
-               }
-       ]`)
-
-       badJSON := []byte(`I am not a valid JSON string`)
-
-       type trashTest struct {
-               name         string
-               req          RequestTester
-               responseCode int
-               responseBody string
-       }
-
-       var testcases = []trashTest{
-               {
-                       "Valid trash list from an ordinary user",
-                       RequestTester{"/trash", userToken, "PUT", goodJSON, ""},
-                       http.StatusUnauthorized,
-                       "Unauthorized\n",
-               },
-               {
-                       "Invalid trash list from an ordinary user",
-                       RequestTester{"/trash", userToken, "PUT", badJSON, ""},
-                       http.StatusUnauthorized,
-                       "Unauthorized\n",
-               },
-               {
-                       "Valid trash list from the data manager",
-                       RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", goodJSON, ""},
-                       http.StatusOK,
-                       "Received 3 trash requests\n",
-               },
-               {
-                       "Invalid trash list from the data manager",
-                       RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", badJSON, ""},
-                       http.StatusBadRequest,
-                       "",
-               },
-       }
-
-       for _, tst := range testcases {
-               response := IssueRequest(s.handler, &tst.req)
-               ExpectStatusCode(c, tst.name, tst.responseCode, response)
-               ExpectBody(c, tst.name, tst.responseBody, response)
-       }
-
-       // The trash collector should have received one good list with 3
-       // requests on it.
-       for i := 0; i < 3; i++ {
-               item := <-trashq.NextItem
-               if _, ok := item.(TrashRequest); !ok {
-                       c.Errorf("item %v could not be parsed as a TrashRequest", item)
-               }
-       }
-
-       expectChannelEmpty(c, trashq.NextItem)
-}
-
-// ====================
-// Helper functions
-// ====================
-
-// IssueRequest executes an HTTP request described by rt, to a
-// REST router.  It returns the HTTP response to the request.
-func IssueRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
-       response := httptest.NewRecorder()
-       body := bytes.NewReader(rt.requestBody)
-       req, _ := http.NewRequest(rt.method, rt.uri, body)
-       if rt.apiToken != "" {
-               req.Header.Set("Authorization", "OAuth2 "+rt.apiToken)
-       }
-       if rt.storageClasses != "" {
-               req.Header.Set("X-Keep-Storage-Classes", rt.storageClasses)
-       }
-       handler.ServeHTTP(response, req)
-       return response
-}
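
As a usage illustration, here is a minimal, self-contained sketch of the same httptest pattern outside the RequestTester helpers. The echo handler and token below are hypothetical placeholders, not part of the keepstore test suite:

    package main

    import (
        "bytes"
        "fmt"
        "net/http"
        "net/http/httptest"
    )

    func main() {
        // Stand-in handler that just echoes the Authorization header.
        handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            fmt.Fprintln(w, r.Header.Get("Authorization"))
        })

        // Roughly equivalent to IssueRequest(handler, &RequestTester{
        //         method: "GET", uri: "/foo", apiToken: "EXAMPLE TOKEN"}).
        req, _ := http.NewRequest("GET", "/foo", bytes.NewReader(nil))
        req.Header.Set("Authorization", "OAuth2 "+"EXAMPLE TOKEN")
        resp := httptest.NewRecorder()
        handler.ServeHTTP(resp, req)

        fmt.Print(resp.Code, " ", resp.Body.String()) // 200 OAuth2 EXAMPLE TOKEN
    }
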
-
-func IssueHealthCheckRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
-       response := httptest.NewRecorder()
-       body := bytes.NewReader(rt.requestBody)
-       req, _ := http.NewRequest(rt.method, rt.uri, body)
-       if rt.apiToken != "" {
-               req.Header.Set("Authorization", "Bearer "+rt.apiToken)
-       }
-       handler.ServeHTTP(response, req)
-       return response
-}
-
-// ExpectStatusCode checks whether a response has the specified status code,
-// and reports a test failure if not.
-func ExpectStatusCode(
-       c *check.C,
-       testname string,
-       expectedStatus int,
-       response *httptest.ResponseRecorder) {
-       c.Check(response.Code, check.Equals, expectedStatus, check.Commentf("%s", testname))
-}
-
-func ExpectBody(
-       c *check.C,
-       testname string,
-       expectedBody string,
-       response *httptest.ResponseRecorder) {
-       if expectedBody != "" && response.Body.String() != expectedBody {
-               c.Errorf("%s: expected response body '%s', got %+v",
-                       testname, expectedBody, response)
-       }
-}
-
-// See #7121
-func (s *HandlerSuite) TestPutNeedsOnlyOneBuffer(c *check.C) {
-       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-       defer func(orig *bufferPool) {
-               bufs = orig
-       }(bufs)
-       bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)
-
-       ok := make(chan struct{})
-       go func() {
-               for i := 0; i < 2; i++ {
-                       response := IssueRequest(s.handler,
-                               &RequestTester{
-                                       method:      "PUT",
-                                       uri:         "/" + TestHash,
-                                       requestBody: TestBlock,
-                               })
-                       ExpectStatusCode(c,
-                               "TestPutNeedsOnlyOneBuffer", http.StatusOK, response)
-               }
-               ok <- struct{}{}
-       }()
-
-       select {
-       case <-ok:
-       case <-time.After(time.Second):
-               c.Fatal("PUT deadlocks with MaxKeepBlobBuffers==1")
-       }
-}
-
-// Invoke the PutBlockHandler a bunch of times to test for bufferpool resource
-// leak.
-func (s *HandlerSuite) TestPutHandlerNoBufferleak(c *check.C) {
-       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-       ok := make(chan bool)
-       go func() {
-               for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
-                       // Unauthenticated request, no server key
-                       // => OK (unsigned response)
-                       unsignedLocator := "/" + TestHash
-                       response := IssueRequest(s.handler,
-                               &RequestTester{
-                                       method:      "PUT",
-                                       uri:         unsignedLocator,
-                                       requestBody: TestBlock,
-                               })
-                       ExpectStatusCode(c,
-                               "TestPutHandlerBufferleak", http.StatusOK, response)
-                       ExpectBody(c,
-                               "TestPutHandlerBufferleak",
-                               TestHashPutResp, response)
-               }
-               ok <- true
-       }()
-       select {
-       case <-time.After(20 * time.Second):
-               // If the buffer pool leaks, the test goroutine hangs.
-               c.Fatal("test did not finish, assuming pool leaked")
-       case <-ok:
-       }
-}
-
-func (s *HandlerSuite) TestGetHandlerClientDisconnect(c *check.C) {
-       s.cluster.Collections.BlobSigning = false
-       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-       defer func(orig *bufferPool) {
-               bufs = orig
-       }(bufs)
-       bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)
-       defer bufs.Put(bufs.Get(BlockSize))
-
-       err := s.handler.volmgr.AllWritable()[0].Put(context.Background(), TestHash, TestBlock)
-       c.Assert(err, check.IsNil)
-
-       resp := httptest.NewRecorder()
-       ok := make(chan struct{})
-       go func() {
-               ctx, cancel := context.WithCancel(context.Background())
-               req, _ := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("/%s+%d", TestHash, len(TestBlock)), nil)
-               cancel()
-               s.handler.ServeHTTP(resp, req)
-               ok <- struct{}{}
-       }()
-
-       select {
-       case <-time.After(20 * time.Second):
-               c.Fatal("request took >20s, close notifier must be broken")
-       case <-ok:
-       }
-
-       ExpectStatusCode(c, "client disconnect", http.StatusServiceUnavailable, resp)
-       for i, v := range s.handler.volmgr.AllWritable() {
-               if calls := v.Volume.(*MockVolume).called["GET"]; calls != 0 {
-                       c.Errorf("volume %d got %d calls, expected 0", i, calls)
-               }
-       }
-}
-
-// Invoke the GetBlockHandler a bunch of times to test for bufferpool resource
-// leak.
-func (s *HandlerSuite) TestGetHandlerNoBufferLeak(c *check.C) {
-       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-       vols := s.handler.volmgr.AllWritable()
-       if err := vols[0].Put(context.Background(), TestHash, TestBlock); err != nil {
-               c.Error(err)
-       }
-
-       ok := make(chan bool)
-       go func() {
-               for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
-                       // Unauthenticated request, unsigned locator
-                       // => OK
-                       unsignedLocator := "/" + TestHash
-                       response := IssueRequest(s.handler,
-                               &RequestTester{
-                                       method: "GET",
-                                       uri:    unsignedLocator,
-                               })
-                       ExpectStatusCode(c,
-                               "Unauthenticated request, unsigned locator", http.StatusOK, response)
-                       ExpectBody(c,
-                               "Unauthenticated request, unsigned locator",
-                               string(TestBlock),
-                               response)
-               }
-               ok <- true
-       }()
-       select {
-       case <-time.After(20 * time.Second):
-               // If the buffer pool leaks, the test goroutine hangs.
-               c.Fatal("test did not finish, assuming pool leaked")
-       case <-ok:
-       }
-}
-
-func (s *HandlerSuite) TestPutStorageClasses(c *check.C) {
-       s.cluster.Volumes = map[string]arvados.Volume{
-               "zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"}, // "default" is implicit
-               "zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"special": true, "extra": true}},
-               "zzzzz-nyw5e-222222222222222": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"readonly": true}, ReadOnly: true},
-       }
-       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-       rt := RequestTester{
-               method:      "PUT",
-               uri:         "/" + TestHash,
-               requestBody: TestBlock,
-       }
-
-       for _, trial := range []struct {
-               ask    string
-               expect string
-       }{
-               {"", ""},
-               {"default", "default=1"},
-               {" , default , default , ", "default=1"},
-               {"special", "extra=1, special=1"},
-               {"special, readonly", "extra=1, special=1"},
-               {"special, nonexistent", "extra=1, special=1"},
-               {"extra, special", "extra=1, special=1"},
-               {"default, special", "default=1, extra=1, special=1"},
-       } {
-               c.Logf("success case %#v", trial)
-               rt.storageClasses = trial.ask
-               resp := IssueRequest(s.handler, &rt)
-               if trial.expect == "" {
-                       // any non-empty value is correct
-                       c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), check.Not(check.Equals), "")
-               } else {
-                       c.Check(sortCommaSeparated(resp.Header().Get("X-Keep-Storage-Classes-Confirmed")), check.Equals, trial.expect)
-               }
-       }
-
-       for _, trial := range []struct {
-               ask string
-       }{
-               {"doesnotexist"},
-               {"doesnotexist, readonly"},
-               {"readonly"},
-       } {
-               c.Logf("failure case %#v", trial)
-               rt.storageClasses = trial.ask
-               resp := IssueRequest(s.handler, &rt)
-               c.Check(resp.Code, check.Equals, http.StatusServiceUnavailable)
-       }
-}
-
-func sortCommaSeparated(s string) string {
-       slice := strings.Split(s, ", ")
-       sort.Strings(slice)
-       return strings.Join(slice, ", ")
-}
-
-func (s *HandlerSuite) TestPutResponseHeader(c *check.C) {
-       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-       resp := IssueRequest(s.handler, &RequestTester{
-               method:      "PUT",
-               uri:         "/" + TestHash,
-               requestBody: TestBlock,
-       })
-       c.Logf("%#v", resp)
-       c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), check.Equals, "1")
-       c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), check.Equals, "default=1")
-}
-
-func (s *HandlerSuite) TestUntrashHandler(c *check.C) {
-       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-       // Set up Keep volumes
-       vols := s.handler.volmgr.AllWritable()
-       vols[0].Put(context.Background(), TestHash, TestBlock)
-
-       s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
-
-       // unauthenticatedReq => UnauthorizedError
-       unauthenticatedReq := &RequestTester{
-               method: "PUT",
-               uri:    "/untrash/" + TestHash,
-       }
-       response := IssueRequest(s.handler, unauthenticatedReq)
-       ExpectStatusCode(c,
-               "Unauthenticated request",
-               UnauthorizedError.HTTPCode,
-               response)
-
-       // notDataManagerReq => UnauthorizedError
-       notDataManagerReq := &RequestTester{
-               method:   "PUT",
-               uri:      "/untrash/" + TestHash,
-               apiToken: knownToken,
-       }
-
-       response = IssueRequest(s.handler, notDataManagerReq)
-       ExpectStatusCode(c,
-               "Non-datamanager token",
-               UnauthorizedError.HTTPCode,
-               response)
-
-       // datamanagerWithBadHashReq => StatusBadRequest
-       datamanagerWithBadHashReq := &RequestTester{
-               method:   "PUT",
-               uri:      "/untrash/thisisnotalocator",
-               apiToken: s.cluster.SystemRootToken,
-       }
-       response = IssueRequest(s.handler, datamanagerWithBadHashReq)
-       ExpectStatusCode(c,
-               "Bad locator in untrash request",
-               http.StatusBadRequest,
-               response)
-
-       // datamanagerWrongMethodReq => StatusBadRequest
-       datamanagerWrongMethodReq := &RequestTester{
-               method:   "GET",
-               uri:      "/untrash/" + TestHash,
-               apiToken: s.cluster.SystemRootToken,
-       }
-       response = IssueRequest(s.handler, datamanagerWrongMethodReq)
-       ExpectStatusCode(c,
-               "Only PUT method is supported for untrash",
-               http.StatusMethodNotAllowed,
-               response)
-
-       // datamanagerReq => StatusOK
-       datamanagerReq := &RequestTester{
-               method:   "PUT",
-               uri:      "/untrash/" + TestHash,
-               apiToken: s.cluster.SystemRootToken,
-       }
-       response = IssueRequest(s.handler, datamanagerReq)
-       ExpectStatusCode(c,
-               "",
-               http.StatusOK,
-               response)
-       c.Check(response.Body.String(), check.Equals, "Successfully untrashed on: [MockVolume], [MockVolume]\n")
-}
-
-func (s *HandlerSuite) TestUntrashHandlerWithNoWritableVolumes(c *check.C) {
-       // Change all volumes to read-only
-       for uuid, v := range s.cluster.Volumes {
-               v.ReadOnly = true
-               s.cluster.Volumes[uuid] = v
-       }
-       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-       // datamanagerReq => StatusOK
-       datamanagerReq := &RequestTester{
-               method:   "PUT",
-               uri:      "/untrash/" + TestHash,
-               apiToken: s.cluster.SystemRootToken,
-       }
-       response := IssueRequest(s.handler, datamanagerReq)
-       ExpectStatusCode(c,
-               "No writable volumes",
-               http.StatusNotFound,
-               response)
-}
-
-func (s *HandlerSuite) TestHealthCheckPing(c *check.C) {
-       s.cluster.ManagementToken = arvadostest.ManagementToken
-       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-       pingReq := &RequestTester{
-               method:   "GET",
-               uri:      "/_health/ping",
-               apiToken: arvadostest.ManagementToken,
-       }
-       response := IssueHealthCheckRequest(s.handler, pingReq)
-       ExpectStatusCode(c,
-               "",
-               http.StatusOK,
-               response)
-       want := `{"health":"OK"}`
-       if !strings.Contains(response.Body.String(), want) {
-               c.Errorf("expected response to include %s: got %s", want, response.Body.String())
-       }
-}
diff --git a/services/keepstore/handlers.go b/services/keepstore/handlers.go
deleted file mode 100644 (file)
index abeb20f..0000000
+++ /dev/null
@@ -1,1056 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-import (
-       "container/list"
-       "context"
-       "crypto/md5"
-       "encoding/json"
-       "fmt"
-       "io"
-       "net/http"
-       "os"
-       "regexp"
-       "runtime"
-       "strconv"
-       "strings"
-       "sync"
-       "sync/atomic"
-       "time"
-
-       "git.arvados.org/arvados.git/lib/cmd"
-       "git.arvados.org/arvados.git/sdk/go/arvados"
-       "git.arvados.org/arvados.git/sdk/go/ctxlog"
-       "git.arvados.org/arvados.git/sdk/go/health"
-       "git.arvados.org/arvados.git/sdk/go/httpserver"
-       "github.com/gorilla/mux"
-       "github.com/prometheus/client_golang/prometheus"
-       "github.com/sirupsen/logrus"
-)
-
-type router struct {
-       *mux.Router
-       cluster     *arvados.Cluster
-       logger      logrus.FieldLogger
-       remoteProxy remoteProxy
-       metrics     *nodeMetrics
-       volmgr      *RRVolumeManager
-       pullq       *WorkQueue
-       trashq      *WorkQueue
-}
-
-// MakeRESTRouter returns a new router that forwards all Keep requests
-// to the appropriate handlers.
-func MakeRESTRouter(ctx context.Context, cluster *arvados.Cluster, reg *prometheus.Registry, volmgr *RRVolumeManager, pullq, trashq *WorkQueue) http.Handler {
-       rtr := &router{
-               Router:  mux.NewRouter(),
-               cluster: cluster,
-               logger:  ctxlog.FromContext(ctx),
-               metrics: &nodeMetrics{reg: reg},
-               volmgr:  volmgr,
-               pullq:   pullq,
-               trashq:  trashq,
-       }
-
-       rtr.HandleFunc(
-               `/{hash:[0-9a-f]{32}}`, rtr.handleGET).Methods("GET", "HEAD")
-       rtr.HandleFunc(
-               `/{hash:[0-9a-f]{32}}+{hints}`,
-               rtr.handleGET).Methods("GET", "HEAD")
-
-       rtr.HandleFunc(`/{hash:[0-9a-f]{32}}`, rtr.handlePUT).Methods("PUT")
-       rtr.HandleFunc(`/{hash:[0-9a-f]{32}}`, rtr.handleDELETE).Methods("DELETE")
-       // List all blocks stored here. Privileged client only.
-       rtr.HandleFunc(`/index`, rtr.handleIndex).Methods("GET", "HEAD")
-       // List blocks stored here whose hash has the given prefix.
-       // Privileged client only.
-       rtr.HandleFunc(`/index/{prefix:[0-9a-f]{0,32}}`, rtr.handleIndex).Methods("GET", "HEAD")
-       // Update timestamp on existing block. Privileged client only.
-       rtr.HandleFunc(`/{hash:[0-9a-f]{32}}`, rtr.handleTOUCH).Methods("TOUCH")
-
-       // Internals/debugging info (runtime.MemStats)
-       rtr.HandleFunc(`/debug.json`, rtr.DebugHandler).Methods("GET", "HEAD")
-
-       // List volumes: path, device number, bytes used/avail.
-       rtr.HandleFunc(`/status.json`, rtr.StatusHandler).Methods("GET", "HEAD")
-
-       // List mounts: UUID, readonly, tier, device ID, ...
-       rtr.HandleFunc(`/mounts`, rtr.MountsHandler).Methods("GET")
-       rtr.HandleFunc(`/mounts/{uuid}/blocks`, rtr.handleIndex).Methods("GET")
-       rtr.HandleFunc(`/mounts/{uuid}/blocks/`, rtr.handleIndex).Methods("GET")
-
-       // Replace the current pull queue.
-       rtr.HandleFunc(`/pull`, rtr.handlePull).Methods("PUT")
-
-       // Replace the current trash queue.
-       rtr.HandleFunc(`/trash`, rtr.handleTrash).Methods("PUT")
-
-       // Untrash moves blocks from trash back into store
-       rtr.HandleFunc(`/untrash/{hash:[0-9a-f]{32}}`, rtr.handleUntrash).Methods("PUT")
-
-       rtr.Handle("/_health/{check}", &health.Handler{
-               Token:  cluster.ManagementToken,
-               Prefix: "/_health/",
-       }).Methods("GET")
-
-       // Any request which does not match any of these routes gets
-       // 400 Bad Request.
-       rtr.NotFoundHandler = http.HandlerFunc(BadRequestHandler)
-
-       rtr.metrics.setupBufferPoolMetrics(bufs)
-       rtr.metrics.setupWorkQueueMetrics(rtr.pullq, "pull")
-       rtr.metrics.setupWorkQueueMetrics(rtr.trashq, "trash")
-
-       return rtr
-}
-
-// BadRequestHandler is an http.HandlerFunc that responds with 400 Bad Request.
-func BadRequestHandler(w http.ResponseWriter, r *http.Request) {
-       http.Error(w, BadRequestError.Error(), BadRequestError.HTTPCode)
-}
-
-func (rtr *router) handleGET(resp http.ResponseWriter, req *http.Request) {
-       locator := req.URL.Path[1:]
-       if strings.Contains(locator, "+R") && !strings.Contains(locator, "+A") {
-               rtr.remoteProxy.Get(req.Context(), resp, req, rtr.cluster, rtr.volmgr)
-               return
-       }
-
-       if rtr.cluster.Collections.BlobSigning {
-               locator := req.URL.Path[1:] // strip leading slash
-               if err := VerifySignature(rtr.cluster, locator, GetAPIToken(req)); err != nil {
-                       http.Error(resp, err.Error(), err.(*KeepError).HTTPCode)
-                       return
-               }
-       }
-
-       // TODO: Probe volumes to check whether the block _might_
-       // exist. Some volumes/types could support a quick existence
-       // check without causing other operations to suffer. If all
-       // volumes support that, and assure us the block definitely
-       // isn't here, we can return 404 now instead of waiting for a
-       // buffer.
-
-       buf, err := getBufferWithContext(req.Context(), bufs, BlockSize)
-       if err != nil {
-               http.Error(resp, err.Error(), http.StatusServiceUnavailable)
-               return
-       }
-       defer bufs.Put(buf)
-
-       size, err := GetBlock(req.Context(), rtr.volmgr, mux.Vars(req)["hash"], buf, resp)
-       if err != nil {
-               code := http.StatusInternalServerError
-               if err, ok := err.(*KeepError); ok {
-                       code = err.HTTPCode
-               }
-               http.Error(resp, err.Error(), code)
-               return
-       }
-
-       resp.Header().Set("Content-Length", strconv.Itoa(size))
-       resp.Header().Set("Content-Type", "application/octet-stream")
-       resp.Write(buf[:size])
-}
-
-// Get a buffer from the pool -- but give up and return a non-nil
-// error if ctx ends before we get a buffer.
-func getBufferWithContext(ctx context.Context, bufs *bufferPool, bufSize int) ([]byte, error) {
-       bufReady := make(chan []byte)
-       go func() {
-               bufReady <- bufs.Get(bufSize)
-       }()
-       select {
-       case buf := <-bufReady:
-               return buf, nil
-       case <-ctx.Done():
-               go func() {
-                       // Even if closeNotifier happened first, we
-                       // need to keep waiting for our buf so we can
-                       // return it to the pool.
-                       bufs.Put(<-bufReady)
-               }()
-               return nil, ErrClientDisconnect
-       }
-}
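
The same give-up-on-cancel pattern applies to any blocking acquire: race the receive against ctx.Done(), and if the context wins, hand the late resource back from a goroutine so the pool is not drained. A minimal sketch, using a hypothetical channel-based pool rather than keepstore's bufferPool:

    package main

    import (
        "context"
        "errors"
        "fmt"
        "time"
    )

    var errClientDisconnect = errors.New("client disconnected")

    // acquire waits for a buffer from pool, but gives up when ctx ends.
    // A buffer that arrives after cancellation is put back, not leaked.
    func acquire(ctx context.Context, pool chan []byte) ([]byte, error) {
        ready := make(chan []byte, 1)
        go func() { ready <- <-pool }()
        select {
        case buf := <-ready:
            return buf, nil
        case <-ctx.Done():
            go func() { pool <- <-ready }()
            return nil, errClientDisconnect
        }
    }

    func main() {
        pool := make(chan []byte, 1) // empty pool: acquire must wait
        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
        defer cancel()
        if _, err := acquire(ctx, pool); err != nil {
            fmt.Println(err) // client disconnected
        }
        pool <- make([]byte, 64) // now a buffer is available
        buf, err := acquire(context.Background(), pool)
        fmt.Println(len(buf), err) // 64 <nil>
    }
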
-
-func (rtr *router) handleTOUCH(resp http.ResponseWriter, req *http.Request) {
-       if !rtr.isSystemAuth(GetAPIToken(req)) {
-               http.Error(resp, UnauthorizedError.Error(), UnauthorizedError.HTTPCode)
-               return
-       }
-       hash := mux.Vars(req)["hash"]
-       vols := rtr.volmgr.AllWritable()
-       if len(vols) == 0 {
-               http.Error(resp, "no volumes", http.StatusNotFound)
-               return
-       }
-       var err error
-       for _, mnt := range vols {
-               err = mnt.Touch(hash)
-               if err == nil {
-                       break
-               }
-       }
-       switch {
-       case err == nil:
-               return
-       case os.IsNotExist(err):
-               http.Error(resp, err.Error(), http.StatusNotFound)
-       default:
-               http.Error(resp, err.Error(), http.StatusInternalServerError)
-       }
-}
-
-func (rtr *router) handlePUT(resp http.ResponseWriter, req *http.Request) {
-       hash := mux.Vars(req)["hash"]
-
-       // Detect as many error conditions as possible before reading
-       // the body: avoid transmitting data that will not end up
-       // being written anyway.
-
-       if req.ContentLength == -1 {
-               http.Error(resp, SizeRequiredError.Error(), SizeRequiredError.HTTPCode)
-               return
-       }
-
-       if req.ContentLength > BlockSize {
-               http.Error(resp, TooLongError.Error(), TooLongError.HTTPCode)
-               return
-       }
-
-       if len(rtr.volmgr.AllWritable()) == 0 {
-               http.Error(resp, FullError.Error(), FullError.HTTPCode)
-               return
-       }
-
-       var wantStorageClasses []string
-       if hdr := req.Header.Get("X-Keep-Storage-Classes"); hdr != "" {
-               wantStorageClasses = strings.Split(hdr, ",")
-               for i, sc := range wantStorageClasses {
-                       wantStorageClasses[i] = strings.TrimSpace(sc)
-               }
-       } else {
-               // none specified -- use configured default
-               for class, cfg := range rtr.cluster.StorageClasses {
-                       if cfg.Default {
-                               wantStorageClasses = append(wantStorageClasses, class)
-                       }
-               }
-       }
-
-       buf, err := getBufferWithContext(req.Context(), bufs, int(req.ContentLength))
-       if err != nil {
-               http.Error(resp, err.Error(), http.StatusServiceUnavailable)
-               return
-       }
-
-       _, err = io.ReadFull(req.Body, buf)
-       if err != nil {
-               http.Error(resp, err.Error(), 500)
-               bufs.Put(buf)
-               return
-       }
-
-       result, err := PutBlock(req.Context(), rtr.volmgr, buf, hash, wantStorageClasses)
-       bufs.Put(buf)
-
-       if err != nil {
-               code := http.StatusInternalServerError
-               if err, ok := err.(*KeepError); ok {
-                       code = err.HTTPCode
-               }
-               http.Error(resp, err.Error(), code)
-               return
-       }
-
-       // Success; add a size hint, sign the locator if possible, and
-       // return it to the client.
-       returnHash := fmt.Sprintf("%s+%d", hash, req.ContentLength)
-       apiToken := GetAPIToken(req)
-       if rtr.cluster.Collections.BlobSigningKey != "" && apiToken != "" {
-               expiry := time.Now().Add(rtr.cluster.Collections.BlobSigningTTL.Duration())
-               returnHash = SignLocator(rtr.cluster, returnHash, apiToken, expiry)
-       }
-       resp.Header().Set("X-Keep-Replicas-Stored", result.TotalReplication())
-       resp.Header().Set("X-Keep-Storage-Classes-Confirmed", result.ClassReplication())
-       resp.Write([]byte(returnHash + "\n"))
-}
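
For orientation, a hedged client-side sketch of exercising this handler: it stores a small block, asking for the "default" storage class, and reads back the confirmation headers set above. The server URL and token are hypothetical placeholders.

    package main

    import (
        "bytes"
        "crypto/md5"
        "fmt"
        "io"
        "net/http"
    )

    func main() {
        // Hypothetical keepstore endpoint and token, for illustration only.
        const server = "http://keep0.example:25107"
        const token = "EXAMPLE-TOKEN"

        block := []byte("foo")
        hash := fmt.Sprintf("%x", md5.Sum(block))

        req, err := http.NewRequest("PUT", server+"/"+hash, bytes.NewReader(block))
        if err != nil {
            panic(err)
        }
        req.Header.Set("Authorization", "OAuth2 "+token)
        req.Header.Set("X-Keep-Storage-Classes", "default")

        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            fmt.Println(err)
            return
        }
        defer resp.Body.Close()
        body, _ := io.ReadAll(resp.Body)

        // On success the body is the locator with a size hint appended,
        // plus a "+A..." signature when a blob signing key is configured.
        fmt.Println(resp.Status)
        fmt.Println(string(body))
        fmt.Println(resp.Header.Get("X-Keep-Replicas-Stored"))
        fmt.Println(resp.Header.Get("X-Keep-Storage-Classes-Confirmed"))
    }
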
-
-// handleIndex responds to "/index", "/index/{prefix}", and
-// "/mounts/{uuid}/blocks" requests.
-func (rtr *router) handleIndex(resp http.ResponseWriter, req *http.Request) {
-       if !rtr.isSystemAuth(GetAPIToken(req)) {
-               http.Error(resp, UnauthorizedError.Error(), UnauthorizedError.HTTPCode)
-               return
-       }
-
-       prefix := mux.Vars(req)["prefix"]
-       if prefix == "" {
-               req.ParseForm()
-               prefix = req.Form.Get("prefix")
-       }
-
-       uuid := mux.Vars(req)["uuid"]
-
-       var vols []*VolumeMount
-       if uuid == "" {
-               vols = rtr.volmgr.AllReadable()
-       } else if mnt := rtr.volmgr.Lookup(uuid, false); mnt == nil {
-               http.Error(resp, "mount not found", http.StatusNotFound)
-               return
-       } else {
-               vols = []*VolumeMount{mnt}
-       }
-
-       for _, v := range vols {
-               if err := v.IndexTo(prefix, resp); err != nil {
-                       // We can't send an error status/message to
-                       // the client because IndexTo() might have
-                       // already written body content. All we can do
-                       // is log the error in our own logs.
-                       //
-                       // The client must notice the lack of trailing
-                       // newline as an indication that the response
-                       // is incomplete.
-                       ctxlog.FromContext(req.Context()).WithError(err).Errorf("truncating index response after error from volume %s", v)
-                       return
-               }
-       }
-       // An empty line at EOF is the only way the client can be
-       // assured the entire index was received.
-       resp.Write([]byte{'\n'})
-}
-
-// MountsHandler responds to "GET /mounts" requests.
-func (rtr *router) MountsHandler(resp http.ResponseWriter, req *http.Request) {
-       err := json.NewEncoder(resp).Encode(rtr.volmgr.Mounts())
-       if err != nil {
-               httpserver.Error(resp, err.Error(), http.StatusInternalServerError)
-       }
-}
-
-// PoolStatus reports buffer pool usage, included in the /status.json response.
-type PoolStatus struct {
-       Alloc uint64 `json:"BytesAllocatedCumulative"`
-       Cap   int    `json:"BuffersMax"`
-       Len   int    `json:"BuffersInUse"`
-}
-
-type volumeStatusEnt struct {
-       Label         string
-       Status        *VolumeStatus `json:",omitempty"`
-       VolumeStats   *ioStats      `json:",omitempty"`
-       InternalStats interface{}   `json:",omitempty"`
-}
-
-// NodeStatus is the response body for /status.json requests.
-type NodeStatus struct {
-       Volumes         []*volumeStatusEnt
-       BufferPool      PoolStatus
-       PullQueue       WorkQueueStatus
-       TrashQueue      WorkQueueStatus
-       RequestsCurrent int
-       RequestsMax     int
-       Version         string
-}
-
-var st NodeStatus
-var stLock sync.Mutex
-
-// DebugHandler addresses /debug.json requests.
-func (rtr *router) DebugHandler(resp http.ResponseWriter, req *http.Request) {
-       type debugStats struct {
-               MemStats runtime.MemStats
-       }
-       var ds debugStats
-       runtime.ReadMemStats(&ds.MemStats)
-       data, err := json.Marshal(&ds)
-       if err != nil {
-               http.Error(resp, err.Error(), http.StatusInternalServerError)
-               return
-       }
-       resp.Write(data)
-}
-
-// StatusHandler addresses /status.json requests.
-func (rtr *router) StatusHandler(resp http.ResponseWriter, req *http.Request) {
-       stLock.Lock()
-       rtr.readNodeStatus(&st)
-       data, err := json.Marshal(&st)
-       stLock.Unlock()
-       if err != nil {
-               http.Error(resp, err.Error(), http.StatusInternalServerError)
-               return
-       }
-       resp.Write(data)
-}
-
-// readNodeStatus populates the given NodeStatus struct with current values.
-func (rtr *router) readNodeStatus(st *NodeStatus) {
-       st.Version = strings.SplitN(cmd.Version.String(), " ", 2)[0]
-       vols := rtr.volmgr.AllReadable()
-       if cap(st.Volumes) < len(vols) {
-               st.Volumes = make([]*volumeStatusEnt, len(vols))
-       }
-       st.Volumes = st.Volumes[:0]
-       for _, vol := range vols {
-               var internalStats interface{}
-               if vol, ok := vol.Volume.(InternalStatser); ok {
-                       internalStats = vol.InternalStats()
-               }
-               st.Volumes = append(st.Volumes, &volumeStatusEnt{
-                       Label:         vol.String(),
-                       Status:        vol.Status(),
-                       InternalStats: internalStats,
-                       //VolumeStats: rtr.volmgr.VolumeStats(vol),
-               })
-       }
-       st.BufferPool.Alloc = bufs.Alloc()
-       st.BufferPool.Cap = bufs.Cap()
-       st.BufferPool.Len = bufs.Len()
-       st.PullQueue = getWorkQueueStatus(rtr.pullq)
-       st.TrashQueue = getWorkQueueStatus(rtr.trashq)
-}
-
-// getWorkQueueStatus returns a WorkQueueStatus for the given queue. If q is
-// nil (which should never happen except in test suites), it returns a zero
-// status value instead of crashing.
-func getWorkQueueStatus(q *WorkQueue) WorkQueueStatus {
-       if q == nil {
-               // This should only happen during tests.
-               return WorkQueueStatus{}
-       }
-       return q.Status()
-}
-
-// handleDELETE processes DELETE requests.
-//
-// DELETE /{hash:[0-9a-f]{32}} will delete the block with the specified hash
-// from all connected volumes.
-//
-// Only the Data Manager, or an Arvados admin with scope "all", is
-// allowed to issue DELETE requests.  If a DELETE request is not
-// authenticated or is issued by a non-admin user, the server returns
-// a PermissionError.
-//
-// Upon receiving a valid request from an authorized user,
-// handleDELETE deletes all copies of the specified block on local
-// writable volumes.
-//
-// Response format:
-//
-// If the requested block was not found on any volume, the response
-// code is HTTP 404 Not Found.
-//
-// Otherwise, the response code is 200 OK, with a response body
-// consisting of the JSON message
-//
-//     {"copies_deleted":d,"copies_failed":f}
-//
-// where d and f are integers representing the number of copies that
-// were successfully and unsuccessfully deleted.
-func (rtr *router) handleDELETE(resp http.ResponseWriter, req *http.Request) {
-       hash := mux.Vars(req)["hash"]
-
-       // Confirm that this user is an admin and has a token with unlimited scope.
-       var tok = GetAPIToken(req)
-       if tok == "" || !rtr.canDelete(tok) {
-               http.Error(resp, PermissionError.Error(), PermissionError.HTTPCode)
-               return
-       }
-
-       if !rtr.cluster.Collections.BlobTrash {
-               http.Error(resp, MethodDisabledError.Error(), MethodDisabledError.HTTPCode)
-               return
-       }
-
-       // Delete copies of this block from all available volumes.
-       // Report how many blocks were successfully deleted, and how
-       // many were found on writable volumes but not deleted.
-       var result struct {
-               Deleted int `json:"copies_deleted"`
-               Failed  int `json:"copies_failed"`
-       }
-       for _, vol := range rtr.volmgr.Mounts() {
-               if !vol.KeepMount.AllowTrash {
-                       continue
-               } else if err := vol.Trash(hash); err == nil {
-                       result.Deleted++
-               } else if os.IsNotExist(err) {
-                       continue
-               } else {
-                       result.Failed++
-                       ctxlog.FromContext(req.Context()).WithError(err).Errorf("Trash(%s) failed on volume %s", hash, vol)
-               }
-       }
-       if result.Deleted == 0 && result.Failed == 0 {
-               resp.WriteHeader(http.StatusNotFound)
-               return
-       }
-       body, err := json.Marshal(result)
-       if err != nil {
-               http.Error(resp, err.Error(), http.StatusInternalServerError)
-               return
-       }
-       resp.Write(body)
-}
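
A hedged client-side sketch of the request/response shape documented above: issue the DELETE with the cluster's system root token and decode the copies_deleted/copies_failed counts. The endpoint, hash, and token are hypothetical placeholders.

    package main

    import (
        "encoding/json"
        "fmt"
        "net/http"
    )

    func main() {
        // Hypothetical keepstore endpoint, block hash, and root token.
        const server = "http://keep0.example:25107"
        const hash = "acbd18db4cc2f85cedef654fccc4a4d8"
        const systemRootToken = "EXAMPLE-SYSTEM-ROOT-TOKEN"

        req, err := http.NewRequest("DELETE", server+"/"+hash, nil)
        if err != nil {
            panic(err)
        }
        req.Header.Set("Authorization", "OAuth2 "+systemRootToken)

        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            fmt.Println(err)
            return
        }
        defer resp.Body.Close()

        // 200 OK comes with {"copies_deleted":d,"copies_failed":f};
        // 404 means the block was not found on any volume.
        var result struct {
            Deleted int `json:"copies_deleted"`
            Failed  int `json:"copies_failed"`
        }
        if resp.StatusCode == http.StatusOK {
            json.NewDecoder(resp.Body).Decode(&result)
        }
        fmt.Println(resp.Status, result.Deleted, result.Failed)
    }
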
-
-/* handlePull processes "PUT /pull" requests for the data manager.
-   The request body is a JSON message containing a list of pull
-   requests in the following format:
-
-   [
-      {
-         "locator":"e4d909c290d0fb1ca068ffaddf22cbd0+4985",
-         "servers":[
-            "keep0.qr1hi.arvadosapi.com:25107",
-            "keep1.qr1hi.arvadosapi.com:25108"
-         ]
-      },
-      {
-         "locator":"55ae4d45d2db0793d53f03e805f656e5+658395",
-         "servers":[
-            "10.0.1.5:25107",
-            "10.0.1.6:25107",
-            "10.0.1.7:25108"
-         ]
-      },
-      ...
-   ]
-
-   Each pull request in the list consists of a block locator string
-   and an ordered list of servers.  Keepstore should try to fetch the
-   block from each server in turn.
-
-   If the request has not been sent by the Data Manager, return 401
-   Unauthorized.
-
-   If the JSON unmarshalling fails, return 400 Bad Request.
-*/
-
-// PullRequest consists of a block locator and an ordered list of servers
-type PullRequest struct {
-       Locator string   `json:"locator"`
-       Servers []string `json:"servers"`
-
-       // Destination mount, or "" for "anywhere"
-       MountUUID string `json:"mount_uuid"`
-}
-
-// handlePull processes "PUT /pull" requests for the data manager.
-func (rtr *router) handlePull(resp http.ResponseWriter, req *http.Request) {
-       // Reject unauthorized requests.
-       if !rtr.isSystemAuth(GetAPIToken(req)) {
-               http.Error(resp, UnauthorizedError.Error(), UnauthorizedError.HTTPCode)
-               return
-       }
-
-       // Parse the request body.
-       var pr []PullRequest
-       r := json.NewDecoder(req.Body)
-       if err := r.Decode(&pr); err != nil {
-               http.Error(resp, err.Error(), BadRequestError.HTTPCode)
-               return
-       }
-
-       // We have a properly formatted pull list sent from the data
-       // manager.  Report success and send the list to the pull list
-       // manager for further handling.
-       resp.WriteHeader(http.StatusOK)
-       resp.Write([]byte(
-               fmt.Sprintf("Received %d pull requests\n", len(pr))))
-
-       plist := list.New()
-       for _, p := range pr {
-               plist.PushBack(p)
-       }
-       rtr.pullq.ReplaceQueue(plist)
-}
-
-// TrashRequest consists of a block locator and its Mtime
-type TrashRequest struct {
-       Locator    string `json:"locator"`
-       BlockMtime int64  `json:"block_mtime"`
-
-       // Target mount, or "" for "everywhere"
-       MountUUID string `json:"mount_uuid"`
-}
-
-// handleTrash processes "PUT /trash" requests for the data manager.
-func (rtr *router) handleTrash(resp http.ResponseWriter, req *http.Request) {
-       // Reject unauthorized requests.
-       if !rtr.isSystemAuth(GetAPIToken(req)) {
-               http.Error(resp, UnauthorizedError.Error(), UnauthorizedError.HTTPCode)
-               return
-       }
-
-       // Parse the request body.
-       var trash []TrashRequest
-       r := json.NewDecoder(req.Body)
-       if err := r.Decode(&trash); err != nil {
-               http.Error(resp, err.Error(), BadRequestError.HTTPCode)
-               return
-       }
-
-       // We have a properly formatted trash list sent from the data
-       // manager.  Report success and send the list to the trash work
-       // queue for further handling.
-       resp.WriteHeader(http.StatusOK)
-       resp.Write([]byte(
-               fmt.Sprintf("Received %d trash requests\n", len(trash))))
-
-       tlist := list.New()
-       for _, t := range trash {
-               tlist.PushBack(t)
-       }
-       rtr.trashq.ReplaceQueue(tlist)
-}
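
Correspondingly, a hedged sketch of building and submitting a trash list from the client side; the payload mirrors the TrashRequest JSON fields above, while the endpoint and token are hypothetical placeholders:

    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
        "net/http"
        "time"
    )

    func main() {
        // Hypothetical keepstore endpoint and cluster root token.
        const server = "http://keep0.example:25107"
        const systemRootToken = "EXAMPLE-SYSTEM-ROOT-TOKEN"

        // Each entry matches the TrashRequest JSON shape: locator + block_mtime.
        type trashRequest struct {
            Locator    string `json:"locator"`
            BlockMtime int64  `json:"block_mtime"`
        }
        list := []trashRequest{{
            Locator:    "acbd18db4cc2f85cedef654fccc4a4d8",
            BlockMtime: time.Now().Add(-14 * 24 * time.Hour).Unix(),
        }}

        body, _ := json.Marshal(list)
        req, err := http.NewRequest("PUT", server+"/trash", bytes.NewReader(body))
        if err != nil {
            panic(err)
        }
        req.Header.Set("Authorization", "OAuth2 "+systemRootToken)

        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            fmt.Println(err)
            return
        }
        defer resp.Body.Close()
        // Expected success body: "Received 1 trash requests\n"
        fmt.Println(resp.Status)
    }
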
-
-// handleUntrash processes "PUT /untrash/{hash:[0-9a-f]{32}}" requests for the data manager.
-func (rtr *router) handleUntrash(resp http.ResponseWriter, req *http.Request) {
-       // Reject unauthorized requests.
-       if !rtr.isSystemAuth(GetAPIToken(req)) {
-               http.Error(resp, UnauthorizedError.Error(), UnauthorizedError.HTTPCode)
-               return
-       }
-
-       log := ctxlog.FromContext(req.Context())
-       hash := mux.Vars(req)["hash"]
-
-       if len(rtr.volmgr.AllWritable()) == 0 {
-               http.Error(resp, "No writable volumes", http.StatusNotFound)
-               return
-       }
-
-       var untrashedOn, failedOn []string
-       var numNotFound int
-       for _, vol := range rtr.volmgr.AllWritable() {
-               err := vol.Untrash(hash)
-
-               if os.IsNotExist(err) {
-                       numNotFound++
-               } else if err != nil {
-                       log.WithError(err).Errorf("Error untrashing %v on volume %s", hash, vol)
-                       failedOn = append(failedOn, vol.String())
-               } else {
-                       log.Infof("Untrashed %v on volume %v", hash, vol.String())
-                       untrashedOn = append(untrashedOn, vol.String())
-               }
-       }
-
-       if numNotFound == len(rtr.volmgr.AllWritable()) {
-               http.Error(resp, "Block not found on any of the writable volumes", http.StatusNotFound)
-       } else if len(failedOn) == len(rtr.volmgr.AllWritable()) {
-               http.Error(resp, "Failed to untrash on all writable volumes", http.StatusInternalServerError)
-       } else {
-               respBody := "Successfully untrashed on: " + strings.Join(untrashedOn, ", ")
-               if len(failedOn) > 0 {
-                       respBody += "; Failed to untrash on: " + strings.Join(failedOn, ", ")
-                       http.Error(resp, respBody, http.StatusInternalServerError)
-               } else {
-                       fmt.Fprintln(resp, respBody)
-               }
-       }
-}
-
-// GetBlock and PutBlock implement lower-level code for handling
-// blocks by rooting through volumes connected to the local machine.
-// Once the handler has determined that system policy permits the
-// request, it calls these methods to perform the actual operation.
-//
-// TODO(twp): this code would probably be better located in the
-// VolumeManager interface. As an abstraction, the VolumeManager
-// should be the only part of the code that cares about which volume a
-// block is stored on, so it should be responsible for figuring out
-// which volume to check for fetching blocks, storing blocks, etc.
-
-// GetBlock fetches the block identified by "hash" into the provided
-// buf, and returns the data size.
-//
-// If the block cannot be found on any volume, returns NotFoundError.
-//
-// If the block found does not have the correct MD5 hash, returns
-// DiskHashError.
-func GetBlock(ctx context.Context, volmgr *RRVolumeManager, hash string, buf []byte, resp http.ResponseWriter) (int, error) {
-       log := ctxlog.FromContext(ctx)
-
-       // Attempt to read the requested hash from a keep volume.
-       errorToCaller := NotFoundError
-
-       for _, vol := range volmgr.AllReadable() {
-               size, err := vol.Get(ctx, hash, buf)
-               select {
-               case <-ctx.Done():
-                       return 0, ErrClientDisconnect
-               default:
-               }
-               if err != nil {
-                       // IsNotExist is an expected error and may be
-                       // ignored. All other errors are logged. In
-                       // any case we continue trying to read other
-                       // volumes. If all volumes report IsNotExist,
-                       // we return a NotFoundError.
-                       if !os.IsNotExist(err) {
-                               log.WithError(err).Errorf("Get(%s) failed on %s", hash, vol)
-                       }
-                       // If some volume returns a transient error, return it to the caller
-                       // instead of "Not found" so it can retry.
-                       if err == VolumeBusyError {
-                               errorToCaller = err.(*KeepError)
-                       }
-                       continue
-               }
-               // Check the file checksum.
-               filehash := fmt.Sprintf("%x", md5.Sum(buf[:size]))
-               if filehash != hash {
-                       // TODO: Try harder to tell a sysadmin about
-                       // this.
-                       log.Errorf("checksum mismatch for block %s (actual %s), size %d on %s", hash, filehash, size, vol)
-                       errorToCaller = DiskHashError
-                       continue
-               }
-               if errorToCaller == DiskHashError {
-                       log.Warnf("after checksum mismatch for block %s on a different volume, a good copy was found on volume %s and returned", hash, vol)
-               }
-               return size, nil
-       }
-       return 0, errorToCaller
-}
-
-type putProgress struct {
-       classNeeded      map[string]bool
-       classTodo        map[string]bool
-       mountUsed        map[*VolumeMount]bool
-       totalReplication int
-       classDone        map[string]int
-}
-
-// Number of distinct replicas stored. "2" can mean the block was
-// stored on 2 different volumes with replication 1, or on 1 volume
-// with replication 2.
-func (pr putProgress) TotalReplication() string {
-       return strconv.Itoa(pr.totalReplication)
-}
-
-// Number of replicas satisfying each storage class, formatted like
-// "default=2; special=1".
-func (pr putProgress) ClassReplication() string {
-       s := ""
-       for k, v := range pr.classDone {
-               if len(s) > 0 {
-                       s += ", "
-               }
-               s += k + "=" + strconv.Itoa(v)
-       }
-       return s
-}
-
-func (pr *putProgress) Add(mnt *VolumeMount) {
-       if pr.mountUsed[mnt] {
-               logrus.Warnf("BUG? superfluous extra write to mount %s", mnt.UUID)
-               return
-       }
-       pr.mountUsed[mnt] = true
-       pr.totalReplication += mnt.Replication
-       for class := range mnt.StorageClasses {
-               pr.classDone[class] += mnt.Replication
-               delete(pr.classTodo, class)
-       }
-}
-
-func (pr *putProgress) Sub(mnt *VolumeMount) {
-       if !pr.mountUsed[mnt] {
-               logrus.Warnf("BUG? Sub called with no prior matching Add: %s", mnt.UUID)
-               return
-       }
-       pr.mountUsed[mnt] = false
-       pr.totalReplication -= mnt.Replication
-       for class := range mnt.StorageClasses {
-               pr.classDone[class] -= mnt.Replication
-               if pr.classNeeded[class] {
-                       pr.classTodo[class] = true
-               }
-       }
-}
-
-func (pr *putProgress) Done() bool {
-       return len(pr.classTodo) == 0 && pr.totalReplication > 0
-}
-
-func (pr *putProgress) Want(mnt *VolumeMount) bool {
-       if pr.Done() || pr.mountUsed[mnt] {
-               return false
-       }
-       if len(pr.classTodo) == 0 {
-               // none specified == "any"
-               return true
-       }
-       for class := range mnt.StorageClasses {
-               if pr.classTodo[class] {
-                       return true
-               }
-       }
-       return false
-}
-
-func (pr *putProgress) Copy() *putProgress {
-       cp := putProgress{
-               classNeeded:      pr.classNeeded,
-               classTodo:        make(map[string]bool, len(pr.classTodo)),
-               classDone:        make(map[string]int, len(pr.classDone)),
-               mountUsed:        make(map[*VolumeMount]bool, len(pr.mountUsed)),
-               totalReplication: pr.totalReplication,
-       }
-       for k, v := range pr.classTodo {
-               cp.classTodo[k] = v
-       }
-       for k, v := range pr.classDone {
-               cp.classDone[k] = v
-       }
-       for k, v := range pr.mountUsed {
-               cp.mountUsed[k] = v
-       }
-       return &cp
-}
-
-func newPutProgress(classes []string) putProgress {
-       pr := putProgress{
-               classNeeded: make(map[string]bool, len(classes)),
-               classTodo:   make(map[string]bool, len(classes)),
-               classDone:   map[string]int{},
-               mountUsed:   map[*VolumeMount]bool{},
-       }
-       for _, c := range classes {
-               if c != "" {
-                       pr.classNeeded[c] = true
-                       pr.classTodo[c] = true
-               }
-       }
-       return pr
-}
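An illustrative sketch (not part of this commit) of the bookkeeping these helpers perform, using the removed types above. It assumes the same package context with fmt and the arvados SDK imported; the mount literal is a stand-in for one obtained from the volume manager.

func examplePutProgress() {
	// Ask for one replica in each of two storage classes.
	pr := newPutProgress([]string{"default", "archive"})
	mnt := &VolumeMount{KeepMount: arvados.KeepMount{
		UUID:           "zzzzz-nyw5e-000000000000000",
		Replication:    2,
		StorageClasses: map[string]bool{"default": true},
	}}
	fmt.Println(pr.Want(mnt))          // true: "default" is still unsatisfied
	pr.Add(mnt)
	fmt.Println(pr.TotalReplication()) // "2"
	fmt.Println(pr.ClassReplication()) // "default=2"
	fmt.Println(pr.Done())             // false: "archive" still needs a replica
}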
-
-// PutBlock stores the given block on one or more volumes.
-//
-// The MD5 checksum of the block must match the given hash.
-//
-// The block is written to each writable volume (ordered by priority
-// and then UUID, see volume.go) until at least one replica has been
-// stored in each of the requested storage classes.
-//
-// The returned error, if any, is a KeepError with one of the
-// following codes:
-//
-// 500 Collision
-//
-//     A different block with the same hash already exists on this
-//     Keep server.
-//
-// 422 MD5Fail
-//
-//     The MD5 hash of the BLOCK does not match the argument HASH.
-//
-// 503 Full
-//
-//     There was not enough space left in any Keep volume to store
-//     the object.
-//
-// 500 Fail
-//
-//     The object could not be stored for some other reason (e.g.
-//     all writes failed). The text of the error message should
-//     provide as much detail as possible.
-func PutBlock(ctx context.Context, volmgr *RRVolumeManager, block []byte, hash string, wantStorageClasses []string) (putProgress, error) {
-       log := ctxlog.FromContext(ctx)
-
-       // Check that BLOCK's checksum matches HASH.
-       blockhash := fmt.Sprintf("%x", md5.Sum(block))
-       if blockhash != hash {
-               log.Printf("%s: MD5 checksum %s did not match request", hash, blockhash)
-               return putProgress{}, RequestHashError
-       }
-
-       result := newPutProgress(wantStorageClasses)
-
-       // If we already have this data, it's intact on disk, and we
-       // can update its timestamp, return success. If we have
-       // different data with the same hash, return failure.
-       if err := CompareAndTouch(ctx, volmgr, hash, block, &result); err != nil || result.Done() {
-               return result, err
-       }
-       if ctx.Err() != nil {
-               return result, ErrClientDisconnect
-       }
-
-       writables := volmgr.NextWritable()
-       if len(writables) == 0 {
-               log.Error("no writable volumes")
-               return result, FullError
-       }
-
-       var wg sync.WaitGroup
-       var mtx sync.Mutex
-       cond := sync.Cond{L: &mtx}
-       // pending predicts what result will be if all pending writes
-       // succeed.
-       pending := result.Copy()
-       var allFull atomic.Value
-       allFull.Store(true)
-
-       // We hold the lock for the duration of the "each volume" loop
-       // below, except when it is released during cond.Wait().
-       mtx.Lock()
-
-       for _, mnt := range writables {
-               // Wait until our decision to use this mount does not
-               // depend on the outcome of pending writes.
-               for result.Want(mnt) && !pending.Want(mnt) {
-                       cond.Wait()
-               }
-               if !result.Want(mnt) {
-                       continue
-               }
-               mnt := mnt
-               pending.Add(mnt)
-               wg.Add(1)
-               go func() {
-                       log.Debugf("PutBlock: start write to %s", mnt.UUID)
-                       defer wg.Done()
-                       err := mnt.Put(ctx, hash, block)
-
-                       mtx.Lock()
-                       if err != nil {
-                               log.Debugf("PutBlock: write to %s failed", mnt.UUID)
-                               pending.Sub(mnt)
-                       } else {
-                               log.Debugf("PutBlock: write to %s succeeded", mnt.UUID)
-                               result.Add(mnt)
-                       }
-                       cond.Broadcast()
-                       mtx.Unlock()
-
-                       if err != nil && err != FullError && ctx.Err() == nil {
-                               // The volume is not full but the
-                               // write did not succeed.  Report the
-                               // error and continue trying.
-                               allFull.Store(false)
-                               log.WithError(err).Errorf("%s: Put(%s) failed", mnt.Volume, hash)
-                       }
-               }()
-       }
-       mtx.Unlock()
-       wg.Wait()
-       if ctx.Err() != nil {
-               return result, ErrClientDisconnect
-       }
-       if result.Done() {
-               return result, nil
-       }
-
-       if result.totalReplication > 0 {
-               // Some, but not all, of the storage classes were
-               // satisfied. This qualifies as success.
-               return result, nil
-       } else if allFull.Load().(bool) {
-               log.Error("all volumes with qualifying storage classes are full")
-               return putProgress{}, FullError
-       } else {
-               // Already logged the non-full errors.
-               return putProgress{}, GenericError
-       }
-}
-
-// CompareAndTouch looks for volumes where the given content already
-// exists and its modification time can be updated (i.e., it is
-// protected from garbage collection), and updates result accordingly.
-// It returns when the result is Done() or all volumes have been
-// checked.
-func CompareAndTouch(ctx context.Context, volmgr *RRVolumeManager, hash string, buf []byte, result *putProgress) error {
-       log := ctxlog.FromContext(ctx)
-       for _, mnt := range volmgr.AllWritable() {
-               if !result.Want(mnt) {
-                       continue
-               }
-               err := mnt.Compare(ctx, hash, buf)
-               if ctx.Err() != nil {
-                       return nil
-               } else if err == CollisionError {
-                       // Stop if we have a block with same hash but
-                       // different content. (It will be impossible
-                       // to tell which one is wanted if we have
-                       // both, so there's no point writing it even
-                       // on a different volume.)
-                       log.Errorf("collision in Compare(%s) on volume %s", hash, mnt.Volume)
-                       return CollisionError
-               } else if os.IsNotExist(err) {
-                       // Block does not exist. This is the only
-                       // "normal" error: we don't log anything.
-                       continue
-               } else if err != nil {
-                       // Couldn't open file, data is corrupt on
-                       // disk, etc.: log this abnormal condition,
-                       // and try the next volume.
-                       log.WithError(err).Warnf("error in Compare(%s) on volume %s", hash, mnt.Volume)
-                       continue
-               }
-               if err := mnt.Touch(hash); err != nil {
-                       log.WithError(err).Errorf("error in Touch(%s) on volume %s", hash, mnt.Volume)
-                       continue
-               }
-               // Compare and Touch both worked --> done.
-               result.Add(mnt)
-               if result.Done() {
-                       return nil
-               }
-       }
-       return nil
-}
-
-var validLocatorRe = regexp.MustCompile(`^[0-9a-f]{32}$`)
-
-// IsValidLocator returns true if the specified string is a valid Keep
-// locator.  When Keep is extended to support hash types other than
-// MD5, this should be updated to cover those as well.
-func IsValidLocator(loc string) bool {
-       return validLocatorRe.MatchString(loc)
-}
-
-var authRe = regexp.MustCompile(`^(OAuth2|Bearer)\s+(.*)`)
-
-// GetAPIToken returns the OAuth2 token from the Authorization
-// header of a HTTP request, or an empty string if no matching
-// token is found.
-func GetAPIToken(req *http.Request) string {
-       if auth, ok := req.Header["Authorization"]; ok {
-               if match := authRe.FindStringSubmatch(auth[0]); match != nil {
-                       return match[2]
-               }
-       }
-       return ""
-}
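A brief sketch (not part of this commit) of the Authorization header forms the removed helper accepts; the token value is made up.

func exampleGetAPIToken() {
	req, _ := http.NewRequest("GET", "/acbd18db4cc2f85cedef654fccc4a4d8+3", nil)
	req.Header.Set("Authorization", "Bearer some-user-token")
	fmt.Println(GetAPIToken(req)) // some-user-token

	req.Header.Set("Authorization", "OAuth2 some-user-token")
	fmt.Println(GetAPIToken(req)) // some-user-token
}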
-
-// canDelete returns true if the user identified by apiToken is
-// allowed to delete blocks.
-func (rtr *router) canDelete(apiToken string) bool {
-       if apiToken == "" {
-               return false
-       }
-       // Blocks may be deleted only when Keep has been configured with a
-       // data manager.
-       if rtr.isSystemAuth(apiToken) {
-               return true
-       }
-       // TODO(twp): look up apiToken with the API server
-       // return true if is_admin is true and if the token
-       // has unlimited scope
-       return false
-}
-
-// isSystemAuth returns true if the given token is allowed to perform
-// system level actions like deleting data.
-func (rtr *router) isSystemAuth(token string) bool {
-       return token != "" && token == rtr.cluster.SystemRootToken
-}
diff --git a/services/keepstore/hashcheckwriter.go b/services/keepstore/hashcheckwriter.go
new file mode 100644 (file)
index 0000000..f191c98
--- /dev/null
@@ -0,0 +1,68 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package keepstore
+
+import (
+       "fmt"
+       "hash"
+       "io"
+)
+
+type hashCheckWriter struct {
+       writer       io.Writer
+       hash         hash.Hash
+       expectSize   int64
+       expectDigest string
+
+       offset int64
+}
+
+// newHashCheckWriter returns a writer that writes through to writer,
+// but stops short if the written content reaches expectSize bytes and
+// does not match expectDigest according to the given hash function.
+//
+// It returns a write error if more than expectSize bytes are written.
+//
+// Thus, in case of a hash mismatch, fewer than expectSize bytes are
+// written through.
+func newHashCheckWriter(writer io.Writer, hash hash.Hash, expectSize int64, expectDigest string) io.Writer {
+       return &hashCheckWriter{
+               writer:       writer,
+               hash:         hash,
+               expectSize:   expectSize,
+               expectDigest: expectDigest,
+       }
+}
+
+func (hcw *hashCheckWriter) Write(p []byte) (int, error) {
+       if todo := hcw.expectSize - hcw.offset - int64(len(p)); todo < 0 {
+               // Writing beyond expected size returns a checksum
+               // error without even checking the hash.
+               return 0, errChecksum
+       } else if todo > 0 {
+               // This isn't the last write, so we pass it through.
+               _, err := hcw.hash.Write(p)
+               if err != nil {
+                       return 0, err
+               }
+               n, err := hcw.writer.Write(p)
+               hcw.offset += int64(n)
+               return n, err
+       } else {
+               // This is the last write, so we check the hash before
+               // writing through.
+               _, err := hcw.hash.Write(p)
+               if err != nil {
+                       return 0, err
+               }
+               if digest := fmt.Sprintf("%x", hcw.hash.Sum(nil)); digest != hcw.expectDigest {
+                       return 0, errChecksum
+               }
+               // Ensure subsequent write will fail
+               hcw.offset = hcw.expectSize + 1
+               return hcw.writer.Write(p)
+       }
+}
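For illustration (not part of this commit), a sketch of how the wrapper behaves, assuming it is exercised from within the same package so that the unexported constructor and errChecksum are visible, with bytes, crypto/md5, and fmt imported.

func exampleHashCheckWriter() {
	data := []byte("foo")
	digest := fmt.Sprintf("%x", md5.Sum(data)) // expected MD5 of the stored block

	// Matching content is passed through to the underlying writer.
	var good bytes.Buffer
	w := newHashCheckWriter(&good, md5.New(), int64(len(data)), digest)
	_, err := w.Write(data)
	fmt.Println(err, good.String()) // <nil> foo

	// Corrupt content of the expected size is rejected on the final
	// write, so none of the bad bytes reach the underlying writer.
	var bad bytes.Buffer
	w = newHashCheckWriter(&bad, md5.New(), int64(len(data)), digest)
	_, err = w.Write([]byte("bar"))
	fmt.Println(err == errChecksum, bad.Len()) // true 0
}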
index 953aa047cbfa6ab6f7b55630aa30e83732adaf0e..60d062e1e3467b113a137126296f983f969a9f01 100644 (file)
 //
 // SPDX-License-Identifier: AGPL-3.0
 
+// Package keepstore implements the keepstore service component and
+// back-end storage drivers.
+//
+// It is an internal package, only intended to be imported by
+// /cmd/arvados-server and other server-side components in this
+// repository.
 package keepstore
 
 import (
+       "bytes"
+       "context"
+       "crypto/md5"
+       "errors"
+       "fmt"
+       "io"
+       "net/http"
+       "os"
+       "sort"
+       "strconv"
+       "strings"
+       "sync"
+       "sync/atomic"
        "time"
+
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/arvadosclient"
+       "git.arvados.org/arvados.git/sdk/go/auth"
+       "git.arvados.org/arvados.git/sdk/go/ctxlog"
+       "git.arvados.org/arvados.git/sdk/go/httpserver"
+       "git.arvados.org/arvados.git/sdk/go/keepclient"
+       "github.com/prometheus/client_golang/prometheus"
+       "github.com/sirupsen/logrus"
 )
 
-// BlockSize for a Keep "block" is 64MB.
-const BlockSize = 64 * 1024 * 1024
+// BlockSize is the maximum size of a Keep block: 64 MiB.
+const BlockSize = 1 << 26
 
-// MinFreeKilobytes is the amount of space a Keep volume must have available
-// in order to permit writes.
-const MinFreeKilobytes = BlockSize / 1024
+var (
+       errChecksum          = httpserver.ErrorWithStatus(errors.New("checksum mismatch in stored data"), http.StatusBadGateway)
+       errNoTokenProvided   = httpserver.ErrorWithStatus(errors.New("no token provided in Authorization header"), http.StatusUnauthorized)
+       errMethodNotAllowed  = httpserver.ErrorWithStatus(errors.New("method not allowed"), http.StatusMethodNotAllowed)
+       errVolumeUnavailable = httpserver.ErrorWithStatus(errors.New("volume unavailable"), http.StatusServiceUnavailable)
+       errCollision         = httpserver.ErrorWithStatus(errors.New("hash collision"), http.StatusInternalServerError)
+       errExpiredSignature  = httpserver.ErrorWithStatus(errors.New("expired signature"), http.StatusUnauthorized)
+       errInvalidSignature  = httpserver.ErrorWithStatus(errors.New("invalid signature"), http.StatusBadRequest)
+       errInvalidLocator    = httpserver.ErrorWithStatus(errors.New("invalid locator"), http.StatusBadRequest)
+       errFull              = httpserver.ErrorWithStatus(errors.New("insufficient storage"), http.StatusInsufficientStorage)
+       errTooLarge          = httpserver.ErrorWithStatus(errors.New("request entity too large"), http.StatusRequestEntityTooLarge)
+       driver               = make(map[string]volumeDriver)
+)
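These errors carry their HTTP status via httpserver.ErrorWithStatus. As a sketch (not part of this commit; the respondError helper is hypothetical), a caller can recover the status with the same interface assertion the new tests use.

func respondError(w http.ResponseWriter, err error) {
	status := http.StatusInternalServerError
	if he, ok := err.(interface{ HTTPStatus() int }); ok {
		// e.g. errChecksum -> 502, errFull -> 507, errInvalidLocator -> 400
		status = he.HTTPStatus()
	}
	http.Error(w, err.Error(), status)
}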
 
-var bufs *bufferPool
+type indexOptions struct {
+       MountUUID string
+       Prefix    string
+       WriteTo   io.Writer
+}
 
-type KeepError struct {
-       HTTPCode int
-       ErrMsg   string
+type mount struct {
+       arvados.KeepMount
+       volume
+       priority int
 }
 
-var (
-       BadRequestError     = &KeepError{400, "Bad Request"}
-       UnauthorizedError   = &KeepError{401, "Unauthorized"}
-       CollisionError      = &KeepError{500, "Collision"}
-       RequestHashError    = &KeepError{422, "Hash mismatch in request"}
-       PermissionError     = &KeepError{403, "Forbidden"}
-       DiskHashError       = &KeepError{500, "Hash mismatch in stored data"}
-       ExpiredError        = &KeepError{401, "Expired permission signature"}
-       NotFoundError       = &KeepError{404, "Not Found"}
-       VolumeBusyError     = &KeepError{503, "Volume backend busy"}
-       GenericError        = &KeepError{500, "Fail"}
-       FullError           = &KeepError{503, "Full"}
-       SizeRequiredError   = &KeepError{411, "Missing Content-Length"}
-       TooLongError        = &KeepError{413, "Block is too large"}
-       MethodDisabledError = &KeepError{405, "Method disabled"}
-       ErrNotImplemented   = &KeepError{500, "Unsupported configuration"}
-       ErrClientDisconnect = &KeepError{503, "Client disconnected"}
-)
+type keepstore struct {
+       cluster    *arvados.Cluster
+       logger     logrus.FieldLogger
+       serviceURL arvados.URL
+       mounts     map[string]*mount
+       mountsR    []*mount
+       mountsW    []*mount
+       bufferPool *bufferPool
+
+       iostats map[volume]*ioStats
+
+       remoteClients    map[string]*keepclient.KeepClient
+       remoteClientsMtx sync.Mutex
+}
+
+func newKeepstore(ctx context.Context, cluster *arvados.Cluster, token string, reg *prometheus.Registry, serviceURL arvados.URL) (*keepstore, error) {
+       logger := ctxlog.FromContext(ctx)
+
+       if cluster.API.MaxConcurrentRequests > 0 && cluster.API.MaxConcurrentRequests < cluster.API.MaxKeepBlobBuffers {
+               logger.Warnf("Possible configuration mistake: not useful to set API.MaxKeepBlobBuffers (%d) higher than API.MaxConcurrentRequests (%d)", cluster.API.MaxKeepBlobBuffers, cluster.API.MaxConcurrentRequests)
+       }
+
+       if cluster.Collections.BlobSigningKey == "" {
+               if cluster.Collections.BlobSigning {
+                       return nil, errors.New("cannot enable Collections.BlobSigning with no Collections.BlobSigningKey")
+               }
+               logger.Warn("Running without a blob signing key. Block locators returned by this server will not be signed, and will be rejected by a server that enforces permissions. To fix this, configure Collections.BlobSigning and Collections.BlobSigningKey.")
+       }
+
+       if cluster.API.MaxKeepBlobBuffers <= 0 {
+               return nil, fmt.Errorf("API.MaxKeepBlobBuffers must be greater than zero")
+       }
+       bufferPool := newBufferPool(logger, cluster.API.MaxKeepBlobBuffers, reg)
+
+       ks := &keepstore{
+               cluster:       cluster,
+               logger:        logger,
+               serviceURL:    serviceURL,
+               bufferPool:    bufferPool,
+               remoteClients: make(map[string]*keepclient.KeepClient),
+       }
+
+       err := ks.setupMounts(newVolumeMetricsVecs(reg))
+       if err != nil {
+               return nil, err
+       }
+
+       return ks, nil
+}
+
+func (ks *keepstore) setupMounts(metrics *volumeMetricsVecs) error {
+       ks.mounts = make(map[string]*mount)
+       if len(ks.cluster.Volumes) == 0 {
+               return errors.New("no volumes configured")
+       }
+       for uuid, cfgvol := range ks.cluster.Volumes {
+               va, ok := cfgvol.AccessViaHosts[ks.serviceURL]
+               if !ok && len(cfgvol.AccessViaHosts) > 0 {
+                       continue
+               }
+               dri, ok := driver[cfgvol.Driver]
+               if !ok {
+                       return fmt.Errorf("volume %s: invalid driver %q", uuid, cfgvol.Driver)
+               }
+               vol, err := dri(newVolumeParams{
+                       UUID:         uuid,
+                       Cluster:      ks.cluster,
+                       ConfigVolume: cfgvol,
+                       Logger:       ks.logger,
+                       MetricsVecs:  metrics,
+                       BufferPool:   ks.bufferPool,
+               })
+               if err != nil {
+                       return fmt.Errorf("error initializing volume %s: %s", uuid, err)
+               }
+               sc := cfgvol.StorageClasses
+               if len(sc) == 0 {
+                       sc = map[string]bool{"default": true}
+               }
+               repl := cfgvol.Replication
+               if repl < 1 {
+                       repl = 1
+               }
+               pri := 0
+               for class, in := range cfgvol.StorageClasses {
+                       p := ks.cluster.StorageClasses[class].Priority
+                       if in && p > pri {
+                               pri = p
+                       }
+               }
+               mnt := &mount{
+                       volume:   vol,
+                       priority: pri,
+                       KeepMount: arvados.KeepMount{
+                               UUID:           uuid,
+                               DeviceID:       vol.DeviceID(),
+                               AllowWrite:     !va.ReadOnly && !cfgvol.ReadOnly,
+                               AllowTrash:     !va.ReadOnly && (!cfgvol.ReadOnly || cfgvol.AllowTrashWhenReadOnly),
+                               Replication:    repl,
+                               StorageClasses: sc,
+                       },
+               }
+               ks.mounts[uuid] = mnt
+               ks.logger.Printf("started volume %s (%s), AllowWrite=%v, AllowTrash=%v", uuid, vol.DeviceID(), mnt.AllowWrite, mnt.AllowTrash)
+       }
+       if len(ks.mounts) == 0 {
+               return fmt.Errorf("no volumes configured for %s", ks.serviceURL)
+       }
+
+       ks.mountsR = nil
+       ks.mountsW = nil
+       for _, mnt := range ks.mounts {
+               ks.mountsR = append(ks.mountsR, mnt)
+               if mnt.AllowWrite {
+                       ks.mountsW = append(ks.mountsW, mnt)
+               }
+       }
+       // Sorting mounts by UUID makes behavior more predictable, and
+       // is convenient for testing -- for example, "index all
+       // volumes" and "trash block on all volumes" will visit
+       // volumes in predictable order.
+       sort.Slice(ks.mountsR, func(i, j int) bool { return ks.mountsR[i].UUID < ks.mountsR[j].UUID })
+       sort.Slice(ks.mountsW, func(i, j int) bool { return ks.mountsW[i].UUID < ks.mountsW[j].UUID })
+       return nil
+}
+
+// checkLocatorSignature checks that locator has a valid signature.
+// If the BlobSigning config is false, it returns nil even if the
+// signature is invalid or missing.
+func (ks *keepstore) checkLocatorSignature(ctx context.Context, locator string) error {
+       if !ks.cluster.Collections.BlobSigning {
+               return nil
+       }
+       token := ctxToken(ctx)
+       if token == "" {
+               return errNoTokenProvided
+       }
+       err := arvados.VerifySignature(locator, token, ks.cluster.Collections.BlobSigningTTL.Duration(), []byte(ks.cluster.Collections.BlobSigningKey))
+       if err == arvados.ErrSignatureExpired {
+               return errExpiredSignature
+       } else if err != nil {
+               return errInvalidSignature
+       }
+       return nil
+}
+
+// signLocator signs the locator for the given token, if possible.
+// Note this signs if the BlobSigningKey config is available, even if
+// the BlobSigning config is false.
+func (ks *keepstore) signLocator(token, locator string) string {
+       if token == "" || len(ks.cluster.Collections.BlobSigningKey) == 0 {
+               return locator
+       }
+       ttl := ks.cluster.Collections.BlobSigningTTL.Duration()
+       return arvados.SignLocator(locator, token, time.Now().Add(ttl), ttl, []byte(ks.cluster.Collections.BlobSigningKey))
+}
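An illustrative round trip (not part of this commit) through the SDK helpers these two methods wrap; the key, token, and TTL values are made up.

func exampleSignAndVerify() {
	key := []byte("example-blob-signing-key")
	token := "example-user-token"
	ttl := time.Hour
	unsigned := "acbd18db4cc2f85cedef654fccc4a4d8+3"

	signed := arvados.SignLocator(unsigned, token, time.Now().Add(ttl), ttl, key)
	err := arvados.VerifySignature(signed, token, ttl, key)
	fmt.Println(signed != unsigned, err) // true <nil>
}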
+
+func (ks *keepstore) BlockRead(ctx context.Context, opts arvados.BlockReadOptions) (n int, err error) {
+       li, err := getLocatorInfo(opts.Locator)
+       if err != nil {
+               return 0, err
+       }
+       out := opts.WriteTo
+       if rw, ok := out.(http.ResponseWriter); ok && li.size > 0 {
+               out = &setSizeOnWrite{ResponseWriter: rw, size: li.size}
+       }
+       if li.remote && !li.signed {
+               return ks.blockReadRemote(ctx, opts)
+       }
+       if err := ks.checkLocatorSignature(ctx, opts.Locator); err != nil {
+               return 0, err
+       }
+       hashcheck := md5.New()
+       if li.size > 0 {
+               out = newHashCheckWriter(out, hashcheck, int64(li.size), li.hash)
+       } else {
+               out = io.MultiWriter(out, hashcheck)
+       }
+
+       buf, err := ks.bufferPool.GetContext(ctx)
+       if err != nil {
+               return 0, err
+       }
+       defer ks.bufferPool.Put(buf)
+       streamer := newStreamWriterAt(out, 65536, buf)
+       defer streamer.Close()
+
+       var errToCaller error = os.ErrNotExist
+       for _, mnt := range ks.rendezvous(li.hash, ks.mountsR) {
+               if ctx.Err() != nil {
+                       return 0, ctx.Err()
+               }
+               err := mnt.BlockRead(ctx, li.hash, streamer)
+               if err != nil {
+                       if streamer.WroteAt() != 0 {
+                               // BlockRead encountered an error
+                               // after writing some data, so it's
+                               // too late to try another
+                               // volume. Flush streamer before
+                               // calling Wrote() to ensure our
+                               // return value accurately reflects
+                               // the number of bytes written to
+                               // opts.WriteTo.
+                               streamer.Close()
+                               return streamer.Wrote(), err
+                       }
+                       if !os.IsNotExist(err) {
+                               errToCaller = err
+                       }
+                       continue
+               }
+               if li.size == 0 {
+                       // hashCheckWriter isn't in use because we
+                       // don't know the expected size. All we can do
+                       // is check after writing all the data, and
+                       // trust the caller is doing a HEAD request so
+                       // it's not too late to set an error code in
+                       // the response header.
+                       err = streamer.Close()
+                       if hash := fmt.Sprintf("%x", hashcheck.Sum(nil)); hash != li.hash && err == nil {
+                               err = errChecksum
+                       }
+                       if rw, ok := opts.WriteTo.(http.ResponseWriter); ok {
+                               // We didn't set the content-length header
+                               // above because we didn't know the block size
+                               // until now.
+                               rw.Header().Set("Content-Length", fmt.Sprintf("%d", streamer.WroteAt()))
+                       }
+                       return streamer.WroteAt(), err
+               } else if streamer.WroteAt() != li.size {
+                       // If the backend read fewer bytes than
+                       // expected but returns no error, we can
+                       // classify this as a checksum error (even
+                       // though hashCheckWriter doesn't know that
+                       // yet, it's just waiting for the next
+                       // write). If our caller is serving a GET
+                       // request it's too late to do anything about
+                       // it anyway, but if it's a HEAD request the
+                       // caller can still change the response status
+                       // code.
+                       return streamer.WroteAt(), errChecksum
+               }
+               // Ensure streamer flushes all buffered data without
+               // errors.
+               err = streamer.Close()
+               return streamer.Wrote(), err
+       }
+       return 0, errToCaller
+}
+
+func (ks *keepstore) blockReadRemote(ctx context.Context, opts arvados.BlockReadOptions) (int, error) {
+       token := ctxToken(ctx)
+       if token == "" {
+               return 0, errNoTokenProvided
+       }
+       var remoteClient *keepclient.KeepClient
+       var parts []string
+       li, err := getLocatorInfo(opts.Locator)
+       if err != nil {
+               return 0, err
+       }
+       for i, part := range strings.Split(opts.Locator, "+") {
+               switch {
+               case i == 0:
+                       // don't try to parse hash part as hint
+               case strings.HasPrefix(part, "A"):
+                       // drop local permission hint
+                       continue
+               case len(part) > 7 && part[0] == 'R' && part[6] == '-':
+                       remoteID := part[1:6]
+                       remote, ok := ks.cluster.RemoteClusters[remoteID]
+                       if !ok {
+                               return 0, httpserver.ErrorWithStatus(errors.New("remote cluster not configured"), http.StatusBadRequest)
+                       }
+                       kc, err := ks.remoteClient(remoteID, remote, token)
+                       if err == auth.ErrObsoleteToken {
+                               return 0, httpserver.ErrorWithStatus(err, http.StatusBadRequest)
+                       } else if err != nil {
+                               return 0, err
+                       }
+                       remoteClient = kc
+                       part = "A" + part[7:]
+               }
+               parts = append(parts, part)
+       }
+       if remoteClient == nil {
+               return 0, httpserver.ErrorWithStatus(errors.New("invalid remote hint"), http.StatusBadRequest)
+       }
+       locator := strings.Join(parts, "+")
+       if opts.LocalLocator == nil {
+               // Read from remote cluster and stream response back
+               // to caller
+               if rw, ok := opts.WriteTo.(http.ResponseWriter); ok && li.size > 0 {
+                       rw.Header().Set("Content-Length", fmt.Sprintf("%d", li.size))
+               }
+               return remoteClient.BlockRead(ctx, arvados.BlockReadOptions{
+                       Locator: locator,
+                       WriteTo: opts.WriteTo,
+               })
+       }
+       // We must call LocalLocator before writing any data to
+       // opts.WriteTo, otherwise the caller can't put the local
+       // locator in a response header.  So we copy into memory,
+       // generate the local signature, then copy from memory to
+       // opts.WriteTo.
+       buf, err := ks.bufferPool.GetContext(ctx)
+       if err != nil {
+               return 0, err
+       }
+       defer ks.bufferPool.Put(buf)
+       writebuf := bytes.NewBuffer(buf[:0])
+       ks.logger.Infof("blockReadRemote(%s): remote read(%s)", opts.Locator, locator)
+       _, err = remoteClient.BlockRead(ctx, arvados.BlockReadOptions{
+               Locator: locator,
+               WriteTo: writebuf,
+       })
+       if err != nil {
+               return 0, err
+       }
+       resp, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{
+               Hash: locator,
+               Data: writebuf.Bytes(),
+       })
+       if err != nil {
+               return 0, err
+       }
+       opts.LocalLocator(resp.Locator)
+       if rw, ok := opts.WriteTo.(http.ResponseWriter); ok {
+               rw.Header().Set("Content-Length", fmt.Sprintf("%d", writebuf.Len()))
+       }
+       n, err := io.Copy(opts.WriteTo, bytes.NewReader(writebuf.Bytes()))
+       return int(n), err
+}
+
+func (ks *keepstore) remoteClient(remoteID string, remoteCluster arvados.RemoteCluster, token string) (*keepclient.KeepClient, error) {
+       ks.remoteClientsMtx.Lock()
+       kc, ok := ks.remoteClients[remoteID]
+       ks.remoteClientsMtx.Unlock()
+       if !ok {
+               c := &arvados.Client{
+                       APIHost:   remoteCluster.Host,
+                       AuthToken: "xxx",
+                       Insecure:  remoteCluster.Insecure,
+               }
+               ac, err := arvadosclient.New(c)
+               if err != nil {
+                       return nil, err
+               }
+               kc, err = keepclient.MakeKeepClient(ac)
+               if err != nil {
+                       return nil, err
+               }
+               kc.DiskCacheSize = keepclient.DiskCacheDisabled
 
-func (e *KeepError) Error() string {
-       return e.ErrMsg
+               ks.remoteClientsMtx.Lock()
+               ks.remoteClients[remoteID] = kc
+               ks.remoteClientsMtx.Unlock()
+       }
+       accopy := *kc.Arvados
+       accopy.ApiToken = token
+       kccopy := kc.Clone()
+       kccopy.Arvados = &accopy
+       token, err := auth.SaltToken(token, remoteID)
+       if err != nil {
+               return nil, err
+       }
+       kccopy.Arvados.ApiToken = token
+       return kccopy, nil
+}
+
+// BlockWrite writes a block to one or more volumes.
+func (ks *keepstore) BlockWrite(ctx context.Context, opts arvados.BlockWriteOptions) (arvados.BlockWriteResponse, error) {
+       var resp arvados.BlockWriteResponse
+       var hash string
+       if opts.Data == nil {
+               buf, err := ks.bufferPool.GetContext(ctx)
+               if err != nil {
+                       return resp, err
+               }
+               defer ks.bufferPool.Put(buf)
+               w := bytes.NewBuffer(buf[:0])
+               h := md5.New()
+               limitedReader := &io.LimitedReader{R: opts.Reader, N: BlockSize}
+               n, err := io.Copy(io.MultiWriter(w, h), limitedReader)
+               if err != nil {
+                       return resp, err
+               }
+               if limitedReader.N == 0 {
+                       // Data size is either exactly BlockSize, or too big.
+                       n, err := opts.Reader.Read(make([]byte, 1))
+                       if n > 0 {
+                               return resp, httpserver.ErrorWithStatus(err, http.StatusRequestEntityTooLarge)
+                       }
+                       if err != io.EOF {
+                               return resp, err
+                       }
+               }
+               opts.Data = buf[:n]
+               if opts.DataSize != 0 && int(n) != opts.DataSize {
+                       return resp, httpserver.ErrorWithStatus(fmt.Errorf("content length %d did not match specified data size %d", n, opts.DataSize), http.StatusBadRequest)
+               }
+               hash = fmt.Sprintf("%x", h.Sum(nil))
+       } else {
+               hash = fmt.Sprintf("%x", md5.Sum(opts.Data))
+       }
+       if opts.Hash != "" && !strings.HasPrefix(opts.Hash, hash) {
+               return resp, httpserver.ErrorWithStatus(fmt.Errorf("content hash %s did not match specified locator %s", hash, opts.Hash), http.StatusBadRequest)
+       }
+       rvzmounts := ks.rendezvous(hash, ks.mountsW)
+       result := newPutProgress(opts.StorageClasses)
+       for _, mnt := range rvzmounts {
+               if !result.Want(mnt) {
+                       continue
+               }
+               cmp := &checkEqual{Expect: opts.Data}
+               if err := mnt.BlockRead(ctx, hash, cmp); err == nil {
+                       if !cmp.Equal() {
+                               return resp, errCollision
+                       }
+                       err := mnt.BlockTouch(hash)
+                       if err == nil {
+                               result.Add(mnt)
+                       }
+               }
+       }
+       var allFull atomic.Bool
+       allFull.Store(true)
+       // pending tracks what result will be if all outstanding
+       // writes succeed.
+       pending := result.Copy()
+       cond := sync.NewCond(new(sync.Mutex))
+       cond.L.Lock()
+       var wg sync.WaitGroup
+nextmnt:
+       for _, mnt := range rvzmounts {
+               for {
+                       if result.Done() || ctx.Err() != nil {
+                               break nextmnt
+                       }
+                       if !result.Want(mnt) {
+                               continue nextmnt
+                       }
+                       if pending.Want(mnt) {
+                               break
+                       }
+                       // This mount might not be needed, depending
+                       // on the outcome of pending writes. Wait for
+                       // a pending write to finish, then check
+                       // again.
+                       cond.Wait()
+               }
+               mnt := mnt
+               logger := ks.logger.WithField("mount", mnt.UUID)
+               pending.Add(mnt)
+               wg.Add(1)
+               go func() {
+                       defer wg.Done()
+                       logger.Debug("start write")
+                       err := mnt.BlockWrite(ctx, hash, opts.Data)
+                       cond.L.Lock()
+                       defer cond.L.Unlock()
+                       defer cond.Broadcast()
+                       if err != nil {
+                               logger.Debug("write failed")
+                               pending.Sub(mnt)
+                               if err != errFull {
+                                       allFull.Store(false)
+                               }
+                       } else {
+                               result.Add(mnt)
+                               pending.Sub(mnt)
+                       }
+               }()
+       }
+       cond.L.Unlock()
+       wg.Wait()
+       if ctx.Err() != nil {
+               return resp, ctx.Err()
+       }
+       if result.Done() || result.totalReplication > 0 {
+               resp = arvados.BlockWriteResponse{
+                       Locator:        ks.signLocator(ctxToken(ctx), fmt.Sprintf("%s+%d", hash, len(opts.Data))),
+                       Replicas:       result.totalReplication,
+                       StorageClasses: result.classDone,
+               }
+               return resp, nil
+       }
+       if allFull.Load() {
+               return resp, errFull
+       }
+       return resp, errVolumeUnavailable
+}
+
+// rendezvous sorts the given mounts by descending priority, then by
+// rendezvous order for the given locator.
+func (*keepstore) rendezvous(locator string, mnts []*mount) []*mount {
+       hash := locator
+       if len(hash) > 32 {
+               hash = hash[:32]
+       }
+       // copy the provided []*mount before doing an in-place sort
+       mnts = append([]*mount(nil), mnts...)
+       weight := make(map[*mount]string)
+       for _, mnt := range mnts {
+               uuidpart := mnt.UUID
+               if len(uuidpart) == 27 {
+                       // strip zzzzz-yyyyy- prefixes
+                       uuidpart = uuidpart[12:]
+               }
+               weight[mnt] = fmt.Sprintf("%x", md5.Sum([]byte(hash+uuidpart)))
+       }
+       sort.Slice(mnts, func(i, j int) bool {
+               if p := mnts[i].priority - mnts[j].priority; p != 0 {
+                       return p > 0
+               }
+               return weight[mnts[i]] < weight[mnts[j]]
+       })
+       return mnts
+}
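As an illustration (not part of this commit), the weight computation used above, shown standalone for two made-up volume UUIDs and a block hash.

func exampleRendezvousWeights() {
	hash := "acbd18db4cc2f85cedef654fccc4a4d8"
	for _, uuid := range []string{
		"zzzzz-nyw5e-000000000000000",
		"zzzzz-nyw5e-111111111111111",
	} {
		// 27-character UUIDs lose their "zzzzz-nyw5e-" prefix before
		// hashing, exactly as in rendezvous() above.
		weight := fmt.Sprintf("%x", md5.Sum([]byte(hash+uuid[12:])))
		fmt.Println(uuid, weight)
	}
	// Among mounts with equal priority, the one with the smaller
	// weight string is tried first.
}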
+
+// checkEqual reports whether the data written to it (via io.WriterAt
+// interface) is equal to the expected data.
+//
+// Expect should not be changed after the first Write.
+//
+// Results are undefined if WriteAt is called with overlapping ranges.
+type checkEqual struct {
+       Expect   []byte
+       equal    atomic.Int64
+       notequal atomic.Bool
+}
+
+func (ce *checkEqual) Equal() bool {
+       return !ce.notequal.Load() && ce.equal.Load() == int64(len(ce.Expect))
+}
+
+func (ce *checkEqual) WriteAt(p []byte, offset int64) (int, error) {
+       endpos := int(offset) + len(p)
+       if offset >= 0 && endpos <= len(ce.Expect) && bytes.Equal(p, ce.Expect[int(offset):endpos]) {
+               ce.equal.Add(int64(len(p)))
+       } else {
+               ce.notequal.Store(true)
+       }
+       return len(p), nil
 }
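A sketch (not part of this commit) of checkEqual in isolation; in BlockWrite above, a volume's BlockRead drives the WriteAt calls.

func exampleCheckEqual() {
	ce := &checkEqual{Expect: []byte("foobar")}
	ce.WriteAt([]byte("foo"), 0)
	ce.WriteAt([]byte("bar"), 3)
	fmt.Println(ce.Equal()) // true: all six bytes matched

	ce = &checkEqual{Expect: []byte("foobar")}
	ce.WriteAt([]byte("foobaz"), 0)
	fmt.Println(ce.Equal()) // false: content differs
}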
 
-// Periodically (once per interval) invoke EmptyTrash on all volumes.
-func emptyTrash(mounts []*VolumeMount, interval time.Duration) {
-       for range time.NewTicker(interval).C {
-               for _, v := range mounts {
-                       if v.KeepMount.AllowTrash {
-                               v.EmptyTrash()
+func (ks *keepstore) BlockUntrash(ctx context.Context, locator string) error {
+       li, err := getLocatorInfo(locator)
+       if err != nil {
+               return err
+       }
+       var errToCaller error = os.ErrNotExist
+       for _, mnt := range ks.mountsW {
+               if ctx.Err() != nil {
+                       return ctx.Err()
+               }
+               err := mnt.BlockUntrash(li.hash)
+               if err == nil {
+                       errToCaller = nil
+               } else if !os.IsNotExist(err) && errToCaller != nil {
+                       errToCaller = err
+               }
+       }
+       return errToCaller
+}
+
+func (ks *keepstore) BlockTouch(ctx context.Context, locator string) error {
+       li, err := getLocatorInfo(locator)
+       if err != nil {
+               return err
+       }
+       var errToCaller error = os.ErrNotExist
+       for _, mnt := range ks.mountsW {
+               if ctx.Err() != nil {
+                       return ctx.Err()
+               }
+               err := mnt.BlockTouch(li.hash)
+               if err == nil {
+                       return nil
+               }
+               if !os.IsNotExist(err) {
+                       errToCaller = err
+               }
+       }
+       return errToCaller
+}
+
+func (ks *keepstore) BlockTrash(ctx context.Context, locator string) error {
+       if !ks.cluster.Collections.BlobTrash {
+               return errMethodNotAllowed
+       }
+       li, err := getLocatorInfo(locator)
+       if err != nil {
+               return err
+       }
+       var errToCaller error = os.ErrNotExist
+       for _, mnt := range ks.mounts {
+               if !mnt.AllowTrash {
+                       continue
+               }
+               if ctx.Err() != nil {
+                       return ctx.Err()
+               }
+               t, err := mnt.Mtime(li.hash)
+               if err == nil && time.Now().Sub(t) > ks.cluster.Collections.BlobSigningTTL.Duration() {
+                       err = mnt.BlockTrash(li.hash)
+               }
+               if os.IsNotExist(errToCaller) || (errToCaller == nil && !os.IsNotExist(err)) {
+                       errToCaller = err
+               }
+       }
+       return errToCaller
+}
+
+func (ks *keepstore) Mounts() []*mount {
+       return ks.mountsR
+}
+
+func (ks *keepstore) Index(ctx context.Context, opts indexOptions) error {
+       mounts := ks.mountsR
+       if opts.MountUUID != "" {
+               mnt, ok := ks.mounts[opts.MountUUID]
+               if !ok {
+                       return os.ErrNotExist
+               }
+               mounts = []*mount{mnt}
+       }
+       for _, mnt := range mounts {
+               err := mnt.Index(ctx, opts.Prefix, opts.WriteTo)
+               if err != nil {
+                       return err
+               }
+       }
+       return nil
+}
+
+func ctxToken(ctx context.Context) string {
+       if c, ok := auth.FromContext(ctx); ok && len(c.Tokens) > 0 {
+               return c.Tokens[0]
+       } else {
+               return ""
+       }
+}
+
+// locatorInfo expresses the attributes of a locator that are relevant
+// for keepstore decision-making.
+type locatorInfo struct {
+       hash   string
+       size   int
+       remote bool // locator has a +R hint
+       signed bool // locator has a +A hint
+}
+
+func getLocatorInfo(loc string) (locatorInfo, error) {
+       var li locatorInfo
+       plus := 0    // number of '+' chars seen so far
+       partlen := 0 // chars since last '+'
+       for i, c := range loc + "+" {
+               if c == '+' {
+                       if partlen == 0 {
+                               // double/leading/trailing '+'
+                               return li, errInvalidLocator
+                       }
+                       if plus == 0 {
+                               if i != 32 {
+                                       return li, errInvalidLocator
+                               }
+                               li.hash = loc[:i]
+                       }
+                       if plus == 1 {
+                               if size, err := strconv.Atoi(loc[i-partlen : i]); err == nil {
+                                       li.size = size
+                               }
                        }
+                       plus++
+                       partlen = 0
+                       continue
+               }
+               partlen++
+               if partlen == 1 {
+                       if c == 'A' {
+                               li.signed = true
+                       }
+                       if c == 'R' {
+                               li.remote = true
+                       }
+                       if plus > 1 && c >= '0' && c <= '9' {
+                               // size, if present at all, must come first
+                               return li, errInvalidLocator
+                       }
+               }
+               if plus == 0 && !((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f')) {
+                       // non-hexadecimal char in hash part
+                       return li, errInvalidLocator
                }
        }
+       return li, nil
 }
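For illustration (not part of this commit), what getLocatorInfo extracts from a typical signed locator; the signature and timestamp hints are shortened, made-up values.

func exampleGetLocatorInfo() {
	li, err := getLocatorInfo("acbd18db4cc2f85cedef654fccc4a4d8+3+Adeadbeef@12345678")
	fmt.Println(err)       // <nil>
	fmt.Println(li.hash)   // acbd18db4cc2f85cedef654fccc4a4d8
	fmt.Println(li.size)   // 3
	fmt.Println(li.signed) // true: a +A hint is present
	fmt.Println(li.remote) // false: no +R hint
}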
diff --git a/services/keepstore/keepstore_test.go b/services/keepstore/keepstore_test.go
new file mode 100644 (file)
index 0000000..f9d9888
--- /dev/null
@@ -0,0 +1,892 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package keepstore
+
+import (
+       "bytes"
+       "context"
+       "crypto/md5"
+       "errors"
+       "fmt"
+       "io"
+       "net/http"
+       "os"
+       "sort"
+       "strings"
+       "sync"
+       "testing"
+       "time"
+
+       "git.arvados.org/arvados.git/lib/config"
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/arvadostest"
+       "git.arvados.org/arvados.git/sdk/go/auth"
+       "git.arvados.org/arvados.git/sdk/go/ctxlog"
+       "github.com/prometheus/client_golang/prometheus"
+       . "gopkg.in/check.v1"
+)
+
+func TestGocheck(t *testing.T) {
+       TestingT(t)
+}
+
+const (
+       fooHash = "acbd18db4cc2f85cedef654fccc4a4d8"
+       barHash = "37b51d194a7513e45b56f6524f2d51f2"
+)
+
+var testServiceURL = func() arvados.URL {
+       return arvados.URL{Host: "localhost:12345", Scheme: "http"}
+}()
+
+func authContext(token string) context.Context {
+       return auth.NewContext(context.TODO(), &auth.Credentials{Tokens: []string{token}})
+}
+
+func testCluster(t TB) *arvados.Cluster {
+       cfg, err := config.NewLoader(bytes.NewBufferString("Clusters: {zzzzz: {}}"), ctxlog.TestLogger(t)).Load()
+       if err != nil {
+               t.Fatal(err)
+       }
+       cluster, err := cfg.GetCluster("")
+       if err != nil {
+               t.Fatal(err)
+       }
+       cluster.SystemRootToken = arvadostest.SystemRootToken
+       cluster.ManagementToken = arvadostest.ManagementToken
+       return cluster
+}
+
+func testKeepstore(t TB, cluster *arvados.Cluster, reg *prometheus.Registry) (*keepstore, context.CancelFunc) {
+       if reg == nil {
+               reg = prometheus.NewRegistry()
+       }
+       ctx, cancel := context.WithCancel(context.Background())
+       ctx = ctxlog.Context(ctx, ctxlog.TestLogger(t))
+       ks, err := newKeepstore(ctx, cluster, cluster.SystemRootToken, reg, testServiceURL)
+       if err != nil {
+               t.Fatal(err)
+       }
+       return ks, cancel
+}
+
+var _ = Suite(&keepstoreSuite{})
+
+type keepstoreSuite struct {
+       cluster *arvados.Cluster
+}
+
+func (s *keepstoreSuite) SetUpTest(c *C) {
+       s.cluster = testCluster(c)
+       s.cluster.Volumes = map[string]arvados.Volume{
+               "zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "stub"},
+               "zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "stub"},
+       }
+}
+
+func (s *keepstoreSuite) TestBlockRead_ChecksumMismatch(c *C) {
+       ks, cancel := testKeepstore(c, s.cluster, nil)
+       defer cancel()
+
+       ctx := authContext(arvadostest.ActiveTokenV2)
+
+       fooHash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+       err := ks.mountsW[0].BlockWrite(ctx, fooHash, []byte("bar"))
+       c.Assert(err, IsNil)
+
+       _, err = ks.BlockWrite(ctx, arvados.BlockWriteOptions{
+               Hash: fooHash,
+               Data: []byte("foo"),
+       })
+       c.Check(err, ErrorMatches, "hash collision")
+
+       buf := bytes.NewBuffer(nil)
+       _, err = ks.BlockRead(ctx, arvados.BlockReadOptions{
+               Locator: ks.signLocator(arvadostest.ActiveTokenV2, fooHash+"+3"),
+               WriteTo: buf,
+       })
+       c.Check(err, ErrorMatches, "checksum mismatch in stored data")
+       c.Check(buf.String(), Not(Equals), "foo")
+       c.Check(buf.Len() < 3, Equals, true)
+
+       err = ks.mountsW[1].BlockWrite(ctx, fooHash, []byte("foo"))
+       c.Assert(err, IsNil)
+
+       buf = bytes.NewBuffer(nil)
+       _, err = ks.BlockRead(ctx, arvados.BlockReadOptions{
+               Locator: ks.signLocator(arvadostest.ActiveTokenV2, fooHash+"+3"),
+               WriteTo: buf,
+       })
+       c.Check(err, ErrorMatches, "checksum mismatch in stored data")
+       c.Check(buf.Len() < 3, Equals, true)
+}
+
+func (s *keepstoreSuite) TestBlockReadWrite_SigningDisabled(c *C) {
+       origKey := s.cluster.Collections.BlobSigningKey
+       s.cluster.Collections.BlobSigning = false
+       s.cluster.Collections.BlobSigningKey = ""
+       ks, cancel := testKeepstore(c, s.cluster, nil)
+       defer cancel()
+
+       resp, err := ks.BlockWrite(authContext("abcde"), arvados.BlockWriteOptions{
+               Hash: fooHash,
+               Data: []byte("foo"),
+       })
+       c.Assert(err, IsNil)
+       c.Check(resp.Locator, Equals, fooHash+"+3")
+       locUnsigned := resp.Locator
+       ttl := time.Hour
+       locSigned := arvados.SignLocator(locUnsigned, arvadostest.ActiveTokenV2, time.Now().Add(ttl), ttl, []byte(origKey))
+       c.Assert(locSigned, Not(Equals), locUnsigned)
+
+       for _, locator := range []string{locUnsigned, locSigned} {
+               for _, token := range []string{"", "xyzzy", arvadostest.ActiveTokenV2} {
+                       c.Logf("=== locator %q token %q", locator, token)
+                       ctx := authContext(token)
+                       buf := bytes.NewBuffer(nil)
+                       _, err := ks.BlockRead(ctx, arvados.BlockReadOptions{
+                               Locator: locator,
+                               WriteTo: buf,
+                       })
+                       c.Check(err, IsNil)
+                       c.Check(buf.String(), Equals, "foo")
+               }
+       }
+}
+
+func (s *keepstoreSuite) TestBlockRead_OrderedByStorageClassPriority(c *C) {
+       s.cluster.Volumes = map[string]arvados.Volume{
+               "zzzzz-nyw5e-111111111111111": {
+                       Driver:         "stub",
+                       Replication:    1,
+                       StorageClasses: map[string]bool{"class1": true}},
+               "zzzzz-nyw5e-222222222222222": {
+                       Driver:         "stub",
+                       Replication:    1,
+                       StorageClasses: map[string]bool{"class2": true, "class3": true}},
+       }
+
+       // "foobar" is just some data that happens to result in
+       // rendezvous order {111, 222}
+       data := []byte("foobar")
+       hash := fmt.Sprintf("%x", md5.Sum(data))
+
+       for _, trial := range []struct {
+               priority1 int // priority of class1, thus vol1
+               priority2 int // priority of class2
+               priority3 int // priority of class3 (vol2 priority will be max(priority2, priority3))
+               expectLog string
+       }{
+               {100, 50, 50, "111 read 385\n"},              // class1 has higher priority => try vol1 first, no need to try vol2
+               {100, 100, 100, "111 read 385\n"},            // same priority, vol1 is first in rendezvous order => try vol1 first and succeed
+               {66, 99, 33, "222 read 385\n111 read 385\n"}, // class2 has higher priority => try vol2 first, then try vol1
+               {66, 33, 99, "222 read 385\n111 read 385\n"}, // class3 has highest priority => vol2 has highest => try vol2 first, then try vol1
+       } {
+               c.Logf("=== %+v", trial)
+
+               s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
+                       "class1": {Priority: trial.priority1},
+                       "class2": {Priority: trial.priority2},
+                       "class3": {Priority: trial.priority3},
+               }
+               ks, cancel := testKeepstore(c, s.cluster, nil)
+               defer cancel()
+
+               ctx := authContext(arvadostest.ActiveTokenV2)
+               resp, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{
+                       Hash:           hash,
+                       Data:           data,
+                       StorageClasses: []string{"class1"},
+               })
+               c.Assert(err, IsNil)
+
+               // Combine logs into one. (We only want the logs from
+               // the BlockRead below, not from BlockWrite above.)
+               stubLog := &stubLog{}
+               for _, mnt := range ks.mounts {
+                       mnt.volume.(*stubVolume).stubLog = stubLog
+               }
+
+               n, err := ks.BlockRead(ctx, arvados.BlockReadOptions{
+                       Locator: resp.Locator,
+                       WriteTo: io.Discard,
+               })
+               c.Assert(n, Equals, len(data))
+               c.Assert(err, IsNil)
+               c.Check(stubLog.String(), Equals, trial.expectLog)
+       }
+}
+
+func (s *keepstoreSuite) TestBlockWrite_NoWritableVolumes(c *C) {
+       for uuid, v := range s.cluster.Volumes {
+               v.ReadOnly = true
+               s.cluster.Volumes[uuid] = v
+       }
+       ks, cancel := testKeepstore(c, s.cluster, nil)
+       defer cancel()
+       for _, mnt := range ks.mounts {
+               mnt.volume.(*stubVolume).blockWrite = func(context.Context, string, []byte) error {
+                       c.Error("volume BlockWrite called")
+                       return errors.New("fail")
+               }
+       }
+       ctx := authContext(arvadostest.ActiveTokenV2)
+
+       _, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{
+               Hash: fooHash,
+               Data: []byte("foo")})
+       c.Check(err, NotNil)
+       c.Check(err.(interface{ HTTPStatus() int }).HTTPStatus(), Equals, http.StatusInsufficientStorage)
+}
+
+func (s *keepstoreSuite) TestBlockWrite_MultipleStorageClasses(c *C) {
+       s.cluster.Volumes = map[string]arvados.Volume{
+               "zzzzz-nyw5e-111111111111111": {
+                       Driver:         "stub",
+                       Replication:    1,
+                       StorageClasses: map[string]bool{"class1": true}},
+               "zzzzz-nyw5e-121212121212121": {
+                       Driver:         "stub",
+                       Replication:    1,
+                       StorageClasses: map[string]bool{"class1": true, "class2": true}},
+               "zzzzz-nyw5e-222222222222222": {
+                       Driver:         "stub",
+                       Replication:    1,
+                       StorageClasses: map[string]bool{"class2": true}},
+       }
+
+       // testData is a block that happens to have rendezvous order 111, 121, 222
+       testData := []byte("qux")
+       testHash := fmt.Sprintf("%x+%d", md5.Sum(testData), len(testData))
+
+       s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
+               "class1": {},
+               "class2": {},
+               "class3": {},
+       }
+
+       ctx := authContext(arvadostest.ActiveTokenV2)
+       for idx, trial := range []struct {
+               classes   string // desired classes
+               expectLog string
+       }{
+               {"class1", "" +
+                       "111 read d85\n" +
+                       "121 read d85\n" +
+                       "111 write d85\n" +
+                       "111 read d85\n" +
+                       "111 touch d85\n"},
+               {"class2", "" +
+                       "121 read d85\n" + // write#1
+                       "222 read d85\n" +
+                       "121 write d85\n" +
+                       "121 read d85\n" + // write#2
+                       "121 touch d85\n"},
+               {"class1,class2", "" +
+                       "111 read d85\n" + // write#1
+                       "121 read d85\n" +
+                       "222 read d85\n" +
+                       "121 write d85\n" +
+                       "111 write d85\n" +
+                       "111 read d85\n" + // write#2
+                       "111 touch d85\n" +
+                       "121 read d85\n" +
+                       "121 touch d85\n"},
+               {"class1,class2,class404", "" +
+                       "111 read d85\n" + // write#1
+                       "121 read d85\n" +
+                       "222 read d85\n" +
+                       "121 write d85\n" +
+                       "111 write d85\n" +
+                       "111 read d85\n" + // write#2
+                       "111 touch d85\n" +
+                       "121 read d85\n" +
+                       "121 touch d85\n"},
+       } {
+               c.Logf("=== %d: %+v", idx, trial)
+
+               ks, cancel := testKeepstore(c, s.cluster, nil)
+               defer cancel()
+               stubLog := &stubLog{}
+               for _, mnt := range ks.mounts {
+                       mnt.volume.(*stubVolume).stubLog = stubLog
+               }
+
+               // Confirm that testData really has rendezvous order 111, 121, 222 as assumed above
+               rvz := ks.rendezvous(testHash, ks.mountsW)
+               c.Assert(rvz[0].UUID[24:], Equals, "111")
+               c.Assert(rvz[1].UUID[24:], Equals, "121")
+               c.Assert(rvz[2].UUID[24:], Equals, "222")
+
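+               // Write the same block twice: the first write stores new
+               // replicas, the second finds and touches the existing ones
+               // (see the write#1 / write#2 annotations in expectLog).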
+               for i := 0; i < 2; i++ {
+                       _, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{
+                               Hash:           testHash,
+                               Data:           testData,
+                               StorageClasses: strings.Split(trial.classes, ","),
+                       })
+                       c.Check(err, IsNil)
+               }
+               c.Check(stubLog.String(), Equals, trial.expectLog)
+       }
+}
+
+func (s *keepstoreSuite) TestBlockTrash(c *C) {
+       s.cluster.Volumes = map[string]arvados.Volume{
+               "zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "stub"},
+               "zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "stub"},
+               "zzzzz-nyw5e-222222222222222": {Replication: 1, Driver: "stub", ReadOnly: true},
+               "zzzzz-nyw5e-333333333333333": {Replication: 1, Driver: "stub", ReadOnly: true, AllowTrashWhenReadOnly: true},
+       }
+       ks, cancel := testKeepstore(c, s.cluster, nil)
+       defer cancel()
+
+       var vol []*stubVolume
+       for _, mount := range ks.mountsR {
+               vol = append(vol, mount.volume.(*stubVolume))
+       }
+       sort.Slice(vol, func(i, j int) bool {
+               return vol[i].params.UUID < vol[j].params.UUID
+       })
+
+       ctx := context.Background()
+       loc := fooHash + "+3"
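+       // tOld is older than the blob signing TTL, so replicas written
+       // with this mtime are old enough to be trashed.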
+       tOld := time.Now().Add(-s.cluster.Collections.BlobSigningTTL.Duration() - time.Second)
+
+       clear := func() {
+               for _, vol := range vol {
+                       err := vol.BlockTrash(fooHash)
+                       if !os.IsNotExist(err) {
+                               c.Assert(err, IsNil)
+                       }
+               }
+       }
+       writeit := func(volidx int) {
+               err := vol[volidx].BlockWrite(ctx, fooHash, []byte("foo"))
+               c.Assert(err, IsNil)
+               err = vol[volidx].blockTouchWithTime(fooHash, tOld)
+               c.Assert(err, IsNil)
+       }
+       trashit := func() error {
+               return ks.BlockTrash(ctx, loc)
+       }
+       checkexists := func(volidx int) bool {
+               err := vol[volidx].BlockRead(ctx, fooHash, brdiscard)
+               if !os.IsNotExist(err) {
+                       c.Check(err, IsNil)
+               }
+               return err == nil
+       }
+
+       clear()
+       c.Check(trashit(), Equals, os.ErrNotExist)
+
+       // one old replica => trash it
+       clear()
+       writeit(0)
+       c.Check(trashit(), IsNil)
+       c.Check(checkexists(0), Equals, false)
+
+       // one old replica + one new replica => keep new, trash old
+       clear()
+       writeit(0)
+       writeit(1)
+       c.Check(vol[1].blockTouchWithTime(fooHash, time.Now()), IsNil)
+       c.Check(trashit(), IsNil)
+       c.Check(checkexists(0), Equals, false)
+       c.Check(checkexists(1), Equals, true)
+
+       // two old replicas => trash both
+       clear()
+       writeit(0)
+       writeit(1)
+       c.Check(trashit(), IsNil)
+       c.Check(checkexists(0), Equals, false)
+       c.Check(checkexists(1), Equals, false)
+
+       // four old replicas => trash all except readonly volume with
+       // AllowTrashWhenReadOnly==false
+       clear()
+       writeit(0)
+       writeit(1)
+       writeit(2)
+       writeit(3)
+       c.Check(trashit(), IsNil)
+       c.Check(checkexists(0), Equals, false)
+       c.Check(checkexists(1), Equals, false)
+       c.Check(checkexists(2), Equals, true)
+       c.Check(checkexists(3), Equals, false)
+
+       // two old replicas but one returns an error => return the
+       // only non-404 backend error
+       clear()
+       vol[0].blockTrash = func(hash string) error {
+               return errors.New("fake error")
+       }
+       writeit(0)
+       writeit(3)
+       c.Check(trashit(), ErrorMatches, "fake error")
+       c.Check(checkexists(0), Equals, true)
+       c.Check(checkexists(1), Equals, false)
+       c.Check(checkexists(2), Equals, false)
+       c.Check(checkexists(3), Equals, false)
+}
+
+func (s *keepstoreSuite) TestBlockWrite_OnlyOneBuffer(c *C) {
+       s.cluster.API.MaxKeepBlobBuffers = 1
+       ks, cancel := testKeepstore(c, s.cluster, nil)
+       defer cancel()
+       ok := make(chan struct{})
+       go func() {
+               defer close(ok)
+               ctx := authContext(arvadostest.ActiveTokenV2)
+               _, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{
+                       Hash: fooHash,
+                       Data: []byte("foo")})
+               c.Check(err, IsNil)
+       }()
+       select {
+       case <-ok:
+       case <-time.After(time.Second):
+               c.Fatal("PUT deadlocks with MaxKeepBlobBuffers==1")
+       }
+}
+
+func (s *keepstoreSuite) TestBufferPoolLeak(c *C) {
+       s.cluster.API.MaxKeepBlobBuffers = 4
+       ks, cancel := testKeepstore(c, s.cluster, nil)
+       defer cancel()
+
+       ctx := authContext(arvadostest.ActiveTokenV2)
+       var wg sync.WaitGroup
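+       // Run more concurrent write+read sequences than there are
+       // buffers (MaxKeepBlobBuffers==4); if any buffer leaks, a later
+       // sequence blocks and the timeout below fires.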
+       for range make([]int, 20) {
+               wg.Add(1)
+               go func() {
+                       defer wg.Done()
+                       resp, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{
+                               Hash: fooHash,
+                               Data: []byte("foo")})
+                       c.Check(err, IsNil)
+                       _, err = ks.BlockRead(ctx, arvados.BlockReadOptions{
+                               Locator: resp.Locator,
+                               WriteTo: io.Discard})
+                       c.Check(err, IsNil)
+               }()
+       }
+       ok := make(chan struct{})
+       go func() {
+               wg.Wait()
+               close(ok)
+       }()
+       select {
+       case <-ok:
+       case <-time.After(time.Second):
+               c.Fatal("read/write sequence deadlocks, likely buffer pool leak")
+       }
+}
+
+func (s *keepstoreSuite) TestPutStorageClasses(c *C) {
+       s.cluster.Volumes = map[string]arvados.Volume{
+               "zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "stub"}, // "default" is implicit
+               "zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "stub", StorageClasses: map[string]bool{"special": true, "extra": true}},
+               "zzzzz-nyw5e-222222222222222": {Replication: 1, Driver: "stub", StorageClasses: map[string]bool{"readonly": true}, ReadOnly: true},
+       }
+       ks, cancel := testKeepstore(c, s.cluster, nil)
+       defer cancel()
+       ctx := authContext(arvadostest.ActiveTokenV2)
+
+       for _, trial := range []struct {
+               ask            []string
+               expectReplicas int
+               expectClasses  map[string]int
+       }{
+               {nil,
+                       1,
+                       map[string]int{"default": 1}},
+               {[]string{},
+                       1,
+                       map[string]int{"default": 1}},
+               {[]string{"default"},
+                       1,
+                       map[string]int{"default": 1}},
+               {[]string{"default", "default"},
+                       1,
+                       map[string]int{"default": 1}},
+               {[]string{"special"},
+                       1,
+                       map[string]int{"extra": 1, "special": 1}},
+               {[]string{"special", "readonly"},
+                       1,
+                       map[string]int{"extra": 1, "special": 1}},
+               {[]string{"special", "nonexistent"},
+                       1,
+                       map[string]int{"extra": 1, "special": 1}},
+               {[]string{"extra", "special"},
+                       1,
+                       map[string]int{"extra": 1, "special": 1}},
+               {[]string{"default", "special"},
+                       2,
+                       map[string]int{"default": 1, "extra": 1, "special": 1}},
+       } {
+               c.Logf("success case %#v", trial)
+               resp, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{
+                       Hash:           fooHash,
+                       Data:           []byte("foo"),
+                       StorageClasses: trial.ask,
+               })
+               if !c.Check(err, IsNil) {
+                       continue
+               }
+               c.Check(resp.Replicas, Equals, trial.expectReplicas)
+               if len(trial.expectClasses) == 0 {
+                       // any non-empty value is correct
+                       c.Check(resp.StorageClasses, Not(HasLen), 0)
+               } else {
+                       c.Check(resp.StorageClasses, DeepEquals, trial.expectClasses)
+               }
+       }
+
+       for _, ask := range [][]string{
+               {"doesnotexist"},
+               {"doesnotexist", "readonly"},
+               {"readonly"},
+       } {
+               c.Logf("failure case %s", ask)
+               _, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{
+                       Hash:           fooHash,
+                       Data:           []byte("foo"),
+                       StorageClasses: ask,
+               })
+               c.Check(err, NotNil)
+       }
+}
+
+func (s *keepstoreSuite) TestUntrashHandlerWithNoWritableVolumes(c *C) {
+       for uuid, v := range s.cluster.Volumes {
+               v.ReadOnly = true
+               s.cluster.Volumes[uuid] = v
+       }
+       ks, cancel := testKeepstore(c, s.cluster, nil)
+       defer cancel()
+
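+       // Store a readable block on every volume; BlockUntrash below
+       // should report not-found and leave these replicas untouched.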
+       for _, mnt := range ks.mounts {
+               err := mnt.BlockWrite(context.Background(), fooHash, []byte("foo"))
+               c.Assert(err, IsNil)
+               err = mnt.BlockRead(context.Background(), fooHash, brdiscard)
+               c.Assert(err, IsNil)
+       }
+
+       err := ks.BlockUntrash(context.Background(), fooHash)
+       c.Check(os.IsNotExist(err), Equals, true)
+
+       for _, mnt := range ks.mounts {
+               err := mnt.BlockRead(context.Background(), fooHash, brdiscard)
+               c.Assert(err, IsNil)
+       }
+}
+
+func (s *keepstoreSuite) TestBlockWrite_SkipReadOnly(c *C) {
+       s.cluster.Volumes = map[string]arvados.Volume{
+               "zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "stub"},
+               "zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "stub", ReadOnly: true},
+               "zzzzz-nyw5e-222222222222222": {Replication: 1, Driver: "stub", ReadOnly: true, AllowTrashWhenReadOnly: true},
+       }
+       ks, cancel := testKeepstore(c, s.cluster, nil)
+       defer cancel()
+       ctx := authContext(arvadostest.ActiveTokenV2)
+
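+       // All writes should land on the only writable volume; the two
+       // read-only volumes must never be touched.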
+       for i := range make([]byte, 32) {
+               data := []byte(fmt.Sprintf("block %d", i))
+               _, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{Data: data})
+               c.Assert(err, IsNil)
+       }
+       c.Check(ks.mounts["zzzzz-nyw5e-000000000000000"].volume.(*stubVolume).stubLog.String(), Matches, "(?ms).*write.*")
+       c.Check(ks.mounts["zzzzz-nyw5e-111111111111111"].volume.(*stubVolume).stubLog.String(), HasLen, 0)
+       c.Check(ks.mounts["zzzzz-nyw5e-222222222222222"].volume.(*stubVolume).stubLog.String(), HasLen, 0)
+}
+
+func (s *keepstoreSuite) TestGetLocatorInfo(c *C) {
+       for _, trial := range []struct {
+               locator string
+               ok      bool
+               expect  locatorInfo
+       }{
+               {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+                       ok: true},
+               {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1234",
+                       ok: true, expect: locatorInfo{size: 1234}},
+               {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1234+Abcdef@abcdef",
+                       ok: true, expect: locatorInfo{size: 1234, signed: true}},
+               {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1234+Rzzzzz-abcdef",
+                       ok: true, expect: locatorInfo{size: 1234, remote: true}},
+               {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+12345+Zexample+Rzzzzz-abcdef",
+                       ok: true, expect: locatorInfo{size: 12345, remote: true}},
+               {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+123456+👶🦈+Rzzzzz-abcdef",
+                       ok: true, expect: locatorInfo{size: 123456, remote: true}},
+               // invalid: bad hash char
+               {locator: "aaaaaaaaaaaaaazaaaaaaaaaaaaaaaaa+1234",
+                       ok: false},
+               {locator: "aaaaaaaaaaaaaaFaaaaaaaaaaaaaaaaa+1234",
+                       ok: false},
+               {locator: "aaaaaaaaaaaaaa⛵aaaaaaaaaaaaaaaaa+1234",
+                       ok: false},
+               // invalid: hash length != 32
+               {locator: "",
+                       ok: false},
+               {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+                       ok: false},
+               {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1234",
+                       ok: false},
+               {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabb",
+                       ok: false},
+               {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabb+1234",
+                       ok: false},
+               // invalid: first hint is not size
+               {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+Abcdef+1234",
+                       ok: false},
+               // invalid: leading/trailing/double +
+               {locator: "+aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1234",
+                       ok: false},
+               {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1234+",
+                       ok: false},
+               {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa++1234",
+                       ok: false},
+               {locator: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1234++Abcdef@abcdef",
+                       ok: false},
+       } {
+               c.Logf("=== %s", trial.locator)
+               li, err := getLocatorInfo(trial.locator)
+               if !trial.ok {
+                       c.Check(err, NotNil)
+                       continue
+               }
+               c.Check(err, IsNil)
+               c.Check(li.hash, Equals, trial.locator[:32])
+               c.Check(li.size, Equals, trial.expect.size)
+               c.Check(li.signed, Equals, trial.expect.signed)
+               c.Check(li.remote, Equals, trial.expect.remote)
+       }
+}
+
+func init() {
+       driver["stub"] = func(params newVolumeParams) (volume, error) {
+               v := &stubVolume{
+                       params:  params,
+                       data:    make(map[string]stubData),
+                       stubLog: &stubLog{},
+               }
+               return v, nil
+       }
+}
+
+type stubLog struct {
+       sync.Mutex
+       bytes.Buffer
+}
+
+func (sl *stubLog) Printf(format string, args ...interface{}) {
+       if sl == nil {
+               return
+       }
+       sl.Lock()
+       defer sl.Unlock()
+       fmt.Fprintf(sl, format+"\n", args...)
+}
+
+type stubData struct {
+       mtime time.Time
+       data  []byte
+       trash time.Time
+}
+
+type stubVolume struct {
+       params  newVolumeParams
+       data    map[string]stubData
+       stubLog *stubLog
+       mtx     sync.Mutex
+
+       // The following funcs enable tests to insert delays and
+       // failures. Each volume operation begins by calling the
+       // corresponding func (if non-nil). If the func returns an
+       // error, that error is returned to the caller. Otherwise, the
+       // stub continues normally.
+       blockRead    func(ctx context.Context, hash string, writeTo io.WriterAt) error
+       blockWrite   func(ctx context.Context, hash string, data []byte) error
+       deviceID     func() string
+       blockTouch   func(hash string) error
+       blockTrash   func(hash string) error
+       blockUntrash func(hash string) error
+       index        func(ctx context.Context, prefix string, writeTo io.Writer) error
+       mtime        func(hash string) (time.Time, error)
+       emptyTrash   func()
+}
+
+func (v *stubVolume) log(op, hash string) {
+       // Note that this intentionally crashes if the UUID or hash is too
+       // short -- if keepstore ever uses one that short, tests should fail.
+       v.stubLog.Printf("%s %s %s", v.params.UUID[24:27], op, hash[:3])
+}
+
+func (v *stubVolume) BlockRead(ctx context.Context, hash string, writeTo io.WriterAt) error {
+       v.log("read", hash)
+       if v.blockRead != nil {
+               err := v.blockRead(ctx, hash, writeTo)
+               if err != nil {
+                       return err
+               }
+       }
+       v.mtx.Lock()
+       ent, ok := v.data[hash]
+       v.mtx.Unlock()
+       if !ok || !ent.trash.IsZero() {
+               return os.ErrNotExist
+       }
+       wrote := 0
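+       // Deliver the data in chunks of doubling size so callers see
+       // multiple WriteAt calls.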
+       for writesize := 1000; wrote < len(ent.data); writesize = writesize * 2 {
+               data := ent.data[wrote:]
+               if len(data) > writesize {
+                       data = data[:writesize]
+               }
+               n, err := writeTo.WriteAt(data, int64(wrote))
+               wrote += n
+               if err != nil {
+                       return err
+               }
+       }
+       return nil
+}
+
+func (v *stubVolume) BlockWrite(ctx context.Context, hash string, data []byte) error {
+       v.log("write", hash)
+       if v.blockWrite != nil {
+               if err := v.blockWrite(ctx, hash, data); err != nil {
+                       return err
+               }
+       }
+       v.mtx.Lock()
+       defer v.mtx.Unlock()
+       v.data[hash] = stubData{
+               mtime: time.Now(),
+               data:  append([]byte(nil), data...),
+       }
+       return nil
+}
+
+func (v *stubVolume) DeviceID() string {
+       return fmt.Sprintf("%p", v)
+}
+
+func (v *stubVolume) BlockTouch(hash string) error {
+       v.log("touch", hash)
+       if v.blockTouch != nil {
+               if err := v.blockTouch(hash); err != nil {
+                       return err
+               }
+       }
+       v.mtx.Lock()
+       defer v.mtx.Unlock()
+       ent, ok := v.data[hash]
+       if !ok || !ent.trash.IsZero() {
+               return os.ErrNotExist
+       }
+       ent.mtime = time.Now()
+       v.data[hash] = ent
+       return nil
+}
+
+// Set mtime to the (presumably old) specified time.
+func (v *stubVolume) blockTouchWithTime(hash string, t time.Time) error {
+       v.log("touchwithtime", hash)
+       v.mtx.Lock()
+       defer v.mtx.Unlock()
+       ent, ok := v.data[hash]
+       if !ok {
+               return os.ErrNotExist
+       }
+       ent.mtime = t
+       v.data[hash] = ent
+       return nil
+}
+
+func (v *stubVolume) BlockTrash(hash string) error {
+       v.log("trash", hash)
+       if v.blockTrash != nil {
+               if err := v.blockTrash(hash); err != nil {
+                       return err
+               }
+       }
+       v.mtx.Lock()
+       defer v.mtx.Unlock()
+       ent, ok := v.data[hash]
+       if !ok || !ent.trash.IsZero() {
+               return os.ErrNotExist
+       }
+       ent.trash = time.Now().Add(v.params.Cluster.Collections.BlobTrashLifetime.Duration())
+       v.data[hash] = ent
+       return nil
+}
+
+func (v *stubVolume) BlockUntrash(hash string) error {
+       v.log("untrash", hash)
+       if v.blockUntrash != nil {
+               if err := v.blockUntrash(hash); err != nil {
+                       return err
+               }
+       }
+       v.mtx.Lock()
+       defer v.mtx.Unlock()
+       ent, ok := v.data[hash]
+       if !ok || ent.trash.IsZero() {
+               return os.ErrNotExist
+       }
+       ent.trash = time.Time{}
+       v.data[hash] = ent
+       return nil
+}
+
+func (v *stubVolume) Index(ctx context.Context, prefix string, writeTo io.Writer) error {
+       v.stubLog.Printf("%s index %s", v.params.UUID, prefix)
+       if v.index != nil {
+               if err := v.index(ctx, prefix, writeTo); err != nil {
+                       return err
+               }
+       }
+       buf := &bytes.Buffer{}
+       v.mtx.Lock()
+       for hash, ent := range v.data {
+               if ent.trash.IsZero() && strings.HasPrefix(hash, prefix) {
+                       fmt.Fprintf(buf, "%s+%d %d\n", hash, len(ent.data), ent.mtime.UnixNano())
+               }
+       }
+       v.mtx.Unlock()
+       _, err := io.Copy(writeTo, buf)
+       return err
+}
+
+func (v *stubVolume) Mtime(hash string) (time.Time, error) {
+       v.log("mtime", hash)
+       if v.mtime != nil {
+               if t, err := v.mtime(hash); err != nil {
+                       return t, err
+               }
+       }
+       v.mtx.Lock()
+       defer v.mtx.Unlock()
+       ent, ok := v.data[hash]
+       if !ok || !ent.trash.IsZero() {
+               return time.Time{}, os.ErrNotExist
+       }
+       return ent.mtime, nil
+}
+
+func (v *stubVolume) EmptyTrash() {
+       v.stubLog.Printf("%s emptytrash", v.params.UUID)
+       v.mtx.Lock()
+       defer v.mtx.Unlock()
+       for hash, ent := range v.data {
+               if !ent.trash.IsZero() && time.Now().After(ent.trash) {
+                       delete(v.data, hash)
+               }
+       }
+}
index d04601fbec84128ff47cf65ea15588aa6212b9c5..4638de544482e18721a6eb9b714f22fdc17a9dba 100644 (file)
@@ -5,66 +5,9 @@
 package keepstore
 
 import (
-       "fmt"
-
        "github.com/prometheus/client_golang/prometheus"
 )
 
-type nodeMetrics struct {
-       reg *prometheus.Registry
-}
-
-func (m *nodeMetrics) setupBufferPoolMetrics(b *bufferPool) {
-       m.reg.MustRegister(prometheus.NewGaugeFunc(
-               prometheus.GaugeOpts{
-                       Namespace: "arvados",
-                       Subsystem: "keepstore",
-                       Name:      "bufferpool_allocated_bytes",
-                       Help:      "Number of bytes allocated to buffers",
-               },
-               func() float64 { return float64(b.Alloc()) },
-       ))
-       m.reg.MustRegister(prometheus.NewGaugeFunc(
-               prometheus.GaugeOpts{
-                       Namespace: "arvados",
-                       Subsystem: "keepstore",
-                       Name:      "bufferpool_max_buffers",
-                       Help:      "Maximum number of buffers allowed",
-               },
-               func() float64 { return float64(b.Cap()) },
-       ))
-       m.reg.MustRegister(prometheus.NewGaugeFunc(
-               prometheus.GaugeOpts{
-                       Namespace: "arvados",
-                       Subsystem: "keepstore",
-                       Name:      "bufferpool_inuse_buffers",
-                       Help:      "Number of buffers in use",
-               },
-               func() float64 { return float64(b.Len()) },
-       ))
-}
-
-func (m *nodeMetrics) setupWorkQueueMetrics(q *WorkQueue, qName string) {
-       m.reg.MustRegister(prometheus.NewGaugeFunc(
-               prometheus.GaugeOpts{
-                       Namespace: "arvados",
-                       Subsystem: "keepstore",
-                       Name:      fmt.Sprintf("%s_queue_inprogress_entries", qName),
-                       Help:      fmt.Sprintf("Number of %s requests in progress", qName),
-               },
-               func() float64 { return float64(getWorkQueueStatus(q).InProgress) },
-       ))
-       m.reg.MustRegister(prometheus.NewGaugeFunc(
-               prometheus.GaugeOpts{
-                       Namespace: "arvados",
-                       Subsystem: "keepstore",
-                       Name:      fmt.Sprintf("%s_queue_pending_entries", qName),
-                       Help:      fmt.Sprintf("Number of queued %s requests", qName),
-               },
-               func() float64 { return float64(getWorkQueueStatus(q).Queued) },
-       ))
-}
-
 type volumeMetricsVecs struct {
        ioBytes     *prometheus.CounterVec
        errCounters *prometheus.CounterVec
diff --git a/services/keepstore/metrics_test.go b/services/keepstore/metrics_test.go
new file mode 100644 (file)
index 0000000..0c8f1e6
--- /dev/null
@@ -0,0 +1,87 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package keepstore
+
+import (
+       "context"
+       "encoding/json"
+       "net/http"
+
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/arvadostest"
+       "git.arvados.org/arvados.git/sdk/go/ctxlog"
+       "git.arvados.org/arvados.git/sdk/go/httpserver"
+       "github.com/prometheus/client_golang/prometheus"
+       . "gopkg.in/check.v1"
+)
+
+func (s *routerSuite) TestMetrics(c *C) {
+       reg := prometheus.NewRegistry()
+       router, cancel := testRouter(c, s.cluster, reg)
+       defer cancel()
+       instrumented := httpserver.Instrument(reg, ctxlog.TestLogger(c), router)
+       handler := instrumented.ServeAPI(s.cluster.ManagementToken, instrumented)
+
+       router.keepstore.BlockWrite(context.Background(), arvados.BlockWriteOptions{
+               Hash: fooHash,
+               Data: []byte("foo"),
+       })
+       router.keepstore.BlockWrite(context.Background(), arvados.BlockWriteOptions{
+               Hash: barHash,
+               Data: []byte("bar"),
+       })
+
+       // prime the metrics by doing a no-op request
+       resp := call(handler, "GET", "/", "", nil, nil)
+
+       resp = call(handler, "GET", "/metrics.json", "", nil, nil)
+       c.Check(resp.Code, Equals, http.StatusUnauthorized)
+       resp = call(handler, "GET", "/metrics.json", "foobar", nil, nil)
+       c.Check(resp.Code, Equals, http.StatusForbidden)
+       resp = call(handler, "GET", "/metrics.json", arvadostest.ManagementToken, nil, nil)
+       c.Check(resp.Code, Equals, http.StatusOK)
+       var j []struct {
+               Name   string
+               Help   string
+               Type   string
+               Metric []struct {
+                       Label []struct {
+                               Name  string
+                               Value string
+                       }
+                       Summary struct {
+                               SampleCount string
+                               SampleSum   float64
+                       }
+               }
+       }
+       json.NewDecoder(resp.Body).Decode(&j)
+       found := make(map[string]bool)
+       names := map[string]bool{}
+       for _, g := range j {
+               names[g.Name] = true
+               for _, m := range g.Metric {
+                       if len(m.Label) == 2 && m.Label[0].Name == "code" && m.Label[0].Value == "200" && m.Label[1].Name == "method" && m.Label[1].Value == "put" {
+                               c.Check(m.Summary.SampleCount, Equals, "2")
+                               found[g.Name] = true
+                       }
+               }
+       }
+
+       metricsNames := []string{
+               "arvados_keepstore_bufferpool_inuse_buffers",
+               "arvados_keepstore_bufferpool_max_buffers",
+               "arvados_keepstore_bufferpool_allocated_bytes",
+               "arvados_keepstore_pull_queue_inprogress_entries",
+               "arvados_keepstore_pull_queue_pending_entries",
+               "arvados_keepstore_trash_queue_inprogress_entries",
+               "arvados_keepstore_trash_queue_pending_entries",
+               "request_duration_seconds",
+       }
+       for _, m := range metricsNames {
+               _, ok := names[m]
+               c.Check(ok, Equals, true, Commentf("checking metric %q", m))
+       }
+}
diff --git a/services/keepstore/mock_mutex_for_test.go b/services/keepstore/mock_mutex_for_test.go
deleted file mode 100644 (file)
index daf0ef0..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-type MockMutex struct {
-       AllowLock   chan struct{}
-       AllowUnlock chan struct{}
-}
-
-func NewMockMutex() *MockMutex {
-       return &MockMutex{
-               AllowLock:   make(chan struct{}),
-               AllowUnlock: make(chan struct{}),
-       }
-}
-
-// Lock waits for someone to send to AllowLock.
-func (m *MockMutex) Lock() {
-       <-m.AllowLock
-}
-
-// Unlock waits for someone to send to AllowUnlock.
-func (m *MockMutex) Unlock() {
-       <-m.AllowUnlock
-}
index e8c248219f77785458110107922983b0917fa51d..d29d5f6dc048e86e76d1498c80e96bb4f9b058e9 100644 (file)
@@ -5,28 +5,24 @@
 package keepstore
 
 import (
-       "bytes"
        "context"
        "encoding/json"
        "net/http"
-       "net/http/httptest"
 
-       "git.arvados.org/arvados.git/sdk/go/arvadostest"
-       "git.arvados.org/arvados.git/sdk/go/ctxlog"
-       "git.arvados.org/arvados.git/sdk/go/httpserver"
-       "github.com/prometheus/client_golang/prometheus"
-       check "gopkg.in/check.v1"
+       . "gopkg.in/check.v1"
 )
 
-func (s *HandlerSuite) TestMounts(c *check.C) {
-       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+func (s *routerSuite) TestMounts(c *C) {
+       router, cancel := testRouter(c, s.cluster, nil)
+       defer cancel()
 
-       vols := s.handler.volmgr.AllWritable()
-       vols[0].Put(context.Background(), TestHash, TestBlock)
-       vols[1].Put(context.Background(), TestHash2, TestBlock2)
+       router.keepstore.mountsW[0].BlockWrite(context.Background(), fooHash, []byte("foo"))
+       router.keepstore.mountsW[1].BlockWrite(context.Background(), barHash, []byte("bar"))
+
+       resp := call(router, "GET", "/mounts", s.cluster.SystemRootToken, nil, nil)
+       c.Check(resp.Code, Equals, http.StatusOK)
+       c.Log(resp.Body.String())
 
-       resp := s.call("GET", "/mounts", "", nil)
-       c.Check(resp.Code, check.Equals, http.StatusOK)
        var mntList []struct {
                UUID           string          `json:"uuid"`
                DeviceID       string          `json:"device_id"`
@@ -34,119 +30,56 @@ func (s *HandlerSuite) TestMounts(c *check.C) {
                Replication    int             `json:"replication"`
                StorageClasses map[string]bool `json:"storage_classes"`
        }
-       c.Log(resp.Body.String())
        err := json.Unmarshal(resp.Body.Bytes(), &mntList)
-       c.Assert(err, check.IsNil)
-       c.Assert(len(mntList), check.Equals, 2)
+       c.Assert(err, IsNil)
+       c.Assert(mntList, HasLen, 2)
+
        for _, m := range mntList {
-               c.Check(len(m.UUID), check.Equals, 27)
-               c.Check(m.UUID[:12], check.Equals, "zzzzz-nyw5e-")
-               c.Check(m.DeviceID, check.Equals, "mock-device-id")
-               c.Check(m.ReadOnly, check.Equals, false)
-               c.Check(m.Replication, check.Equals, 1)
-               c.Check(m.StorageClasses, check.DeepEquals, map[string]bool{"default": true})
+               c.Check(len(m.UUID), Equals, 27)
+               c.Check(m.UUID[:12], Equals, "zzzzz-nyw5e-")
+               c.Check(m.DeviceID, Matches, "0x[0-9a-f]+")
+               c.Check(m.ReadOnly, Equals, false)
+               c.Check(m.Replication, Equals, 1)
+               c.Check(m.StorageClasses, HasLen, 1)
+               for k := range m.StorageClasses {
+                       c.Check(k, Matches, "testclass.*")
+               }
        }
-       c.Check(mntList[0].UUID, check.Not(check.Equals), mntList[1].UUID)
+       c.Check(mntList[0].UUID, Not(Equals), mntList[1].UUID)
 
-       // Bad auth
+       c.Logf("=== bad auth")
        for _, tok := range []string{"", "xyzzy"} {
-               resp = s.call("GET", "/mounts/"+mntList[1].UUID+"/blocks", tok, nil)
-               c.Check(resp.Code, check.Equals, http.StatusUnauthorized)
-               c.Check(resp.Body.String(), check.Equals, "Unauthorized\n")
-       }
-
-       tok := arvadostest.SystemRootToken
-
-       // Nonexistent mount UUID
-       resp = s.call("GET", "/mounts/X/blocks", tok, nil)
-       c.Check(resp.Code, check.Equals, http.StatusNotFound)
-       c.Check(resp.Body.String(), check.Equals, "mount not found\n")
-
-       // Complete index of first mount
-       resp = s.call("GET", "/mounts/"+mntList[0].UUID+"/blocks", tok, nil)
-       c.Check(resp.Code, check.Equals, http.StatusOK)
-       c.Check(resp.Body.String(), check.Matches, TestHash+`\+[0-9]+ [0-9]+\n\n`)
-
-       // Partial index of first mount (one block matches prefix)
-       resp = s.call("GET", "/mounts/"+mntList[0].UUID+"/blocks?prefix="+TestHash[:2], tok, nil)
-       c.Check(resp.Code, check.Equals, http.StatusOK)
-       c.Check(resp.Body.String(), check.Matches, TestHash+`\+[0-9]+ [0-9]+\n\n`)
-
-       // Complete index of second mount (note trailing slash)
-       resp = s.call("GET", "/mounts/"+mntList[1].UUID+"/blocks/", tok, nil)
-       c.Check(resp.Code, check.Equals, http.StatusOK)
-       c.Check(resp.Body.String(), check.Matches, TestHash2+`\+[0-9]+ [0-9]+\n\n`)
-
-       // Partial index of second mount (no blocks match prefix)
-       resp = s.call("GET", "/mounts/"+mntList[1].UUID+"/blocks/?prefix="+TestHash[:2], tok, nil)
-       c.Check(resp.Code, check.Equals, http.StatusOK)
-       c.Check(resp.Body.String(), check.Equals, "\n")
-}
-
-func (s *HandlerSuite) TestMetrics(c *check.C) {
-       reg := prometheus.NewRegistry()
-       c.Assert(s.handler.setup(context.Background(), s.cluster, "", reg, testServiceURL), check.IsNil)
-       instrumented := httpserver.Instrument(reg, ctxlog.TestLogger(c), s.handler.Handler)
-       s.handler.Handler = instrumented.ServeAPI(s.cluster.ManagementToken, instrumented)
-
-       s.call("PUT", "/"+TestHash, "", TestBlock)
-       s.call("PUT", "/"+TestHash2, "", TestBlock2)
-       resp := s.call("GET", "/metrics.json", "", nil)
-       c.Check(resp.Code, check.Equals, http.StatusUnauthorized)
-       resp = s.call("GET", "/metrics.json", "foobar", nil)
-       c.Check(resp.Code, check.Equals, http.StatusForbidden)
-       resp = s.call("GET", "/metrics.json", arvadostest.ManagementToken, nil)
-       c.Check(resp.Code, check.Equals, http.StatusOK)
-       var j []struct {
-               Name   string
-               Help   string
-               Type   string
-               Metric []struct {
-                       Label []struct {
-                               Name  string
-                               Value string
-                       }
-                       Summary struct {
-                               SampleCount string
-                               SampleSum   float64
-                       }
-               }
-       }
-       json.NewDecoder(resp.Body).Decode(&j)
-       found := make(map[string]bool)
-       names := map[string]bool{}
-       for _, g := range j {
-               names[g.Name] = true
-               for _, m := range g.Metric {
-                       if len(m.Label) == 2 && m.Label[0].Name == "code" && m.Label[0].Value == "200" && m.Label[1].Name == "method" && m.Label[1].Value == "put" {
-                               c.Check(m.Summary.SampleCount, check.Equals, "2")
-                               found[g.Name] = true
-                       }
+               resp = call(router, "GET", "/mounts/"+mntList[1].UUID+"/blocks", tok, nil, nil)
+               if tok == "" {
+                       c.Check(resp.Code, Equals, http.StatusUnauthorized)
+                       c.Check(resp.Body.String(), Equals, "Unauthorized\n")
+               } else {
+                       c.Check(resp.Code, Equals, http.StatusForbidden)
+                       c.Check(resp.Body.String(), Equals, "Forbidden\n")
                }
        }
 
-       metricsNames := []string{
-               "arvados_keepstore_bufferpool_inuse_buffers",
-               "arvados_keepstore_bufferpool_max_buffers",
-               "arvados_keepstore_bufferpool_allocated_bytes",
-               "arvados_keepstore_pull_queue_inprogress_entries",
-               "arvados_keepstore_pull_queue_pending_entries",
-               "arvados_keepstore_trash_queue_inprogress_entries",
-               "arvados_keepstore_trash_queue_pending_entries",
-               "request_duration_seconds",
-       }
-       for _, m := range metricsNames {
-               _, ok := names[m]
-               c.Check(ok, check.Equals, true, check.Commentf("checking metric %q", m))
-       }
-}
-
-func (s *HandlerSuite) call(method, path, tok string, body []byte) *httptest.ResponseRecorder {
-       resp := httptest.NewRecorder()
-       req, _ := http.NewRequest(method, path, bytes.NewReader(body))
-       if tok != "" {
-               req.Header.Set("Authorization", "Bearer "+tok)
-       }
-       s.handler.ServeHTTP(resp, req)
-       return resp
+       c.Logf("=== nonexistent mount UUID")
+       resp = call(router, "GET", "/mounts/X/blocks", s.cluster.SystemRootToken, nil, nil)
+       c.Check(resp.Code, Equals, http.StatusNotFound)
+
+       c.Logf("=== complete index of first mount")
+       resp = call(router, "GET", "/mounts/"+mntList[0].UUID+"/blocks", s.cluster.SystemRootToken, nil, nil)
+       c.Check(resp.Code, Equals, http.StatusOK)
+       c.Check(resp.Body.String(), Matches, fooHash+`\+[0-9]+ [0-9]+\n\n`)
+
+       c.Logf("=== partial index of first mount (one block matches prefix)")
+       resp = call(router, "GET", "/mounts/"+mntList[0].UUID+"/blocks?prefix="+fooHash[:2], s.cluster.SystemRootToken, nil, nil)
+       c.Check(resp.Code, Equals, http.StatusOK)
+       c.Check(resp.Body.String(), Matches, fooHash+`\+[0-9]+ [0-9]+\n\n`)
+
+       c.Logf("=== complete index of second mount (note trailing slash)")
+       resp = call(router, "GET", "/mounts/"+mntList[1].UUID+"/blocks/", s.cluster.SystemRootToken, nil, nil)
+       c.Check(resp.Code, Equals, http.StatusOK)
+       c.Check(resp.Body.String(), Matches, barHash+`\+[0-9]+ [0-9]+\n\n`)
+
+       c.Logf("=== partial index of second mount (no blocks match prefix)")
+       resp = call(router, "GET", "/mounts/"+mntList[1].UUID+"/blocks/?prefix="+fooHash[:2], s.cluster.SystemRootToken, nil, nil)
+       c.Check(resp.Code, Equals, http.StatusOK)
+       c.Check(resp.Body.String(), Equals, "\n")
 }
diff --git a/services/keepstore/perms.go b/services/keepstore/perms.go
deleted file mode 100644 (file)
index 7205a45..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-import (
-       "time"
-
-       "git.arvados.org/arvados.git/sdk/go/arvados"
-       "git.arvados.org/arvados.git/sdk/go/keepclient"
-)
-
-// SignLocator takes a blobLocator, an apiToken and an expiry time, and
-// returns a signed locator string.
-func SignLocator(cluster *arvados.Cluster, blobLocator, apiToken string, expiry time.Time) string {
-       return keepclient.SignLocator(blobLocator, apiToken, expiry, cluster.Collections.BlobSigningTTL.Duration(), []byte(cluster.Collections.BlobSigningKey))
-}
-
-// VerifySignature returns nil if the signature on the signedLocator
-// can be verified using the given apiToken. Otherwise it returns
-// either ExpiredError (if the timestamp has expired, which is
-// something the client could have figured out independently) or
-// PermissionError.
-func VerifySignature(cluster *arvados.Cluster, signedLocator, apiToken string) error {
-       err := keepclient.VerifySignature(signedLocator, apiToken, cluster.Collections.BlobSigningTTL.Duration(), []byte(cluster.Collections.BlobSigningKey))
-       if err == keepclient.ErrSignatureExpired {
-               return ExpiredError
-       } else if err != nil {
-               return PermissionError
-       }
-       return nil
-}
diff --git a/services/keepstore/perms_test.go b/services/keepstore/perms_test.go
deleted file mode 100644 (file)
index 1322374..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-import (
-       "strconv"
-       "time"
-
-       "git.arvados.org/arvados.git/sdk/go/arvados"
-       check "gopkg.in/check.v1"
-)
-
-const (
-       knownHash    = "acbd18db4cc2f85cedef654fccc4a4d8"
-       knownLocator = knownHash + "+3"
-       knownToken   = "hocfupkn2pjhrpgp2vxv8rsku7tvtx49arbc9s4bvu7p7wxqvk"
-       knownKey     = "13u9fkuccnboeewr0ne3mvapk28epf68a3bhj9q8sb4l6e4e5mkk" +
-               "p6nhj2mmpscgu1zze5h5enydxfe3j215024u16ij4hjaiqs5u4pzsl3nczmaoxnc" +
-               "ljkm4875xqn4xv058koz3vkptmzhyheiy6wzevzjmdvxhvcqsvr5abhl15c2d4o4" +
-               "jhl0s91lojy1mtrzqqvprqcverls0xvy9vai9t1l1lvvazpuadafm71jl4mrwq2y" +
-               "gokee3eamvjy8qq1fvy238838enjmy5wzy2md7yvsitp5vztft6j4q866efym7e6" +
-               "vu5wm9fpnwjyxfldw3vbo01mgjs75rgo7qioh8z8ij7jpyp8508okhgbbex3ceei" +
-               "786u5rw2a9gx743dj3fgq2irk"
-       knownSignatureTTL  = arvados.Duration(24 * 14 * time.Hour)
-       knownSignature     = "89118b78732c33104a4d6231e8b5a5fa1e4301e3"
-       knownTimestamp     = "7fffffff"
-       knownSigHint       = "+A" + knownSignature + "@" + knownTimestamp
-       knownSignedLocator = knownLocator + knownSigHint
-)
-
-func (s *HandlerSuite) TestSignLocator(c *check.C) {
-       tsInt, err := strconv.ParseInt(knownTimestamp, 16, 0)
-       if err != nil {
-               c.Fatal(err)
-       }
-       t0 := time.Unix(tsInt, 0)
-
-       s.cluster.Collections.BlobSigningTTL = knownSignatureTTL
-       s.cluster.Collections.BlobSigningKey = knownKey
-       if x := SignLocator(s.cluster, knownLocator, knownToken, t0); x != knownSignedLocator {
-               c.Fatalf("Got %+q, expected %+q", x, knownSignedLocator)
-       }
-
-       s.cluster.Collections.BlobSigningKey = "arbitrarykey"
-       if x := SignLocator(s.cluster, knownLocator, knownToken, t0); x == knownSignedLocator {
-               c.Fatalf("Got same signature %+q, even though blobSigningKey changed", x)
-       }
-}
-
-func (s *HandlerSuite) TestVerifyLocator(c *check.C) {
-       s.cluster.Collections.BlobSigningTTL = knownSignatureTTL
-       s.cluster.Collections.BlobSigningKey = knownKey
-       if err := VerifySignature(s.cluster, knownSignedLocator, knownToken); err != nil {
-               c.Fatal(err)
-       }
-
-       s.cluster.Collections.BlobSigningKey = "arbitrarykey"
-       if err := VerifySignature(s.cluster, knownSignedLocator, knownToken); err == nil {
-               c.Fatal("Verified signature even with wrong blobSigningKey")
-       }
-}
diff --git a/services/keepstore/pipe_adapters.go b/services/keepstore/pipe_adapters.go
deleted file mode 100644 (file)
index 6b55505..0000000
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-import (
-       "bytes"
-       "context"
-       "io"
-       "io/ioutil"
-)
-
-// getWithPipe invokes getter and copies the resulting data into
-// buf. If ctx is done before all data is copied, getWithPipe closes
-// the pipe with an error, and returns early with an error.
-func getWithPipe(ctx context.Context, loc string, buf []byte, br BlockReader) (int, error) {
-       piper, pipew := io.Pipe()
-       go func() {
-               pipew.CloseWithError(br.ReadBlock(ctx, loc, pipew))
-       }()
-       done := make(chan struct{})
-       var size int
-       var err error
-       go func() {
-               size, err = io.ReadFull(piper, buf)
-               if err == io.EOF || err == io.ErrUnexpectedEOF {
-                       err = nil
-               }
-               close(done)
-       }()
-       select {
-       case <-ctx.Done():
-               piper.CloseWithError(ctx.Err())
-               return 0, ctx.Err()
-       case <-done:
-               piper.Close()
-               return size, err
-       }
-}
-
-// putWithPipe invokes putter with a new pipe, and copies data
-// from buf into the pipe. If ctx is done before all data is copied,
-// putWithPipe closes the pipe with an error, and returns early with
-// an error.
-func putWithPipe(ctx context.Context, loc string, buf []byte, bw BlockWriter) error {
-       piper, pipew := io.Pipe()
-       copyErr := make(chan error)
-       go func() {
-               _, err := io.Copy(pipew, bytes.NewReader(buf))
-               copyErr <- err
-               close(copyErr)
-       }()
-
-       putErr := make(chan error, 1)
-       go func() {
-               putErr <- bw.WriteBlock(ctx, loc, piper)
-               close(putErr)
-       }()
-
-       var err error
-       select {
-       case err = <-copyErr:
-       case err = <-putErr:
-       case <-ctx.Done():
-               err = ctx.Err()
-       }
-
-       // Ensure io.Copy goroutine isn't blocked writing to pipew
-       // (otherwise, io.Copy is still using buf so it isn't safe to
-       // return). This can cause pipew to receive corrupt data if
-       // err came from copyErr or ctx.Done() before the copy
-       // finished. That's OK, though: in that case err != nil, and
-       // CloseWithErr(err) ensures putter() will get an error from
-       // piper.Read() before seeing EOF.
-       go pipew.CloseWithError(err)
-       go io.Copy(ioutil.Discard, piper)
-       <-copyErr
-
-       // Note: io.Copy() is finished now, but putter() might still
-       // be running. If we encounter an error before putter()
-       // returns, we return right away without waiting for putter().
-
-       if err != nil {
-               return err
-       }
-       select {
-       case <-ctx.Done():
-               return ctx.Err()
-       case err = <-putErr:
-               return err
-       }
-}
diff --git a/services/keepstore/proxy_remote.go b/services/keepstore/proxy_remote.go
deleted file mode 100644 (file)
index 325f1cf..0000000
+++ /dev/null
@@ -1,212 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-import (
-       "context"
-       "errors"
-       "io"
-       "net/http"
-       "regexp"
-       "strings"
-       "sync"
-       "time"
-
-       "git.arvados.org/arvados.git/sdk/go/arvados"
-       "git.arvados.org/arvados.git/sdk/go/arvadosclient"
-       "git.arvados.org/arvados.git/sdk/go/auth"
-       "git.arvados.org/arvados.git/sdk/go/keepclient"
-)
-
-type remoteProxy struct {
-       clients map[string]*keepclient.KeepClient
-       mtx     sync.Mutex
-}
-
-func (rp *remoteProxy) Get(ctx context.Context, w http.ResponseWriter, r *http.Request, cluster *arvados.Cluster, volmgr *RRVolumeManager) {
-       // Intervening proxies must not return a cached GET response
-       // to a prior request if a X-Keep-Signature request header has
-       // been added or changed.
-       w.Header().Add("Vary", "X-Keep-Signature")
-
-       token := GetAPIToken(r)
-       if token == "" {
-               http.Error(w, "no token provided in Authorization header", http.StatusUnauthorized)
-               return
-       }
-       if strings.SplitN(r.Header.Get("X-Keep-Signature"), ",", 2)[0] == "local" {
-               buf, err := getBufferWithContext(ctx, bufs, BlockSize)
-               if err != nil {
-                       http.Error(w, err.Error(), http.StatusServiceUnavailable)
-                       return
-               }
-               defer bufs.Put(buf)
-               rrc := &remoteResponseCacher{
-                       Locator:        r.URL.Path[1:],
-                       Token:          token,
-                       Buffer:         buf[:0],
-                       ResponseWriter: w,
-                       Context:        ctx,
-                       Cluster:        cluster,
-                       VolumeManager:  volmgr,
-               }
-               defer rrc.Close()
-               w = rrc
-       }
-       var remoteClient *keepclient.KeepClient
-       var parts []string
-       for i, part := range strings.Split(r.URL.Path[1:], "+") {
-               switch {
-               case i == 0:
-                       // don't try to parse hash part as hint
-               case strings.HasPrefix(part, "A"):
-                       // drop local permission hint
-                       continue
-               case len(part) > 7 && part[0] == 'R' && part[6] == '-':
-                       remoteID := part[1:6]
-                       remote, ok := cluster.RemoteClusters[remoteID]
-                       if !ok {
-                               http.Error(w, "remote cluster not configured", http.StatusBadRequest)
-                               return
-                       }
-                       kc, err := rp.remoteClient(remoteID, remote, token)
-                       if err == auth.ErrObsoleteToken {
-                               http.Error(w, err.Error(), http.StatusBadRequest)
-                               return
-                       } else if err != nil {
-                               http.Error(w, err.Error(), http.StatusInternalServerError)
-                               return
-                       }
-                       remoteClient = kc
-                       part = "A" + part[7:]
-               }
-               parts = append(parts, part)
-       }
-       if remoteClient == nil {
-               http.Error(w, "bad request", http.StatusBadRequest)
-               return
-       }
-       locator := strings.Join(parts, "+")
-       rdr, _, _, err := remoteClient.Get(locator)
-       switch err.(type) {
-       case nil:
-               defer rdr.Close()
-               io.Copy(w, rdr)
-       case *keepclient.ErrNotFound:
-               http.Error(w, err.Error(), http.StatusNotFound)
-       default:
-               http.Error(w, err.Error(), http.StatusBadGateway)
-       }
-}
-
-func (rp *remoteProxy) remoteClient(remoteID string, remoteCluster arvados.RemoteCluster, token string) (*keepclient.KeepClient, error) {
-       rp.mtx.Lock()
-       kc, ok := rp.clients[remoteID]
-       rp.mtx.Unlock()
-       if !ok {
-               c := &arvados.Client{
-                       APIHost:   remoteCluster.Host,
-                       AuthToken: "xxx",
-                       Insecure:  remoteCluster.Insecure,
-               }
-               ac, err := arvadosclient.New(c)
-               if err != nil {
-                       return nil, err
-               }
-               kc, err = keepclient.MakeKeepClient(ac)
-               if err != nil {
-                       return nil, err
-               }
-               kc.DiskCacheSize = keepclient.DiskCacheDisabled
-
-               rp.mtx.Lock()
-               if rp.clients == nil {
-                       rp.clients = map[string]*keepclient.KeepClient{remoteID: kc}
-               } else {
-                       rp.clients[remoteID] = kc
-               }
-               rp.mtx.Unlock()
-       }
-       accopy := *kc.Arvados
-       accopy.ApiToken = token
-       kccopy := kc.Clone()
-       kccopy.Arvados = &accopy
-       token, err := auth.SaltToken(token, remoteID)
-       if err != nil {
-               return nil, err
-       }
-       kccopy.Arvados.ApiToken = token
-       return kccopy, nil
-}
-
-var localOrRemoteSignature = regexp.MustCompile(`\+[AR][^\+]*`)
-
-// remoteResponseCacher wraps http.ResponseWriter. It buffers the
-// response data in the provided buffer, writes/touches a copy on a
-// local volume, adds a response header with a locally-signed locator,
-// and finally writes the data through.
-type remoteResponseCacher struct {
-       Locator       string
-       Token         string
-       Buffer        []byte
-       Context       context.Context
-       Cluster       *arvados.Cluster
-       VolumeManager *RRVolumeManager
-       http.ResponseWriter
-       statusCode int
-}
-
-func (rrc *remoteResponseCacher) Write(p []byte) (int, error) {
-       if len(rrc.Buffer)+len(p) > cap(rrc.Buffer) {
-               return 0, errors.New("buffer full")
-       }
-       rrc.Buffer = append(rrc.Buffer, p...)
-       return len(p), nil
-}
-
-func (rrc *remoteResponseCacher) WriteHeader(statusCode int) {
-       rrc.statusCode = statusCode
-}
-
-func (rrc *remoteResponseCacher) Close() error {
-       if rrc.statusCode == 0 {
-               rrc.statusCode = http.StatusOK
-       } else if rrc.statusCode != http.StatusOK {
-               rrc.ResponseWriter.WriteHeader(rrc.statusCode)
-               rrc.ResponseWriter.Write(rrc.Buffer)
-               return nil
-       }
-       _, err := PutBlock(rrc.Context, rrc.VolumeManager, rrc.Buffer, rrc.Locator[:32], nil)
-       if rrc.Context.Err() != nil {
-               // If caller hung up, log that instead of subsequent/misleading errors.
-               http.Error(rrc.ResponseWriter, rrc.Context.Err().Error(), http.StatusGatewayTimeout)
-               return err
-       }
-       if err == RequestHashError {
-               http.Error(rrc.ResponseWriter, "checksum mismatch in remote response", http.StatusBadGateway)
-               return err
-       }
-       if err, ok := err.(*KeepError); ok {
-               http.Error(rrc.ResponseWriter, err.Error(), err.HTTPCode)
-               return err
-       }
-       if err != nil {
-               http.Error(rrc.ResponseWriter, err.Error(), http.StatusBadGateway)
-               return err
-       }
-
-       unsigned := localOrRemoteSignature.ReplaceAllLiteralString(rrc.Locator, "")
-       expiry := time.Now().Add(rrc.Cluster.Collections.BlobSigningTTL.Duration())
-       signed := SignLocator(rrc.Cluster, unsigned, rrc.Token, expiry)
-       if signed == unsigned {
-               err = errors.New("could not sign locator")
-               http.Error(rrc.ResponseWriter, err.Error(), http.StatusInternalServerError)
-               return err
-       }
-       rrc.Header().Set("X-Keep-Locator", signed)
-       rrc.ResponseWriter.WriteHeader(rrc.statusCode)
-       _, err = rrc.ResponseWriter.Write(rrc.Buffer)
-       return err
-}
index 534371cc0ece83ef3a0cead670d1612ec8f57172..886754e14a422d226ccc34c316a608a10bf36f27 100644 (file)
@@ -5,7 +5,6 @@
 package keepstore
 
 import (
-       "context"
        "crypto/md5"
        "encoding/json"
        "fmt"
@@ -20,16 +19,18 @@ import (
        "git.arvados.org/arvados.git/sdk/go/arvados"
        "git.arvados.org/arvados.git/sdk/go/arvadostest"
        "git.arvados.org/arvados.git/sdk/go/auth"
+       "git.arvados.org/arvados.git/sdk/go/ctxlog"
+       "git.arvados.org/arvados.git/sdk/go/httpserver"
        "git.arvados.org/arvados.git/sdk/go/keepclient"
        "github.com/prometheus/client_golang/prometheus"
        check "gopkg.in/check.v1"
 )
 
-var _ = check.Suite(&ProxyRemoteSuite{})
+var _ = check.Suite(&proxyRemoteSuite{})
 
-type ProxyRemoteSuite struct {
+type proxyRemoteSuite struct {
        cluster *arvados.Cluster
-       handler *handler
+       handler *router
 
        remoteClusterID      string
        remoteBlobSigningKey []byte
@@ -40,7 +41,7 @@ type ProxyRemoteSuite struct {
        remoteAPI            *httptest.Server
 }
 
-func (s *ProxyRemoteSuite) remoteKeepproxyHandler(w http.ResponseWriter, r *http.Request) {
+func (s *proxyRemoteSuite) remoteKeepproxyHandler(w http.ResponseWriter, r *http.Request) {
        expectToken, err := auth.SaltToken(arvadostest.ActiveTokenV2, s.remoteClusterID)
        if err != nil {
                panic(err)
@@ -57,7 +58,7 @@ func (s *ProxyRemoteSuite) remoteKeepproxyHandler(w http.ResponseWriter, r *http
        http.Error(w, "404", 404)
 }
 
-func (s *ProxyRemoteSuite) remoteAPIHandler(w http.ResponseWriter, r *http.Request) {
+func (s *proxyRemoteSuite) remoteAPIHandler(w http.ResponseWriter, r *http.Request) {
        host, port, _ := net.SplitHostPort(strings.Split(s.remoteKeepproxy.URL, "//")[1])
        portnum, _ := strconv.Atoi(port)
        if r.URL.Path == "/arvados/v1/discovery/v1/rest" {
@@ -81,15 +82,13 @@ func (s *ProxyRemoteSuite) remoteAPIHandler(w http.ResponseWriter, r *http.Reque
        http.Error(w, "404", 404)
 }
 
-func (s *ProxyRemoteSuite) SetUpTest(c *check.C) {
+func (s *proxyRemoteSuite) SetUpTest(c *check.C) {
        s.remoteClusterID = "z0000"
        s.remoteBlobSigningKey = []byte("3b6df6fb6518afe12922a5bc8e67bf180a358bc8")
-       s.remoteKeepproxy = httptest.NewServer(http.HandlerFunc(s.remoteKeepproxyHandler))
+       s.remoteKeepproxy = httptest.NewServer(httpserver.LogRequests(http.HandlerFunc(s.remoteKeepproxyHandler)))
        s.remoteAPI = httptest.NewUnstartedServer(http.HandlerFunc(s.remoteAPIHandler))
        s.remoteAPI.StartTLS()
        s.cluster = testCluster(c)
-       s.cluster.Collections.BlobSigningKey = knownKey
-       s.cluster.SystemRootToken = arvadostest.SystemRootToken
        s.cluster.RemoteClusters = map[string]arvados.RemoteCluster{
                s.remoteClusterID: {
                        Host:     strings.Split(s.remoteAPI.URL, "//")[1],
@@ -98,17 +97,21 @@ func (s *ProxyRemoteSuite) SetUpTest(c *check.C) {
                        Insecure: true,
                },
        }
-       s.cluster.Volumes = map[string]arvados.Volume{"zzzzz-nyw5e-000000000000000": {Driver: "mock"}}
-       s.handler = &handler{}
-       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+       s.cluster.Volumes = map[string]arvados.Volume{"zzzzz-nyw5e-000000000000000": {Driver: "stub"}}
 }
 
-func (s *ProxyRemoteSuite) TearDownTest(c *check.C) {
+func (s *proxyRemoteSuite) TearDownTest(c *check.C) {
        s.remoteAPI.Close()
        s.remoteKeepproxy.Close()
 }
 
-func (s *ProxyRemoteSuite) TestProxyRemote(c *check.C) {
+func (s *proxyRemoteSuite) TestProxyRemote(c *check.C) {
+       reg := prometheus.NewRegistry()
+       router, cancel := testRouter(c, s.cluster, reg)
+       defer cancel()
+       instrumented := httpserver.Instrument(reg, ctxlog.TestLogger(c), router)
+       handler := httpserver.LogRequests(instrumented.ServeAPI(s.cluster.ManagementToken, instrumented))
+
        data := []byte("foo bar")
        s.remoteKeepData = data
        locator := fmt.Sprintf("%x+%d", md5.Sum(data), len(data))
@@ -172,7 +175,7 @@ func (s *ProxyRemoteSuite) TestProxyRemote(c *check.C) {
                        expectSignature:  true,
                },
        } {
-               c.Logf("trial: %s", trial.label)
+               c.Logf("=== trial: %s", trial.label)
 
                s.remoteKeepRequests = 0
 
@@ -184,11 +187,18 @@ func (s *ProxyRemoteSuite) TestProxyRemote(c *check.C) {
                        req.Header.Set("X-Keep-Signature", trial.xKeepSignature)
                }
                resp = httptest.NewRecorder()
-               s.handler.ServeHTTP(resp, req)
+               handler.ServeHTTP(resp, req)
                c.Check(s.remoteKeepRequests, check.Equals, trial.expectRemoteReqs)
-               c.Check(resp.Code, check.Equals, trial.expectCode)
+               if !c.Check(resp.Code, check.Equals, trial.expectCode) {
+                       c.Logf("resp.Code %d came with resp.Body %q", resp.Code, resp.Body.String())
+               }
                if resp.Code == http.StatusOK {
-                       c.Check(resp.Body.String(), check.Equals, string(data))
+                       if trial.method == "HEAD" {
+                               c.Check(resp.Body.String(), check.Equals, "")
+                               c.Check(resp.Result().ContentLength, check.Equals, int64(len(data)))
+                       } else {
+                               c.Check(resp.Body.String(), check.Equals, string(data))
+                       }
                } else {
                        c.Check(resp.Body.String(), check.Not(check.Equals), string(data))
                }
@@ -203,13 +213,13 @@ func (s *ProxyRemoteSuite) TestProxyRemote(c *check.C) {
 
                c.Check(locHdr, check.Not(check.Equals), "")
                c.Check(locHdr, check.Not(check.Matches), `.*\+R.*`)
-               c.Check(VerifySignature(s.cluster, locHdr, trial.token), check.IsNil)
+               c.Check(arvados.VerifySignature(locHdr, trial.token, s.cluster.Collections.BlobSigningTTL.Duration(), []byte(s.cluster.Collections.BlobSigningKey)), check.IsNil)
 
                // Ensure block can be requested using new signature
                req = httptest.NewRequest("GET", "/"+locHdr, nil)
                req.Header.Set("Authorization", "Bearer "+trial.token)
                resp = httptest.NewRecorder()
-               s.handler.ServeHTTP(resp, req)
+               handler.ServeHTTP(resp, req)
                c.Check(resp.Code, check.Equals, http.StatusOK)
                c.Check(s.remoteKeepRequests, check.Equals, trial.expectRemoteReqs)
        }
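
A minimal sketch of the signature check used in the rewritten test above, assuming the caller already has the signed X-Keep-Locator value, the request token, and the cluster's blob signing TTL and key; the helper name and package are illustrative, not part of the commit:

package signaturecheck

import (
	"fmt"
	"time"

	"git.arvados.org/arvados.git/sdk/go/arvados"
)

// verifyReturnedLocator checks a signed locator the way the updated
// TestProxyRemote does: with the SDK-level arvados.VerifySignature
// rather than the removed keepstore-internal helper.
func verifyReturnedLocator(signedLocator, token string, ttl time.Duration, blobSigningKey []byte) error {
	if err := arvados.VerifySignature(signedLocator, token, ttl, blobSigningKey); err != nil {
		return fmt.Errorf("X-Keep-Locator signature did not verify: %w", err)
	}
	return nil
}
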
diff --git a/services/keepstore/pull_worker.go b/services/keepstore/pull_worker.go
index 348bfb4df00087a1726ef36cbd186fe0eb5ea4c7..dc5eabaa15bbc0b4c5e94add8b5bc461aad998ed 100644 (file)
 package keepstore
 
 import (
+       "bytes"
        "context"
-       "fmt"
-       "io"
-       "io/ioutil"
-       "time"
+       "sync"
+       "sync/atomic"
 
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/arvadosclient"
        "git.arvados.org/arvados.git/sdk/go/keepclient"
+       "github.com/prometheus/client_golang/prometheus"
 )
 
-// RunPullWorker receives PullRequests from pullq, invokes
-// PullItemAndProcess on each one. After each PR, it logs a message
-// indicating whether the pull was successful.
-func (h *handler) runPullWorker(pullq *WorkQueue) {
-       for item := range pullq.NextItem {
-               pr := item.(PullRequest)
-               err := h.pullItemAndProcess(pr)
-               pullq.DoneItem <- struct{}{}
-               if err == nil {
-                       h.Logger.Printf("Pull %s success", pr)
-               } else {
-                       h.Logger.Printf("Pull %s error: %s", pr, err)
-               }
-       }
+type PullListItem struct {
+       Locator   string   `json:"locator"`
+       Servers   []string `json:"servers"`
+       MountUUID string   `json:"mount_uuid"` // Destination mount, or "" for "anywhere"
 }
 
-// PullItemAndProcess executes a pull request by retrieving the
-// specified block from one of the specified servers, and storing it
-// on a local volume.
-//
-// If the PR specifies a non-blank mount UUID, PullItemAndProcess will
-// only attempt to write the data to the corresponding
-// volume. Otherwise it writes to any local volume, as a PUT request
-// would.
-func (h *handler) pullItemAndProcess(pullRequest PullRequest) error {
-       var vol *VolumeMount
-       if uuid := pullRequest.MountUUID; uuid != "" {
-               vol = h.volmgr.Lookup(pullRequest.MountUUID, true)
-               if vol == nil {
-                       return fmt.Errorf("pull req has nonexistent mount: %v", pullRequest)
-               }
-       }
+type puller struct {
+       keepstore  *keepstore
+       todo       []PullListItem
+       cond       *sync.Cond // lock guards todo accesses; cond broadcasts when todo becomes non-empty
+       inprogress atomic.Int64
+}
 
-       // Make a private copy of keepClient so we can set
-       // ServiceRoots to the source servers specified in the pull
-       // request.
-       keepClient := h.keepClient.Clone()
-       serviceRoots := make(map[string]string)
-       for _, addr := range pullRequest.Servers {
-               serviceRoots[addr] = addr
+func newPuller(ctx context.Context, keepstore *keepstore, reg *prometheus.Registry) *puller {
+       p := &puller{
+               keepstore: keepstore,
+               cond:      sync.NewCond(&sync.Mutex{}),
        }
-       keepClient.SetServiceRoots(serviceRoots, nil, nil)
+       reg.MustRegister(prometheus.NewGaugeFunc(
+               prometheus.GaugeOpts{
+                       Namespace: "arvados",
+                       Subsystem: "keepstore",
+                       Name:      "pull_queue_pending_entries",
+                       Help:      "Number of queued pull requests",
+               },
+               func() float64 {
+                       p.cond.L.Lock()
+                       defer p.cond.L.Unlock()
+                       return float64(len(p.todo))
+               },
+       ))
+       reg.MustRegister(prometheus.NewGaugeFunc(
+               prometheus.GaugeOpts{
+                       Namespace: "arvados",
+                       Subsystem: "keepstore",
+                       Name:      "pull_queue_inprogress_entries",
+                       Help:      "Number of pull requests in progress",
+               },
+               func() float64 {
+                       return float64(p.inprogress.Load())
+               },
+       ))
+       if len(p.keepstore.mountsW) == 0 {
+               keepstore.logger.Infof("not running pull worker because there are no writable volumes")
+               return p
+       }
+       for i := 0; i < 1 || i < keepstore.cluster.Collections.BlobReplicateConcurrency; i++ {
+               go p.runWorker(ctx)
+       }
+       return p
+}
 
-       signedLocator := SignLocator(h.Cluster, pullRequest.Locator, keepClient.Arvados.ApiToken, time.Now().Add(time.Minute))
+func (p *puller) SetPullList(newlist []PullListItem) {
+       p.cond.L.Lock()
+       p.todo = newlist
+       p.cond.L.Unlock()
+       p.cond.Broadcast()
+}
 
-       reader, _, _, err := GetContent(signedLocator, keepClient)
-       if err != nil {
-               return err
+func (p *puller) runWorker(ctx context.Context) {
+       if len(p.keepstore.mountsW) == 0 {
+               p.keepstore.logger.Infof("not running pull worker because there are no writable volumes")
+               return
        }
-       if reader == nil {
-               return fmt.Errorf("No reader found for : %s", signedLocator)
+       c, err := arvados.NewClientFromConfig(p.keepstore.cluster)
+       if err != nil {
+               p.keepstore.logger.Errorf("error setting up pull worker: %s", err)
+               return
        }
-       defer reader.Close()
-
-       readContent, err := ioutil.ReadAll(reader)
+       c.AuthToken = "keepstore-token-used-for-pulling-data-from-same-cluster"
+       ac, err := arvadosclient.New(c)
        if err != nil {
-               return err
+               p.keepstore.logger.Errorf("error setting up pull worker: %s", err)
+               return
        }
-
-       if readContent == nil {
-               return fmt.Errorf("Content not found for: %s", signedLocator)
+       keepClient := &keepclient.KeepClient{
+               Arvados:       ac,
+               Want_replicas: 1,
+               DiskCacheSize: keepclient.DiskCacheDisabled,
        }
+       // Ensure the loop below wakes up and returns when ctx
+       // cancels, even if pull list is empty.
+       go func() {
+               <-ctx.Done()
+               p.cond.Broadcast()
+       }()
+       for {
+               p.cond.L.Lock()
+               for len(p.todo) == 0 && ctx.Err() == nil {
+                       p.cond.Wait()
+               }
+               if ctx.Err() != nil {
+                       return
+               }
+               item := p.todo[0]
+               p.todo = p.todo[1:]
+               p.inprogress.Add(1)
+               p.cond.L.Unlock()
 
-       return writePulledBlock(h.volmgr, vol, readContent, pullRequest.Locator)
-}
+               func() {
+                       defer p.inprogress.Add(-1)
 
-// GetContent fetches the content for the given locator using keepclient.
-var GetContent = func(signedLocator string, keepClient *keepclient.KeepClient) (io.ReadCloser, int64, string, error) {
-       return keepClient.Get(signedLocator)
-}
+                       logger := p.keepstore.logger.WithField("locator", item.Locator)
+
+                       li, err := getLocatorInfo(item.Locator)
+                       if err != nil {
+                               logger.Warn("ignoring pull request for invalid locator")
+                               return
+                       }
+
+                       var dst *mount
+                       if item.MountUUID != "" {
+                               dst = p.keepstore.mounts[item.MountUUID]
+                               if dst == nil {
+                                       logger.Warnf("ignoring pull list entry for nonexistent mount %s", item.MountUUID)
+                                       return
+                               } else if !dst.AllowWrite {
+                                       logger.Warnf("ignoring pull list entry for readonly mount %s", item.MountUUID)
+                                       return
+                               }
+                       } else {
+                               dst = p.keepstore.rendezvous(item.Locator, p.keepstore.mountsW)[0]
+                       }
+
+                       serviceRoots := make(map[string]string)
+                       for _, addr := range item.Servers {
+                               serviceRoots[addr] = addr
+                       }
+                       keepClient.SetServiceRoots(serviceRoots, nil, nil)
+
+                       signedLocator := p.keepstore.signLocator(c.AuthToken, item.Locator)
 
-var writePulledBlock = func(volmgr *RRVolumeManager, volume Volume, data []byte, locator string) error {
-       if volume != nil {
-               return volume.Put(context.Background(), locator, data)
+                       buf := bytes.NewBuffer(nil)
+                       _, err = keepClient.BlockRead(ctx, arvados.BlockReadOptions{
+                               Locator: signedLocator,
+                               WriteTo: buf,
+                       })
+                       if err != nil {
+                               logger.WithError(err).Warnf("error pulling data from remote servers (%s)", item.Servers)
+                               return
+                       }
+                       err = dst.BlockWrite(ctx, li.hash, buf.Bytes())
+                       if err != nil {
+                               logger.WithError(err).Warnf("error writing data to %s", dst.UUID)
+                               return
+                       }
+                       logger.Info("block pulled")
+               }()
        }
-       _, err := PutBlock(context.Background(), volmgr, data, locator, nil)
-       return err
 }
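
The new puller consumes PullListItem entries posted to the admin-only PUT /pull endpoint (see handlePullList in router.go below). A rough client-side sketch of submitting such a list, with a placeholder keepstore URL and system root token and a hypothetical sendPullList helper:

package pullexample

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// pullListItem mirrors keepstore's PullListItem JSON encoding.
type pullListItem struct {
	Locator   string   `json:"locator"`    // must carry a size hint, e.g. "acbd18db4cc2f85cedef654fccc4a4d8+3"
	Servers   []string `json:"servers"`    // source server base URLs to pull from
	MountUUID string   `json:"mount_uuid"` // destination mount, or "" for any writable volume
}

// sendPullList replaces the current pull list on one keepstore server.
// The endpoint is admin-only, so it needs the cluster's SystemRootToken.
func sendPullList(keepstoreURL, systemRootToken string, items []pullListItem) error {
	body, err := json.Marshal(items)
	if err != nil {
		return err
	}
	req, err := http.NewRequest(http.MethodPut, keepstoreURL+"/pull", bytes.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+systemRootToken)
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("pull list rejected: %s", resp.Status)
	}
	return nil
}
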
diff --git a/services/keepstore/pull_worker_integration_test.go b/services/keepstore/pull_worker_integration_test.go
deleted file mode 100644 (file)
index 3855b4e..0000000
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-import (
-       "bytes"
-       "context"
-       "errors"
-       "io"
-       "io/ioutil"
-       "strings"
-
-       "git.arvados.org/arvados.git/sdk/go/arvadostest"
-       "git.arvados.org/arvados.git/sdk/go/keepclient"
-       "github.com/prometheus/client_golang/prometheus"
-       check "gopkg.in/check.v1"
-)
-
-type PullWorkIntegrationTestData struct {
-       Name     string
-       Locator  string
-       Content  string
-       GetError string
-}
-
-func (s *HandlerSuite) setupPullWorkerIntegrationTest(c *check.C, testData PullWorkIntegrationTestData, wantData bool) PullRequest {
-       arvadostest.StartKeep(2, false)
-       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-       // Put content if the test needs it
-       if wantData {
-               locator, _, err := s.handler.keepClient.PutB([]byte(testData.Content))
-               if err != nil {
-                       c.Errorf("Error putting test data in setup for %s %s %v", testData.Content, locator, err)
-               }
-               if locator == "" {
-                       c.Errorf("No locator found after putting test data")
-               }
-       }
-
-       // Create pullRequest for the test
-       pullRequest := PullRequest{
-               Locator: testData.Locator,
-       }
-       return pullRequest
-}
-
-// Do a get on a block that is not existing in any of the keep servers.
-// Expect "block not found" error.
-func (s *HandlerSuite) TestPullWorkerIntegration_GetNonExistingLocator(c *check.C) {
-       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-       testData := PullWorkIntegrationTestData{
-               Name:     "TestPullWorkerIntegration_GetLocator",
-               Locator:  "5d41402abc4b2a76b9719d911017c592",
-               Content:  "hello",
-               GetError: "Block not found",
-       }
-
-       pullRequest := s.setupPullWorkerIntegrationTest(c, testData, false)
-       defer arvadostest.StopKeep(2)
-
-       s.performPullWorkerIntegrationTest(testData, pullRequest, c)
-}
-
-// Do a get on a block that exists on one of the keep servers.
-// The setup method will create this block before doing the get.
-func (s *HandlerSuite) TestPullWorkerIntegration_GetExistingLocator(c *check.C) {
-       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-       testData := PullWorkIntegrationTestData{
-               Name:     "TestPullWorkerIntegration_GetLocator",
-               Locator:  "5d41402abc4b2a76b9719d911017c592",
-               Content:  "hello",
-               GetError: "",
-       }
-
-       pullRequest := s.setupPullWorkerIntegrationTest(c, testData, true)
-       defer arvadostest.StopKeep(2)
-
-       s.performPullWorkerIntegrationTest(testData, pullRequest, c)
-}
-
-// Perform the test.
-// The test directly invokes the "PullItemAndProcess" rather than
-// putting an item on the pullq so that the errors can be verified.
-func (s *HandlerSuite) performPullWorkerIntegrationTest(testData PullWorkIntegrationTestData, pullRequest PullRequest, c *check.C) {
-
-       // Override writePulledBlock to mock PutBlock functionality
-       defer func(orig func(*RRVolumeManager, Volume, []byte, string) error) { writePulledBlock = orig }(writePulledBlock)
-       writePulledBlock = func(_ *RRVolumeManager, _ Volume, content []byte, _ string) error {
-               c.Check(string(content), check.Equals, testData.Content)
-               return nil
-       }
-
-       // Override GetContent to mock keepclient Get functionality
-       defer func(orig func(string, *keepclient.KeepClient) (io.ReadCloser, int64, string, error)) {
-               GetContent = orig
-       }(GetContent)
-       GetContent = func(signedLocator string, keepClient *keepclient.KeepClient) (reader io.ReadCloser, contentLength int64, url string, err error) {
-               if testData.GetError != "" {
-                       return nil, 0, "", errors.New(testData.GetError)
-               }
-               rdr := ioutil.NopCloser(bytes.NewBufferString(testData.Content))
-               return rdr, int64(len(testData.Content)), "", nil
-       }
-
-       err := s.handler.pullItemAndProcess(pullRequest)
-
-       if len(testData.GetError) > 0 {
-               if (err == nil) || (!strings.Contains(err.Error(), testData.GetError)) {
-                       c.Errorf("Got error %v, expected %v", err, testData.GetError)
-               }
-       } else {
-               if err != nil {
-                       c.Errorf("Got error %v, expected nil", err)
-               }
-       }
-}
diff --git a/services/keepstore/pull_worker_test.go b/services/keepstore/pull_worker_test.go
index 2626e66d8898745b9f29c42d9beda9ee580626a4..d109b56df3cee8e2ac3259ebb784fe4cfdacc20b 100644 (file)
@@ -7,309 +7,130 @@ package keepstore
 import (
        "bytes"
        "context"
+       "crypto/md5"
+       "encoding/json"
        "errors"
+       "fmt"
        "io"
-       "io/ioutil"
        "net/http"
+       "net/http/httptest"
+       "sort"
        "time"
 
        "git.arvados.org/arvados.git/sdk/go/arvados"
-       "git.arvados.org/arvados.git/sdk/go/keepclient"
-       "github.com/prometheus/client_golang/prometheus"
+       "git.arvados.org/arvados.git/sdk/go/arvadostest"
+       "github.com/sirupsen/logrus"
        . "gopkg.in/check.v1"
-       check "gopkg.in/check.v1"
 )
 
-var _ = Suite(&PullWorkerTestSuite{})
-
-type PullWorkerTestSuite struct {
-       cluster *arvados.Cluster
-       handler *handler
-
-       testPullLists map[string]string
-       readContent   string
-       readError     error
-       putContent    []byte
-       putError      error
-}
-
-func (s *PullWorkerTestSuite) SetUpTest(c *C) {
-       s.cluster = testCluster(c)
-       s.cluster.Volumes = map[string]arvados.Volume{
-               "zzzzz-nyw5e-000000000000000": {Driver: "mock"},
-               "zzzzz-nyw5e-111111111111111": {Driver: "mock"},
+func (s *routerSuite) TestPullList_Execute(c *C) {
+       remotecluster := testCluster(c)
+       remotecluster.Volumes = map[string]arvados.Volume{
+               "zzzzz-nyw5e-rrrrrrrrrrrrrrr": {Replication: 1, Driver: "stub"},
        }
-       s.cluster.Collections.BlobReplicateConcurrency = 1
-
-       s.handler = &handler{}
-       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-       s.readContent = ""
-       s.readError = nil
-       s.putContent = []byte{}
-       s.putError = nil
-
-       // When a new pull request arrives, the old one will be overwritten.
-       // This behavior is verified using these two maps in the
-       // "TestPullWorkerPullList_with_two_items_latest_replacing_old"
-       s.testPullLists = make(map[string]string)
-}
-
-var firstPullList = []byte(`[
-               {
-                       "locator":"acbd18db4cc2f85cedef654fccc4a4d8+3",
-                       "servers":[
-                               "server_1",
-                               "server_2"
-                       ]
-               },{
-                       "locator":"37b51d194a7513e45b56f6524f2d51f2+3",
-                       "servers":[
-                               "server_3"
-                       ]
-               }
-       ]`)
-
-var secondPullList = []byte(`[
-               {
-                       "locator":"73feffa4b7f6bb68e44cf984c85f6e88+3",
-                       "servers":[
-                               "server_1",
-                               "server_2"
-                       ]
-               }
-       ]`)
-
-type PullWorkerTestData struct {
-       name         string
-       req          RequestTester
-       responseCode int
-       responseBody string
-       readContent  string
-       readError    bool
-       putError     bool
-}
-
-// Ensure MountUUID in a pull list is correctly translated to a Volume
-// argument passed to writePulledBlock().
-func (s *PullWorkerTestSuite) TestSpecifyMountUUID(c *C) {
-       defer func(f func(*RRVolumeManager, Volume, []byte, string) error) {
-               writePulledBlock = f
-       }(writePulledBlock)
-       pullq := s.handler.Handler.(*router).pullq
-
-       for _, spec := range []struct {
-               sendUUID     string
-               expectVolume Volume
-       }{
-               {
-                       sendUUID:     "",
-                       expectVolume: nil,
-               },
-               {
-                       sendUUID:     s.handler.volmgr.Mounts()[0].UUID,
-                       expectVolume: s.handler.volmgr.Mounts()[0].Volume,
-               },
-       } {
-               writePulledBlock = func(_ *RRVolumeManager, v Volume, _ []byte, _ string) error {
-                       c.Check(v, Equals, spec.expectVolume)
-                       return nil
+       remoterouter, cancel := testRouter(c, remotecluster, nil)
+       defer cancel()
+       remoteserver := httptest.NewServer(remoterouter)
+       defer remoteserver.Close()
+
+       router, cancel := testRouter(c, s.cluster, nil)
+       defer cancel()
+
+       executePullList := func(pullList []PullListItem) string {
+               var logbuf bytes.Buffer
+               logger := logrus.New()
+               logger.Out = &logbuf
+               router.keepstore.logger = logger
+
+               listjson, err := json.Marshal(pullList)
+               c.Assert(err, IsNil)
+               resp := call(router, "PUT", "http://example/pull", s.cluster.SystemRootToken, listjson, nil)
+               c.Check(resp.Code, Equals, http.StatusOK)
+               for {
+                       router.puller.cond.L.Lock()
+                       todolen := len(router.puller.todo)
+                       router.puller.cond.L.Unlock()
+                       if todolen == 0 && router.puller.inprogress.Load() == 0 {
+                               break
+                       }
+                       time.Sleep(time.Millisecond)
                }
-
-               resp := IssueRequest(s.handler, &RequestTester{
-                       uri:      "/pull",
-                       apiToken: s.cluster.SystemRootToken,
-                       method:   "PUT",
-                       requestBody: []byte(`[{
-                               "locator":"acbd18db4cc2f85cedef654fccc4a4d8+3",
-                               "servers":["server_1","server_2"],
-                               "mount_uuid":"` + spec.sendUUID + `"}]`),
-               })
-               c.Assert(resp.Code, Equals, http.StatusOK)
-               expectEqualWithin(c, time.Second, 0, func() interface{} {
-                       st := pullq.Status()
-                       return st.InProgress + st.Queued
-               })
-       }
-}
-
-func (s *PullWorkerTestSuite) TestPullWorkerPullList_with_two_locators(c *C) {
-       testData := PullWorkerTestData{
-               name:         "TestPullWorkerPullList_with_two_locators",
-               req:          RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", firstPullList, ""},
-               responseCode: http.StatusOK,
-               responseBody: "Received 2 pull requests\n",
-               readContent:  "hello",
-               readError:    false,
-               putError:     false,
-       }
-
-       s.performTest(testData, c)
-}
-
-func (s *PullWorkerTestSuite) TestPullWorkerPullList_with_one_locator(c *C) {
-       testData := PullWorkerTestData{
-               name:         "TestPullWorkerPullList_with_one_locator",
-               req:          RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", secondPullList, ""},
-               responseCode: http.StatusOK,
-               responseBody: "Received 1 pull requests\n",
-               readContent:  "hola",
-               readError:    false,
-               putError:     false,
-       }
-
-       s.performTest(testData, c)
-}
-
-func (s *PullWorkerTestSuite) TestPullWorker_error_on_get_one_locator(c *C) {
-       testData := PullWorkerTestData{
-               name:         "TestPullWorker_error_on_get_one_locator",
-               req:          RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", secondPullList, ""},
-               responseCode: http.StatusOK,
-               responseBody: "Received 1 pull requests\n",
-               readContent:  "unused",
-               readError:    true,
-               putError:     false,
+               return logbuf.String()
        }
 
-       s.performTest(testData, c)
-}
-
-func (s *PullWorkerTestSuite) TestPullWorker_error_on_get_two_locators(c *C) {
-       testData := PullWorkerTestData{
-               name:         "TestPullWorker_error_on_get_two_locators",
-               req:          RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", firstPullList, ""},
-               responseCode: http.StatusOK,
-               responseBody: "Received 2 pull requests\n",
-               readContent:  "unused",
-               readError:    true,
-               putError:     false,
-       }
-
-       s.performTest(testData, c)
-}
-
-func (s *PullWorkerTestSuite) TestPullWorker_error_on_put_one_locator(c *C) {
-       testData := PullWorkerTestData{
-               name:         "TestPullWorker_error_on_put_one_locator",
-               req:          RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", secondPullList, ""},
-               responseCode: http.StatusOK,
-               responseBody: "Received 1 pull requests\n",
-               readContent:  "hello hello",
-               readError:    false,
-               putError:     true,
-       }
-
-       s.performTest(testData, c)
-}
-
-func (s *PullWorkerTestSuite) TestPullWorker_error_on_put_two_locators(c *C) {
-       testData := PullWorkerTestData{
-               name:         "TestPullWorker_error_on_put_two_locators",
-               req:          RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", firstPullList, ""},
-               responseCode: http.StatusOK,
-               responseBody: "Received 2 pull requests\n",
-               readContent:  "hello again",
-               readError:    false,
-               putError:     true,
-       }
-
-       s.performTest(testData, c)
-}
-
-// In this case, the item will not be placed on pullq
-func (s *PullWorkerTestSuite) TestPullWorker_invalidToken(c *C) {
-       testData := PullWorkerTestData{
-               name:         "TestPullWorkerPullList_with_two_locators",
-               req:          RequestTester{"/pull", "invalidToken", "PUT", firstPullList, ""},
-               responseCode: http.StatusUnauthorized,
-               responseBody: "Unauthorized\n",
-               readContent:  "hello",
-               readError:    false,
-               putError:     false,
-       }
-
-       s.performTest(testData, c)
-}
-
-func (s *PullWorkerTestSuite) performTest(testData PullWorkerTestData, c *C) {
-       pullq := s.handler.Handler.(*router).pullq
-
-       s.testPullLists[testData.name] = testData.responseBody
-
-       processedPullLists := make(map[string]string)
-
-       // Override GetContent to mock keepclient Get functionality
-       defer func(orig func(string, *keepclient.KeepClient) (io.ReadCloser, int64, string, error)) {
-               GetContent = orig
-       }(GetContent)
-       GetContent = func(signedLocator string, keepClient *keepclient.KeepClient) (reader io.ReadCloser, contentLength int64, url string, err error) {
-               c.Assert(getStatusItem(s.handler, "PullQueue", "InProgress"), Equals, float64(1))
-               processedPullLists[testData.name] = testData.responseBody
-               if testData.readError {
-                       err = errors.New("Error getting data")
-                       s.readError = err
-                       return
-               }
-               s.readContent = testData.readContent
-               reader = ioutil.NopCloser(bytes.NewBufferString(testData.readContent))
-               contentLength = int64(len(testData.readContent))
-               return
+       newRemoteBlock := func(datastring string) string {
+               data := []byte(datastring)
+               hash := fmt.Sprintf("%x", md5.Sum(data))
+               locator := fmt.Sprintf("%s+%d", hash, len(data))
+               _, err := remoterouter.keepstore.BlockWrite(context.Background(), arvados.BlockWriteOptions{
+                       Hash: hash,
+                       Data: data,
+               })
+               c.Assert(err, IsNil)
+               return locator
        }
 
-       // Override writePulledBlock to mock PutBlock functionality
-       defer func(orig func(*RRVolumeManager, Volume, []byte, string) error) { writePulledBlock = orig }(writePulledBlock)
-       writePulledBlock = func(_ *RRVolumeManager, v Volume, content []byte, locator string) error {
-               if testData.putError {
-                       s.putError = errors.New("Error putting data")
-                       return s.putError
-               }
-               s.putContent = content
-               return nil
+       mounts := append([]*mount(nil), router.keepstore.mountsR...)
+       sort.Slice(mounts, func(i, j int) bool { return mounts[i].UUID < mounts[j].UUID })
+       var vols []*stubVolume
+       for _, mount := range mounts {
+               vols = append(vols, mount.volume.(*stubVolume))
        }
 
-       c.Check(getStatusItem(s.handler, "PullQueue", "InProgress"), Equals, float64(0))
-       c.Check(getStatusItem(s.handler, "PullQueue", "Queued"), Equals, float64(0))
-       c.Check(getStatusItem(s.handler, "Version"), Not(Equals), "")
-
-       response := IssueRequest(s.handler, &testData.req)
-       c.Assert(response.Code, Equals, testData.responseCode)
-       c.Assert(response.Body.String(), Equals, testData.responseBody)
+       ctx := authContext(arvadostest.ActiveTokenV2)
 
-       expectEqualWithin(c, time.Second, 0, func() interface{} {
-               st := pullq.Status()
-               return st.InProgress + st.Queued
-       })
+       locator := newRemoteBlock("pull available block to unspecified volume")
+       executePullList([]PullListItem{{
+               Locator: locator,
+               Servers: []string{remoteserver.URL}}})
+       _, err := router.keepstore.BlockRead(ctx, arvados.BlockReadOptions{
+               Locator: router.keepstore.signLocator(arvadostest.ActiveTokenV2, locator),
+               WriteTo: io.Discard})
+       c.Check(err, IsNil)
 
-       if testData.name == "TestPullWorkerPullList_with_two_items_latest_replacing_old" {
-               c.Assert(len(s.testPullLists), Equals, 2)
-               c.Assert(len(processedPullLists), Equals, 1)
-               c.Assert(s.testPullLists["Added_before_actual_test_item"], NotNil)
-               c.Assert(s.testPullLists["TestPullWorkerPullList_with_two_items_latest_replacing_old"], NotNil)
-               c.Assert(processedPullLists["TestPullWorkerPullList_with_two_items_latest_replacing_old"], NotNil)
-       } else {
-               if testData.responseCode == http.StatusOK {
-                       c.Assert(len(s.testPullLists), Equals, 1)
-                       c.Assert(len(processedPullLists), Equals, 1)
-                       c.Assert(s.testPullLists[testData.name], NotNil)
-               } else {
-                       c.Assert(len(s.testPullLists), Equals, 1)
-                       c.Assert(len(processedPullLists), Equals, 0)
-               }
-       }
-
-       if testData.readError {
-               c.Assert(s.readError, NotNil)
-       } else if testData.responseCode == http.StatusOK {
-               c.Assert(s.readError, IsNil)
-               c.Assert(s.readContent, Equals, testData.readContent)
-               if testData.putError {
-                       c.Assert(s.putError, NotNil)
-               } else {
-                       c.Assert(s.putError, IsNil)
-                       c.Assert(string(s.putContent), Equals, testData.readContent)
-               }
-       }
-
-       expectChannelEmpty(c, pullq.NextItem)
+       locator0 := newRemoteBlock("pull available block to specified volume 0")
+       locator1 := newRemoteBlock("pull available block to specified volume 1")
+       executePullList([]PullListItem{
+               {
+                       Locator:   locator0,
+                       Servers:   []string{remoteserver.URL},
+                       MountUUID: vols[0].params.UUID},
+               {
+                       Locator:   locator1,
+                       Servers:   []string{remoteserver.URL},
+                       MountUUID: vols[1].params.UUID}})
+       c.Check(vols[0].data[locator0[:32]].data, NotNil)
+       c.Check(vols[1].data[locator1[:32]].data, NotNil)
+
+       locator = fooHash + "+3"
+       logs := executePullList([]PullListItem{{
+               Locator: locator,
+               Servers: []string{remoteserver.URL}}})
+       c.Check(logs, Matches, ".*error pulling data from remote servers.*Block not found.*locator=acbd.*\n")
+
+       locator = fooHash + "+3"
+       logs = executePullList([]PullListItem{{
+               Locator: locator,
+               Servers: []string{"http://0.0.0.0:9/"}}})
+       c.Check(logs, Matches, ".*error pulling data from remote servers.*connection refused.*locator=acbd.*\n")
+
+       locator = newRemoteBlock("log error writing to local volume")
+       vols[0].blockWrite = func(context.Context, string, []byte) error { return errors.New("test error") }
+       vols[1].blockWrite = vols[0].blockWrite
+       logs = executePullList([]PullListItem{{
+               Locator: locator,
+               Servers: []string{remoteserver.URL}}})
+       c.Check(logs, Matches, ".*error writing data to zzzzz-nyw5e-.*error=\"test error\".*locator=.*\n")
+       vols[0].blockWrite = nil
+       vols[1].blockWrite = nil
+
+       locator = newRemoteBlock("log error when destination mount does not exist")
+       logs = executePullList([]PullListItem{{
+               Locator:   locator,
+               Servers:   []string{remoteserver.URL},
+               MountUUID: "bogus-mount-uuid"}})
+       c.Check(logs, Matches, ".*ignoring pull list entry for nonexistent mount bogus-mount-uuid.*locator=.*\n")
+
+       logs = executePullList([]PullListItem{})
+       c.Logf("%s", logs)
 }
diff --git a/services/keepstore/putprogress.go b/services/keepstore/putprogress.go
new file mode 100644 (file)
index 0000000..e02b2d0
--- /dev/null
@@ -0,0 +1,101 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package keepstore
+
+import (
+       "github.com/sirupsen/logrus"
+)
+
+type putProgress struct {
+       classNeeded      map[string]bool
+       classTodo        map[string]bool
+       mountUsed        map[*mount]bool
+       totalReplication int
+       classDone        map[string]int
+}
+
+func (pr *putProgress) Add(mnt *mount) {
+       if pr.mountUsed[mnt] {
+               logrus.Warnf("BUG? superfluous extra write to mount %s", mnt.UUID)
+               return
+       }
+       pr.mountUsed[mnt] = true
+       pr.totalReplication += mnt.Replication
+       for class := range mnt.StorageClasses {
+               pr.classDone[class] += mnt.Replication
+               delete(pr.classTodo, class)
+       }
+}
+
+func (pr *putProgress) Sub(mnt *mount) {
+       if !pr.mountUsed[mnt] {
+               logrus.Warnf("BUG? Sub called with no prior matching Add: %s", mnt.UUID)
+               return
+       }
+       pr.mountUsed[mnt] = false
+       pr.totalReplication -= mnt.Replication
+       for class := range mnt.StorageClasses {
+               pr.classDone[class] -= mnt.Replication
+               if pr.classNeeded[class] {
+                       pr.classTodo[class] = true
+               }
+       }
+}
+
+func (pr *putProgress) Done() bool {
+       return len(pr.classTodo) == 0 && pr.totalReplication > 0
+}
+
+func (pr *putProgress) Want(mnt *mount) bool {
+       if pr.Done() || pr.mountUsed[mnt] {
+               return false
+       }
+       if len(pr.classTodo) == 0 {
+               // none specified == "any"
+               return true
+       }
+       for class := range mnt.StorageClasses {
+               if pr.classTodo[class] {
+                       return true
+               }
+       }
+       return false
+}
+
+func (pr *putProgress) Copy() *putProgress {
+       cp := putProgress{
+               classNeeded:      pr.classNeeded,
+               classTodo:        make(map[string]bool, len(pr.classTodo)),
+               classDone:        make(map[string]int, len(pr.classDone)),
+               mountUsed:        make(map[*mount]bool, len(pr.mountUsed)),
+               totalReplication: pr.totalReplication,
+       }
+       for k, v := range pr.classTodo {
+               cp.classTodo[k] = v
+       }
+       for k, v := range pr.classDone {
+               cp.classDone[k] = v
+       }
+       for k, v := range pr.mountUsed {
+               cp.mountUsed[k] = v
+       }
+       return &cp
+}
+
+func newPutProgress(classes []string) putProgress {
+       pr := putProgress{
+               classNeeded: make(map[string]bool, len(classes)),
+               classTodo:   make(map[string]bool, len(classes)),
+               classDone:   map[string]int{},
+               mountUsed:   map[*mount]bool{},
+       }
+       for _, c := range classes {
+               if c != "" {
+                       pr.classNeeded[c] = true
+                       pr.classTodo[c] = true
+               }
+       }
+       return pr
+}
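
putProgress tracks which requested storage classes still need a copy while a block write fans out across mounts. A sketch of the intended calling pattern, written as if inside package keepstore; the candidates list and writeBlock callback are assumptions for illustration, not the commit's actual BlockWrite implementation:

// Not part of the commit: illustrative use of putProgress inside package
// keepstore. writeBlock stands in for the real per-mount write path.
func writeWithProgress(classes []string, candidates []*mount, writeBlock func(*mount) error) putProgress {
	pr := newPutProgress(classes)
	for _, mnt := range candidates {
		if !pr.Want(mnt) {
			// Either every requested class is already satisfied, or this
			// mount serves none of the classes still on the todo list.
			continue
		}
		if err := writeBlock(mnt); err != nil {
			// Skip failed mounts; pr still reports their classes as todo.
			continue
		}
		pr.Add(mnt)
		if pr.Done() {
			break
		}
	}
	return pr
}
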
diff --git a/services/keepstore/router.go b/services/keepstore/router.go
new file mode 100644 (file)
index 0000000..0c8182c
--- /dev/null
@@ -0,0 +1,276 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package keepstore
+
+import (
+       "encoding/json"
+       "errors"
+       "fmt"
+       "io"
+       "net/http"
+       "os"
+       "strconv"
+       "strings"
+       "sync/atomic"
+
+       "git.arvados.org/arvados.git/lib/service"
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/auth"
+       "git.arvados.org/arvados.git/sdk/go/httpserver"
+       "github.com/gorilla/mux"
+)
+
+type router struct {
+       http.Handler
+       keepstore *keepstore
+       puller    *puller
+       trasher   *trasher
+}
+
+func newRouter(keepstore *keepstore, puller *puller, trasher *trasher) service.Handler {
+       rtr := &router{
+               keepstore: keepstore,
+               puller:    puller,
+               trasher:   trasher,
+       }
+       adminonly := func(h http.HandlerFunc) http.HandlerFunc {
+               return auth.RequireLiteralToken(keepstore.cluster.SystemRootToken, h).ServeHTTP
+       }
+
+       r := mux.NewRouter()
+       locatorPath := `/{locator:[0-9a-f]{32}.*}`
+       get := r.Methods(http.MethodGet, http.MethodHead).Subrouter()
+       get.HandleFunc(locatorPath, rtr.handleBlockRead)
+       get.HandleFunc(`/index`, adminonly(rtr.handleIndex))
+       get.HandleFunc(`/index/{prefix:[0-9a-f]{0,32}}`, adminonly(rtr.handleIndex))
+       get.HandleFunc(`/mounts`, adminonly(rtr.handleMounts))
+       get.HandleFunc(`/mounts/{uuid}/blocks`, adminonly(rtr.handleIndex))
+       get.HandleFunc(`/mounts/{uuid}/blocks/{prefix:[0-9a-f]{0,32}}`, adminonly(rtr.handleIndex))
+       put := r.Methods(http.MethodPut).Subrouter()
+       put.HandleFunc(locatorPath, rtr.handleBlockWrite)
+       put.HandleFunc(`/pull`, adminonly(rtr.handlePullList))
+       put.HandleFunc(`/trash`, adminonly(rtr.handleTrashList))
+       put.HandleFunc(`/untrash`+locatorPath, adminonly(rtr.handleUntrash))
+       touch := r.Methods("TOUCH").Subrouter()
+       touch.HandleFunc(locatorPath, adminonly(rtr.handleBlockTouch))
+       delete := r.Methods(http.MethodDelete).Subrouter()
+       delete.HandleFunc(locatorPath, adminonly(rtr.handleBlockTrash))
+       r.NotFoundHandler = http.HandlerFunc(rtr.handleBadRequest)
+       r.MethodNotAllowedHandler = http.HandlerFunc(rtr.handleBadRequest)
+       rtr.Handler = auth.LoadToken(r)
+       return rtr
+}
+
+func (rtr *router) CheckHealth() error {
+       return nil
+}
+
+func (rtr *router) Done() <-chan struct{} {
+       return nil
+}
+
+func (rtr *router) handleBlockRead(w http.ResponseWriter, req *http.Request) {
+       // Intervening proxies must not return a cached GET response
+       // to a prior request if a X-Keep-Signature request header has
+       // been added or changed.
+       w.Header().Add("Vary", "X-Keep-Signature")
+       var localLocator func(string)
+       if strings.SplitN(req.Header.Get("X-Keep-Signature"), ",", 2)[0] == "local" {
+               localLocator = func(locator string) {
+                       w.Header().Set("X-Keep-Locator", locator)
+               }
+       }
+       out := w
+       if req.Method == http.MethodHead {
+               out = discardWrite{ResponseWriter: w}
+       } else if li, err := getLocatorInfo(mux.Vars(req)["locator"]); err != nil {
+               rtr.handleError(w, req, err)
+               return
+       } else if li.size == 0 && li.hash != "d41d8cd98f00b204e9800998ecf8427e" {
+               // GET {hash} (with no size hint) is not allowed
+               // because we can't report md5 mismatches.
+               rtr.handleError(w, req, errMethodNotAllowed)
+               return
+       }
+       n, err := rtr.keepstore.BlockRead(req.Context(), arvados.BlockReadOptions{
+               Locator:      mux.Vars(req)["locator"],
+               WriteTo:      out,
+               LocalLocator: localLocator,
+       })
+       if err != nil && (n == 0 || req.Method == http.MethodHead) {
+               rtr.handleError(w, req, err)
+               return
+       }
+}
+
+func (rtr *router) handleBlockWrite(w http.ResponseWriter, req *http.Request) {
+       dataSize, _ := strconv.Atoi(req.Header.Get("Content-Length"))
+       replicas, _ := strconv.Atoi(req.Header.Get("X-Arvados-Replicas-Desired"))
+       resp, err := rtr.keepstore.BlockWrite(req.Context(), arvados.BlockWriteOptions{
+               Hash:           mux.Vars(req)["locator"],
+               Reader:         req.Body,
+               DataSize:       dataSize,
+               RequestID:      req.Header.Get("X-Request-Id"),
+               StorageClasses: trimSplit(req.Header.Get("X-Keep-Storage-Classes"), ","),
+               Replicas:       replicas,
+       })
+       if err != nil {
+               rtr.handleError(w, req, err)
+               return
+       }
+       w.Header().Set("X-Keep-Replicas-Stored", fmt.Sprintf("%d", resp.Replicas))
+       scc := ""
+       for k, n := range resp.StorageClasses {
+               if n > 0 {
+                       if scc != "" {
+                               scc += "; "
+                       }
+                       scc += fmt.Sprintf("%s=%d", k, n)
+               }
+       }
+       w.Header().Set("X-Keep-Storage-Classes-Confirmed", scc)
+       w.WriteHeader(http.StatusOK)
+       fmt.Fprintln(w, resp.Locator)
+}
+
+func (rtr *router) handleBlockTouch(w http.ResponseWriter, req *http.Request) {
+       err := rtr.keepstore.BlockTouch(req.Context(), mux.Vars(req)["locator"])
+       rtr.handleError(w, req, err)
+}
+
+func (rtr *router) handleBlockTrash(w http.ResponseWriter, req *http.Request) {
+       err := rtr.keepstore.BlockTrash(req.Context(), mux.Vars(req)["locator"])
+       rtr.handleError(w, req, err)
+}
+
+func (rtr *router) handleMounts(w http.ResponseWriter, req *http.Request) {
+       json.NewEncoder(w).Encode(rtr.keepstore.Mounts())
+}
+
+func (rtr *router) handleIndex(w http.ResponseWriter, req *http.Request) {
+       prefix := req.FormValue("prefix")
+       if prefix == "" {
+               prefix = mux.Vars(req)["prefix"]
+       }
+       cw := &countingWriter{writer: w}
+       err := rtr.keepstore.Index(req.Context(), indexOptions{
+               MountUUID: mux.Vars(req)["uuid"],
+               Prefix:    prefix,
+               WriteTo:   cw,
+       })
+       if err != nil && cw.n.Load() == 0 {
+               // Nothing was written, so it's not too late to report
+               // an error via http response header. (Otherwise, all
+               // we can do is omit the trailing newline below to
+               // indicate something went wrong.)
+               rtr.handleError(w, req, err)
+               return
+       }
+       if err == nil {
+               // A trailing blank line signals to the caller that
+               // the response is complete.
+               w.Write([]byte("\n"))
+       }
+}
+
+func (rtr *router) handlePullList(w http.ResponseWriter, req *http.Request) {
+       var pl []PullListItem
+       err := json.NewDecoder(req.Body).Decode(&pl)
+       if err != nil {
+               rtr.handleError(w, req, err)
+               return
+       }
+       req.Body.Close()
+       if len(pl) > 0 && len(pl[0].Locator) == 32 {
+               rtr.handleError(w, req, httpserver.ErrorWithStatus(errors.New("rejecting pull list containing a locator without a size hint -- this probably means keep-balance needs to be upgraded"), http.StatusBadRequest))
+               return
+       }
+       rtr.puller.SetPullList(pl)
+}
+
+func (rtr *router) handleTrashList(w http.ResponseWriter, req *http.Request) {
+       var tl []TrashListItem
+       err := json.NewDecoder(req.Body).Decode(&tl)
+       if err != nil {
+               rtr.handleError(w, req, err)
+               return
+       }
+       req.Body.Close()
+       rtr.trasher.SetTrashList(tl)
+}
+
+func (rtr *router) handleUntrash(w http.ResponseWriter, req *http.Request) {
+       err := rtr.keepstore.BlockUntrash(req.Context(), mux.Vars(req)["locator"])
+       rtr.handleError(w, req, err)
+}
+
+func (rtr *router) handleBadRequest(w http.ResponseWriter, req *http.Request) {
+       http.Error(w, "Bad Request", http.StatusBadRequest)
+}
+
+func (rtr *router) handleError(w http.ResponseWriter, req *http.Request, err error) {
+       if req.Context().Err() != nil {
+               w.WriteHeader(499)
+               return
+       }
+       if err == nil {
+               return
+       } else if os.IsNotExist(err) {
+               w.WriteHeader(http.StatusNotFound)
+       } else if statusErr := interface{ HTTPStatus() int }(nil); errors.As(err, &statusErr) {
+               w.WriteHeader(statusErr.HTTPStatus())
+       } else {
+               w.WriteHeader(http.StatusInternalServerError)
+       }
+       fmt.Fprintln(w, err.Error())
+}
+
+type countingWriter struct {
+       writer io.Writer
+       n      atomic.Int64
+}
+
+func (cw *countingWriter) Write(p []byte) (int, error) {
+       n, err := cw.writer.Write(p)
+       cw.n.Add(int64(n))
+       return n, err
+}
+
+// Split s by sep, trim whitespace from each part, and drop empty
+// parts.
+func trimSplit(s, sep string) []string {
+       var r []string
+       for _, part := range strings.Split(s, sep) {
+               part = strings.TrimSpace(part)
+               if part != "" {
+                       r = append(r, part)
+               }
+       }
+       return r
+}
+
+// setSizeOnWrite sets the Content-Length header to the given size on
+// first write.
+type setSizeOnWrite struct {
+       http.ResponseWriter
+       size  int
+       wrote bool
+}
+
+func (ss *setSizeOnWrite) Write(p []byte) (int, error) {
+       if !ss.wrote {
+               ss.Header().Set("Content-Length", fmt.Sprintf("%d", ss.size))
+               ss.wrote = true
+       }
+       return ss.ResponseWriter.Write(p)
+}
+
+type discardWrite struct {
+       http.ResponseWriter
+}
+
+func (discardWrite) Write(p []byte) (int, error) {
+       return len(p), nil
+}
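
The router above maps PUT /{locator} to keepstore.BlockWrite and reports replication results in response headers. A client-side sketch against that route, with a placeholder server URL and token (the official SDK clients wrap this protocol; this is only to show the headers handleBlockWrite consumes and emits):

package blockwriteexample

import (
	"bytes"
	"crypto/md5"
	"fmt"
	"io"
	"net/http"
	"strings"
)

// putBlock writes one block and returns the signed locator from the
// response body, plus the storage-class confirmations reported by
// handleBlockWrite above.
func putBlock(keepstoreURL, token string, data []byte) (locator, classesConfirmed string, err error) {
	hash := fmt.Sprintf("%x", md5.Sum(data))
	req, err := http.NewRequest(http.MethodPut, keepstoreURL+"/"+hash, bytes.NewReader(data))
	if err != nil {
		return "", "", err
	}
	req.Header.Set("Authorization", "Bearer "+token)
	req.Header.Set("X-Arvados-Replicas-Desired", "2")
	// trimSplit tolerates extra spaces and empty items in this header.
	req.Header.Set("X-Keep-Storage-Classes", "testclass1, testclass2")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", "", err
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	if resp.StatusCode != http.StatusOK {
		return "", "", fmt.Errorf("block write failed: %s: %s", resp.Status, body)
	}
	// Body is the signed locator followed by a newline; headers report
	// X-Keep-Replicas-Stored and X-Keep-Storage-Classes-Confirmed.
	return strings.TrimSpace(string(body)), resp.Header.Get("X-Keep-Storage-Classes-Confirmed"), nil
}
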
diff --git a/services/keepstore/router_test.go b/services/keepstore/router_test.go
new file mode 100644 (file)
index 0000000..ee7be47
--- /dev/null
@@ -0,0 +1,510 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package keepstore
+
+import (
+       "bytes"
+       "context"
+       "crypto/md5"
+       "errors"
+       "fmt"
+       "io"
+       "net/http"
+       "net/http/httptest"
+       "os"
+       "sort"
+       "strings"
+       "time"
+
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       "git.arvados.org/arvados.git/sdk/go/arvadostest"
+       "git.arvados.org/arvados.git/sdk/go/httpserver"
+       "github.com/prometheus/client_golang/prometheus"
+       . "gopkg.in/check.v1"
+)
+
+// routerSuite tests that the router correctly translates HTTP
+// requests to the appropriate keepstore functionality, and translates
+// the results to HTTP responses.
+type routerSuite struct {
+       cluster *arvados.Cluster
+}
+
+var _ = Suite(&routerSuite{})
+
+func testRouter(t TB, cluster *arvados.Cluster, reg *prometheus.Registry) (*router, context.CancelFunc) {
+       if reg == nil {
+               reg = prometheus.NewRegistry()
+       }
+       ctx, cancel := context.WithCancel(context.Background())
+       ks, kcancel := testKeepstore(t, cluster, reg)
+       go func() {
+               <-ctx.Done()
+               kcancel()
+       }()
+       puller := newPuller(ctx, ks, reg)
+       trasher := newTrasher(ctx, ks, reg)
+       return newRouter(ks, puller, trasher).(*router), cancel
+}
+
+func (s *routerSuite) SetUpTest(c *C) {
+       s.cluster = testCluster(c)
+       s.cluster.Volumes = map[string]arvados.Volume{
+               "zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "stub", StorageClasses: map[string]bool{"testclass1": true}},
+               "zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "stub", StorageClasses: map[string]bool{"testclass2": true}},
+       }
+       s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
+               "testclass1": arvados.StorageClassConfig{
+                       Default: true,
+               },
+               "testclass2": arvados.StorageClassConfig{
+                       Default: true,
+               },
+       }
+}
+
+func (s *routerSuite) TestBlockRead_Token(c *C) {
+       router, cancel := testRouter(c, s.cluster, nil)
+       defer cancel()
+
+       err := router.keepstore.mountsW[0].BlockWrite(context.Background(), fooHash, []byte("foo"))
+       c.Assert(err, IsNil)
+       locSigned := router.keepstore.signLocator(arvadostest.ActiveTokenV2, fooHash+"+3")
+       c.Assert(locSigned, Not(Equals), fooHash+"+3")
+
+       // No token provided
+       resp := call(router, "GET", "http://example/"+locSigned, "", nil, nil)
+       c.Check(resp.Code, Equals, http.StatusUnauthorized)
+       c.Check(resp.Body.String(), Matches, "no token provided in Authorization header\n")
+
+       // Different token => invalid signature
+       resp = call(router, "GET", "http://example/"+locSigned, "badtoken", nil, nil)
+       c.Check(resp.Code, Equals, http.StatusBadRequest)
+       c.Check(resp.Body.String(), Equals, "invalid signature\n")
+
+       // Correct token
+       resp = call(router, "GET", "http://example/"+locSigned, arvadostest.ActiveTokenV2, nil, nil)
+       c.Check(resp.Code, Equals, http.StatusOK)
+       c.Check(resp.Body.String(), Equals, "foo")
+
+       // HEAD
+       resp = call(router, "HEAD", "http://example/"+locSigned, arvadostest.ActiveTokenV2, nil, nil)
+       c.Check(resp.Code, Equals, http.StatusOK)
+       c.Check(resp.Result().ContentLength, Equals, int64(3))
+       c.Check(resp.Body.String(), Equals, "")
+}
+
+// As a special case we allow HEAD requests that only provide a hash
+// without a size hint. This accommodates uses of keep-block-check
+// where it's inconvenient to attach size hints to known hashes.
+//
+// GET requests must provide a size hint -- otherwise we can't
+// propagate a checksum mismatch error.
+func (s *routerSuite) TestBlockRead_NoSizeHint(c *C) {
+       s.cluster.Collections.BlobSigning = true
+       router, cancel := testRouter(c, s.cluster, nil)
+       defer cancel()
+       err := router.keepstore.mountsW[0].BlockWrite(context.Background(), fooHash, []byte("foo"))
+       c.Assert(err, IsNil)
+
+       // hash+signature
+       hashSigned := router.keepstore.signLocator(arvadostest.ActiveTokenV2, fooHash)
+       resp := call(router, "GET", "http://example/"+hashSigned, arvadostest.ActiveTokenV2, nil, nil)
+       c.Check(resp.Code, Equals, http.StatusMethodNotAllowed)
+
+       resp = call(router, "HEAD", "http://example/"+fooHash, "", nil, nil)
+       c.Check(resp.Code, Equals, http.StatusUnauthorized)
+       resp = call(router, "HEAD", "http://example/"+fooHash+"+3", "", nil, nil)
+       c.Check(resp.Code, Equals, http.StatusUnauthorized)
+
+       s.cluster.Collections.BlobSigning = false
+       router, cancel = testRouter(c, s.cluster, nil)
+       defer cancel()
+       err = router.keepstore.mountsW[0].BlockWrite(context.Background(), fooHash, []byte("foo"))
+       c.Assert(err, IsNil)
+
+       resp = call(router, "GET", "http://example/"+fooHash, "", nil, nil)
+       c.Check(resp.Code, Equals, http.StatusMethodNotAllowed)
+
+       resp = call(router, "HEAD", "http://example/"+fooHash, "", nil, nil)
+       c.Check(resp.Code, Equals, http.StatusOK)
+       c.Check(resp.Body.String(), Equals, "")
+       c.Check(resp.Result().ContentLength, Equals, int64(3))
+       c.Check(resp.Header().Get("Content-Length"), Equals, "3")
+}
+
+// By the time we discover the checksum mismatch, it's too late to
+// change the response code, but the expected block size is given in
+// the Content-Length response header, so a generic http client can
+// detect the problem.
+func (s *routerSuite) TestBlockRead_ChecksumMismatch(c *C) {
+       router, cancel := testRouter(c, s.cluster, nil)
+       defer cancel()
+
+       gooddata := make([]byte, 10_000_000)
+       gooddata[0] = 'a'
+       hash := fmt.Sprintf("%x", md5.Sum(gooddata))
+       locSigned := router.keepstore.signLocator(arvadostest.ActiveTokenV2, fmt.Sprintf("%s+%d", hash, len(gooddata)))
+
+       for _, baddata := range [][]byte{
+               make([]byte, 3),
+               make([]byte, len(gooddata)),
+               make([]byte, len(gooddata)-1),
+               make([]byte, len(gooddata)+1),
+               make([]byte, len(gooddata)*2),
+       } {
+               c.Logf("=== baddata len %d", len(baddata))
+               err := router.keepstore.mountsW[0].BlockWrite(context.Background(), hash, baddata)
+               c.Assert(err, IsNil)
+
+               resp := call(router, "GET", "http://example/"+locSigned, arvadostest.ActiveTokenV2, nil, nil)
+               if !c.Check(resp.Code, Equals, http.StatusOK) {
+                       c.Logf("resp.Body: %s", resp.Body.String())
+               }
+               c.Check(resp.Body.Len(), Not(Equals), len(gooddata))
+               c.Check(resp.Result().ContentLength, Equals, int64(len(gooddata)))
+
+               resp = call(router, "HEAD", "http://example/"+locSigned, arvadostest.ActiveTokenV2, nil, nil)
+               c.Check(resp.Code, Equals, http.StatusBadGateway)
+
+               hashSigned := router.keepstore.signLocator(arvadostest.ActiveTokenV2, hash)
+               resp = call(router, "HEAD", "http://example/"+hashSigned, arvadostest.ActiveTokenV2, nil, nil)
+               c.Check(resp.Code, Equals, http.StatusBadGateway)
+       }
+}
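
As the comment above explains, a checksum mismatch discovered mid-stream cannot change the status code, so a generic client should compare the bytes it received against the advertised Content-Length. A hedged sketch of that check (hypothetical helper, not part of the patch; keepstoreURL, signedLocator and token are assumed inputs):

    package example

    import (
        "fmt"
        "io"
        "net/http"
    )

    // readBlock fetches a signed locator and treats any difference between
    // the bytes received and Content-Length as a failed read, mirroring the
    // mismatch-detection strategy described above.
    func readBlock(keepstoreURL, signedLocator, token string) ([]byte, error) {
        req, err := http.NewRequest("GET", keepstoreURL+"/"+signedLocator, nil)
        if err != nil {
            return nil, err
        }
        req.Header.Set("Authorization", "Bearer "+token)
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            return nil, err
        }
        defer resp.Body.Close()
        if resp.StatusCode != http.StatusOK {
            return nil, fmt.Errorf("GET %s: %s", signedLocator, resp.Status)
        }
        buf, err := io.ReadAll(resp.Body)
        if err != nil {
            // A short body (server hung up early) surfaces here.
            return nil, err
        }
        if resp.ContentLength >= 0 && int64(len(buf)) != resp.ContentLength {
            return nil, fmt.Errorf("expected %d bytes, got %d", resp.ContentLength, len(buf))
        }
        return buf, nil
    }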
+
+func (s *routerSuite) TestBlockWrite(c *C) {
+       router, cancel := testRouter(c, s.cluster, nil)
+       defer cancel()
+
+       resp := call(router, "PUT", "http://example/"+fooHash, arvadostest.ActiveTokenV2, []byte("foo"), nil)
+       c.Check(resp.Code, Equals, http.StatusOK)
+       locator := strings.TrimSpace(resp.Body.String())
+
+       resp = call(router, "GET", "http://example/"+locator, arvadostest.ActiveTokenV2, nil, nil)
+       c.Check(resp.Code, Equals, http.StatusOK)
+       c.Check(resp.Body.String(), Equals, "foo")
+}
+
+func (s *routerSuite) TestBlockWrite_Headers(c *C) {
+       router, cancel := testRouter(c, s.cluster, nil)
+       defer cancel()
+
+       resp := call(router, "PUT", "http://example/"+fooHash, arvadostest.ActiveTokenV2, []byte("foo"), http.Header{"X-Arvados-Replicas-Desired": []string{"2"}})
+       c.Check(resp.Code, Equals, http.StatusOK)
+       c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), Equals, "1")
+       c.Check(sortCommaSeparated(resp.Header().Get("X-Keep-Storage-Classes-Confirmed")), Equals, "testclass1=1")
+
+       resp = call(router, "PUT", "http://example/"+fooHash, arvadostest.ActiveTokenV2, []byte("foo"), http.Header{"X-Keep-Storage-Classes": []string{"testclass1"}})
+       c.Check(resp.Code, Equals, http.StatusOK)
+       c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), Equals, "1")
+       c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), Equals, "testclass1=1")
+
+       resp = call(router, "PUT", "http://example/"+fooHash, arvadostest.ActiveTokenV2, []byte("foo"), http.Header{"X-Keep-Storage-Classes": []string{" , testclass2 , "}})
+       c.Check(resp.Code, Equals, http.StatusOK)
+       c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), Equals, "1")
+       c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), Equals, "testclass2=1")
+}
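
The headers exercised here are the ones a storing client sets and reads back: X-Arvados-Replicas-Desired and X-Keep-Storage-Classes on the request, X-Keep-Replicas-Stored and X-Keep-Storage-Classes-Confirmed on the response. A rough client-side sketch (illustration only; keepstoreURL, hash and token are assumed inputs, and the class names are placeholders):

    package example

    import (
        "bytes"
        "net/http"
    )

    // putBlock stores data under its hash and returns the replica count and
    // storage classes the server confirmed, using the same headers the test
    // above exercises.
    func putBlock(keepstoreURL, hash, token string, data []byte) (replicas, classes string, err error) {
        req, err := http.NewRequest("PUT", keepstoreURL+"/"+hash, bytes.NewReader(data))
        if err != nil {
            return "", "", err
        }
        req.Header.Set("Authorization", "Bearer "+token)
        req.Header.Set("X-Arvados-Replicas-Desired", "2")
        req.Header.Set("X-Keep-Storage-Classes", "class1, class2")
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            return "", "", err
        }
        defer resp.Body.Close()
        return resp.Header.Get("X-Keep-Replicas-Stored"),
            resp.Header.Get("X-Keep-Storage-Classes-Confirmed"), nil
    }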
+
+func sortCommaSeparated(s string) string {
+       slice := strings.Split(s, ", ")
+       sort.Strings(slice)
+       return strings.Join(slice, ", ")
+}
+
+func (s *routerSuite) TestBlockTouch(c *C) {
+       router, cancel := testRouter(c, s.cluster, nil)
+       defer cancel()
+
+       resp := call(router, "TOUCH", "http://example/"+fooHash+"+3", s.cluster.SystemRootToken, nil, nil)
+       c.Check(resp.Code, Equals, http.StatusNotFound)
+
+       vol0 := router.keepstore.mountsW[0].volume.(*stubVolume)
+       err := vol0.BlockWrite(context.Background(), fooHash, []byte("foo"))
+       c.Assert(err, IsNil)
+       vol1 := router.keepstore.mountsW[1].volume.(*stubVolume)
+       err = vol1.BlockWrite(context.Background(), fooHash, []byte("foo"))
+       c.Assert(err, IsNil)
+
+       t1 := time.Now()
+       resp = call(router, "TOUCH", "http://example/"+fooHash+"+3", s.cluster.SystemRootToken, nil, nil)
+       c.Check(resp.Code, Equals, http.StatusOK)
+       t2 := time.Now()
+
+       // Non-admin token => forbidden; the request must not touch anything
+       resp = call(router, "TOUCH", "http://example/"+fooHash+"+3", arvadostest.ActiveTokenV2, nil, nil)
+       c.Check(resp.Code, Equals, http.StatusForbidden)
+
+       // Volume 0 mtime should be updated
+       t, err := vol0.Mtime(fooHash)
+       c.Check(err, IsNil)
+       c.Check(t.After(t1), Equals, true)
+       c.Check(t.Before(t2), Equals, true)
+
+       // Volume 1 mtime should not be updated
+       t, err = vol1.Mtime(fooHash)
+       c.Check(err, IsNil)
+       c.Check(t.Before(t1), Equals, true)
+
+       err = vol0.BlockTrash(fooHash)
+       c.Assert(err, IsNil)
+       err = vol1.BlockTrash(fooHash)
+       c.Assert(err, IsNil)
+       resp = call(router, "TOUCH", "http://example/"+fooHash+"+3", s.cluster.SystemRootToken, nil, nil)
+       c.Check(resp.Code, Equals, http.StatusNotFound)
+}
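
TOUCH is a non-standard verb, but Go's http package passes arbitrary methods through, which is how both this test and a real admin client can issue it. A minimal sketch (assumed base URL and admin token, not part of the patch):

    package example

    import "net/http"

    // touchBlock issues the non-standard TOUCH verb accepted by the router
    // above to refresh a block's mtime. Sketch only.
    func touchBlock(keepstoreURL, locator, systemRootToken string) (int, error) {
        req, err := http.NewRequest("TOUCH", keepstoreURL+"/"+locator, nil)
        if err != nil {
            return 0, err
        }
        req.Header.Set("Authorization", "Bearer "+systemRootToken)
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            return 0, err
        }
        resp.Body.Close()
        // 200 = touched, 403 = non-admin token, 404 = block not present,
        // matching the assertions in TestBlockTouch.
        return resp.StatusCode, nil
    }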
+
+func (s *routerSuite) TestBlockTrash(c *C) {
+       router, cancel := testRouter(c, s.cluster, nil)
+       defer cancel()
+
+       vol0 := router.keepstore.mountsW[0].volume.(*stubVolume)
+       err := vol0.BlockWrite(context.Background(), fooHash, []byte("foo"))
+       c.Assert(err, IsNil)
+       err = vol0.blockTouchWithTime(fooHash, time.Now().Add(-s.cluster.Collections.BlobSigningTTL.Duration()))
+       c.Assert(err, IsNil)
+       resp := call(router, "DELETE", "http://example/"+fooHash+"+3", s.cluster.SystemRootToken, nil, nil)
+       c.Check(resp.Code, Equals, http.StatusOK)
+       c.Check(vol0.stubLog.String(), Matches, `(?ms).* trash .*`)
+       err = vol0.BlockRead(context.Background(), fooHash, brdiscard)
+       c.Assert(err, Equals, os.ErrNotExist)
+}
+
+func (s *routerSuite) TestBlockUntrash(c *C) {
+       router, cancel := testRouter(c, s.cluster, nil)
+       defer cancel()
+
+       vol0 := router.keepstore.mountsW[0].volume.(*stubVolume)
+       err := vol0.BlockWrite(context.Background(), fooHash, []byte("foo"))
+       c.Assert(err, IsNil)
+       err = vol0.BlockTrash(fooHash)
+       c.Assert(err, IsNil)
+       err = vol0.BlockRead(context.Background(), fooHash, brdiscard)
+       c.Assert(err, Equals, os.ErrNotExist)
+       resp := call(router, "PUT", "http://example/untrash/"+fooHash+"+3", s.cluster.SystemRootToken, nil, nil)
+       c.Check(resp.Code, Equals, http.StatusOK)
+       c.Check(vol0.stubLog.String(), Matches, `(?ms).* untrash .*`)
+       err = vol0.BlockRead(context.Background(), fooHash, brdiscard)
+       c.Check(err, IsNil)
+}
+
+func (s *routerSuite) TestBadRequest(c *C) {
+       router, cancel := testRouter(c, s.cluster, nil)
+       defer cancel()
+
+       for _, trial := range []string{
+               "GET /",
+               "GET /xyz",
+               "GET /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabcdefg",
+               "GET /untrash",
+               "GET /mounts/blocks/123",
+               "GET /trash",
+               "GET /pull",
+               "GET /debug.json",  // old endpoint, no longer exists
+               "GET /status.json", // old endpoint, no longer exists
+               "POST /",
+               "POST /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+               "POST /trash",
+               "PROPFIND /",
+               "MAKE-COFFEE /",
+       } {
+               c.Logf("=== %s", trial)
+               methodpath := strings.Split(trial, " ")
+               req := httptest.NewRequest(methodpath[0], "http://example"+methodpath[1], nil)
+               resp := httptest.NewRecorder()
+               router.ServeHTTP(resp, req)
+               c.Check(resp.Code, Equals, http.StatusBadRequest)
+       }
+}
+
+func (s *routerSuite) TestRequireAdminMgtToken(c *C) {
+       router, cancel := testRouter(c, s.cluster, nil)
+       defer cancel()
+
+       for _, token := range []string{"badtoken", ""} {
+               for _, trial := range []string{
+                       "PUT /pull",
+                       "PUT /trash",
+                       "GET /index",
+                       "GET /index/",
+                       "GET /index/1234",
+                       "PUT /untrash/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+               } {
+                       c.Logf("=== %s", trial)
+                       methodpath := strings.Split(trial, " ")
+                       req := httptest.NewRequest(methodpath[0], "http://example"+methodpath[1], nil)
+                       if token != "" {
+                               req.Header.Set("Authorization", "Bearer "+token)
+                       }
+                       resp := httptest.NewRecorder()
+                       router.ServeHTTP(resp, req)
+                       if token == "" {
+                               c.Check(resp.Code, Equals, http.StatusUnauthorized)
+                       } else {
+                               c.Check(resp.Code, Equals, http.StatusForbidden)
+                       }
+               }
+       }
+       req := httptest.NewRequest("TOUCH", "http://example/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", nil)
+       resp := httptest.NewRecorder()
+       router.ServeHTTP(resp, req)
+       c.Check(resp.Code, Equals, http.StatusUnauthorized)
+}
+
+func (s *routerSuite) TestVolumeErrorStatusCode(c *C) {
+       router, cancel := testRouter(c, s.cluster, nil)
+       defer cancel()
+       router.keepstore.mountsW[0].volume.(*stubVolume).blockRead = func(_ context.Context, hash string, w io.WriterAt) error {
+               return httpserver.ErrorWithStatus(errors.New("test error"), http.StatusBadGateway)
+       }
+
+       // To test whether we fall back to volume 1 after volume 0
+       // returns an error, we need to use a block whose rendezvous
+       // order has volume 0 first. Luckily "bar" is such a block.
+       c.Assert(router.keepstore.rendezvous(barHash, router.keepstore.mountsR)[0].UUID, DeepEquals, router.keepstore.mountsR[0].UUID)
+
+       locSigned := router.keepstore.signLocator(arvadostest.ActiveTokenV2, barHash+"+3")
+
+       // Volume 0 fails with an error that specifies an HTTP status
+       // code, so that code should be propagated to caller.
+       resp := call(router, "GET", "http://example/"+locSigned, arvadostest.ActiveTokenV2, nil, nil)
+       c.Check(resp.Code, Equals, http.StatusBadGateway)
+       c.Check(resp.Body.String(), Equals, "test error\n")
+
+       c.Assert(router.keepstore.mountsW[1].volume.BlockWrite(context.Background(), barHash, []byte("bar")), IsNil)
+
+       // If the requested block is available on the second volume,
+       // it doesn't matter that the first volume failed.
+       resp = call(router, "GET", "http://example/"+locSigned, arvadostest.ActiveTokenV2, nil, nil)
+       c.Check(resp.Code, Equals, http.StatusOK)
+       c.Check(resp.Body.String(), Equals, "bar")
+}
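
The fallback behavior tested here depends on the rendezvous ordering of volumes for a block. Keepstore's own rendezvous() lives elsewhere in this package; purely as an illustration of the idea, a generic highest-random-weight ordering can be sketched like this (not keepstore's exact algorithm):

    package example

    import (
        "crypto/md5"
        "fmt"
        "sort"
    )

    // rendezvousOrder sorts volume UUIDs by a per-block score so every
    // participant computes the same probe order for a given hash.
    func rendezvousOrder(blockHash string, volumeUUIDs []string) []string {
        order := append([]string(nil), volumeUUIDs...)
        score := func(uuid string) string {
            return fmt.Sprintf("%x", md5.Sum([]byte(blockHash+uuid)))
        }
        sort.Slice(order, func(i, j int) bool {
            return score(order[i]) > score(order[j])
        })
        return order
    }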
+
+func (s *routerSuite) TestIndex(c *C) {
+       router, cancel := testRouter(c, s.cluster, nil)
+       defer cancel()
+
+       resp := call(router, "GET", "http://example/index", s.cluster.SystemRootToken, nil, nil)
+       c.Check(resp.Code, Equals, http.StatusOK)
+       c.Check(resp.Body.String(), Equals, "\n")
+
+       resp = call(router, "GET", "http://example/index?prefix=fff", s.cluster.SystemRootToken, nil, nil)
+       c.Check(resp.Code, Equals, http.StatusOK)
+       c.Check(resp.Body.String(), Equals, "\n")
+
+       t0 := time.Now().Add(-time.Hour)
+       vol0 := router.keepstore.mounts["zzzzz-nyw5e-000000000000000"].volume.(*stubVolume)
+       err := vol0.BlockWrite(context.Background(), fooHash, []byte("foo"))
+       c.Assert(err, IsNil)
+       err = vol0.blockTouchWithTime(fooHash, t0)
+       c.Assert(err, IsNil)
+       err = vol0.BlockWrite(context.Background(), barHash, []byte("bar"))
+       c.Assert(err, IsNil)
+       err = vol0.blockTouchWithTime(barHash, t0)
+       c.Assert(err, IsNil)
+       t1 := time.Now().Add(-time.Minute)
+       vol1 := router.keepstore.mounts["zzzzz-nyw5e-111111111111111"].volume.(*stubVolume)
+       err = vol1.BlockWrite(context.Background(), barHash, []byte("bar"))
+       c.Assert(err, IsNil)
+       err = vol1.blockTouchWithTime(barHash, t1)
+       c.Assert(err, IsNil)
+
+       for _, path := range []string{
+               "/index?prefix=acb",
+               "/index/acb",
+               "/index/?prefix=acb",
+               "/mounts/zzzzz-nyw5e-000000000000000/blocks?prefix=acb",
+               "/mounts/zzzzz-nyw5e-000000000000000/blocks/?prefix=acb",
+               "/mounts/zzzzz-nyw5e-000000000000000/blocks/acb",
+       } {
+               c.Logf("=== %s", path)
+               resp = call(router, "GET", "http://example"+path, s.cluster.SystemRootToken, nil, nil)
+               c.Check(resp.Code, Equals, http.StatusOK)
+               c.Check(resp.Body.String(), Equals, fooHash+"+3 "+fmt.Sprintf("%d", t0.UnixNano())+"\n\n")
+       }
+
+       for _, path := range []string{
+               "/index?prefix=37",
+               "/index/37",
+               "/index/?prefix=37",
+       } {
+               c.Logf("=== %s", path)
+               resp = call(router, "GET", "http://example"+path, s.cluster.SystemRootToken, nil, nil)
+               c.Check(resp.Code, Equals, http.StatusOK)
+               c.Check(resp.Body.String(), Equals, ""+
+                       barHash+"+3 "+fmt.Sprintf("%d", t0.UnixNano())+"\n"+
+                       barHash+"+3 "+fmt.Sprintf("%d", t1.UnixNano())+"\n\n")
+       }
+
+       for _, path := range []string{
+               "/mounts/zzzzz-nyw5e-111111111111111/blocks",
+               "/mounts/zzzzz-nyw5e-111111111111111/blocks/",
+               "/mounts/zzzzz-nyw5e-111111111111111/blocks?prefix=37",
+               "/mounts/zzzzz-nyw5e-111111111111111/blocks/?prefix=37",
+               "/mounts/zzzzz-nyw5e-111111111111111/blocks/37",
+       } {
+               c.Logf("=== %s", path)
+               resp = call(router, "GET", "http://example"+path, s.cluster.SystemRootToken, nil, nil)
+               c.Check(resp.Code, Equals, http.StatusOK)
+               c.Check(resp.Body.String(), Equals, barHash+"+3 "+fmt.Sprintf("%d", t1.UnixNano())+"\n\n")
+       }
+
+       for _, path := range []string{
+               "/index",
+               "/index?prefix=",
+               "/index/",
+               "/index/?prefix=",
+       } {
+               c.Logf("=== %s", path)
+               resp = call(router, "GET", "http://example"+path, s.cluster.SystemRootToken, nil, nil)
+               c.Check(resp.Code, Equals, http.StatusOK)
+               c.Check(strings.Split(resp.Body.String(), "\n"), HasLen, 5)
+       }
+
+}
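
The expected bodies above show the index wire format: one "<locator> <unix-nanoseconds>" line per stored replica, terminated by an empty line that signals a complete (non-truncated) listing. A hedged sketch of a consumer-side parser (hypothetical helper, not part of the patch):

    package example

    import (
        "fmt"
        "strconv"
        "strings"
        "time"
    )

    // parseIndex decodes the lines returned by GET /index, requiring the
    // trailing empty line. When the same locator appears on more than one
    // volume, the last mtime seen wins in this simplified version.
    func parseIndex(body string) (map[string]time.Time, error) {
        if !strings.HasSuffix(body, "\n") {
            return nil, fmt.Errorf("index listing is truncated")
        }
        lines := strings.Split(strings.TrimSuffix(body, "\n"), "\n")
        if lines[len(lines)-1] != "" {
            return nil, fmt.Errorf("index listing is missing its empty terminator line")
        }
        blocks := map[string]time.Time{}
        for _, line := range lines {
            if line == "" {
                continue
            }
            fields := strings.Fields(line)
            if len(fields) != 2 {
                return nil, fmt.Errorf("cannot parse index line %q", line)
            }
            ns, err := strconv.ParseInt(fields[1], 10, 64)
            if err != nil {
                return nil, err
            }
            blocks[fields[0]] = time.Unix(0, ns)
        }
        return blocks, nil
    }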
+
+// Check that the context passed to a volume method gets cancelled
+// when the http client hangs up.
+func (s *routerSuite) TestCancelOnDisconnect(c *C) {
+       router, cancel := testRouter(c, s.cluster, nil)
+       defer cancel()
+
+       unblock := make(chan struct{})
+       router.keepstore.mountsW[0].volume.(*stubVolume).blockRead = func(ctx context.Context, hash string, w io.WriterAt) error {
+               <-unblock
+               c.Check(ctx.Err(), NotNil)
+               return ctx.Err()
+       }
+       go func() {
+               time.Sleep(time.Second / 10)
+               cancel()
+               close(unblock)
+       }()
+       locSigned := router.keepstore.signLocator(arvadostest.ActiveTokenV2, fooHash+"+3")
+       ctx, cancel := context.WithCancel(context.Background())
+       defer cancel()
+       req, err := http.NewRequestWithContext(ctx, "GET", "http://example/"+locSigned, nil)
+       c.Assert(err, IsNil)
+       req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveTokenV2)
+       resp := httptest.NewRecorder()
+       router.ServeHTTP(resp, req)
+       c.Check(resp.Code, Equals, 499)
+}
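
The stub volume above demonstrates the contract: volume methods receive the request context and are expected to return once it is cancelled. The same pattern in a plain net/http handler looks like the sketch below; note the 499 status checked above is produced by keepstore's own HTTP plumbing, not by the standard library.

    package example

    import (
        "net/http"
        "time"
    )

    // slowHandler watches the request context and gives up as soon as the
    // client hangs up. Sketch only.
    func slowHandler(w http.ResponseWriter, r *http.Request) {
        select {
        case <-time.After(10 * time.Second): // stand-in for a slow volume read
            w.Write([]byte("done"))
        case <-r.Context().Done():
            // Client disconnected; abandon the work without writing a response.
        }
    }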
+
+func call(handler http.Handler, method, path, tok string, body []byte, hdr http.Header) *httptest.ResponseRecorder {
+       resp := httptest.NewRecorder()
+       req, err := http.NewRequest(method, path, bytes.NewReader(body))
+       if err != nil {
+               panic(err)
+       }
+       for k := range hdr {
+               req.Header.Set(k, hdr.Get(k))
+       }
+       if tok != "" {
+               req.Header.Set("Authorization", "Bearer "+tok)
+       }
+       handler.ServeHTTP(resp, req)
+       return resp
+}
similarity index 75%
rename from services/keepstore/s3aws_volume.go
rename to services/keepstore/s3_volume.go
index 18b30f463806f996639579689987502f354d411a..dc857c32646b2aced992243122b94750607cf4e8 100644 (file)
@@ -34,55 +34,53 @@ import (
 )
 
 func init() {
-       driver["S3"] = newS3AWSVolume
+       driver["S3"] = news3Volume
 }
 
 const (
-       s3DefaultReadTimeout    = arvados.Duration(10 * time.Minute)
-       s3DefaultConnectTimeout = arvados.Duration(time.Minute)
-       maxClockSkew            = 600 * time.Second
-       nearlyRFC1123           = "Mon, 2 Jan 2006 15:04:05 GMT"
+       s3DefaultReadTimeout        = arvados.Duration(10 * time.Minute)
+       s3DefaultConnectTimeout     = arvados.Duration(time.Minute)
+       maxClockSkew                = 600 * time.Second
+       nearlyRFC1123               = "Mon, 2 Jan 2006 15:04:05 GMT"
+       s3downloaderPartSize        = 6 * 1024 * 1024
+       s3downloaderReadConcurrency = 11
+       s3uploaderPartSize          = 5 * 1024 * 1024
+       s3uploaderWriteConcurrency  = 5
 )
 
 var (
-       ErrS3TrashDisabled = fmt.Errorf("trash function is disabled because Collections.BlobTrashLifetime=0 and DriverParameters.UnsafeDelete=false")
+       errS3TrashDisabled   = fmt.Errorf("trash function is disabled because Collections.BlobTrashLifetime=0 and DriverParameters.UnsafeDelete=false")
+       s3AWSKeepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`)
+       s3AWSZeroTime        time.Time
 )
 
-// S3AWSVolume implements Volume using an S3 bucket.
-type S3AWSVolume struct {
+// s3Volume implements Volume using an S3 bucket.
+type s3Volume struct {
        arvados.S3VolumeDriverParameters
        AuthToken      string    // populated automatically when IAMRole is used
        AuthExpiration time.Time // populated automatically when IAMRole is used
 
-       cluster   *arvados.Cluster
-       volume    arvados.Volume
-       logger    logrus.FieldLogger
-       metrics   *volumeMetricsVecs
-       bucket    *s3AWSbucket
-       region    string
-       startOnce sync.Once
+       cluster    *arvados.Cluster
+       volume     arvados.Volume
+       logger     logrus.FieldLogger
+       metrics    *volumeMetricsVecs
+       bufferPool *bufferPool
+       bucket     *s3Bucket
+       region     string
+       startOnce  sync.Once
 }
 
 // s3bucket wraps s3.bucket and counts I/O and API usage stats. The
 // wrapped bucket can be replaced atomically with SetBucket in order
 // to update credentials.
-type s3AWSbucket struct {
+type s3Bucket struct {
        bucket string
        svc    *s3.Client
        stats  s3awsbucketStats
        mu     sync.Mutex
 }
 
-const (
-       PartSize         = 5 * 1024 * 1024
-       ReadConcurrency  = 13
-       WriteConcurrency = 5
-)
-
-var s3AWSKeepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`)
-var s3AWSZeroTime time.Time
-
-func (v *S3AWSVolume) isKeepBlock(s string) (string, bool) {
+func (v *s3Volume) isKeepBlock(s string) (string, bool) {
        if v.PrefixLength > 0 && len(s) == v.PrefixLength+33 && s[:v.PrefixLength] == s[v.PrefixLength+1:v.PrefixLength*2+1] {
                s = s[v.PrefixLength+1:]
        }
@@ -92,7 +90,7 @@ func (v *S3AWSVolume) isKeepBlock(s string) (string, bool) {
 // Return the key used for a given loc. If PrefixLength==0 then
 // key("abcdef0123") is "abcdef0123", if PrefixLength==3 then key is
 // "abc/abcdef0123", etc.
-func (v *S3AWSVolume) key(loc string) string {
+func (v *s3Volume) key(loc string) string {
        if v.PrefixLength > 0 && v.PrefixLength < len(loc)-1 {
                return loc[:v.PrefixLength] + "/" + loc
        } else {
@@ -100,17 +98,22 @@ func (v *S3AWSVolume) key(loc string) string {
        }
 }
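
For readers skimming the rename, the key mapping documented above is worth a worked example: with PrefixLength 3, blocks are sharded under a three-character directory-style prefix. A standalone illustration that mirrors the function above (not part of the patch):

    package example

    import "fmt"

    // objectKey reproduces the documented hash-to-object-key mapping.
    func objectKey(prefixLength int, hash string) string {
        if prefixLength > 0 && prefixLength < len(hash)-1 {
            return hash[:prefixLength] + "/" + hash
        }
        return hash
    }

    func demo() {
        fmt.Println(objectKey(0, "acbd18db4cc2f85cedef654fccc4a4d8")) // acbd18db4cc2f85cedef654fccc4a4d8
        fmt.Println(objectKey(3, "acbd18db4cc2f85cedef654fccc4a4d8")) // acb/acbd18db4cc2f85cedef654fccc4a4d8
    }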
 
-func newS3AWSVolume(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (Volume, error) {
-       v := &S3AWSVolume{cluster: cluster, volume: volume, metrics: metrics}
-       err := json.Unmarshal(volume.DriverParameters, v)
+func news3Volume(params newVolumeParams) (volume, error) {
+       v := &s3Volume{
+               cluster:    params.Cluster,
+               volume:     params.ConfigVolume,
+               metrics:    params.MetricsVecs,
+               bufferPool: params.BufferPool,
+       }
+       err := json.Unmarshal(params.ConfigVolume.DriverParameters, v)
        if err != nil {
                return nil, err
        }
-       v.logger = logger.WithField("Volume", v.String())
+       v.logger = params.Logger.WithField("Volume", v.DeviceID())
        return v, v.check("")
 }
 
-func (v *S3AWSVolume) translateError(err error) error {
+func (v *s3Volume) translateError(err error) error {
        if _, ok := err.(*aws.RequestCanceledError); ok {
                return context.Canceled
        } else if aerr, ok := err.(awserr.Error); ok {
@@ -129,7 +132,7 @@ func (v *S3AWSVolume) translateError(err error) error {
 //
 // (If something goes wrong during the copy, the error will be
 // embedded in the 200 OK response)
-func (v *S3AWSVolume) safeCopy(dst, src string) error {
+func (v *s3Volume) safeCopy(dst, src string) error {
        input := &s3.CopyObjectInput{
                Bucket:      aws.String(v.bucket.bucket),
                ContentType: aws.String("application/octet-stream"),
@@ -155,7 +158,7 @@ func (v *S3AWSVolume) safeCopy(dst, src string) error {
        return nil
 }
 
-func (v *S3AWSVolume) check(ec2metadataHostname string) error {
+func (v *s3Volume) check(ec2metadataHostname string) error {
        if v.Bucket == "" {
                return errors.New("DriverParameters: Bucket must be provided")
        }
@@ -219,82 +222,26 @@ func (v *S3AWSVolume) check(ec2metadataHostname string) error {
 
        cfg.Credentials = creds
 
-       v.bucket = &s3AWSbucket{
+       v.bucket = &s3Bucket{
                bucket: v.Bucket,
                svc:    s3.New(cfg),
        }
 
        // Set up prometheus metrics
-       lbls := prometheus.Labels{"device_id": v.GetDeviceID()}
+       lbls := prometheus.Labels{"device_id": v.DeviceID()}
        v.bucket.stats.opsCounters, v.bucket.stats.errCounters, v.bucket.stats.ioBytes = v.metrics.getCounterVecsFor(lbls)
 
        return nil
 }
 
-// String implements fmt.Stringer.
-func (v *S3AWSVolume) String() string {
-       return fmt.Sprintf("s3-bucket:%+q", v.Bucket)
-}
-
-// GetDeviceID returns a globally unique ID for the storage bucket.
-func (v *S3AWSVolume) GetDeviceID() string {
+// DeviceID returns a globally unique ID for the storage bucket.
+func (v *s3Volume) DeviceID() string {
        return "s3://" + v.Endpoint + "/" + v.Bucket
 }
 
-// Compare the given data with the stored data.
-func (v *S3AWSVolume) Compare(ctx context.Context, loc string, expect []byte) error {
-       key := v.key(loc)
-       errChan := make(chan error, 1)
-       go func() {
-               _, err := v.head("recent/" + key)
-               errChan <- err
-       }()
-       var err error
-       select {
-       case <-ctx.Done():
-               return ctx.Err()
-       case err = <-errChan:
-       }
-       if err != nil {
-               // Checking for the key itself here would interfere
-               // with future GET requests.
-               //
-               // On AWS, if X doesn't exist, a HEAD or GET request
-               // for X causes X's non-existence to be cached. Thus,
-               // if we test for X, then create X and return a
-               // signature to our client, the client might still get
-               // 404 from all keepstores when trying to read it.
-               //
-               // To avoid this, we avoid doing HEAD X or GET X until
-               // we know X has been written.
-               //
-               // Note that X might exist even though recent/X
-               // doesn't: for example, the response to HEAD recent/X
-               // might itself come from a stale cache. In such
-               // cases, we will return a false negative and
-               // PutHandler might needlessly create another replica
-               // on a different volume. That's not ideal, but it's
-               // better than passing the eventually-consistent
-               // problem on to our clients.
-               return v.translateError(err)
-       }
-
-       input := &s3.GetObjectInput{
-               Bucket: aws.String(v.bucket.bucket),
-               Key:    aws.String(key),
-       }
-
-       req := v.bucket.svc.GetObjectRequest(input)
-       result, err := req.Send(ctx)
-       if err != nil {
-               return v.translateError(err)
-       }
-       return v.translateError(compareReaderWithBuf(ctx, result.Body, expect, loc[:32]))
-}
-
 // EmptyTrash looks for trashed blocks that exceeded BlobTrashLifetime
 // and deletes them from the volume.
-func (v *S3AWSVolume) EmptyTrash() {
+func (v *s3Volume) EmptyTrash() {
        var bytesInTrash, blocksInTrash, bytesDeleted, blocksDeleted int64
 
        // Define "ready to delete" as "...when EmptyTrash started".
@@ -313,7 +260,7 @@ func (v *S3AWSVolume) EmptyTrash() {
                recent, err := v.head("recent/" + key)
                if err != nil && os.IsNotExist(v.translateError(err)) {
                        v.logger.Warnf("EmptyTrash: found trash marker %q but no %q (%s); calling Untrash", *trash.Key, "recent/"+key, err)
-                       err = v.Untrash(loc)
+                       err = v.BlockUntrash(loc)
                        if err != nil {
                                v.logger.WithError(err).Errorf("EmptyTrash: Untrash(%q) failed", loc)
                        }
@@ -334,7 +281,7 @@ func (v *S3AWSVolume) EmptyTrash() {
                                // necessary to avoid starvation.
                                v.logger.Infof("EmptyTrash: detected old race for %q, calling fixRace + Touch", loc)
                                v.fixRace(key)
-                               v.Touch(loc)
+                               v.BlockTouch(loc)
                                return
                        }
                        _, err := v.head(key)
@@ -401,14 +348,14 @@ func (v *S3AWSVolume) EmptyTrash() {
        if err := trashL.Error(); err != nil {
                v.logger.WithError(err).Error("EmptyTrash: lister failed")
        }
-       v.logger.Infof("EmptyTrash: stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
+       v.logger.Infof("EmptyTrash: stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.DeviceID(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
 }
 
 // fixRace(X) is called when "recent/X" exists but "X" doesn't
 // exist. If the timestamps on "recent/X" and "trash/X" indicate there
 // was a race between Put and Trash, fixRace recovers from the race by
 // Untrashing the block.
-func (v *S3AWSVolume) fixRace(key string) bool {
+func (v *s3Volume) fixRace(key string) bool {
        trash, err := v.head("trash/" + key)
        if err != nil {
                if !os.IsNotExist(v.translateError(err)) {
@@ -442,7 +389,7 @@ func (v *S3AWSVolume) fixRace(key string) bool {
        return true
 }
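
fixRace's recovery decision comes down to a timestamp comparison: if the block was trashed before its most recent write had aged past the blob signing TTL, a client may still hold a valid signature for it, so the trash has to be undone. A heavily simplified sketch of that test (illustration of the idea only, not keepstore's exact code):

    package example

    import "time"

    // wasTrashRace reports whether a trash operation raced with a recent
    // write, per the comment above.
    func wasTrashRace(recentMtime, trashMtime time.Time, signingTTL time.Duration) bool {
        return trashMtime.Sub(recentMtime) < signingTTL
    }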
 
-func (v *S3AWSVolume) head(key string) (result *s3.HeadObjectOutput, err error) {
+func (v *s3Volume) head(key string) (result *s3.HeadObjectOutput, err error) {
        input := &s3.HeadObjectInput{
                Bucket: aws.String(v.bucket.bucket),
                Key:    aws.String(key),
@@ -462,55 +409,45 @@ func (v *S3AWSVolume) head(key string) (result *s3.HeadObjectOutput, err error)
        return
 }
 
-// Get a block: copy the block data into buf, and return the number of
-// bytes copied.
-func (v *S3AWSVolume) Get(ctx context.Context, loc string, buf []byte) (int, error) {
-       // Do not use getWithPipe here: the BlockReader interface does not pass
-       // through 'buf []byte', and we don't want to allocate two buffers for each
-       // read request. Instead, use a version of ReadBlock that accepts 'buf []byte'
-       // as an input.
-       key := v.key(loc)
-       count, err := v.readWorker(ctx, key, buf)
-       if err == nil {
-               return count, err
-       }
-
-       err = v.translateError(err)
-       if !os.IsNotExist(err) {
-               return 0, err
-       }
-
-       _, err = v.head("recent/" + key)
-       err = v.translateError(err)
+// BlockRead reads a Keep block that has been stored as a block blob
+// in the S3 bucket.
+func (v *s3Volume) BlockRead(ctx context.Context, hash string, w io.WriterAt) error {
+       key := v.key(hash)
+       err := v.readWorker(ctx, key, w)
        if err != nil {
-               // If we can't read recent/X, there's no point in
-               // trying fixRace. Give up.
-               return 0, err
-       }
-       if !v.fixRace(key) {
-               err = os.ErrNotExist
-               return 0, err
-       }
+               err = v.translateError(err)
+               if !os.IsNotExist(err) {
+                       return err
+               }
 
-       count, err = v.readWorker(ctx, key, buf)
-       if err != nil {
-               v.logger.Warnf("reading %s after successful fixRace: %s", loc, err)
+               _, err = v.head("recent/" + key)
                err = v.translateError(err)
-               return 0, err
+               if err != nil {
+                       // If we can't read recent/X, there's no point in
+                       // trying fixRace. Give up.
+                       return err
+               }
+               if !v.fixRace(key) {
+                       err = os.ErrNotExist
+                       return err
+               }
+
+               err = v.readWorker(ctx, key, w)
+               if err != nil {
+                       v.logger.Warnf("reading %s after successful fixRace: %s", hash, err)
+                       err = v.translateError(err)
+                       return err
+               }
        }
-       return count, err
+       return nil
 }
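
BlockRead now writes into an io.WriterAt instead of filling a caller-supplied byte slice, which is what lets the S3 downloader write parts concurrently. A caller that wants the whole block in memory could pass something like this minimal WriterAt (sketch only; keepstore has its own buffer types):

    package example

    import "sync"

    // memWriterAt is a minimal io.WriterAt backed by a byte slice.
    type memWriterAt struct {
        mu  sync.Mutex
        buf []byte
    }

    func (m *memWriterAt) WriteAt(p []byte, off int64) (int, error) {
        m.mu.Lock()
        defer m.mu.Unlock()
        if need := int(off) + len(p); need > len(m.buf) {
            m.buf = append(m.buf, make([]byte, need-len(m.buf))...)
        }
        copy(m.buf[off:], p)
        return len(p), nil
    }

    func (m *memWriterAt) Bytes() []byte { return m.buf }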
 
-func (v *S3AWSVolume) readWorker(ctx context.Context, key string, buf []byte) (int, error) {
-       awsBuf := aws.NewWriteAtBuffer(buf)
+func (v *s3Volume) readWorker(ctx context.Context, key string, dst io.WriterAt) error {
        downloader := s3manager.NewDownloaderWithClient(v.bucket.svc, func(u *s3manager.Downloader) {
-               u.PartSize = PartSize
-               u.Concurrency = ReadConcurrency
+               u.PartSize = s3downloaderPartSize
+               u.Concurrency = s3downloaderReadConcurrency
        })
-
-       v.logger.Debugf("Partsize: %d; Concurrency: %d\n", downloader.PartSize, downloader.Concurrency)
-
-       count, err := downloader.DownloadWithContext(ctx, awsBuf, &s3.GetObjectInput{
+       count, err := downloader.DownloadWithContext(ctx, dst, &s3.GetObjectInput{
                Bucket: aws.String(v.bucket.bucket),
                Key:    aws.String(key),
        })
@@ -518,10 +455,10 @@ func (v *S3AWSVolume) readWorker(ctx context.Context, key string, buf []byte) (i
        v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.GetOps)
        v.bucket.stats.TickErr(err)
        v.bucket.stats.TickInBytes(uint64(count))
-       return int(count), v.translateError(err)
+       return v.translateError(err)
 }
 
-func (v *S3AWSVolume) writeObject(ctx context.Context, key string, r io.Reader) error {
+func (v *s3Volume) writeObject(ctx context.Context, key string, r io.Reader) error {
        if r == nil {
                // r == nil leads to a memory violation in func readFillBuf in
                // aws-sdk-go-v2@v0.23.0/service/s3/s3manager/upload.go
@@ -547,10 +484,10 @@ func (v *S3AWSVolume) writeObject(ctx context.Context, key string, r io.Reader)
        // Experimentation indicated that using concurrency 5 yields the best
        // throughput, better than higher concurrency (10 or 13) by ~5%.
        // Defining u.BufferProvider = s3manager.NewBufferedReadSeekerWriteToPool(64 * 1024 * 1024)
-       // is detrimental to througput (minus ~15%).
+       // is detrimental to throughput (minus ~15%).
        uploader := s3manager.NewUploaderWithClient(v.bucket.svc, func(u *s3manager.Uploader) {
-               u.PartSize = PartSize
-               u.Concurrency = WriteConcurrency
+               u.PartSize = s3uploaderPartSize
+               u.Concurrency = s3uploaderWriteConcurrency
        })
 
        // Unlike the goamz S3 driver, we don't need to precompute ContentSHA256:
@@ -571,16 +508,12 @@ func (v *S3AWSVolume) writeObject(ctx context.Context, key string, r io.Reader)
 }
 
-// Put writes a block.
+// BlockWrite writes a block.
-func (v *S3AWSVolume) Put(ctx context.Context, loc string, block []byte) error {
+func (v *s3Volume) BlockWrite(ctx context.Context, hash string, data []byte) error {
        // Do not use putWithPipe here; we want to pass an io.ReadSeeker to the S3
        // sdk to avoid memory allocation there. See #17339 for more information.
-       if v.volume.ReadOnly {
-               return MethodDisabledError
-       }
-
-       rdr := bytes.NewReader(block)
-       r := NewCountingReaderAtSeeker(rdr, v.bucket.stats.TickOutBytes)
-       key := v.key(loc)
+       rdr := bytes.NewReader(data)
+       r := newCountingReaderAtSeeker(rdr, v.bucket.stats.TickOutBytes)
+       key := v.key(hash)
        err := v.writeObject(ctx, key, r)
        if err != nil {
                return err
@@ -590,7 +523,7 @@ func (v *S3AWSVolume) Put(ctx context.Context, loc string, block []byte) error {
 
 type s3awsLister struct {
        Logger            logrus.FieldLogger
-       Bucket            *s3AWSbucket
+       Bucket            *s3Bucket
        Prefix            string
        PageSize          int
        Stats             *s3awsbucketStats
@@ -675,9 +608,9 @@ func (lister *s3awsLister) pop() (k *s3.Object) {
        return
 }
 
-// IndexTo writes a complete list of locators with the given prefix
+// Index writes a complete list of locators with the given prefix
-// for which Get() can retrieve data.
+// for which BlockRead() can retrieve data.
-func (v *S3AWSVolume) IndexTo(prefix string, writer io.Writer) error {
+func (v *s3Volume) Index(ctx context.Context, prefix string, writer io.Writer) error {
        prefix = v.key(prefix)
        // Use a merge sort to find matching sets of X and recent/X.
        dataL := s3awsLister{
@@ -695,6 +628,9 @@ func (v *S3AWSVolume) IndexTo(prefix string, writer io.Writer) error {
                Stats:    &v.bucket.stats,
        }
        for data, recent := dataL.First(), recentL.First(); data != nil && dataL.Error() == nil; data = dataL.Next() {
+               if ctx.Err() != nil {
+                       return ctx.Err()
+               }
                if *data.Key >= "g" {
                        // Conveniently, "recent/*" and "trash/*" are
                        // lexically greater than all hex-encoded data
@@ -741,7 +677,7 @@ func (v *S3AWSVolume) IndexTo(prefix string, writer io.Writer) error {
 }
 
 // Mtime returns the stored timestamp for the given locator.
-func (v *S3AWSVolume) Mtime(loc string) (time.Time, error) {
+func (v *s3Volume) Mtime(loc string) (time.Time, error) {
        key := v.key(loc)
        _, err := v.head(key)
        if err != nil {
@@ -769,28 +705,14 @@ func (v *S3AWSVolume) Mtime(loc string) (time.Time, error) {
        return *resp.LastModified, err
 }
 
-// Status returns a *VolumeStatus representing the current in-use
-// storage capacity and a fake available capacity that doesn't make
-// the volume seem full or nearly-full.
-func (v *S3AWSVolume) Status() *VolumeStatus {
-       return &VolumeStatus{
-               DeviceNum: 1,
-               BytesFree: BlockSize * 1000,
-               BytesUsed: 1,
-       }
-}
-
 // InternalStats returns bucket I/O and API call counters.
-func (v *S3AWSVolume) InternalStats() interface{} {
+func (v *s3Volume) InternalStats() interface{} {
        return &v.bucket.stats
 }
 
-// Touch sets the timestamp for the given locator to the current time.
-func (v *S3AWSVolume) Touch(loc string) error {
-       if v.volume.ReadOnly {
-               return MethodDisabledError
-       }
-       key := v.key(loc)
+// BlockTouch sets the timestamp for the given locator to the current time.
+func (v *s3Volume) BlockTouch(hash string) error {
+       key := v.key(hash)
        _, err := v.head(key)
        err = v.translateError(err)
        if os.IsNotExist(err) && v.fixRace(key) {
@@ -805,7 +727,7 @@ func (v *S3AWSVolume) Touch(loc string) error {
 
 // checkRaceWindow returns a non-nil error if trash/key is, or might
 // be, in the race window (i.e., it's not safe to trash key).
-func (v *S3AWSVolume) checkRaceWindow(key string) error {
+func (v *s3Volume) checkRaceWindow(key string) error {
        resp, err := v.head("trash/" + key)
        err = v.translateError(err)
        if os.IsNotExist(err) {
@@ -831,7 +753,7 @@ func (v *S3AWSVolume) checkRaceWindow(key string) error {
        return nil
 }
 
-func (b *s3AWSbucket) Del(path string) error {
+func (b *s3Bucket) Del(path string) error {
        input := &s3.DeleteObjectInput{
                Bucket: aws.String(b.bucket),
                Key:    aws.String(path),
@@ -845,10 +767,7 @@ func (b *s3AWSbucket) Del(path string) error {
 }
 
-// Trash a Keep block.
+// BlockTrash trashes a Keep block.
-func (v *S3AWSVolume) Trash(loc string) error {
-       if v.volume.ReadOnly && !v.volume.AllowTrashWhenReadOnly {
-               return MethodDisabledError
-       }
+func (v *s3Volume) BlockTrash(loc string) error {
        if t, err := v.Mtime(loc); err != nil {
                return err
        } else if time.Since(t) < v.cluster.Collections.BlobSigningTTL.Duration() {
@@ -857,7 +776,7 @@ func (v *S3AWSVolume) Trash(loc string) error {
        key := v.key(loc)
        if v.cluster.Collections.BlobTrashLifetime == 0 {
                if !v.UnsafeDelete {
-                       return ErrS3TrashDisabled
+                       return errS3TrashDisabled
                }
                return v.translateError(v.bucket.Del(key))
        }
@@ -872,9 +791,9 @@ func (v *S3AWSVolume) Trash(loc string) error {
        return v.translateError(v.bucket.Del(key))
 }
 
-// Untrash moves block from trash back into store
-func (v *S3AWSVolume) Untrash(loc string) error {
-       key := v.key(loc)
+// BlockUntrash moves a block from the trash back into the store.
+func (v *s3Volume) BlockUntrash(hash string) error {
+       key := v.key(hash)
        err := v.safeCopy(key, "trash/"+key)
        if err != nil {
                return err
similarity index 77%
rename from services/keepstore/s3aws_volume_test.go
rename to services/keepstore/s3_volume_test.go
index c7e2d485dfc6f793f107947a3340606c993f63ac..fb68e1c0574c338e9c016404e456f623acdcb477 100644 (file)
@@ -32,7 +32,7 @@ import (
 )
 
 const (
-       S3AWSTestBucketName = "testbucket"
+       s3TestBucketName = "testbucket"
 )
 
 type s3AWSFakeClock struct {
@@ -50,19 +50,18 @@ func (c *s3AWSFakeClock) Since(t time.Time) time.Duration {
        return c.Now().Sub(t)
 }
 
-var _ = check.Suite(&StubbedS3AWSSuite{})
+var _ = check.Suite(&stubbedS3Suite{})
 
 var srv httptest.Server
 
-type StubbedS3AWSSuite struct {
+type stubbedS3Suite struct {
        s3server *httptest.Server
        metadata *httptest.Server
        cluster  *arvados.Cluster
-       handler  *handler
-       volumes  []*TestableS3AWSVolume
+       volumes  []*testableS3Volume
 }
 
-func (s *StubbedS3AWSSuite) SetUpTest(c *check.C) {
+func (s *stubbedS3Suite) SetUpTest(c *check.C) {
        s.s3server = nil
        s.metadata = nil
        s.cluster = testCluster(c)
@@ -70,36 +69,41 @@ func (s *StubbedS3AWSSuite) SetUpTest(c *check.C) {
                "zzzzz-nyw5e-000000000000000": {Driver: "S3"},
                "zzzzz-nyw5e-111111111111111": {Driver: "S3"},
        }
-       s.handler = &handler{}
 }
 
-func (s *StubbedS3AWSSuite) TestGeneric(c *check.C) {
-       DoGenericVolumeTests(c, false, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
+func (s *stubbedS3Suite) TestGeneric(c *check.C) {
+       DoGenericVolumeTests(c, false, func(t TB, params newVolumeParams) TestableVolume {
                // Use a negative raceWindow so s3test's 1-second
                // timestamp precision doesn't confuse fixRace.
-               return s.newTestableVolume(c, cluster, volume, metrics, -2*time.Second)
+               return s.newTestableVolume(c, params, -2*time.Second)
        })
 }
 
-func (s *StubbedS3AWSSuite) TestGenericReadOnly(c *check.C) {
-       DoGenericVolumeTests(c, true, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
-               return s.newTestableVolume(c, cluster, volume, metrics, -2*time.Second)
+func (s *stubbedS3Suite) TestGenericReadOnly(c *check.C) {
+       DoGenericVolumeTests(c, true, func(t TB, params newVolumeParams) TestableVolume {
+               return s.newTestableVolume(c, params, -2*time.Second)
        })
 }
 
-func (s *StubbedS3AWSSuite) TestGenericWithPrefix(c *check.C) {
-       DoGenericVolumeTests(c, false, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
-               v := s.newTestableVolume(c, cluster, volume, metrics, -2*time.Second)
+func (s *stubbedS3Suite) TestGenericWithPrefix(c *check.C) {
+       DoGenericVolumeTests(c, false, func(t TB, params newVolumeParams) TestableVolume {
+               v := s.newTestableVolume(c, params, -2*time.Second)
                v.PrefixLength = 3
                return v
        })
 }
 
-func (s *StubbedS3AWSSuite) TestIndex(c *check.C) {
-       v := s.newTestableVolume(c, s.cluster, arvados.Volume{Replication: 2}, newVolumeMetricsVecs(prometheus.NewRegistry()), 0)
+func (s *stubbedS3Suite) TestIndex(c *check.C) {
+       v := s.newTestableVolume(c, newVolumeParams{
+               Cluster:      s.cluster,
+               ConfigVolume: arvados.Volume{Replication: 2},
+               MetricsVecs:  newVolumeMetricsVecs(prometheus.NewRegistry()),
+               BufferPool:   newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
+       }, 0)
        v.IndexPageSize = 3
        for i := 0; i < 256; i++ {
-               v.PutRaw(fmt.Sprintf("%02x%030x", i, i), []byte{102, 111, 111})
+               err := v.blockWriteWithoutMD5Check(fmt.Sprintf("%02x%030x", i, i), []byte{102, 111, 111})
+               c.Assert(err, check.IsNil)
        }
        for _, spec := range []struct {
                prefix      string
@@ -111,7 +115,7 @@ func (s *StubbedS3AWSSuite) TestIndex(c *check.C) {
                {"abc", 0},
        } {
                buf := new(bytes.Buffer)
-               err := v.IndexTo(spec.prefix, buf)
+               err := v.Index(context.Background(), spec.prefix, buf)
                c.Check(err, check.IsNil)
 
                idx := bytes.SplitAfter(buf.Bytes(), []byte{10})
@@ -120,7 +124,7 @@ func (s *StubbedS3AWSSuite) TestIndex(c *check.C) {
        }
 }
 
-func (s *StubbedS3AWSSuite) TestSignature(c *check.C) {
+func (s *stubbedS3Suite) TestSignature(c *check.C) {
        var header http.Header
        stub := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                header = r.Header
@@ -129,7 +133,7 @@ func (s *StubbedS3AWSSuite) TestSignature(c *check.C) {
 
        // The aws-sdk-go-v2 driver only supports S3 V4 signatures. S3 v2 signatures are being phased out
        // as of June 24, 2020. Cf. https://forums.aws.amazon.com/ann.jspa?annID=5816
-       vol := S3AWSVolume{
+       vol := s3Volume{
                S3VolumeDriverParameters: arvados.S3VolumeDriverParameters{
                        AccessKeyID:     "xxx",
                        SecretAccessKey: "xxx",
@@ -146,12 +150,12 @@ func (s *StubbedS3AWSSuite) TestSignature(c *check.C) {
        vol.bucket.svc.ForcePathStyle = true
 
        c.Check(err, check.IsNil)
-       err = vol.Put(context.Background(), "acbd18db4cc2f85cedef654fccc4a4d8", []byte("foo"))
+       err = vol.BlockWrite(context.Background(), "acbd18db4cc2f85cedef654fccc4a4d8", []byte("foo"))
        c.Check(err, check.IsNil)
        c.Check(header.Get("Authorization"), check.Matches, `AWS4-HMAC-SHA256 .*`)
 }
 
-func (s *StubbedS3AWSSuite) TestIAMRoleCredentials(c *check.C) {
+func (s *stubbedS3Suite) TestIAMRoleCredentials(c *check.C) {
        s.metadata = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                upd := time.Now().UTC().Add(-time.Hour).Format(time.RFC3339)
                exp := time.Now().UTC().Add(time.Hour).Format(time.RFC3339)
@@ -162,7 +166,7 @@ func (s *StubbedS3AWSSuite) TestIAMRoleCredentials(c *check.C) {
        }))
        defer s.metadata.Close()
 
-       v := &S3AWSVolume{
+       v := &s3Volume{
                S3VolumeDriverParameters: arvados.S3VolumeDriverParameters{
                        IAMRole:  s.metadata.URL + "/latest/api/token",
                        Endpoint: "http://localhost:12345",
@@ -183,7 +187,7 @@ func (s *StubbedS3AWSSuite) TestIAMRoleCredentials(c *check.C) {
        s.metadata = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                w.WriteHeader(http.StatusNotFound)
        }))
-       deadv := &S3AWSVolume{
+       deadv := &s3Volume{
                S3VolumeDriverParameters: arvados.S3VolumeDriverParameters{
                        IAMRole:  s.metadata.URL + "/fake-metadata/test-role",
                        Endpoint: "http://localhost:12345",
@@ -201,8 +205,13 @@ func (s *StubbedS3AWSSuite) TestIAMRoleCredentials(c *check.C) {
        c.Check(err, check.ErrorMatches, `(?s).*404.*`)
 }
 
-func (s *StubbedS3AWSSuite) TestStats(c *check.C) {
-       v := s.newTestableVolume(c, s.cluster, arvados.Volume{Replication: 2}, newVolumeMetricsVecs(prometheus.NewRegistry()), 5*time.Minute)
+func (s *stubbedS3Suite) TestStats(c *check.C) {
+       v := s.newTestableVolume(c, newVolumeParams{
+               Cluster:      s.cluster,
+               ConfigVolume: arvados.Volume{Replication: 2},
+               MetricsVecs:  newVolumeMetricsVecs(prometheus.NewRegistry()),
+               BufferPool:   newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
+       }, 5*time.Minute)
        stats := func() string {
                buf, err := json.Marshal(v.InternalStats())
                c.Check(err, check.IsNil)
@@ -212,20 +221,20 @@ func (s *StubbedS3AWSSuite) TestStats(c *check.C) {
        c.Check(stats(), check.Matches, `.*"Ops":0,.*`)
 
        loc := "acbd18db4cc2f85cedef654fccc4a4d8"
-       _, err := v.Get(context.Background(), loc, make([]byte, 3))
+       err := v.BlockRead(context.Background(), loc, brdiscard)
        c.Check(err, check.NotNil)
        c.Check(stats(), check.Matches, `.*"Ops":[^0],.*`)
        c.Check(stats(), check.Matches, `.*"s3.requestFailure 404 NoSuchKey[^"]*":[^0].*`)
        c.Check(stats(), check.Matches, `.*"InBytes":0,.*`)
 
-       err = v.Put(context.Background(), loc, []byte("foo"))
+       err = v.BlockWrite(context.Background(), loc, []byte("foo"))
        c.Check(err, check.IsNil)
        c.Check(stats(), check.Matches, `.*"OutBytes":3,.*`)
        c.Check(stats(), check.Matches, `.*"PutOps":2,.*`)
 
-       _, err = v.Get(context.Background(), loc, make([]byte, 3))
+       err = v.BlockRead(context.Background(), loc, brdiscard)
        c.Check(err, check.IsNil)
-       _, err = v.Get(context.Background(), loc, make([]byte, 3))
+       err = v.BlockRead(context.Background(), loc, brdiscard)
        c.Check(err, check.IsNil)
        c.Check(stats(), check.Matches, `.*"InBytes":6,.*`)
 }
@@ -250,40 +259,29 @@ func (h *s3AWSBlockingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
        http.Error(w, "nothing here", http.StatusNotFound)
 }
 
-func (s *StubbedS3AWSSuite) TestGetContextCancel(c *check.C) {
-       loc := "acbd18db4cc2f85cedef654fccc4a4d8"
-       buf := make([]byte, 3)
-
-       s.testContextCancel(c, func(ctx context.Context, v *TestableS3AWSVolume) error {
-               _, err := v.Get(ctx, loc, buf)
-               return err
+func (s *stubbedS3Suite) TestGetContextCancel(c *check.C) {
+       s.testContextCancel(c, func(ctx context.Context, v *testableS3Volume) error {
+               return v.BlockRead(ctx, fooHash, brdiscard)
        })
 }
 
-func (s *StubbedS3AWSSuite) TestCompareContextCancel(c *check.C) {
-       loc := "acbd18db4cc2f85cedef654fccc4a4d8"
-       buf := []byte("bar")
-
-       s.testContextCancel(c, func(ctx context.Context, v *TestableS3AWSVolume) error {
-               return v.Compare(ctx, loc, buf)
+func (s *stubbedS3Suite) TestPutContextCancel(c *check.C) {
+       s.testContextCancel(c, func(ctx context.Context, v *testableS3Volume) error {
+               return v.BlockWrite(ctx, fooHash, []byte("foo"))
        })
 }
 
-func (s *StubbedS3AWSSuite) TestPutContextCancel(c *check.C) {
-       loc := "acbd18db4cc2f85cedef654fccc4a4d8"
-       buf := []byte("foo")
-
-       s.testContextCancel(c, func(ctx context.Context, v *TestableS3AWSVolume) error {
-               return v.Put(ctx, loc, buf)
-       })
-}
-
-func (s *StubbedS3AWSSuite) testContextCancel(c *check.C, testFunc func(context.Context, *TestableS3AWSVolume) error) {
+func (s *stubbedS3Suite) testContextCancel(c *check.C, testFunc func(context.Context, *testableS3Volume) error) {
        handler := &s3AWSBlockingHandler{}
        s.s3server = httptest.NewServer(handler)
        defer s.s3server.Close()
 
-       v := s.newTestableVolume(c, s.cluster, arvados.Volume{Replication: 2}, newVolumeMetricsVecs(prometheus.NewRegistry()), 5*time.Minute)
+       v := s.newTestableVolume(c, newVolumeParams{
+               Cluster:      s.cluster,
+               ConfigVolume: arvados.Volume{Replication: 2},
+               MetricsVecs:  newVolumeMetricsVecs(prometheus.NewRegistry()),
+               BufferPool:   newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
+       }, 5*time.Minute)
 
        ctx, cancel := context.WithCancel(context.Background())
 
@@ -319,11 +317,17 @@ func (s *StubbedS3AWSSuite) testContextCancel(c *check.C, testFunc func(context.
        }
 }
 
-func (s *StubbedS3AWSSuite) TestBackendStates(c *check.C) {
+func (s *stubbedS3Suite) TestBackendStates(c *check.C) {
        s.cluster.Collections.BlobTrashLifetime.Set("1h")
        s.cluster.Collections.BlobSigningTTL.Set("1h")
 
-       v := s.newTestableVolume(c, s.cluster, arvados.Volume{Replication: 2}, newVolumeMetricsVecs(prometheus.NewRegistry()), 5*time.Minute)
+       v := s.newTestableVolume(c, newVolumeParams{
+               Cluster:      s.cluster,
+               ConfigVolume: arvados.Volume{Replication: 2},
+               Logger:       ctxlog.TestLogger(c),
+               MetricsVecs:  newVolumeMetricsVecs(prometheus.NewRegistry()),
+               BufferPool:   newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
+       }, 5*time.Minute)
        var none time.Time
 
        putS3Obj := func(t time.Time, key string, data []byte) {
@@ -475,8 +479,7 @@ func (s *StubbedS3AWSSuite) TestBackendStates(c *check.C) {
 
                        // Check canGet
                        loc, blk := setupScenario()
-                       buf := make([]byte, len(blk))
-                       _, err := v.Get(context.Background(), loc, buf)
+                       err := v.BlockRead(context.Background(), loc, brdiscard)
                        c.Check(err == nil, check.Equals, scenario.canGet)
                        if err != nil {
                                c.Check(os.IsNotExist(err), check.Equals, true)
@@ -484,9 +487,9 @@ func (s *StubbedS3AWSSuite) TestBackendStates(c *check.C) {
 
                        // Call Trash, then check canTrash and canGetAfterTrash
                        loc, _ = setupScenario()
-                       err = v.Trash(loc)
+                       err = v.BlockTrash(loc)
                        c.Check(err == nil, check.Equals, scenario.canTrash)
-                       _, err = v.Get(context.Background(), loc, buf)
+                       err = v.BlockRead(context.Background(), loc, brdiscard)
                        c.Check(err == nil, check.Equals, scenario.canGetAfterTrash)
                        if err != nil {
                                c.Check(os.IsNotExist(err), check.Equals, true)
@@ -494,14 +497,14 @@ func (s *StubbedS3AWSSuite) TestBackendStates(c *check.C) {
 
                        // Call Untrash, then check canUntrash
                        loc, _ = setupScenario()
-                       err = v.Untrash(loc)
+                       err = v.BlockUntrash(loc)
                        c.Check(err == nil, check.Equals, scenario.canUntrash)
                        if scenario.dataT != none || scenario.trashT != none {
                                // In all scenarios where the data exists, we
                                // should be able to Get after Untrash --
                                // regardless of timestamps, errors, race
                                // conditions, etc.
-                               _, err = v.Get(context.Background(), loc, buf)
+                               err = v.BlockRead(context.Background(), loc, brdiscard)
                                c.Check(err, check.IsNil)
                        }
 
@@ -522,7 +525,7 @@ func (s *StubbedS3AWSSuite) TestBackendStates(c *check.C) {
                        // Check for current Mtime after Put (applies to all
                        // scenarios)
                        loc, blk = setupScenario()
-                       err = v.Put(context.Background(), loc, blk)
+                       err = v.BlockWrite(context.Background(), loc, blk)
                        c.Check(err, check.IsNil)
                        t, err := v.Mtime(loc)
                        c.Check(err, check.IsNil)
@@ -531,8 +534,8 @@ func (s *StubbedS3AWSSuite) TestBackendStates(c *check.C) {
        }
 }
 
-type TestableS3AWSVolume struct {
-       *S3AWSVolume
+type testableS3Volume struct {
+       *s3Volume
        server      *httptest.Server
        c           *check.C
        serverClock *s3AWSFakeClock
@@ -555,7 +558,7 @@ func (l LogrusLog) Print(level gofakes3.LogLevel, v ...interface{}) {
        }
 }
 
-func (s *StubbedS3AWSSuite) newTestableVolume(c *check.C, cluster *arvados.Cluster, volume arvados.Volume, metrics *volumeMetricsVecs, raceWindow time.Duration) *TestableS3AWSVolume {
+func (s *stubbedS3Suite) newTestableVolume(c *check.C, params newVolumeParams, raceWindow time.Duration) *testableS3Volume {
 
        clock := &s3AWSFakeClock{}
        // fake s3
@@ -578,48 +581,48 @@ func (s *StubbedS3AWSSuite) newTestableVolume(c *check.C, cluster *arvados.Clust
                iamRole, accessKey, secretKey = s.metadata.URL+"/fake-metadata/test-role", "", ""
        }
 
-       v := &TestableS3AWSVolume{
-               S3AWSVolume: &S3AWSVolume{
+       v := &testableS3Volume{
+               s3Volume: &s3Volume{
                        S3VolumeDriverParameters: arvados.S3VolumeDriverParameters{
                                IAMRole:            iamRole,
                                AccessKeyID:        accessKey,
                                SecretAccessKey:    secretKey,
-                               Bucket:             S3AWSTestBucketName,
+                               Bucket:             s3TestBucketName,
                                Endpoint:           endpoint,
                                Region:             "test-region-1",
                                LocationConstraint: true,
                                UnsafeDelete:       true,
                                IndexPageSize:      1000,
                        },
-                       cluster: cluster,
-                       volume:  volume,
-                       logger:  ctxlog.TestLogger(c),
-                       metrics: metrics,
+                       cluster:    params.Cluster,
+                       volume:     params.ConfigVolume,
+                       logger:     params.Logger,
+                       metrics:    params.MetricsVecs,
+                       bufferPool: params.BufferPool,
                },
                c:           c,
                server:      srv,
                serverClock: clock,
        }
-       c.Assert(v.S3AWSVolume.check(""), check.IsNil)
+       c.Assert(v.s3Volume.check(""), check.IsNil)
        // Our test S3 server uses the older 'Path Style'
-       v.S3AWSVolume.bucket.svc.ForcePathStyle = true
+       v.s3Volume.bucket.svc.ForcePathStyle = true
        // Create the testbucket
        input := &s3.CreateBucketInput{
-               Bucket: aws.String(S3AWSTestBucketName),
+               Bucket: aws.String(s3TestBucketName),
        }
-       req := v.S3AWSVolume.bucket.svc.CreateBucketRequest(input)
+       req := v.s3Volume.bucket.svc.CreateBucketRequest(input)
        _, err := req.Send(context.Background())
        c.Assert(err, check.IsNil)
        // We couldn't set RaceWindow until now because check()
        // rejects negative values.
-       v.S3AWSVolume.RaceWindow = arvados.Duration(raceWindow)
+       v.s3Volume.RaceWindow = arvados.Duration(raceWindow)
        return v
 }
 
-// PutRaw skips the ContentMD5 test
-func (v *TestableS3AWSVolume) PutRaw(loc string, block []byte) {
+func (v *testableS3Volume) blockWriteWithoutMD5Check(loc string, block []byte) error {
        key := v.key(loc)
-       r := NewCountingReader(bytes.NewReader(block), v.bucket.stats.TickOutBytes)
+       r := newCountingReader(bytes.NewReader(block), v.bucket.stats.TickOutBytes)
 
        uploader := s3manager.NewUploaderWithClient(v.bucket.svc, func(u *s3manager.Uploader) {
                u.PartSize = 5 * 1024 * 1024
@@ -632,7 +635,7 @@ func (v *TestableS3AWSVolume) PutRaw(loc string, block []byte) {
                Body:   r,
        })
        if err != nil {
-               v.logger.Printf("PutRaw: %s: %+v", key, err)
+               return err
        }
 
        empty := bytes.NewReader([]byte{})
@@ -641,15 +644,13 @@ func (v *TestableS3AWSVolume) PutRaw(loc string, block []byte) {
                Key:    aws.String("recent/" + key),
                Body:   empty,
        })
-       if err != nil {
-               v.logger.Printf("PutRaw: recent/%s: %+v", key, err)
-       }
+       return err
 }
 
 // TouchWithDate turns back the clock while doing a Touch(). We assume
 // there are no other operations happening on the same s3test server
 // while we do this.
-func (v *TestableS3AWSVolume) TouchWithDate(loc string, lastPut time.Time) {
+func (v *testableS3Volume) TouchWithDate(loc string, lastPut time.Time) {
        v.serverClock.now = &lastPut
 
        uploader := s3manager.NewUploaderWithClient(v.bucket.svc)
@@ -666,10 +667,10 @@ func (v *TestableS3AWSVolume) TouchWithDate(loc string, lastPut time.Time) {
        v.serverClock.now = nil
 }
 
-func (v *TestableS3AWSVolume) Teardown() {
+func (v *testableS3Volume) Teardown() {
        v.server.Close()
 }
 
-func (v *TestableS3AWSVolume) ReadWriteOperationLabelValues() (r, w string) {
+func (v *testableS3Volume) ReadWriteOperationLabelValues() (r, w string) {
        return "get", "put"
 }
diff --git a/services/keepstore/status_test.go b/services/keepstore/status_test.go
deleted file mode 100644 (file)
index 80f98ad..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-import (
-       "encoding/json"
-)
-
-// We don't have isolated unit tests for /status.json yet, but we do
-// check (e.g., in pull_worker_test.go) that /status.json reports
-// specific statistics correctly at the appropriate times.
-
-// getStatusItem("foo","bar","baz") retrieves /status.json, decodes
-// the response body into resp, and returns resp["foo"]["bar"]["baz"].
-func getStatusItem(h *handler, keys ...string) interface{} {
-       resp := IssueRequest(h, &RequestTester{"/status.json", "", "GET", nil, ""})
-       var s interface{}
-       json.NewDecoder(resp.Body).Decode(&s)
-       for _, k := range keys {
-               s = s.(map[string]interface{})[k]
-       }
-       return s
-}
diff --git a/services/keepstore/streamwriterat.go b/services/keepstore/streamwriterat.go
new file mode 100644 (file)
index 0000000..02dce6e
--- /dev/null
@@ -0,0 +1,160 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package keepstore
+
+import (
+       "errors"
+       "fmt"
+       "io"
+       "sync"
+)
+
+// streamWriterAt translates random-access writes to sequential
+// writes. The caller is expected to use an arbitrary sequence of
+// non-overlapping WriteAt calls covering all positions between 0 and
+// N, for some N <= len(buf), then call Close.
+//
+// streamWriterAt writes the data to the provided io.Writer in
+// sequential order.
+//
+// streamWriterAt can also be wrapped with an io.OffsetWriter to
+// provide an asynchronous buffer: the caller can use the io.Writer
+// interface to write into a memory buffer and return without waiting
+// for the wrapped writer to catch up.
+//
+// Close returns when all data has been written through.
+type streamWriterAt struct {
+       writer     io.Writer
+       buf        []byte
+       writepos   int         // target offset if Write is called
+       partsize   int         // size of each part written through to writer
+       endpos     int         // portion of buf actually used, judging by WriteAt calls so far
+       partfilled []int       // number of bytes written to each part so far
+       partready  chan []byte // parts of buf fully written / waiting for writer goroutine
+       partnext   int         // index of next part we will send to partready when it's ready
+       wroteAt    int         // bytes we copied to buf in WriteAt
+       wrote      int         // bytes successfully written through to writer
+       errWrite   chan error  // final outcome of writer goroutine
+       closed     bool        // streamWriterAt has been closed
+       mtx        sync.Mutex  // guard internal fields during concurrent calls to WriteAt and Close
+}
+
+// newStreamWriterAt creates a new streamWriterAt.
+func newStreamWriterAt(w io.Writer, partsize int, buf []byte) *streamWriterAt {
+       if partsize == 0 {
+               partsize = 65536
+       }
+       nparts := (len(buf) + partsize - 1) / partsize
+       swa := &streamWriterAt{
+               writer:     w,
+               partsize:   partsize,
+               buf:        buf,
+               partfilled: make([]int, nparts),
+               partready:  make(chan []byte, nparts),
+               errWrite:   make(chan error, 1),
+       }
+       go swa.writeToWriter()
+       return swa
+}
+
+// Wrote returns the number of bytes written through to the
+// io.Writer.
+//
+// Wrote must not be called until after Close.
+func (swa *streamWriterAt) Wrote() int {
+       return swa.wrote
+}
+
+// WroteAt returns the number of bytes passed to WriteAt, regardless of
+// whether they were written through to the io.Writer.
+func (swa *streamWriterAt) WroteAt() int {
+       swa.mtx.Lock()
+       defer swa.mtx.Unlock()
+       return swa.wroteAt
+}
+
+func (swa *streamWriterAt) writeToWriter() {
+       defer close(swa.errWrite)
+       for p := range swa.partready {
+               n, err := swa.writer.Write(p)
+               if err != nil {
+                       swa.errWrite <- err
+                       return
+               }
+               swa.wrote += n
+       }
+}
+
+// WriteAt implements io.WriterAt. WriteAt is goroutine-safe.
+func (swa *streamWriterAt) WriteAt(p []byte, offset int64) (int, error) {
+       pos := int(offset)
+       n := 0
+       if pos <= len(swa.buf) {
+               n = copy(swa.buf[pos:], p)
+       }
+       if n < len(p) {
+               return n, fmt.Errorf("write beyond end of buffer: offset %d len %d buf %d", offset, len(p), len(swa.buf))
+       }
+       endpos := pos + n
+
+       swa.mtx.Lock()
+       defer swa.mtx.Unlock()
+       swa.wroteAt += len(p)
+       if swa.endpos < endpos {
+               swa.endpos = endpos
+       }
+       if swa.closed {
+               return 0, errors.New("invalid use of closed streamWriterAt")
+       }
+       // Track the number of bytes that landed in each of our
+       // (output) parts.
+       for i := pos; i < endpos; {
+               j := i + swa.partsize - (i % swa.partsize)
+               if j > endpos {
+                       j = endpos
+               }
+               pf := swa.partfilled[i/swa.partsize]
+               pf += j - i
+               if pf > swa.partsize {
+                       return 0, errors.New("streamWriterAt: overlapping WriteAt calls")
+               }
+               swa.partfilled[i/swa.partsize] = pf
+               i = j
+       }
+       // Flush filled parts to partready.
+       for swa.partnext < len(swa.partfilled) && swa.partfilled[swa.partnext] == swa.partsize {
+               offset := swa.partnext * swa.partsize
+               swa.partready <- swa.buf[offset : offset+swa.partsize]
+               swa.partnext++
+       }
+       return len(p), nil
+}
+
+// Close flushes all buffered data through to the io.Writer.
+func (swa *streamWriterAt) Close() error {
+       swa.mtx.Lock()
+       defer swa.mtx.Unlock()
+       if swa.closed {
+               return errors.New("invalid use of closed streamWriterAt")
+       }
+       swa.closed = true
+       // Flush last part if needed. If the input doesn't end on a
+       // part boundary, the last part never appears "filled" when we
+       // check in WriteAt.  But here, we know endpos is the end of
+       // the stream, so we can check whether the last part is ready.
+       if offset := swa.partnext * swa.partsize; offset < swa.endpos && offset+swa.partfilled[swa.partnext] == swa.endpos {
+               swa.partready <- swa.buf[offset:swa.endpos]
+               swa.partnext++
+       }
+       close(swa.partready)
+       err := <-swa.errWrite
+       if err != nil {
+               return err
+       }
+       if swa.wrote != swa.wroteAt {
+               return fmt.Errorf("streamWriterAt: detected hole in input: wrote %d but flushed %d", swa.wroteAt, swa.wrote)
+       }
+       return nil
+}
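
To make the intended calling pattern of the new streamWriterAt concrete, here is a minimal usage sketch (not part of the diff; it assumes it lives in the keepstore package, since the type is unexported): non-overlapping WriteAt calls may arrive out of order and from multiple goroutines, and Close blocks until every byte has been written through to the wrapped io.Writer in sequential order.

func exampleStreamWriterAt(dst io.Writer) error {
        data := []byte("hello, keepstore")
        // partsize 4: each 4-byte part is flushed to dst as soon as it has
        // been completely covered by WriteAt calls.
        swa := newStreamWriterAt(dst, 4, make([]byte, len(data)))
        var wg sync.WaitGroup
        for _, r := range [][2]int{{8, len(data)}, {0, 8}} { // deliberately out of order
                r := r
                wg.Add(1)
                go func() {
                        defer wg.Done()
                        swa.WriteAt(data[r[0]:r[1]], int64(r[0]))
                }()
        }
        wg.Wait()
        return swa.Close() // nil only after dst has received all 16 bytes in order
}

This is also the pattern unixVolume.BlockRead relies on later in this commit, where the caller-supplied io.WriterAt is wrapped in io.NewOffsetWriter and filled sequentially.
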
diff --git a/services/keepstore/streamwriterat_test.go b/services/keepstore/streamwriterat_test.go
new file mode 100644 (file)
index 0000000..fe6837e
--- /dev/null
@@ -0,0 +1,83 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package keepstore
+
+import (
+       "bytes"
+       "sync"
+
+       . "gopkg.in/check.v1"
+)
+
+var _ = Suite(&streamWriterAtSuite{})
+
+type streamWriterAtSuite struct{}
+
+func (s *streamWriterAtSuite) TestPartSizes(c *C) {
+       for partsize := 1; partsize < 5; partsize++ {
+               for writesize := 1; writesize < 5; writesize++ {
+                       for datasize := 1; datasize < 100; datasize += 13 {
+                               for bufextra := 0; bufextra < 5; bufextra++ {
+                                       c.Logf("=== partsize %d writesize %d datasize %d bufextra %d", partsize, writesize, datasize, bufextra)
+                                       outbuf := bytes.NewBuffer(nil)
+                                       indata := make([]byte, datasize)
+                                       for i := range indata {
+                                               indata[i] = byte(i)
+                                       }
+                                       swa := newStreamWriterAt(outbuf, partsize, make([]byte, datasize+bufextra))
+                                       var wg sync.WaitGroup
+                                       for pos := 0; pos < datasize; pos += writesize {
+                                               pos := pos
+                                               wg.Add(1)
+                                               go func() {
+                                                       defer wg.Done()
+                                                       endpos := pos + writesize
+                                                       if endpos > datasize {
+                                                               endpos = datasize
+                                                       }
+                                                       swa.WriteAt(indata[pos:endpos], int64(pos))
+                                               }()
+                                       }
+                                       wg.Wait()
+                                       swa.Close()
+                                       c.Check(outbuf.Bytes(), DeepEquals, indata)
+                               }
+                       }
+               }
+       }
+}
+
+func (s *streamWriterAtSuite) TestOverflow(c *C) {
+       for offset := -1; offset < 2; offset++ {
+               buf := make([]byte, 50)
+               swa := newStreamWriterAt(bytes.NewBuffer(nil), 20, buf)
+               _, err := swa.WriteAt([]byte("foo"), int64(len(buf)+offset))
+               c.Check(err, NotNil)
+               err = swa.Close()
+               c.Check(err, IsNil)
+       }
+}
+
+func (s *streamWriterAtSuite) TestIncompleteWrite(c *C) {
+       for _, partsize := range []int{20, 25} {
+               for _, bufsize := range []int{50, 55, 60} {
+                       for offset := 0; offset < 3; offset++ {
+                               swa := newStreamWriterAt(bytes.NewBuffer(nil), partsize, make([]byte, bufsize))
+                               _, err := swa.WriteAt(make([]byte, 1), 49)
+                               c.Check(err, IsNil)
+                               _, err = swa.WriteAt(make([]byte, 46), int64(offset))
+                               c.Check(err, IsNil)
+                               err = swa.Close()
+                               c.Check(err, NotNil)
+                               c.Check(swa.WroteAt(), Equals, 47)
+                               if offset == 0 {
+                                       c.Check(swa.Wrote(), Equals, 40/partsize*partsize)
+                               } else {
+                                       c.Check(swa.Wrote(), Equals, 0)
+                               }
+                       }
+               }
+       }
+}
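
For readers puzzling over the 40/partsize*partsize expectation above: a part is only flushed once it is completely covered, so a gap holds back the part containing it and everything after it. A condensed sketch of the same failure mode as TestIncompleteWrite with partsize 20 and offset 0 (again assuming the keepstore package; io.Discard stands in for a real destination):

func exampleHoleDetection() error {
        swa := newStreamWriterAt(io.Discard, 20, make([]byte, 50))
        swa.WriteAt(make([]byte, 1), 49) // byte 49 only
        swa.WriteAt(make([]byte, 46), 0) // bytes 0-45; bytes 46-48 never arrive
        // Parts [0,20) and [20,40) are complete and flushed (Wrote()==40),
        // but 47 bytes were accepted (WroteAt()==47), so Close reports the hole.
        return swa.Close()
}
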
index 5e8a5a963ceaad37527e652ca19460dce349fcd4..819c25acc1385d11202256bc73ac7e94ed92ab49 100644 (file)
 package keepstore
 
 import (
-       "errors"
+       "context"
+       "sync"
+       "sync/atomic"
        "time"
 
        "git.arvados.org/arvados.git/sdk/go/arvados"
-       "github.com/sirupsen/logrus"
+       "github.com/prometheus/client_golang/prometheus"
 )
 
-// RunTrashWorker processes the trash request queue.
-func RunTrashWorker(volmgr *RRVolumeManager, logger logrus.FieldLogger, cluster *arvados.Cluster, trashq *WorkQueue) {
-       for item := range trashq.NextItem {
-               trashRequest := item.(TrashRequest)
-               TrashItem(volmgr, logger, cluster, trashRequest)
-               trashq.DoneItem <- struct{}{}
-       }
+type TrashListItem struct {
+       Locator    string `json:"locator"`
+       BlockMtime int64  `json:"block_mtime"`
+       MountUUID  string `json:"mount_uuid"` // Target mount, or "" for "everywhere"
+}
+
+type trasher struct {
+       keepstore  *keepstore
+       todo       []TrashListItem
+       cond       *sync.Cond // lock guards todo accesses; cond broadcasts when todo becomes non-empty
+       inprogress atomic.Int64
 }
 
-// TrashItem deletes the indicated block from every writable volume.
-func TrashItem(volmgr *RRVolumeManager, logger logrus.FieldLogger, cluster *arvados.Cluster, trashRequest TrashRequest) {
-       reqMtime := time.Unix(0, trashRequest.BlockMtime)
-       if time.Since(reqMtime) < cluster.Collections.BlobSigningTTL.Duration() {
-               logger.Warnf("client asked to delete a %v old block %v (BlockMtime %d = %v), but my blobSignatureTTL is %v! Skipping.",
-                       arvados.Duration(time.Since(reqMtime)),
-                       trashRequest.Locator,
-                       trashRequest.BlockMtime,
-                       reqMtime,
-                       cluster.Collections.BlobSigningTTL)
-               return
+func newTrasher(ctx context.Context, keepstore *keepstore, reg *prometheus.Registry) *trasher {
+       t := &trasher{
+               keepstore: keepstore,
+               cond:      sync.NewCond(&sync.Mutex{}),
+       }
+       reg.MustRegister(prometheus.NewGaugeFunc(
+               prometheus.GaugeOpts{
+                       Namespace: "arvados",
+                       Subsystem: "keepstore",
+                       Name:      "trash_queue_pending_entries",
+                       Help:      "Number of queued trash requests",
+               },
+               func() float64 {
+                       t.cond.L.Lock()
+                       defer t.cond.L.Unlock()
+                       return float64(len(t.todo))
+               },
+       ))
+       reg.MustRegister(prometheus.NewGaugeFunc(
+               prometheus.GaugeOpts{
+                       Namespace: "arvados",
+                       Subsystem: "keepstore",
+                       Name:      "trash_queue_inprogress_entries",
+                       Help:      "Number of trash requests in progress",
+               },
+               func() float64 {
+                       return float64(t.inprogress.Load())
+               },
+       ))
+       if !keepstore.cluster.Collections.BlobTrash {
+               keepstore.logger.Info("not running trash worker because Collections.BlobTrash == false")
+               return t
        }
 
-       var volumes []*VolumeMount
-       if uuid := trashRequest.MountUUID; uuid == "" {
-               volumes = volmgr.Mounts()
-       } else if mnt := volmgr.Lookup(uuid, false); mnt == nil {
-               logger.Warnf("trash request for nonexistent mount: %v", trashRequest)
-               return
-       } else if !mnt.KeepMount.AllowTrash {
-               logger.Warnf("trash request for mount with ReadOnly=true, AllowTrashWhenReadOnly=false: %v", trashRequest)
+       var mntsAllowTrash []*mount
+       for _, mnt := range t.keepstore.mounts {
+               if mnt.AllowTrash {
+                       mntsAllowTrash = append(mntsAllowTrash, mnt)
+               }
+       }
+       if len(mntsAllowTrash) == 0 {
+               t.keepstore.logger.Info("not running trash worker because there are no writable or trashable volumes")
        } else {
-               volumes = []*VolumeMount{mnt}
+               for i := 0; i < keepstore.cluster.Collections.BlobTrashConcurrency; i++ {
+                       go t.runWorker(ctx, mntsAllowTrash)
+               }
        }
+       return t
+}
+
+func (t *trasher) SetTrashList(newlist []TrashListItem) {
+       t.cond.L.Lock()
+       t.todo = newlist
+       t.cond.L.Unlock()
+       t.cond.Broadcast()
+}
 
-       for _, volume := range volumes {
-               mtime, err := volume.Mtime(trashRequest.Locator)
-               if err != nil {
-                       logger.WithError(err).Errorf("%v Trash(%v)", volume, trashRequest.Locator)
-                       continue
+func (t *trasher) runWorker(ctx context.Context, mntsAllowTrash []*mount) {
+       go func() {
+               <-ctx.Done()
+               t.cond.Broadcast()
+       }()
+       for {
+               t.cond.L.Lock()
+               for len(t.todo) == 0 && ctx.Err() == nil {
+                       t.cond.Wait()
                }
-               if trashRequest.BlockMtime != mtime.UnixNano() {
-                       logger.Infof("%v Trash(%v): stored mtime %v does not match trash list value %v; skipping", volume, trashRequest.Locator, mtime.UnixNano(), trashRequest.BlockMtime)
-                       continue
+               if ctx.Err() != nil {
+                       t.cond.L.Unlock()
+                       return
                }
+               item := t.todo[0]
+               t.todo = t.todo[1:]
+               t.inprogress.Add(1)
+               t.cond.L.Unlock()
 
-               if !cluster.Collections.BlobTrash {
-                       err = errors.New("skipping because Collections.BlobTrash is false")
-               } else {
-                       err = volume.Trash(trashRequest.Locator)
-               }
+               func() {
+                       defer t.inprogress.Add(-1)
+                       logger := t.keepstore.logger.WithField("locator", item.Locator)
 
-               if err != nil {
-                       logger.WithError(err).Errorf("%v Trash(%v)", volume, trashRequest.Locator)
-               } else {
-                       logger.Infof("%v Trash(%v) OK", volume, trashRequest.Locator)
-               }
+                       li, err := getLocatorInfo(item.Locator)
+                       if err != nil {
+                               logger.Warn("ignoring trash request for invalid locator")
+                               return
+                       }
+
+                       reqMtime := time.Unix(0, item.BlockMtime)
+                       if time.Since(reqMtime) < t.keepstore.cluster.Collections.BlobSigningTTL.Duration() {
+                               logger.Warnf("client asked to delete a %v old block (BlockMtime %d = %v), but my blobSignatureTTL is %v! Skipping.",
+                                       arvados.Duration(time.Since(reqMtime)),
+                                       item.BlockMtime,
+                                       reqMtime,
+                                       t.keepstore.cluster.Collections.BlobSigningTTL)
+                               return
+                       }
+
+                       var mnts []*mount
+                       if item.MountUUID == "" {
+                               mnts = mntsAllowTrash
+                       } else if mnt := t.keepstore.mounts[item.MountUUID]; mnt == nil {
+                               logger.Warnf("ignoring trash request for nonexistent mount %s", item.MountUUID)
+                               return
+                       } else if !mnt.AllowTrash {
+                               logger.Warnf("ignoring trash request for readonly mount %s with AllowTrashWhenReadOnly==false", item.MountUUID)
+                               return
+                       } else {
+                               mnts = []*mount{mnt}
+                       }
+
+                       for _, mnt := range mnts {
+                               logger := logger.WithField("mount", mnt.UUID)
+                               mtime, err := mnt.Mtime(li.hash)
+                               if err != nil {
+                                       logger.WithError(err).Error("error getting stored mtime")
+                                       continue
+                               }
+                               if !mtime.Equal(reqMtime) {
+                                       logger.Infof("stored mtime (%v) does not match trash list mtime (%v); skipping", mtime, reqMtime)
+                                       continue
+                               }
+                               err = mnt.BlockTrash(li.hash)
+                               if err != nil {
+                                       logger.WithError(err).Info("error trashing block")
+                                       continue
+                               }
+                               logger.Info("block trashed")
+                       }
+               }()
        }
 }
+
+type trashEmptier struct{}
+
+func newTrashEmptier(ctx context.Context, ks *keepstore, reg *prometheus.Registry) *trashEmptier {
+       d := ks.cluster.Collections.BlobTrashCheckInterval.Duration()
+       if d <= 0 ||
+               !ks.cluster.Collections.BlobTrash ||
+               ks.cluster.Collections.BlobDeleteConcurrency <= 0 {
+               ks.logger.Infof("not running trash emptier because disabled by config (enabled=%t, interval=%v, concurrency=%d)", ks.cluster.Collections.BlobTrash, d, ks.cluster.Collections.BlobDeleteConcurrency)
+               return &trashEmptier{}
+       }
+       go func() {
+               ticker := time.NewTicker(d)
+               for {
+                       select {
+                       case <-ctx.Done():
+                               return
+                       case <-ticker.C:
+                       }
+                       for _, mnt := range ks.mounts {
+                               if mnt.KeepMount.AllowTrash {
+                                       mnt.volume.EmptyTrash()
+                               }
+                       }
+               }
+       }()
+       return &trashEmptier{}
+}
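
For reference, the trash list handed to trasher.SetTrashList is just a slice of TrashListItem; a hedged sketch of building the JSON payload follows (the locator, mtime value, and use of encoding/json are illustrative, grounded in the struct tags above and the router test below):

func buildTrashList() ([]byte, error) {
        items := []TrashListItem{{
                Locator:    "acbd18db4cc2f85cedef654fccc4a4d8+3", // placeholder hash+size
                BlockMtime: 1707249451308502672,                  // must equal the stored mtime and be older than BlobSigningTTL
                MountUUID:  "",                                   // "" = trash from every mount that allows it
        }}
        return json.Marshal(items)
}

The router test below PUTs exactly this kind of JSON to /trash with the system root token; the handler decodes it and calls SetTrashList, which replaces whatever was previously queued.
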
index a1648c52cc9312b65339a348a94c306a9d5c1c29..0c304dbadec5498d8f736bb83cfeab88cbda6de4 100644 (file)
 package keepstore
 
 import (
-       "container/list"
        "context"
+       "crypto/md5"
+       "encoding/json"
+       "fmt"
+       "net/http"
+       "sort"
        "time"
 
-       "git.arvados.org/arvados.git/sdk/go/ctxlog"
-       "github.com/prometheus/client_golang/prometheus"
-       check "gopkg.in/check.v1"
+       "git.arvados.org/arvados.git/sdk/go/arvados"
+       . "gopkg.in/check.v1"
 )
 
-type TrashWorkerTestData struct {
-       Locator1    string
-       Block1      []byte
-       BlockMtime1 int64
-
-       Locator2    string
-       Block2      []byte
-       BlockMtime2 int64
-
-       CreateData      bool
-       CreateInVolume1 bool
-
-       UseTrashLifeTime bool
-       DifferentMtimes  bool
-
-       DeleteLocator    string
-       SpecifyMountUUID bool
-
-       ExpectLocator1 bool
-       ExpectLocator2 bool
-}
-
-// Delete block that does not exist in any of the keep volumes.
-// Expect no errors.
-func (s *HandlerSuite) TestTrashWorkerIntegration_GetNonExistingLocator(c *check.C) {
-       s.cluster.Collections.BlobTrash = true
-       testData := TrashWorkerTestData{
-               Locator1: "5d41402abc4b2a76b9719d911017c592",
-               Block1:   []byte("hello"),
-
-               Locator2: "5d41402abc4b2a76b9719d911017c592",
-               Block2:   []byte("hello"),
-
-               CreateData: false,
-
-               DeleteLocator: "5d41402abc4b2a76b9719d911017c592",
-
-               ExpectLocator1: false,
-               ExpectLocator2: false,
-       }
-       s.performTrashWorkerTest(c, testData)
-}
-
-// Delete a block that exists on volume 1 of the keep servers. Expect
-// the second locator in volume 2 to be unaffected.
-func (s *HandlerSuite) TestTrashWorkerIntegration_LocatorInVolume1(c *check.C) {
-       s.cluster.Collections.BlobTrash = true
-       testData := TrashWorkerTestData{
-               Locator1: TestHash,
-               Block1:   TestBlock,
-
-               Locator2: TestHash2,
-               Block2:   TestBlock2,
-
-               CreateData: true,
-
-               DeleteLocator: TestHash, // first locator
-
-               ExpectLocator1: false,
-               ExpectLocator2: true,
-       }
-       s.performTrashWorkerTest(c, testData)
-}
-
-// Delete a block that exists on volume 2 of the keep servers. Expect
-// the first locator in volume 1 to be unaffected.
-func (s *HandlerSuite) TestTrashWorkerIntegration_LocatorInVolume2(c *check.C) {
-       s.cluster.Collections.BlobTrash = true
-       testData := TrashWorkerTestData{
-               Locator1: TestHash,
-               Block1:   TestBlock,
-
-               Locator2: TestHash2,
-               Block2:   TestBlock2,
-
-               CreateData: true,
-
-               DeleteLocator: TestHash2, // locator 2
-
-               ExpectLocator1: true,
-               ExpectLocator2: false,
-       }
-       s.performTrashWorkerTest(c, testData)
-}
-
-// Delete a block with matching mtime for locator in both
-// volumes. Expect locator to be deleted from both volumes.
-func (s *HandlerSuite) TestTrashWorkerIntegration_LocatorInBothVolumes(c *check.C) {
-       s.cluster.Collections.BlobTrash = true
-       testData := TrashWorkerTestData{
-               Locator1: TestHash,
-               Block1:   TestBlock,
-
-               Locator2: TestHash,
-               Block2:   TestBlock,
-
-               CreateData: true,
-
-               DeleteLocator: TestHash,
-
-               ExpectLocator1: false,
-               ExpectLocator2: false,
-       }
-       s.performTrashWorkerTest(c, testData)
-}
-
-// Same locator with different Mtimes exists in both volumes. Delete
-// the second and expect the first to be still around.
-func (s *HandlerSuite) TestTrashWorkerIntegration_MtimeMatchesForLocator1ButNotForLocator2(c *check.C) {
-       s.cluster.Collections.BlobTrash = true
-       testData := TrashWorkerTestData{
-               Locator1: TestHash,
-               Block1:   TestBlock,
-
-               Locator2: TestHash,
-               Block2:   TestBlock,
-
-               CreateData:      true,
-               DifferentMtimes: true,
-
-               DeleteLocator: TestHash,
-
-               ExpectLocator1: true,
-               ExpectLocator2: false,
-       }
-       s.performTrashWorkerTest(c, testData)
-}
-
-// Delete a block that exists on both volumes with matching mtimes,
-// but specify a MountUUID in the request so it only gets deleted from
-// the first volume.
-func (s *HandlerSuite) TestTrashWorkerIntegration_SpecifyMountUUID(c *check.C) {
-       s.cluster.Collections.BlobTrash = true
-       testData := TrashWorkerTestData{
-               Locator1: TestHash,
-               Block1:   TestBlock,
-
-               Locator2: TestHash,
-               Block2:   TestBlock,
-
-               CreateData: true,
-
-               DeleteLocator:    TestHash,
-               SpecifyMountUUID: true,
-
-               ExpectLocator1: true,
-               ExpectLocator2: true,
-       }
-       s.performTrashWorkerTest(c, testData)
-}
-
-// Two different locators in volume 1. Delete one of them. Expect the
-// other unaffected.
-func (s *HandlerSuite) TestTrashWorkerIntegration_TwoDifferentLocatorsInVolume1(c *check.C) {
-       s.cluster.Collections.BlobTrash = true
-       testData := TrashWorkerTestData{
-               Locator1: TestHash,
-               Block1:   TestBlock,
-
-               Locator2: TestHash2,
-               Block2:   TestBlock2,
-
-               CreateData:      true,
-               CreateInVolume1: true,
-
-               DeleteLocator: TestHash, // locator 1
-
-               ExpectLocator1: false,
-               ExpectLocator2: true,
-       }
-       s.performTrashWorkerTest(c, testData)
-}
-
-// Allow default Trash Life time to be used. Thus, the newly created
-// block will not be deleted because its Mtime is within the trash
-// life time.
-func (s *HandlerSuite) TestTrashWorkerIntegration_SameLocatorInTwoVolumesWithDefaultTrashLifeTime(c *check.C) {
-       s.cluster.Collections.BlobTrash = true
-       testData := TrashWorkerTestData{
-               Locator1: TestHash,
-               Block1:   TestBlock,
-
-               Locator2: TestHash2,
-               Block2:   TestBlock2,
-
-               CreateData:      true,
-               CreateInVolume1: true,
-
-               UseTrashLifeTime: true,
-
-               DeleteLocator: TestHash, // locator 1
-
-               // Since trash life time is in effect, block won't be deleted.
-               ExpectLocator1: true,
-               ExpectLocator2: true,
-       }
-       s.performTrashWorkerTest(c, testData)
-}
-
-// Delete a block with matching mtime for locator in both volumes, but
-// EnableDelete is false, so block won't be deleted.
-func (s *HandlerSuite) TestTrashWorkerIntegration_DisabledDelete(c *check.C) {
+func (s *routerSuite) TestTrashList_Clear(c *C) {
        s.cluster.Collections.BlobTrash = false
-       testData := TrashWorkerTestData{
-               Locator1: TestHash,
-               Block1:   TestBlock,
-
-               Locator2: TestHash,
-               Block2:   TestBlock,
-
-               CreateData: true,
-
-               DeleteLocator: TestHash,
-
-               ExpectLocator1: true,
-               ExpectLocator2: true,
-       }
-       s.performTrashWorkerTest(c, testData)
+       router, cancel := testRouter(c, s.cluster, nil)
+       defer cancel()
+
+       resp := call(router, "PUT", "http://example/trash", s.cluster.SystemRootToken, []byte(`
+               [
+                {
+                 "locator":"acbd18db4cc2f85cedef654fccc4a4d8+3",
+                 "block_mtime":1707249451308502672,
+                 "mount_uuid":"zzzzz-nyw5e-000000000000000"
+                }
+               ]
+               `), nil)
+       c.Check(resp.Code, Equals, http.StatusOK)
+       c.Check(router.trasher.todo, DeepEquals, []TrashListItem{{
+               Locator:    "acbd18db4cc2f85cedef654fccc4a4d8+3",
+               BlockMtime: 1707249451308502672,
+               MountUUID:  "zzzzz-nyw5e-000000000000000",
+       }})
+
+       resp = call(router, "PUT", "http://example/trash", s.cluster.SystemRootToken, []byte("[]"), nil)
+       c.Check(resp.Code, Equals, http.StatusOK)
+       c.Check(router.trasher.todo, HasLen, 0)
 }
 
-func (s *HandlerSuite) performTrashWorkerTest(c *check.C, testData TrashWorkerTestData) {
-       c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-       // Replace the router's trashq -- which the worker goroutines
-       // started by setup() are now receiving from -- with a new
-       // one, so we can see what the handler sends to it.
-       trashq := NewWorkQueue()
-       s.handler.Handler.(*router).trashq = trashq
-
-       // Put test content
-       mounts := s.handler.volmgr.AllWritable()
-       if testData.CreateData {
-               mounts[0].Put(context.Background(), testData.Locator1, testData.Block1)
-               mounts[0].Put(context.Background(), testData.Locator1+".meta", []byte("metadata"))
-
-               if testData.CreateInVolume1 {
-                       mounts[0].Put(context.Background(), testData.Locator2, testData.Block2)
-                       mounts[0].Put(context.Background(), testData.Locator2+".meta", []byte("metadata"))
-               } else {
-                       mounts[1].Put(context.Background(), testData.Locator2, testData.Block2)
-                       mounts[1].Put(context.Background(), testData.Locator2+".meta", []byte("metadata"))
-               }
-       }
-
-       oldBlockTime := time.Now().Add(-s.cluster.Collections.BlobSigningTTL.Duration() - time.Minute)
-
-       // Create TrashRequest for the test
-       trashRequest := TrashRequest{
-               Locator:    testData.DeleteLocator,
-               BlockMtime: oldBlockTime.UnixNano(),
-       }
-       if testData.SpecifyMountUUID {
-               trashRequest.MountUUID = s.handler.volmgr.Mounts()[0].UUID
-       }
-
-       // Run trash worker and put the trashRequest on trashq
-       trashList := list.New()
-       trashList.PushBack(trashRequest)
-
-       if !testData.UseTrashLifeTime {
-               // Trash worker would not delete block if its Mtime is
-               // within trash life time. Back-date the block to
-               // allow the deletion to succeed.
-               for _, mnt := range mounts {
-                       mnt.Volume.(*MockVolume).Timestamps[testData.DeleteLocator] = oldBlockTime
-                       if testData.DifferentMtimes {
-                               oldBlockTime = oldBlockTime.Add(time.Second)
+func (s *routerSuite) TestTrashList_Execute(c *C) {
+       s.cluster.Collections.BlobTrashConcurrency = 1
+       s.cluster.Volumes = map[string]arvados.Volume{
+               "zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "stub"},
+               "zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "stub"},
+               "zzzzz-nyw5e-222222222222222": {Replication: 1, Driver: "stub", ReadOnly: true},
+               "zzzzz-nyw5e-333333333333333": {Replication: 1, Driver: "stub", ReadOnly: true, AllowTrashWhenReadOnly: true},
+       }
+       router, cancel := testRouter(c, s.cluster, nil)
+       defer cancel()
+
+       var mounts []struct {
+               UUID     string
+               DeviceID string `json:"device_id"`
+       }
+       resp := call(router, "GET", "http://example/mounts", s.cluster.SystemRootToken, nil, nil)
+       c.Check(resp.Code, Equals, http.StatusOK)
+       err := json.Unmarshal(resp.Body.Bytes(), &mounts)
+       c.Assert(err, IsNil)
+       c.Assert(mounts, HasLen, 4)
+
+       // Sort mounts by UUID
+       sort.Slice(mounts, func(i, j int) bool {
+               return mounts[i].UUID < mounts[j].UUID
+       })
+
+       // Make vols (stub volumes) in same order as mounts
+       var vols []*stubVolume
+       for _, mount := range mounts {
+               vols = append(vols, router.keepstore.mounts[mount.UUID].volume.(*stubVolume))
+       }
+
+       // The "trial" loop below will construct the trashList which
+       // we'll send to trasher via router, plus a slice of checks
+       // which we'll run after the trasher has finished executing
+       // the list.
+       var trashList []TrashListItem
+       var checks []func()
+
+       tNew := time.Now().Add(-s.cluster.Collections.BlobSigningTTL.Duration() / 2)
+       tOld := time.Now().Add(-s.cluster.Collections.BlobSigningTTL.Duration() - time.Second)
+
+       for _, trial := range []struct {
+               comment        string
+               storeMtime     []time.Time
+               trashListItems []TrashListItem
+               expectData     []bool
+       }{
+               {
+                       comment:    "timestamp matches, but is not old enough to trash => skip",
+                       storeMtime: []time.Time{tNew},
+                       trashListItems: []TrashListItem{
+                               {
+                                       BlockMtime: tNew.UnixNano(),
+                                       MountUUID:  mounts[0].UUID,
+                               },
+                       },
+                       expectData: []bool{true},
+               },
+               {
+                       comment:    "timestamp matches, and is old enough => trash",
+                       storeMtime: []time.Time{tOld},
+                       trashListItems: []TrashListItem{
+                               {
+                                       BlockMtime: tOld.UnixNano(),
+                                       MountUUID:  mounts[0].UUID,
+                               },
+                       },
+                       expectData: []bool{false},
+               },
+               {
+                       comment:    "timestamp matches and is old enough on mount 0, but the request specifies mount 1, where timestamp does not match => skip",
+                       storeMtime: []time.Time{tOld, tOld.Add(-time.Second)},
+                       trashListItems: []TrashListItem{
+                               {
+                                       BlockMtime: tOld.UnixNano(),
+                                       MountUUID:  mounts[1].UUID,
+                               },
+                       },
+                       expectData: []bool{true, true},
+               },
+               {
+                       comment:    "MountUUID unspecified => trash from any mount where timestamp matches, leave alone elsewhere",
+                       storeMtime: []time.Time{tOld, tOld.Add(-time.Second)},
+                       trashListItems: []TrashListItem{
+                               {
+                                       BlockMtime: tOld.UnixNano(),
+                               },
+                       },
+                       expectData: []bool{false, true},
+               },
+               {
+                       comment:    "MountUUID unspecified => trash from multiple mounts if timestamp matches, but skip readonly volumes unless AllowTrashWhenReadOnly",
+                       storeMtime: []time.Time{tOld, tOld, tOld, tOld},
+                       trashListItems: []TrashListItem{
+                               {
+                                       BlockMtime: tOld.UnixNano(),
+                               },
+                       },
+                       expectData: []bool{false, false, true, false},
+               },
+               {
+                       comment:    "readonly MountUUID specified => skip",
+                       storeMtime: []time.Time{tOld, tOld, tOld},
+                       trashListItems: []TrashListItem{
+                               {
+                                       BlockMtime: tOld.UnixNano(),
+                                       MountUUID:  mounts[2].UUID,
+                               },
+                       },
+                       expectData: []bool{true, true, true},
+               },
+       } {
+               trial := trial
+               data := []byte(fmt.Sprintf("trial %+v", trial))
+               hash := fmt.Sprintf("%x", md5.Sum(data))
+               for i, t := range trial.storeMtime {
+                       if t.IsZero() {
+                               continue
                        }
+                       err := vols[i].BlockWrite(context.Background(), hash, data)
+                       c.Assert(err, IsNil)
+                       err = vols[i].blockTouchWithTime(hash, t)
+                       c.Assert(err, IsNil)
                }
-       }
-       go RunTrashWorker(s.handler.volmgr, ctxlog.TestLogger(c), s.cluster, trashq)
-
-       // Install gate so all local operations block until we say go
-       gate := make(chan struct{})
-       for _, mnt := range mounts {
-               mnt.Volume.(*MockVolume).Gate = gate
-       }
-
-       assertStatusItem := func(k string, expect float64) {
-               if v := getStatusItem(s.handler, "TrashQueue", k); v != expect {
-                       c.Errorf("Got %s %v, expected %v", k, v, expect)
-               }
-       }
-
-       assertStatusItem("InProgress", 0)
-       assertStatusItem("Queued", 0)
-
-       listLen := trashList.Len()
-       trashq.ReplaceQueue(trashList)
-
-       // Wait for worker to take request(s)
-       expectEqualWithin(c, time.Second, listLen, func() interface{} { return trashq.Status().InProgress })
-
-       // Ensure status.json also reports work is happening
-       assertStatusItem("InProgress", float64(1))
-       assertStatusItem("Queued", float64(listLen-1))
-
-       // Let worker proceed
-       close(gate)
-
-       // Wait for worker to finish
-       expectEqualWithin(c, time.Second, 0, func() interface{} { return trashq.Status().InProgress })
-
-       // Verify Locator1 to be un/deleted as expected
-       buf := make([]byte, BlockSize)
-       size, err := GetBlock(context.Background(), s.handler.volmgr, testData.Locator1, buf, nil)
-       if testData.ExpectLocator1 {
-               if size == 0 || err != nil {
-                       c.Errorf("Expected Locator1 to be still present: %s", testData.Locator1)
+               for _, item := range trial.trashListItems {
+                       item.Locator = fmt.Sprintf("%s+%d", hash, len(data))
+                       trashList = append(trashList, item)
                }
-       } else {
-               if size > 0 || err == nil {
-                       c.Errorf("Expected Locator1 to be deleted: %s", testData.Locator1)
+               for i, expect := range trial.expectData {
+                       i, expect := i, expect
+                       checks = append(checks, func() {
+                               ent := vols[i].data[hash]
+                               dataPresent := ent.data != nil && ent.trash.IsZero()
+                               c.Check(dataPresent, Equals, expect, Commentf("%s mount %d (%s) expect present=%v but got len(ent.data)=%d ent.trash=%v // %s\nlog:\n%s", hash, i, vols[i].params.UUID, expect, len(ent.data), !ent.trash.IsZero(), trial.comment, vols[i].stubLog.String()))
+                       })
                }
        }
 
-       // Verify Locator2 to be un/deleted as expected
-       if testData.Locator1 != testData.Locator2 {
-               size, err = GetBlock(context.Background(), s.handler.volmgr, testData.Locator2, buf, nil)
-               if testData.ExpectLocator2 {
-                       if size == 0 || err != nil {
-                               c.Errorf("Expected Locator2 to be still present: %s", testData.Locator2)
-                       }
-               } else {
-                       if size > 0 || err == nil {
-                               c.Errorf("Expected Locator2 to be deleted: %s", testData.Locator2)
-                       }
+       listjson, err := json.Marshal(trashList)
+       resp = call(router, "PUT", "http://example/trash", s.cluster.SystemRootToken, listjson, nil)
+       c.Check(resp.Code, Equals, http.StatusOK)
+
+       for {
+               router.trasher.cond.L.Lock()
+               todolen := len(router.trasher.todo)
+               router.trasher.cond.L.Unlock()
+               if todolen == 0 && router.trasher.inprogress.Load() == 0 {
+                       break
                }
+               time.Sleep(time.Millisecond)
        }
 
-       // The DifferentMtimes test puts the same locator in two
-       // different volumes, but only one copy has an Mtime matching
-       // the trash request.
-       if testData.DifferentMtimes {
-               locatorFoundIn := 0
-               for _, volume := range s.handler.volmgr.AllReadable() {
-                       buf := make([]byte, BlockSize)
-                       if _, err := volume.Get(context.Background(), testData.Locator1, buf); err == nil {
-                               locatorFoundIn = locatorFoundIn + 1
-                       }
-               }
-               c.Check(locatorFoundIn, check.Equals, 1)
+       for _, check := range checks {
+               check()
        }
 }
index dee4bdc1c1ed1d59badb076dedac2615eef3f2d7..92cf12ac189803d4f72f120708aced520a252c7f 100644 (file)
@@ -28,20 +28,27 @@ import (
 )
 
 func init() {
-       driver["Directory"] = newDirectoryVolume
+       driver["Directory"] = newUnixVolume
 }
 
-func newDirectoryVolume(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (Volume, error) {
-       v := &UnixVolume{cluster: cluster, volume: volume, logger: logger, metrics: metrics}
-       err := json.Unmarshal(volume.DriverParameters, &v)
+func newUnixVolume(params newVolumeParams) (volume, error) {
+       v := &unixVolume{
+               uuid:       params.UUID,
+               cluster:    params.Cluster,
+               volume:     params.ConfigVolume,
+               logger:     params.Logger,
+               metrics:    params.MetricsVecs,
+               bufferPool: params.BufferPool,
+       }
+       err := json.Unmarshal(params.ConfigVolume.DriverParameters, &v)
        if err != nil {
                return nil, err
        }
-       v.logger = v.logger.WithField("Volume", v.String())
+       v.logger = v.logger.WithField("Volume", v.DeviceID())
        return v, v.check()
 }
 
-func (v *UnixVolume) check() error {
+func (v *unixVolume) check() error {
        if v.Root == "" {
                return errors.New("DriverParameters.Root was not provided")
        }
@@ -53,22 +60,24 @@ func (v *UnixVolume) check() error {
        }
 
        // Set up prometheus metrics
-       lbls := prometheus.Labels{"device_id": v.GetDeviceID()}
+       lbls := prometheus.Labels{"device_id": v.DeviceID()}
        v.os.stats.opsCounters, v.os.stats.errCounters, v.os.stats.ioBytes = v.metrics.getCounterVecsFor(lbls)
 
        _, err := v.os.Stat(v.Root)
        return err
 }
 
-// A UnixVolume stores and retrieves blocks in a local directory.
-type UnixVolume struct {
+// A unixVolume stores and retrieves blocks in a local directory.
+type unixVolume struct {
        Root      string // path to the volume's root directory
        Serialize bool
 
-       cluster *arvados.Cluster
-       volume  arvados.Volume
-       logger  logrus.FieldLogger
-       metrics *volumeMetricsVecs
+       uuid       string
+       cluster    *arvados.Cluster
+       volume     arvados.Volume
+       logger     logrus.FieldLogger
+       metrics    *volumeMetricsVecs
+       bufferPool *bufferPool
 
        // something to lock during IO, typically a sync.Mutex (or nil
        // to skip locking)
@@ -77,15 +86,16 @@ type UnixVolume struct {
        os osWithStats
 }
 
-// GetDeviceID returns a globally unique ID for the volume's root
+// DeviceID returns a globally unique ID for the volume's root
 // directory, consisting of the filesystem's UUID and the path from
 // filesystem root to storage directory, joined by "/". For example,
 // the device ID for a local directory "/mnt/xvda1/keep" might be
 // "fa0b6166-3b55-4994-bd3f-92f4e00a1bb0/keep".
-func (v *UnixVolume) GetDeviceID() string {
+func (v *unixVolume) DeviceID() string {
        giveup := func(f string, args ...interface{}) string {
-               v.logger.Infof(f+"; using blank DeviceID for volume %s", append(args, v)...)
-               return ""
+               v.logger.Infof(f+"; using hostname:path for volume %s", append(args, v.uuid)...)
+               host, _ := os.Hostname()
+               return host + ":" + v.Root
        }
        buf, err := exec.Command("findmnt", "--noheadings", "--target", v.Root).CombinedOutput()
        if err != nil {
@@ -154,12 +164,9 @@ func (v *UnixVolume) GetDeviceID() string {
        return giveup("could not find entry in %q matching %q", udir, dev)
 }
 
-// Touch sets the timestamp for the given locator to the current time
-func (v *UnixVolume) Touch(loc string) error {
-       if v.volume.ReadOnly {
-               return MethodDisabledError
-       }
-       p := v.blockPath(loc)
+// BlockTouch sets the timestamp for the given locator to the current time
+func (v *unixVolume) BlockTouch(hash string) error {
+       p := v.blockPath(hash)
        f, err := v.os.OpenFile(p, os.O_RDWR|os.O_APPEND, 0644)
        if err != nil {
                return err
@@ -182,7 +189,7 @@ func (v *UnixVolume) Touch(loc string) error {
 }
 
 // Mtime returns the stored timestamp for the given locator.
-func (v *UnixVolume) Mtime(loc string) (time.Time, error) {
+func (v *unixVolume) Mtime(loc string) (time.Time, error) {
        p := v.blockPath(loc)
        fi, err := v.os.Stat(p)
        if err != nil {
@@ -191,94 +198,59 @@ func (v *UnixVolume) Mtime(loc string) (time.Time, error) {
        return fi.ModTime(), nil
 }
 
-// Lock the locker (if one is in use), open the file for reading, and
-// call the given function if and when the file is ready to read.
-func (v *UnixVolume) getFunc(ctx context.Context, path string, fn func(io.Reader) error) error {
-       if err := v.lock(ctx); err != nil {
-               return err
-       }
-       defer v.unlock()
-       f, err := v.os.Open(path)
-       if err != nil {
-               return err
-       }
-       defer f.Close()
-       return fn(NewCountingReader(ioutil.NopCloser(f), v.os.stats.TickInBytes))
-}
-
 // stat is os.Stat() with some extra sanity checks.
-func (v *UnixVolume) stat(path string) (os.FileInfo, error) {
+func (v *unixVolume) stat(path string) (os.FileInfo, error) {
        stat, err := v.os.Stat(path)
        if err == nil {
                if stat.Size() < 0 {
                        err = os.ErrInvalid
                } else if stat.Size() > BlockSize {
-                       err = TooLongError
+                       err = errTooLarge
                }
        }
        return stat, err
 }
 
-// Get retrieves a block, copies it to the given slice, and returns
-// the number of bytes copied.
-func (v *UnixVolume) Get(ctx context.Context, loc string, buf []byte) (int, error) {
-       return getWithPipe(ctx, loc, buf, v)
-}
-
-// ReadBlock implements BlockReader.
-func (v *UnixVolume) ReadBlock(ctx context.Context, loc string, w io.Writer) error {
-       path := v.blockPath(loc)
+// BlockRead reads a block from the volume.
+func (v *unixVolume) BlockRead(ctx context.Context, hash string, w io.WriterAt) error {
+       path := v.blockPath(hash)
        stat, err := v.stat(path)
        if err != nil {
                return v.translateError(err)
        }
-       return v.getFunc(ctx, path, func(rdr io.Reader) error {
-               n, err := io.Copy(w, rdr)
-               if err == nil && n != stat.Size() {
-                       err = io.ErrUnexpectedEOF
-               }
+       if err := v.lock(ctx); err != nil {
                return err
-       })
-}
-
-// Compare returns nil if Get(loc) would return the same content as
-// expect. It is functionally equivalent to Get() followed by
-// bytes.Compare(), but uses less memory.
-func (v *UnixVolume) Compare(ctx context.Context, loc string, expect []byte) error {
-       path := v.blockPath(loc)
-       if _, err := v.stat(path); err != nil {
-               return v.translateError(err)
        }
-       return v.getFunc(ctx, path, func(rdr io.Reader) error {
-               return compareReaderWithBuf(ctx, rdr, expect, loc[:32])
-       })
-}
-
-// Put stores a block of data identified by the locator string
-// "loc".  It returns nil on success.  If the volume is full, it
-// returns a FullError.  If the write fails due to some other error,
-// that error is returned.
-func (v *UnixVolume) Put(ctx context.Context, loc string, block []byte) error {
-       return putWithPipe(ctx, loc, block, v)
+       defer v.unlock()
+       f, err := v.os.Open(path)
+       if err != nil {
+               return err
+       }
+       defer f.Close()
+       src := newCountingReader(ioutil.NopCloser(f), v.os.stats.TickInBytes)
+       dst := io.NewOffsetWriter(w, 0)
+       n, err := io.Copy(dst, src)
+       if err == nil && n != stat.Size() {
+               err = io.ErrUnexpectedEOF
+       }
+       return err
 }
 
-// WriteBlock implements BlockWriter.
-func (v *UnixVolume) WriteBlock(ctx context.Context, loc string, rdr io.Reader) error {
-       if v.volume.ReadOnly {
-               return MethodDisabledError
+// BlockWrite stores a block on the volume. If it already exists, its
+// timestamp is updated.
+func (v *unixVolume) BlockWrite(ctx context.Context, hash string, data []byte) error {
+       if v.isFull() {
+               return errFull
        }
-       if v.IsFull() {
-               return FullError
-       }
-       bdir := v.blockDir(loc)
+       bdir := v.blockDir(hash)
        if err := os.MkdirAll(bdir, 0755); err != nil {
                return fmt.Errorf("error creating directory %s: %s", bdir, err)
        }
 
-       bpath := v.blockPath(loc)
-       tmpfile, err := v.os.TempFile(bdir, "tmp"+loc)
+       bpath := v.blockPath(hash)
+       tmpfile, err := v.os.TempFile(bdir, "tmp"+hash)
        if err != nil {
-               return fmt.Errorf("TempFile(%s, tmp%s) failed: %s", bdir, loc, err)
+               return fmt.Errorf("TempFile(%s, tmp%s) failed: %s", bdir, hash, err)
        }
        defer v.os.Remove(tmpfile.Name())
        defer tmpfile.Close()
@@ -287,7 +259,7 @@ func (v *UnixVolume) WriteBlock(ctx context.Context, loc string, rdr io.Reader)
                return err
        }
        defer v.unlock()
-       n, err := io.Copy(tmpfile, rdr)
+       n, err := tmpfile.Write(data)
        v.os.stats.TickOutBytes(uint64(n))
        if err != nil {
                return fmt.Errorf("error writing %s: %s", bpath, err)
@@ -312,56 +284,10 @@ func (v *UnixVolume) WriteBlock(ctx context.Context, loc string, rdr io.Reader)
        return nil
 }
 
-// Status returns a VolumeStatus struct describing the volume's
-// current state, or nil if an error occurs.
-func (v *UnixVolume) Status() *VolumeStatus {
-       fi, err := v.os.Stat(v.Root)
-       if err != nil {
-               v.logger.WithError(err).Error("stat failed")
-               return nil
-       }
-       // uint64() cast here supports GOOS=darwin where Dev is
-       // int32. If the device number is negative, the unsigned
-       // devnum won't be the real device number any more, but that's
-       // fine -- all we care about is getting the same number each
-       // time.
-       devnum := uint64(fi.Sys().(*syscall.Stat_t).Dev)
-
-       var fs syscall.Statfs_t
-       if err := syscall.Statfs(v.Root, &fs); err != nil {
-               v.logger.WithError(err).Error("statfs failed")
-               return nil
-       }
-       // These calculations match the way df calculates disk usage:
-       // "free" space is measured by fs.Bavail, but "used" space
-       // uses fs.Blocks - fs.Bfree.
-       free := fs.Bavail * uint64(fs.Bsize)
-       used := (fs.Blocks - fs.Bfree) * uint64(fs.Bsize)
-       return &VolumeStatus{
-               MountPoint: v.Root,
-               DeviceNum:  devnum,
-               BytesFree:  free,
-               BytesUsed:  used,
-       }
-}
-
 var blockDirRe = regexp.MustCompile(`^[0-9a-f]+$`)
 var blockFileRe = regexp.MustCompile(`^[0-9a-f]{32}$`)
 
-// IndexTo writes (to the given Writer) a list of blocks found on this
-// volume which begin with the specified prefix. If the prefix is an
-// empty string, IndexTo writes a complete list of blocks.
-//
-// Each block is given in the format
-//
-//     locator+size modification-time {newline}
-//
-// e.g.:
-//
-//     e4df392f86be161ca6ed3773a962b8f3+67108864 1388894303
-//     e4d41e6fd68460e0e3fc18cc746959d2+67108864 1377796043
-//     e4de7a2810f5554cd39b36d8ddb132ff+67108864 1388701136
-func (v *UnixVolume) IndexTo(prefix string, w io.Writer) error {
+func (v *unixVolume) Index(ctx context.Context, prefix string, w io.Writer) error {
        rootdir, err := v.os.Open(v.Root)
        if err != nil {
                return err
@@ -374,6 +300,9 @@ func (v *UnixVolume) IndexTo(prefix string, w io.Writer) error {
                return err
        }
        for _, subdir := range subdirs {
+               if ctx.Err() != nil {
+                       return ctx.Err()
+               }
                if !strings.HasPrefix(subdir, prefix) && !strings.HasPrefix(prefix, subdir) {
                        // prefix excludes all blocks stored in this dir
                        continue
@@ -388,7 +317,9 @@ func (v *UnixVolume) IndexTo(prefix string, w io.Writer) error {
                        v.os.stats.TickOps("readdir")
                        v.os.stats.Tick(&v.os.stats.ReaddirOps)
                        dirents, err = os.ReadDir(blockdirpath)
-                       if err == nil {
+                       if ctx.Err() != nil {
+                               return ctx.Err()
+                       } else if err == nil {
                                break
                        } else if attempt < 5 && strings.Contains(err.Error(), "errno 523") {
                                // EBADCOOKIE (NFS stopped accepting
@@ -402,6 +333,9 @@ func (v *UnixVolume) IndexTo(prefix string, w io.Writer) error {
                }
 
                for _, dirent := range dirents {
+                       if ctx.Err() != nil {
+                               return ctx.Err()
+                       }
                        fileInfo, err := dirent.Info()
                        if os.IsNotExist(err) {
                                // File disappeared between ReadDir() and now
@@ -430,11 +364,11 @@ func (v *UnixVolume) IndexTo(prefix string, w io.Writer) error {
        return nil
 }
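// Illustrative sketch (hypothetical helper, assuming bufio is imported in
// addition to the packages already used in this file): consuming the index
// written above. Each line has the form "{hash}+{size} {timestamp}"; this
// sums the sizes of all listed blocks.
func totalIndexedBytes(index io.Reader) (int64, error) {
        var total int64
        scanner := bufio.NewScanner(index)
        for scanner.Scan() {
                fields := strings.Fields(scanner.Text())
                if len(fields) != 2 {
                        continue
                }
                // fields[0] is "{hash}+{size}"; parse the part after "+".
                if i := strings.IndexByte(fields[0], '+'); i >= 0 {
                        if size, err := strconv.ParseInt(fields[0][i+1:], 10, 64); err == nil {
                                total += size
                        }
                }
        }
        return total, scanner.Err()
}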
 
-// Trash trashes the block data from the unix storage
-// If BlobTrashLifetime == 0, the block is deleted
-// Else, the block is renamed as path/{loc}.trash.{deadline},
-// where deadline = now + BlobTrashLifetime
-func (v *UnixVolume) Trash(loc string) error {
+// BlockTrash trashes the block data from the unix storage.  If
+// BlobTrashLifetime == 0, the block is deleted; otherwise, the block
+// is renamed as path/{loc}.trash.{deadline}, where deadline = now +
+// BlobTrashLifetime.
+func (v *unixVolume) BlockTrash(loc string) error {
        // Touch() must be called before calling Write() on a block.  Touch()
        // also uses lockfile().  This avoids a race condition between Write()
        // and Trash() because either (a) the file will be trashed and Touch()
@@ -442,9 +376,6 @@ func (v *UnixVolume) Trash(loc string) error {
        // be re-written), or (b) Touch() will update the file's timestamp and
        // Trash() will read the correct up-to-date timestamp and choose not to
        // trash the file.
-       if v.volume.ReadOnly && !v.volume.AllowTrashWhenReadOnly {
-               return MethodDisabledError
-       }
        if err := v.lock(context.TODO()); err != nil {
                return err
        }
@@ -477,17 +408,13 @@ func (v *UnixVolume) Trash(loc string) error {
        return v.os.Rename(p, fmt.Sprintf("%v.trash.%d", p, time.Now().Add(v.cluster.Collections.BlobTrashLifetime.Duration()).Unix()))
 }
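// Illustrative sketch of the naming scheme described above (the helper
// names are hypothetical): BlockTrash renames "<dir>/<hash>" to
// "<dir>/<hash>.trash.<deadline>", and EmptyTrash later recovers the hash
// and deadline with unixTrashLocRegexp.
func trashPath(blockPath string, lifetime time.Duration) string {
        deadline := time.Now().Add(lifetime).Unix()
        return fmt.Sprintf("%v.trash.%d", blockPath, deadline)
}

func parseTrashPath(p string) (hash string, deadline int64, ok bool) {
        m := unixTrashLocRegexp.FindStringSubmatch(p)
        if m == nil {
                return "", 0, false
        }
        deadline, err := strconv.ParseInt(m[2], 10, 64)
        if err != nil {
                return "", 0, false
        }
        return m[1], deadline, true
}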
 
-// Untrash moves block from trash back into store
+// BlockUntrash moves a block from the trash back into the store.
 // Look for path/{loc}.trash.{deadline} in storage,
 // and rename the first such file as path/{loc}
-func (v *UnixVolume) Untrash(loc string) (err error) {
-       if v.volume.ReadOnly {
-               return MethodDisabledError
-       }
-
+func (v *unixVolume) BlockUntrash(hash string) error {
        v.os.stats.TickOps("readdir")
        v.os.stats.Tick(&v.os.stats.ReaddirOps)
-       files, err := ioutil.ReadDir(v.blockDir(loc))
+       files, err := ioutil.ReadDir(v.blockDir(hash))
        if err != nil {
                return err
        }
@@ -497,11 +424,11 @@ func (v *UnixVolume) Untrash(loc string) (err error) {
        }
 
        foundTrash := false
-       prefix := fmt.Sprintf("%v.trash.", loc)
+       prefix := fmt.Sprintf("%v.trash.", hash)
        for _, f := range files {
                if strings.HasPrefix(f.Name(), prefix) {
                        foundTrash = true
-                       err = v.os.Rename(v.blockPath(f.Name()), v.blockPath(loc))
+                       err = v.os.Rename(v.blockPath(f.Name()), v.blockPath(hash))
                        if err == nil {
                                break
                        }
@@ -512,24 +439,24 @@ func (v *UnixVolume) Untrash(loc string) (err error) {
                return os.ErrNotExist
        }
 
-       return
+       return nil
 }
 
 // blockDir returns the fully qualified directory name for the directory
 // where loc is (or would be) stored on this volume.
-func (v *UnixVolume) blockDir(loc string) string {
+func (v *unixVolume) blockDir(loc string) string {
        return filepath.Join(v.Root, loc[0:3])
 }
 
 // blockPath returns the fully qualified pathname for the path to loc
 // on this volume.
-func (v *UnixVolume) blockPath(loc string) string {
+func (v *unixVolume) blockPath(loc string) string {
        return filepath.Join(v.blockDir(loc), loc)
 }
 
-// IsFull returns true if the free space on the volume is less than
+// isFull returns true if the free space on the volume is less than
-// MinFreeKilobytes.
+// BlockSize.
-func (v *UnixVolume) IsFull() (isFull bool) {
+func (v *unixVolume) isFull() (isFull bool) {
        fullSymlink := v.Root + "/full"
 
        // Check if the volume has been marked as full in the last hour.
@@ -543,9 +470,9 @@ func (v *UnixVolume) IsFull() (isFull bool) {
        }
 
        if avail, err := v.FreeDiskSpace(); err == nil {
-               isFull = avail < MinFreeKilobytes
+               isFull = avail < BlockSize
        } else {
-               v.logger.WithError(err).Errorf("%s: FreeDiskSpace failed", v)
+               v.logger.WithError(err).Errorf("%s: FreeDiskSpace failed", v.DeviceID())
                isFull = false
        }
 
@@ -559,30 +486,26 @@ func (v *UnixVolume) IsFull() (isFull bool) {
 
-// FreeDiskSpace returns the number of unused 1k blocks available on
-// the volume.
+// FreeDiskSpace returns the number of unused bytes available on the
+// volume.
-func (v *UnixVolume) FreeDiskSpace() (free uint64, err error) {
+func (v *unixVolume) FreeDiskSpace() (free uint64, err error) {
        var fs syscall.Statfs_t
        err = syscall.Statfs(v.Root, &fs)
        if err == nil {
                // Statfs output is not guaranteed to measure free
                // space in terms of 1K blocks.
-               free = fs.Bavail * uint64(fs.Bsize) / 1024
+               free = fs.Bavail * uint64(fs.Bsize)
        }
        return
 }
 
-func (v *UnixVolume) String() string {
-       return fmt.Sprintf("[UnixVolume %s]", v.Root)
-}
-
 // InternalStats returns I/O and filesystem ops counters.
-func (v *UnixVolume) InternalStats() interface{} {
+func (v *unixVolume) InternalStats() interface{} {
        return &v.os.stats
 }
 
 // lock acquires the serialize lock, if one is in use. If ctx is done
 // before the lock is acquired, lock returns ctx.Err() instead of
 // acquiring the lock.
-func (v *UnixVolume) lock(ctx context.Context) error {
+func (v *unixVolume) lock(ctx context.Context) error {
        if v.locker == nil {
                return nil
        }
@@ -606,7 +529,7 @@ func (v *UnixVolume) lock(ctx context.Context) error {
 }
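// Illustrative sketch of the context-aware locking behavior documented
// above (the real implementation is elided from this hunk): acquire the
// serialize lock in a goroutine so the caller can give up when ctx is
// cancelled, and release a late acquisition as soon as it happens.
func lockWithContext(ctx context.Context, l sync.Locker) error {
        locked := make(chan struct{})
        go func() {
                l.Lock()
                close(locked)
        }()
        select {
        case <-locked:
                return nil
        case <-ctx.Done():
                go func() {
                        // The goroutine above will still acquire the lock
                        // eventually; release it so others can proceed.
                        <-locked
                        l.Unlock()
                }()
                return ctx.Err()
        }
}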
 
 // unlock releases the serialize lock, if one is in use.
-func (v *UnixVolume) unlock() {
+func (v *unixVolume) unlock() {
        if v.locker == nil {
                return
        }
@@ -614,7 +537,7 @@ func (v *UnixVolume) unlock() {
 }
 
 // lockfile and unlockfile use flock(2) to manage kernel file locks.
-func (v *UnixVolume) lockfile(f *os.File) error {
+func (v *unixVolume) lockfile(f *os.File) error {
        v.os.stats.TickOps("flock")
        v.os.stats.Tick(&v.os.stats.FlockOps)
        err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX)
@@ -622,7 +545,7 @@ func (v *UnixVolume) lockfile(f *os.File) error {
        return err
 }
 
-func (v *UnixVolume) unlockfile(f *os.File) error {
+func (v *unixVolume) unlockfile(f *os.File) error {
        err := syscall.Flock(int(f.Fd()), syscall.LOCK_UN)
        v.os.stats.TickErr(err)
        return err
@@ -630,7 +553,7 @@ func (v *UnixVolume) unlockfile(f *os.File) error {
 
 // Where appropriate, translate a more specific filesystem error to an
 // error recognized by handlers, like os.ErrNotExist.
-func (v *UnixVolume) translateError(err error) error {
+func (v *unixVolume) translateError(err error) error {
        switch err.(type) {
        case *os.PathError:
                // stat() returns a PathError if the parent directory
@@ -645,7 +568,7 @@ var unixTrashLocRegexp = regexp.MustCompile(`/([0-9a-f]{32})\.trash\.(\d+)$`)
 
 // EmptyTrash walks the hierarchy looking for {hash}.trash.* files
 // and deletes those with deadline < now.
-func (v *UnixVolume) EmptyTrash() {
+func (v *unixVolume) EmptyTrash() {
        var bytesDeleted, bytesInTrash int64
        var blocksDeleted, blocksInTrash int64
 
index 75d9b22de55604cc01a2d1f6f4ffaad7b9b585a7..bcdb5f6358652eb02fe8024b268d02b23f2eb8cf 100644 (file)
@@ -8,91 +8,82 @@ import (
        "bytes"
        "context"
        "encoding/json"
-       "errors"
        "fmt"
-       "io"
        "io/ioutil"
        "os"
        "sync"
        "syscall"
        "time"
 
-       "git.arvados.org/arvados.git/sdk/go/arvados"
        "git.arvados.org/arvados.git/sdk/go/ctxlog"
        "github.com/prometheus/client_golang/prometheus"
-       "github.com/sirupsen/logrus"
        check "gopkg.in/check.v1"
 )
 
-type TestableUnixVolume struct {
-       UnixVolume
+type testableUnixVolume struct {
+       unixVolume
        t TB
 }
 
-// PutRaw writes a Keep block directly into a UnixVolume, even if
-// the volume is readonly.
-func (v *TestableUnixVolume) PutRaw(locator string, data []byte) {
-       defer func(orig bool) {
-               v.volume.ReadOnly = orig
-       }(v.volume.ReadOnly)
-       v.volume.ReadOnly = false
-       err := v.Put(context.Background(), locator, data)
+func (v *testableUnixVolume) TouchWithDate(locator string, lastPut time.Time) {
+       err := syscall.Utime(v.blockPath(locator), &syscall.Utimbuf{Actime: lastPut.Unix(), Modtime: lastPut.Unix()})
        if err != nil {
                v.t.Fatal(err)
        }
 }
 
-func (v *TestableUnixVolume) TouchWithDate(locator string, lastPut time.Time) {
-       err := syscall.Utime(v.blockPath(locator), &syscall.Utimbuf{lastPut.Unix(), lastPut.Unix()})
-       if err != nil {
-               v.t.Fatal(err)
-       }
-}
-
-func (v *TestableUnixVolume) Teardown() {
+func (v *testableUnixVolume) Teardown() {
        if err := os.RemoveAll(v.Root); err != nil {
                v.t.Error(err)
        }
 }
 
-func (v *TestableUnixVolume) ReadWriteOperationLabelValues() (r, w string) {
+func (v *testableUnixVolume) ReadWriteOperationLabelValues() (r, w string) {
        return "open", "create"
 }
 
-var _ = check.Suite(&UnixVolumeSuite{})
+var _ = check.Suite(&unixVolumeSuite{})
 
-type UnixVolumeSuite struct {
-       cluster *arvados.Cluster
-       volumes []*TestableUnixVolume
-       metrics *volumeMetricsVecs
+type unixVolumeSuite struct {
+       params  newVolumeParams
+       volumes []*testableUnixVolume
 }
 
-func (s *UnixVolumeSuite) SetUpTest(c *check.C) {
-       s.cluster = testCluster(c)
-       s.metrics = newVolumeMetricsVecs(prometheus.NewRegistry())
+func (s *unixVolumeSuite) SetUpTest(c *check.C) {
+       logger := ctxlog.TestLogger(c)
+       reg := prometheus.NewRegistry()
+       s.params = newVolumeParams{
+               UUID:        "zzzzz-nyw5e-999999999999999",
+               Cluster:     testCluster(c),
+               Logger:      logger,
+               MetricsVecs: newVolumeMetricsVecs(reg),
+               BufferPool:  newBufferPool(logger, 8, reg),
+       }
 }
 
-func (s *UnixVolumeSuite) TearDownTest(c *check.C) {
+func (s *unixVolumeSuite) TearDownTest(c *check.C) {
        for _, v := range s.volumes {
                v.Teardown()
        }
 }
 
-func (s *UnixVolumeSuite) newTestableUnixVolume(c *check.C, cluster *arvados.Cluster, volume arvados.Volume, metrics *volumeMetricsVecs, serialize bool) *TestableUnixVolume {
+func (s *unixVolumeSuite) newTestableUnixVolume(c *check.C, params newVolumeParams, serialize bool) *testableUnixVolume {
        d, err := ioutil.TempDir("", "volume_test")
        c.Check(err, check.IsNil)
        var locker sync.Locker
        if serialize {
                locker = &sync.Mutex{}
        }
-       v := &TestableUnixVolume{
-               UnixVolume: UnixVolume{
-                       Root:    d,
-                       locker:  locker,
-                       cluster: cluster,
-                       logger:  ctxlog.TestLogger(c),
-                       volume:  volume,
-                       metrics: metrics,
+       v := &testableUnixVolume{
+               unixVolume: unixVolume{
+                       Root:       d,
+                       locker:     locker,
+                       uuid:       params.UUID,
+                       cluster:    params.Cluster,
+                       logger:     params.Logger,
+                       volume:     params.ConfigVolume,
+                       metrics:    params.MetricsVecs,
+                       bufferPool: params.BufferPool,
                },
                t: c,
        }
@@ -101,56 +92,45 @@ func (s *UnixVolumeSuite) newTestableUnixVolume(c *check.C, cluster *arvados.Clu
        return v
 }
 
-// serialize = false; readonly = false
-func (s *UnixVolumeSuite) TestUnixVolumeWithGenericTests(c *check.C) {
-       DoGenericVolumeTests(c, false, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
-               return s.newTestableUnixVolume(c, cluster, volume, metrics, false)
+func (s *unixVolumeSuite) TestUnixVolumeWithGenericTests(c *check.C) {
+       DoGenericVolumeTests(c, false, func(t TB, params newVolumeParams) TestableVolume {
+               return s.newTestableUnixVolume(c, params, false)
        })
 }
 
-// serialize = false; readonly = true
-func (s *UnixVolumeSuite) TestUnixVolumeWithGenericTestsReadOnly(c *check.C) {
-       DoGenericVolumeTests(c, true, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
-               return s.newTestableUnixVolume(c, cluster, volume, metrics, true)
+func (s *unixVolumeSuite) TestUnixVolumeWithGenericTests_ReadOnly(c *check.C) {
+       DoGenericVolumeTests(c, true, func(t TB, params newVolumeParams) TestableVolume {
+               return s.newTestableUnixVolume(c, params, false)
        })
 }
 
-// serialize = true; readonly = false
-func (s *UnixVolumeSuite) TestUnixVolumeWithGenericTestsSerialized(c *check.C) {
-       DoGenericVolumeTests(c, false, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
-               return s.newTestableUnixVolume(c, cluster, volume, metrics, false)
+func (s *unixVolumeSuite) TestUnixVolumeWithGenericTests_Serialized(c *check.C) {
+       DoGenericVolumeTests(c, false, func(t TB, params newVolumeParams) TestableVolume {
+               return s.newTestableUnixVolume(c, params, true)
        })
 }
 
-// serialize = true; readonly = true
-func (s *UnixVolumeSuite) TestUnixVolumeHandlersWithGenericVolumeTests(c *check.C) {
-       DoGenericVolumeTests(c, true, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
-               return s.newTestableUnixVolume(c, cluster, volume, metrics, true)
+func (s *unixVolumeSuite) TestUnixVolumeWithGenericTests_Readonly_Serialized(c *check.C) {
+       DoGenericVolumeTests(c, true, func(t TB, params newVolumeParams) TestableVolume {
+               return s.newTestableUnixVolume(c, params, true)
        })
 }
 
-func (s *UnixVolumeSuite) TestGetNotFound(c *check.C) {
-       v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
+func (s *unixVolumeSuite) TestGetNotFound(c *check.C) {
+       v := s.newTestableUnixVolume(c, s.params, true)
        defer v.Teardown()
-       v.Put(context.Background(), TestHash, TestBlock)
-
-       buf := make([]byte, BlockSize)
-       n, err := v.Get(context.Background(), TestHash2, buf)
-       switch {
-       case os.IsNotExist(err):
-               break
-       case err == nil:
-               c.Errorf("Read should have failed, returned %+q", buf[:n])
-       default:
-               c.Errorf("Read expected ErrNotExist, got: %s", err)
-       }
+       v.BlockWrite(context.Background(), TestHash, TestBlock)
+
+       buf := &brbuffer{}
+       err := v.BlockRead(context.Background(), TestHash2, buf)
+       c.Check(err, check.FitsTypeOf, os.ErrNotExist)
 }
 
-func (s *UnixVolumeSuite) TestPut(c *check.C) {
-       v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
+func (s *unixVolumeSuite) TestPut(c *check.C) {
+       v := s.newTestableUnixVolume(c, s.params, false)
        defer v.Teardown()
 
-       err := v.Put(context.Background(), TestHash, TestBlock)
+       err := v.BlockWrite(context.Background(), TestHash, TestBlock)
        if err != nil {
                c.Error(err)
        }
@@ -163,235 +143,85 @@ func (s *UnixVolumeSuite) TestPut(c *check.C) {
        }
 }
 
-func (s *UnixVolumeSuite) TestPutBadVolume(c *check.C) {
-       v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
+func (s *unixVolumeSuite) TestPutBadVolume(c *check.C) {
+       v := s.newTestableUnixVolume(c, s.params, false)
        defer v.Teardown()
 
        err := os.RemoveAll(v.Root)
        c.Assert(err, check.IsNil)
-       err = v.Put(context.Background(), TestHash, TestBlock)
+       err = v.BlockWrite(context.Background(), TestHash, TestBlock)
        c.Check(err, check.IsNil)
 }
 
-func (s *UnixVolumeSuite) TestUnixVolumeReadonly(c *check.C) {
-       v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{ReadOnly: true, Replication: 1}, s.metrics, false)
-       defer v.Teardown()
-
-       v.PutRaw(TestHash, TestBlock)
-
-       buf := make([]byte, BlockSize)
-       _, err := v.Get(context.Background(), TestHash, buf)
-       if err != nil {
-               c.Errorf("got err %v, expected nil", err)
-       }
-
-       err = v.Put(context.Background(), TestHash, TestBlock)
-       if err != MethodDisabledError {
-               c.Errorf("got err %v, expected MethodDisabledError", err)
-       }
-
-       err = v.Touch(TestHash)
-       if err != MethodDisabledError {
-               c.Errorf("got err %v, expected MethodDisabledError", err)
-       }
-
-       err = v.Trash(TestHash)
-       if err != MethodDisabledError {
-               c.Errorf("got err %v, expected MethodDisabledError", err)
-       }
-}
-
-func (s *UnixVolumeSuite) TestIsFull(c *check.C) {
-       v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
+func (s *unixVolumeSuite) TestIsFull(c *check.C) {
+       v := s.newTestableUnixVolume(c, s.params, false)
        defer v.Teardown()
 
        fullPath := v.Root + "/full"
        now := fmt.Sprintf("%d", time.Now().Unix())
        os.Symlink(now, fullPath)
-       if !v.IsFull() {
-               c.Errorf("%s: claims not to be full", v)
+       if !v.isFull() {
+               c.Error("volume claims not to be full")
        }
        os.Remove(fullPath)
 
        // Test with an expired /full link.
        expired := fmt.Sprintf("%d", time.Now().Unix()-3605)
        os.Symlink(expired, fullPath)
-       if v.IsFull() {
-               c.Errorf("%s: should no longer be full", v)
+       if v.isFull() {
+               c.Error("volume should no longer be full")
        }
 }
 
-func (s *UnixVolumeSuite) TestNodeStatus(c *check.C) {
-       v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
-       defer v.Teardown()
-
-       // Get node status and make a basic sanity check.
-       volinfo := v.Status()
-       if volinfo.MountPoint != v.Root {
-               c.Errorf("GetNodeStatus mount_point %s, expected %s", volinfo.MountPoint, v.Root)
-       }
-       if volinfo.DeviceNum == 0 {
-               c.Errorf("uninitialized device_num in %v", volinfo)
-       }
-       if volinfo.BytesFree == 0 {
-               c.Errorf("uninitialized bytes_free in %v", volinfo)
-       }
-       if volinfo.BytesUsed == 0 {
-               c.Errorf("uninitialized bytes_used in %v", volinfo)
-       }
-}
-
-func (s *UnixVolumeSuite) TestUnixVolumeGetFuncWorkerError(c *check.C) {
-       v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
-       defer v.Teardown()
-
-       v.Put(context.Background(), TestHash, TestBlock)
-       mockErr := errors.New("Mock error")
-       err := v.getFunc(context.Background(), v.blockPath(TestHash), func(rdr io.Reader) error {
-               return mockErr
-       })
-       if err != mockErr {
-               c.Errorf("Got %v, expected %v", err, mockErr)
-       }
-}
-
-func (s *UnixVolumeSuite) TestUnixVolumeGetFuncFileError(c *check.C) {
-       v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
-       defer v.Teardown()
-
-       funcCalled := false
-       err := v.getFunc(context.Background(), v.blockPath(TestHash), func(rdr io.Reader) error {
-               funcCalled = true
-               return nil
-       })
-       if err == nil {
-               c.Errorf("Expected error opening non-existent file")
-       }
-       if funcCalled {
-               c.Errorf("Worker func should not have been called")
-       }
-}
-
-func (s *UnixVolumeSuite) TestUnixVolumeGetFuncWorkerWaitsOnMutex(c *check.C) {
-       v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
-       defer v.Teardown()
-
-       v.Put(context.Background(), TestHash, TestBlock)
-
-       mtx := NewMockMutex()
-       v.locker = mtx
-
-       funcCalled := make(chan struct{})
-       go v.getFunc(context.Background(), v.blockPath(TestHash), func(rdr io.Reader) error {
-               funcCalled <- struct{}{}
-               return nil
-       })
-       select {
-       case mtx.AllowLock <- struct{}{}:
-       case <-funcCalled:
-               c.Fatal("Function was called before mutex was acquired")
-       case <-time.After(5 * time.Second):
-               c.Fatal("Timed out before mutex was acquired")
-       }
-       select {
-       case <-funcCalled:
-       case mtx.AllowUnlock <- struct{}{}:
-               c.Fatal("Mutex was released before function was called")
-       case <-time.After(5 * time.Second):
-               c.Fatal("Timed out waiting for funcCalled")
-       }
-       select {
-       case mtx.AllowUnlock <- struct{}{}:
-       case <-time.After(5 * time.Second):
-               c.Fatal("Timed out waiting for getFunc() to release mutex")
-       }
-}
-
-func (s *UnixVolumeSuite) TestUnixVolumeCompare(c *check.C) {
-       v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
-       defer v.Teardown()
-
-       v.Put(context.Background(), TestHash, TestBlock)
-       err := v.Compare(context.Background(), TestHash, TestBlock)
-       if err != nil {
-               c.Errorf("Got err %q, expected nil", err)
-       }
-
-       err = v.Compare(context.Background(), TestHash, []byte("baddata"))
-       if err != CollisionError {
-               c.Errorf("Got err %q, expected %q", err, CollisionError)
-       }
-
-       v.Put(context.Background(), TestHash, []byte("baddata"))
-       err = v.Compare(context.Background(), TestHash, TestBlock)
-       if err != DiskHashError {
-               c.Errorf("Got err %q, expected %q", err, DiskHashError)
-       }
-
-       if os.Getuid() == 0 {
-               c.Log("skipping 'permission denied' check when running as root")
-       } else {
-               p := fmt.Sprintf("%s/%s/%s", v.Root, TestHash[:3], TestHash)
-               err = os.Chmod(p, 000)
-               c.Assert(err, check.IsNil)
-               err = v.Compare(context.Background(), TestHash, TestBlock)
-               c.Check(err, check.ErrorMatches, ".*permission denied.*")
-       }
-}
-
-func (s *UnixVolumeSuite) TestUnixVolumeContextCancelPut(c *check.C) {
-       v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, true)
+func (s *unixVolumeSuite) TestUnixVolumeContextCancelBlockWrite(c *check.C) {
+       v := s.newTestableUnixVolume(c, s.params, true)
        defer v.Teardown()
        v.locker.Lock()
+       defer v.locker.Unlock()
        ctx, cancel := context.WithCancel(context.Background())
        go func() {
                time.Sleep(50 * time.Millisecond)
                cancel()
-               time.Sleep(50 * time.Millisecond)
-               v.locker.Unlock()
        }()
-       err := v.Put(ctx, TestHash, TestBlock)
+       err := v.BlockWrite(ctx, TestHash, TestBlock)
        if err != context.Canceled {
-               c.Errorf("Put() returned %s -- expected short read / canceled", err)
+               c.Errorf("BlockWrite() returned %s -- expected short read / canceled", err)
        }
 }
 
-func (s *UnixVolumeSuite) TestUnixVolumeContextCancelGet(c *check.C) {
-       v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
+func (s *unixVolumeSuite) TestUnixVolumeContextCancelBlockRead(c *check.C) {
+       v := s.newTestableUnixVolume(c, s.params, true)
        defer v.Teardown()
-       bpath := v.blockPath(TestHash)
-       v.PutRaw(TestHash, TestBlock)
-       os.Remove(bpath)
-       err := syscall.Mkfifo(bpath, 0600)
+       err := v.BlockWrite(context.Background(), TestHash, TestBlock)
        if err != nil {
-               c.Fatalf("Mkfifo %s: %s", bpath, err)
+               c.Fatal(err)
        }
-       defer os.Remove(bpath)
        ctx, cancel := context.WithCancel(context.Background())
+       v.locker.Lock()
+       defer v.locker.Unlock()
        go func() {
                time.Sleep(50 * time.Millisecond)
                cancel()
        }()
-       buf := make([]byte, len(TestBlock))
-       n, err := v.Get(ctx, TestHash, buf)
-       if n == len(TestBlock) || err != context.Canceled {
-               c.Errorf("Get() returned %d, %s -- expected short read / canceled", n, err)
+       buf := &brbuffer{}
+       err = v.BlockRead(ctx, TestHash, buf)
+       if buf.Len() != 0 || err != context.Canceled {
+               c.Errorf("BlockRead() returned %q, %s -- expected short read / canceled", buf.String(), err)
        }
 }
 
-func (s *UnixVolumeSuite) TestStats(c *check.C) {
-       vol := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
+func (s *unixVolumeSuite) TestStats(c *check.C) {
+       vol := s.newTestableUnixVolume(c, s.params, false)
        stats := func() string {
                buf, err := json.Marshal(vol.InternalStats())
                c.Check(err, check.IsNil)
                return string(buf)
        }
 
-       c.Check(stats(), check.Matches, `.*"StatOps":1,.*`) // (*UnixVolume)check() calls Stat() once
+       c.Check(stats(), check.Matches, `.*"StatOps":1,.*`) // (*unixVolume)check() calls Stat() once
        c.Check(stats(), check.Matches, `.*"Errors":0,.*`)
 
-       loc := "acbd18db4cc2f85cedef654fccc4a4d8"
-       _, err := vol.Get(context.Background(), loc, make([]byte, 3))
+       err := vol.BlockRead(context.Background(), fooHash, brdiscard)
        c.Check(err, check.NotNil)
        c.Check(stats(), check.Matches, `.*"StatOps":[^0],.*`)
        c.Check(stats(), check.Matches, `.*"Errors":[^0],.*`)
@@ -400,42 +230,42 @@ func (s *UnixVolumeSuite) TestStats(c *check.C) {
        c.Check(stats(), check.Matches, `.*"OpenOps":0,.*`)
        c.Check(stats(), check.Matches, `.*"CreateOps":0,.*`)
 
-       err = vol.Put(context.Background(), loc, []byte("foo"))
+       err = vol.BlockWrite(context.Background(), fooHash, []byte("foo"))
        c.Check(err, check.IsNil)
        c.Check(stats(), check.Matches, `.*"OutBytes":3,.*`)
        c.Check(stats(), check.Matches, `.*"CreateOps":1,.*`)
        c.Check(stats(), check.Matches, `.*"OpenOps":0,.*`)
        c.Check(stats(), check.Matches, `.*"UtimesOps":1,.*`)
 
-       err = vol.Touch(loc)
+       err = vol.BlockTouch(fooHash)
        c.Check(err, check.IsNil)
        c.Check(stats(), check.Matches, `.*"FlockOps":1,.*`)
        c.Check(stats(), check.Matches, `.*"OpenOps":1,.*`)
        c.Check(stats(), check.Matches, `.*"UtimesOps":2,.*`)
 
-       _, err = vol.Get(context.Background(), loc, make([]byte, 3))
-       c.Check(err, check.IsNil)
-       err = vol.Compare(context.Background(), loc, []byte("foo"))
+       buf := &brbuffer{}
+       err = vol.BlockRead(context.Background(), fooHash, buf)
        c.Check(err, check.IsNil)
-       c.Check(stats(), check.Matches, `.*"InBytes":6,.*`)
-       c.Check(stats(), check.Matches, `.*"OpenOps":3,.*`)
+       c.Check(buf.String(), check.Equals, "foo")
+       c.Check(stats(), check.Matches, `.*"InBytes":3,.*`)
+       c.Check(stats(), check.Matches, `.*"OpenOps":2,.*`)
 
-       err = vol.Trash(loc)
+       err = vol.BlockTrash(fooHash)
        c.Check(err, check.IsNil)
        c.Check(stats(), check.Matches, `.*"FlockOps":2,.*`)
 }
 
-func (s *UnixVolumeSuite) TestSkipUnusedDirs(c *check.C) {
-       vol := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
+func (s *unixVolumeSuite) TestSkipUnusedDirs(c *check.C) {
+       vol := s.newTestableUnixVolume(c, s.params, false)
 
-       err := os.Mkdir(vol.UnixVolume.Root+"/aaa", 0777)
+       err := os.Mkdir(vol.unixVolume.Root+"/aaa", 0777)
        c.Assert(err, check.IsNil)
-       err = os.Mkdir(vol.UnixVolume.Root+"/.aaa", 0777) // EmptyTrash should not look here
+       err = os.Mkdir(vol.unixVolume.Root+"/.aaa", 0777) // EmptyTrash should not look here
        c.Assert(err, check.IsNil)
-       deleteme := vol.UnixVolume.Root + "/aaa/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.trash.1"
+       deleteme := vol.unixVolume.Root + "/aaa/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.trash.1"
        err = ioutil.WriteFile(deleteme, []byte{1, 2, 3}, 0777)
        c.Assert(err, check.IsNil)
-       skipme := vol.UnixVolume.Root + "/.aaa/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.trash.1"
+       skipme := vol.unixVolume.Root + "/.aaa/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.trash.1"
        err = ioutil.WriteFile(skipme, []byte{1, 2, 3}, 0777)
        c.Assert(err, check.IsNil)
        vol.EmptyTrash()
index f597ff578106544c54763c9847a3190b53154130..cd61804913253af6032b25b97892db858a3b0cb7 100644 (file)
@@ -6,426 +6,93 @@ package keepstore
 
 import (
        "context"
-       "crypto/rand"
-       "fmt"
        "io"
-       "math/big"
-       "sort"
-       "sync/atomic"
        "time"
 
        "git.arvados.org/arvados.git/sdk/go/arvados"
        "github.com/sirupsen/logrus"
 )
 
-type BlockWriter interface {
-       // WriteBlock reads all data from r, writes it to a backing
-       // store as "loc", and returns the number of bytes written.
-       WriteBlock(ctx context.Context, loc string, r io.Reader) error
-}
-
-type BlockReader interface {
-       // ReadBlock retrieves data previously stored as "loc" and
-       // writes it to w.
-       ReadBlock(ctx context.Context, loc string, w io.Writer) error
-}
-
-var driver = map[string]func(*arvados.Cluster, arvados.Volume, logrus.FieldLogger, *volumeMetricsVecs) (Volume, error){}
-
-// A Volume is an interface representing a Keep back-end storage unit:
-// for example, a single mounted disk, a RAID array, an Amazon S3 volume,
-// etc.
-type Volume interface {
-       // Get a block: copy the block data into buf, and return the
-       // number of bytes copied.
-       //
-       // loc is guaranteed to consist of 32 or more lowercase hex
-       // digits.
-       //
-       // Get should not verify the integrity of the data: it should
-       // just return whatever was found in its backing
-       // store. (Integrity checking is the caller's responsibility.)
-       //
-       // If an error is encountered that prevents it from
-       // retrieving the data, that error should be returned so the
-       // caller can log (and send to the client) a more useful
-       // message.
-       //
-       // If the error is "not found", and there's no particular
-       // reason to expect the block to be found (other than that a
-       // caller is asking for it), the returned error should satisfy
-       // os.IsNotExist(err): this is a normal condition and will not
-       // be logged as an error (except that a 404 will appear in the
-       // access log if the block is not found on any other volumes
-       // either).
-       //
-       // If the data in the backing store is bigger than len(buf),
-       // then Get is permitted to return an error without reading
-       // any of the data.
-       //
-       // len(buf) will not exceed BlockSize.
-       Get(ctx context.Context, loc string, buf []byte) (int, error)
-
-       // Compare the given data with the stored data (i.e., what Get
-       // would return). If equal, return nil. If not, return
-       // CollisionError or DiskHashError (depending on whether the
-       // data on disk matches the expected hash), or whatever error
-       // was encountered opening/reading the stored data.
-       Compare(ctx context.Context, loc string, data []byte) error
-
-       // Put writes a block to an underlying storage device.
-       //
-       // loc is as described in Get.
-       //
-       // len(block) is guaranteed to be between 0 and BlockSize.
-       //
-       // If a block is already stored under the same name (loc) with
-       // different content, Put must either overwrite the existing
-       // data with the new data or return a non-nil error. When
-       // overwriting existing data, it must never leave the storage
-       // device in an inconsistent state: a subsequent call to Get
-       // must return either the entire old block, the entire new
-       // block, or an error. (An implementation that cannot peform
-       // atomic updates must leave the old data alone and return an
-       // error.)
-       //
-       // Put also sets the timestamp for the given locator to the
-       // current time.
-       //
-       // Put must return a non-nil error unless it can guarantee
-       // that the entire block has been written and flushed to
-       // persistent storage, and that its timestamp is current. Of
-       // course, this guarantee is only as good as the underlying
-       // storage device, but it is Put's responsibility to at least
-       // get whatever guarantee is offered by the storage device.
+// volume is the interface to a back-end storage device.
+type volume interface {
+       // Return a unique identifier for the backend device. If
+       // possible, this should be chosen such that keepstore
+       // processes running on different hosts, and accessing the
+       // same backend device, will return the same string.
        //
-       // Put should not verify that loc==hash(block): this is the
-       // caller's responsibility.
-       Put(ctx context.Context, loc string, block []byte) error
+       // This helps keep-balance avoid redundantly downloading
+       // multiple index listings for the same backend device.
+       DeviceID() string
 
-       // Touch sets the timestamp for the given locator to the
-       // current time.
-       //
-       // loc is as described in Get.
-       //
-       // If invoked at time t0, Touch must guarantee that a
-       // subsequent call to Mtime will return a timestamp no older
-       // than {t0 minus one second}. For example, if Touch is called
-       // at 2015-07-07T01:23:45.67890123Z, it is acceptable for a
-       // subsequent Mtime to return any of the following:
+       // Copy a block from the backend device to writeTo.
        //
-       //   - 2015-07-07T01:23:45.00000000Z
-       //   - 2015-07-07T01:23:45.67890123Z
-       //   - 2015-07-07T01:23:46.67890123Z
-       //   - 2015-07-08T00:00:00.00000000Z
+       // As with all volume methods, the hash argument is a
+       // 32-character hexadecimal string.
        //
-       // It is not acceptable for a subsequente Mtime to return
-       // either of the following:
+       // Data can be written to writeTo in any order, and concurrent
+       // calls to writeTo.WriteAt() are allowed.  However, BlockRead
+       // must not do multiple writes that intersect with any given
+       // byte offset.
        //
-       //   - 2015-07-07T00:00:00.00000000Z -- ERROR
-       //   - 2015-07-07T01:23:44.00000000Z -- ERROR
+       // BlockRead is not expected to verify data integrity.
        //
-       // Touch must return a non-nil error if the timestamp cannot
-       // be updated.
-       Touch(loc string) error
+       // If the indicated block does not exist, or has been trashed,
+       // BlockRead must return os.ErrNotExist.
+       BlockRead(ctx context.Context, hash string, writeTo io.WriterAt) error
 
-       // Mtime returns the stored timestamp for the given locator.
+       // Store a block on the backend device, and set its timestamp
+       // to the current time.
        //
-       // loc is as described in Get.
-       //
-       // Mtime must return a non-nil error if the given block is not
-       // found or the timestamp could not be retrieved.
-       Mtime(loc string) (time.Time, error)
+       // The implementation must ensure that regardless of any
+       // errors encountered while writing, a partially written block
+       // is not left behind: a subsequent BlockRead call must return
+       // either a) the data previously stored under the given hash,
+       // if any, or b) os.ErrNotExist.
+       BlockWrite(ctx context.Context, hash string, data []byte) error
 
-       // IndexTo writes a complete list of locators with the given
-       // prefix for which Get() can retrieve data.
-       //
-       // prefix consists of zero or more lowercase hexadecimal
-       // digits.
-       //
-       // Each locator must be written to the given writer using the
-       // following format:
-       //
-       //   loc "+" size " " timestamp "\n"
-       //
-       // where:
-       //
-       //   - size is the number of bytes of content, given as a
-       //     decimal number with one or more digits
-       //
-       //   - timestamp is the timestamp stored for the locator,
-       //     given as a decimal number of seconds after January 1,
-       //     1970 UTC.
-       //
-       // IndexTo must not write any other data to writer: for
-       // example, it must not write any blank lines.
-       //
-       // If an error makes it impossible to provide a complete
-       // index, IndexTo must return a non-nil error. It is
-       // acceptable to return a non-nil error after writing a
-       // partial index to writer.
-       //
-       // The resulting index is not expected to be sorted in any
-       // particular order.
-       IndexTo(prefix string, writer io.Writer) error
-
-       // Trash moves the block data from the underlying storage
-       // device to trash area. The block then stays in trash for
-       // BlobTrashLifetime before it is actually deleted.
-       //
-       // loc is as described in Get.
-       //
-       // If the timestamp for the given locator is newer than
-       // BlobSigningTTL, Trash must not trash the data.
-       //
-       // If a Trash operation overlaps with any Touch or Put
-       // operations on the same locator, the implementation must
-       // ensure one of the following outcomes:
-       //
-       //   - Touch and Put return a non-nil error, or
-       //   - Trash does not trash the block, or
-       //   - Both of the above.
-       //
-       // If it is possible for the storage device to be accessed by
-       // a different process or host, the synchronization mechanism
-       // should also guard against races with other processes and
-       // hosts. If such a mechanism is not available, there must be
-       // a mechanism for detecting unsafe configurations, alerting
-       // the operator, and aborting or falling back to a read-only
-       // state. In other words, running multiple keepstore processes
-       // with the same underlying storage device must either work
-       // reliably or fail outright.
-       //
-       // Corollary: A successful Touch or Put guarantees a block
-       // will not be trashed for at least BlobSigningTTL seconds.
-       Trash(loc string) error
+       // Update the indicated block's stored timestamp to the
+       // current time.
+       BlockTouch(hash string) error
 
-       // Untrash moves block from trash back into store
-       Untrash(loc string) error
+       // Return the indicated block's stored timestamp.
+       Mtime(hash string) (time.Time, error)
 
-       // Status returns a *VolumeStatus representing the current
-       // in-use and available storage capacity and an
-       // implementation-specific volume identifier (e.g., "mount
-       // point" for a UnixVolume).
-       Status() *VolumeStatus
+       // Mark the indicated block as trash, such that -- unless it
+       // is untrashed before time.Now() + BlobTrashLifetime --
+       // BlockRead returns os.ErrNotExist and the block is not
+       // listed by Index.
+       BlockTrash(hash string) error
 
-       // String returns an identifying label for this volume,
-       // suitable for including in log messages. It should contain
-       // enough information to uniquely identify the underlying
-       // storage device, but should not contain any credentials or
-       // secrets.
-       String() string
+       // Un-mark the indicated block as trash. If the block has not
+       // been trashed, return os.ErrNotExist.
+       BlockUntrash(hash string) error
 
-       // EmptyTrash looks for trashed blocks that exceeded
-       // BlobTrashLifetime and deletes them from the volume.
+       // Permanently delete all blocks that have been marked as
+       // trash for BlobTrashLifetime or longer.
        EmptyTrash()
 
-       // Return a globally unique ID of the underlying storage
-       // device if possible, otherwise "".
-       GetDeviceID() string
-}
-
-// A VolumeWithExamples provides example configs to display in the
-// -help message.
-type VolumeWithExamples interface {
-       Volume
-       Examples() []Volume
-}
-
-// A VolumeManager tells callers which volumes can read, which volumes
-// can write, and on which volume the next write should be attempted.
-type VolumeManager interface {
-       // Mounts returns all mounts (volume attachments).
-       Mounts() []*VolumeMount
-
-       // Lookup returns the mount with the given UUID. Returns nil
-       // if the mount does not exist. If write==true, returns nil if
-       // the mount is not writable.
-       Lookup(uuid string, write bool) *VolumeMount
-
-       // AllReadable returns all mounts.
-       AllReadable() []*VolumeMount
-
-       // AllWritable returns all mounts that aren't known to be in
-       // a read-only state. (There is no guarantee that a write to
-       // one will succeed, though.)
-       AllWritable() []*VolumeMount
-
-       // NextWritable returns the volume where the next new block
-       // should be written. A VolumeManager can select a volume in
-       // order to distribute activity across spindles, fill up disks
-       // with more free space, etc.
-       NextWritable() *VolumeMount
-
-       // VolumeStats returns the ioStats used for tracking stats for
-       // the given Volume.
-       VolumeStats(Volume) *ioStats
-
-       // Close shuts down the volume manager cleanly.
-       Close()
-}
-
-// A VolumeMount is an attachment of a Volume to a VolumeManager.
-type VolumeMount struct {
-       arvados.KeepMount
-       Volume
-}
-
-// Generate a UUID the way API server would for a "KeepVolumeMount"
-// object.
-func (*VolumeMount) generateUUID() string {
-       var max big.Int
-       _, ok := max.SetString("zzzzzzzzzzzzzzz", 36)
-       if !ok {
-               panic("big.Int parse failed")
-       }
-       r, err := rand.Int(rand.Reader, &max)
-       if err != nil {
-               panic(err)
-       }
-       return fmt.Sprintf("zzzzz-ivpuk-%015s", r.Text(36))
-}
-
-// RRVolumeManager is a round-robin VolumeManager: the Nth call to
-// NextWritable returns the (N % len(writables))th writable Volume
-// (where writables are all Volumes v where v.Writable()==true).
-type RRVolumeManager struct {
-       mounts    []*VolumeMount
-       mountMap  map[string]*VolumeMount
-       readables []*VolumeMount
-       writables []*VolumeMount
-       counter   uint32
-       iostats   map[Volume]*ioStats
-}
-
-func makeRRVolumeManager(logger logrus.FieldLogger, cluster *arvados.Cluster, myURL arvados.URL, metrics *volumeMetricsVecs) (*RRVolumeManager, error) {
-       vm := &RRVolumeManager{
-               iostats: make(map[Volume]*ioStats),
-       }
-       vm.mountMap = make(map[string]*VolumeMount)
-       for uuid, cfgvol := range cluster.Volumes {
-               va, ok := cfgvol.AccessViaHosts[myURL]
-               if !ok && len(cfgvol.AccessViaHosts) > 0 {
-                       continue
-               }
-               dri, ok := driver[cfgvol.Driver]
-               if !ok {
-                       return nil, fmt.Errorf("volume %s: invalid driver %q", uuid, cfgvol.Driver)
-               }
-               vol, err := dri(cluster, cfgvol, logger, metrics)
-               if err != nil {
-                       return nil, fmt.Errorf("error initializing volume %s: %s", uuid, err)
-               }
-               sc := cfgvol.StorageClasses
-               if len(sc) == 0 {
-                       sc = map[string]bool{"default": true}
-               }
-               repl := cfgvol.Replication
-               if repl < 1 {
-                       repl = 1
-               }
-               mnt := &VolumeMount{
-                       KeepMount: arvados.KeepMount{
-                               UUID:           uuid,
-                               DeviceID:       vol.GetDeviceID(),
-                               AllowWrite:     !va.ReadOnly && !cfgvol.ReadOnly,
-                               AllowTrash:     !va.ReadOnly && (!cfgvol.ReadOnly || cfgvol.AllowTrashWhenReadOnly),
-                               Replication:    repl,
-                               StorageClasses: sc,
-                       },
-                       Volume: vol,
-               }
-               vm.iostats[vol] = &ioStats{}
-               vm.mounts = append(vm.mounts, mnt)
-               vm.mountMap[uuid] = mnt
-               vm.readables = append(vm.readables, mnt)
-               if mnt.KeepMount.AllowWrite {
-                       vm.writables = append(vm.writables, mnt)
-               }
-               logger.Printf("started volume %s (%s), AllowWrite=%v, AllowTrash=%v", uuid, vol, mnt.AllowWrite, mnt.AllowTrash)
-       }
-       // pri(mnt): return highest priority of any storage class
-       // offered by mnt
-       pri := func(mnt *VolumeMount) int {
-               any, best := false, 0
-               for class := range mnt.KeepMount.StorageClasses {
-                       if p := cluster.StorageClasses[class].Priority; !any || best < p {
-                               best = p
-                               any = true
-                       }
-               }
-               return best
-       }
-       // less(a,b): sort first by highest priority of any offered
-       // storage class (highest->lowest), then by volume UUID
-       less := func(a, b *VolumeMount) bool {
-               if pa, pb := pri(a), pri(b); pa != pb {
-                       return pa > pb
-               } else {
-                       return a.KeepMount.UUID < b.KeepMount.UUID
-               }
-       }
-       sort.Slice(vm.readables, func(i, j int) bool {
-               return less(vm.readables[i], vm.readables[j])
-       })
-       sort.Slice(vm.writables, func(i, j int) bool {
-               return less(vm.writables[i], vm.writables[j])
-       })
-       sort.Slice(vm.mounts, func(i, j int) bool {
-               return less(vm.mounts[i], vm.mounts[j])
-       })
-       return vm, nil
-}
-
-func (vm *RRVolumeManager) Mounts() []*VolumeMount {
-       return vm.mounts
-}
-
-func (vm *RRVolumeManager) Lookup(uuid string, needWrite bool) *VolumeMount {
-       if mnt, ok := vm.mountMap[uuid]; ok && (!needWrite || mnt.AllowWrite) {
-               return mnt
-       }
-       return nil
-}
-
-// AllReadable returns an array of all readable volumes
-func (vm *RRVolumeManager) AllReadable() []*VolumeMount {
-       return vm.readables
-}
-
-// AllWritable returns writable volumes, sorted by priority/uuid. Used
-// by CompareAndTouch to ensure higher-priority volumes are checked
-// first.
-func (vm *RRVolumeManager) AllWritable() []*VolumeMount {
-       return vm.writables
-}
-
-// NextWritable returns writable volumes, rotated by vm.counter so
-// each volume gets a turn to be first. Used by PutBlock to distribute
-// new data across available volumes.
-func (vm *RRVolumeManager) NextWritable() []*VolumeMount {
-       if len(vm.writables) == 0 {
-               return nil
-       }
-       offset := (int(atomic.AddUint32(&vm.counter, 1)) - 1) % len(vm.writables)
-       return append(append([]*VolumeMount(nil), vm.writables[offset:]...), vm.writables[:offset]...)
-}
-
-// VolumeStats returns an ioStats for the given volume.
-func (vm *RRVolumeManager) VolumeStats(v Volume) *ioStats {
-       return vm.iostats[v]
+       // Write an index of all non-trashed blocks available on the
+       // backend device whose hash begins with the given prefix
+       // (prefix is a string of zero or more hexadecimal digits).
+       //
+       // Each block is written as "{hash}+{size} {timestamp}\n"
+       // where timestamp is a decimal-formatted number of
+       // nanoseconds since the UTC Unix epoch.
+       //
+       // Index should abort and return ctx.Err() if ctx is cancelled
+       // before indexing is complete.
+       Index(ctx context.Context, prefix string, writeTo io.Writer) error
 }
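// Illustrative sketch of a caller consuming the interface above (the
// memWriterAt and readBlock names are hypothetical, and the sync import is
// assumed): BlockRead delivers data through an io.WriterAt, which permits
// out-of-order and concurrent writes, so a simple in-memory WriterAt is
// enough to collect a whole block.
type memWriterAt struct {
        mu  sync.Mutex
        buf []byte
}

func (w *memWriterAt) WriteAt(p []byte, off int64) (int, error) {
        w.mu.Lock()
        defer w.mu.Unlock()
        // Blocks are at most BlockSize bytes, so int arithmetic is safe here.
        if need := int(off) + len(p); need > len(w.buf) {
                w.buf = append(w.buf, make([]byte, need-len(w.buf))...)
        }
        copy(w.buf[off:], p)
        return len(p), nil
}

func readBlock(ctx context.Context, v volume, hash string) ([]byte, error) {
        w := &memWriterAt{}
        if err := v.BlockRead(ctx, hash, w); err != nil {
                return nil, err
        }
        return w.buf, nil
}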
 
-// Close the RRVolumeManager
-func (vm *RRVolumeManager) Close() {
-}
+type volumeDriver func(newVolumeParams) (volume, error)
 
-// VolumeStatus describes the current condition of a volume
-type VolumeStatus struct {
-       MountPoint string
-       DeviceNum  uint64
-       BytesFree  uint64
-       BytesUsed  uint64
+type newVolumeParams struct {
+       UUID         string
+       Cluster      *arvados.Cluster
+       ConfigVolume arvados.Volume
+       Logger       logrus.FieldLogger
+       MetricsVecs  *volumeMetricsVecs
+       BufferPool   *bufferPool
 }
 
 // ioStats tracks I/O statistics for a volume or server
@@ -439,7 +106,3 @@ type ioStats struct {
        InBytes    uint64
        OutBytes   uint64
 }
-
-type InternalStatser interface {
-       InternalStats() interface{}
-}
index 21804124316fe2ea9421c25eeeec14c9aab190aa..16084058b7d57f3e8cef4b1ec1063337d8e62f84 100644 (file)
@@ -14,6 +14,7 @@ import (
        "sort"
        "strconv"
        "strings"
+       "sync"
        "time"
 
        "git.arvados.org/arvados.git/sdk/go/arvados"
@@ -39,7 +40,7 @@ type TB interface {
 // A TestableVolumeFactory returns a new TestableVolume. The factory
 // function, and the TestableVolume it returns, can use "t" to write
 // logs, fail the current test, etc.
-type TestableVolumeFactory func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume
+type TestableVolumeFactory func(t TB, params newVolumeParams) TestableVolume
 
 // DoGenericVolumeTests runs a set of tests that every TestableVolume
 // is expected to pass. It calls factory to create a new TestableVolume
@@ -51,16 +52,6 @@ func DoGenericVolumeTests(t TB, readonly bool, factory TestableVolumeFactory) {
        s.testGet(t, factory)
        s.testGetNoSuchBlock(t, factory)
 
-       s.testCompareNonexistent(t, factory)
-       s.testCompareSameContent(t, factory, TestHash, TestBlock)
-       s.testCompareSameContent(t, factory, EmptyHash, EmptyBlock)
-       s.testCompareWithCollision(t, factory, TestHash, TestBlock, []byte("baddata"))
-       s.testCompareWithCollision(t, factory, TestHash, TestBlock, EmptyBlock)
-       s.testCompareWithCollision(t, factory, EmptyHash, EmptyBlock, TestBlock)
-       s.testCompareWithCorruptStoredData(t, factory, TestHash, TestBlock, []byte("baddata"))
-       s.testCompareWithCorruptStoredData(t, factory, TestHash, TestBlock, EmptyBlock)
-       s.testCompareWithCorruptStoredData(t, factory, EmptyHash, EmptyBlock, []byte("baddata"))
-
        if !readonly {
                s.testPutBlockWithSameContent(t, factory, TestHash, TestBlock)
                s.testPutBlockWithSameContent(t, factory, EmptyHash, EmptyBlock)
@@ -76,7 +67,7 @@ func DoGenericVolumeTests(t TB, readonly bool, factory TestableVolumeFactory) {
 
        s.testMtimeNoSuchBlock(t, factory)
 
-       s.testIndexTo(t, factory)
+       s.testIndex(t, factory)
 
        if !readonly {
                s.testDeleteNewBlock(t, factory)
@@ -84,33 +75,24 @@ func DoGenericVolumeTests(t TB, readonly bool, factory TestableVolumeFactory) {
        }
        s.testDeleteNoSuchBlock(t, factory)
 
-       s.testStatus(t, factory)
-
        s.testMetrics(t, readonly, factory)
 
-       s.testString(t, factory)
-
-       if readonly {
-               s.testUpdateReadOnly(t, factory)
-       }
-
        s.testGetConcurrent(t, factory)
        if !readonly {
                s.testPutConcurrent(t, factory)
-
                s.testPutFullBlock(t, factory)
+               s.testTrashUntrash(t, readonly, factory)
+               s.testTrashEmptyTrashUntrash(t, factory)
        }
-
-       s.testTrashUntrash(t, readonly, factory)
-       s.testTrashEmptyTrashUntrash(t, factory)
 }
 
 type genericVolumeSuite struct {
-       cluster  *arvados.Cluster
-       volume   arvados.Volume
-       logger   logrus.FieldLogger
-       metrics  *volumeMetricsVecs
-       registry *prometheus.Registry
+       cluster    *arvados.Cluster
+       volume     arvados.Volume
+       logger     logrus.FieldLogger
+       metrics    *volumeMetricsVecs
+       registry   *prometheus.Registry
+       bufferPool *bufferPool
 }
 
 func (s *genericVolumeSuite) setup(t TB) {
@@ -118,10 +100,18 @@ func (s *genericVolumeSuite) setup(t TB) {
        s.logger = ctxlog.TestLogger(t)
        s.registry = prometheus.NewRegistry()
        s.metrics = newVolumeMetricsVecs(s.registry)
+       s.bufferPool = newBufferPool(s.logger, 8, s.registry)
 }
 
 func (s *genericVolumeSuite) newVolume(t TB, factory TestableVolumeFactory) TestableVolume {
-       return factory(t, s.cluster, s.volume, s.logger, s.metrics)
+       return factory(t, newVolumeParams{
+               UUID:         "zzzzz-nyw5e-999999999999999",
+               Cluster:      s.cluster,
+               ConfigVolume: s.volume,
+               Logger:       s.logger,
+               MetricsVecs:  s.metrics,
+               BufferPool:   s.bufferPool,
+       })
 }
 
 // Put a test block, get it and verify content
@@ -131,95 +121,30 @@ func (s *genericVolumeSuite) testGet(t TB, factory TestableVolumeFactory) {
        v := s.newVolume(t, factory)
        defer v.Teardown()
 
-       v.PutRaw(TestHash, TestBlock)
-
-       buf := make([]byte, BlockSize)
-       n, err := v.Get(context.Background(), TestHash, buf)
+       err := v.BlockWrite(context.Background(), TestHash, TestBlock)
        if err != nil {
-               t.Fatal(err)
-       }
-
-       if bytes.Compare(buf[:n], TestBlock) != 0 {
-               t.Errorf("expected %s, got %s", string(TestBlock), string(buf))
-       }
-}
-
-// Invoke get on a block that does not exist in volume; should result in error
-// Test should pass for both writable and read-only volumes
-func (s *genericVolumeSuite) testGetNoSuchBlock(t TB, factory TestableVolumeFactory) {
-       s.setup(t)
-       v := s.newVolume(t, factory)
-       defer v.Teardown()
-
-       buf := make([]byte, BlockSize)
-       if _, err := v.Get(context.Background(), TestHash2, buf); err == nil {
-               t.Errorf("Expected error while getting non-existing block %v", TestHash2)
-       }
-}
-
-// Compare() should return os.ErrNotExist if the block does not exist.
-// Otherwise, writing new data causes CompareAndTouch() to generate
-// error logs even though everything is working fine.
-func (s *genericVolumeSuite) testCompareNonexistent(t TB, factory TestableVolumeFactory) {
-       s.setup(t)
-       v := s.newVolume(t, factory)
-       defer v.Teardown()
-
-       err := v.Compare(context.Background(), TestHash, TestBlock)
-       if err != os.ErrNotExist {
-               t.Errorf("Got err %T %q, expected os.ErrNotExist", err, err)
+               t.Error(err)
        }
-}
 
-// Put a test block and compare the locator with same content
-// Test should pass for both writable and read-only volumes
-func (s *genericVolumeSuite) testCompareSameContent(t TB, factory TestableVolumeFactory, testHash string, testData []byte) {
-       s.setup(t)
-       v := s.newVolume(t, factory)
-       defer v.Teardown()
-
-       v.PutRaw(testHash, testData)
-
-       // Compare the block locator with same content
-       err := v.Compare(context.Background(), testHash, testData)
+       buf := &brbuffer{}
+       err = v.BlockRead(context.Background(), TestHash, buf)
        if err != nil {
-               t.Errorf("Got err %q, expected nil", err)
+               t.Error(err)
        }
-}
-
-// Test behavior of Compare() when stored data matches expected
-// checksum but differs from new data we need to store. Requires
-// testHash = md5(testDataA).
-//
-// Test should pass for both writable and read-only volumes
-func (s *genericVolumeSuite) testCompareWithCollision(t TB, factory TestableVolumeFactory, testHash string, testDataA, testDataB []byte) {
-       s.setup(t)
-       v := s.newVolume(t, factory)
-       defer v.Teardown()
-
-       v.PutRaw(testHash, testDataA)
-
-       // Compare the block locator with different content; collision
-       err := v.Compare(context.Background(), TestHash, testDataB)
-       if err == nil {
-               t.Errorf("Got err nil, expected error due to collision")
+       if bytes.Compare(buf.Bytes(), TestBlock) != 0 {
+               t.Errorf("expected %s, got %s", "foo", buf.String())
        }
 }
 
-// Test behavior of Compare() when stored data has become
-// corrupted. Requires testHash = md5(testDataA) != md5(testDataB).
-//
+// Invoke get on a block that does not exist in volume; should result in error
 // Test should pass for both writable and read-only volumes
-func (s *genericVolumeSuite) testCompareWithCorruptStoredData(t TB, factory TestableVolumeFactory, testHash string, testDataA, testDataB []byte) {
+func (s *genericVolumeSuite) testGetNoSuchBlock(t TB, factory TestableVolumeFactory) {
        s.setup(t)
        v := s.newVolume(t, factory)
        defer v.Teardown()
 
-       v.PutRaw(TestHash, testDataB)
-
-       err := v.Compare(context.Background(), testHash, testDataA)
-       if err == nil || err == CollisionError {
-               t.Errorf("Got err %+v, expected non-collision error", err)
+       if err := v.BlockRead(context.Background(), barHash, brdiscard); err == nil {
+               t.Errorf("Expected error while getting non-existing block %v", barHash)
        }
 }
 
@@ -230,12 +155,12 @@ func (s *genericVolumeSuite) testPutBlockWithSameContent(t TB, factory TestableV
        v := s.newVolume(t, factory)
        defer v.Teardown()
 
-       err := v.Put(context.Background(), testHash, testData)
+       err := v.BlockWrite(context.Background(), testHash, testData)
        if err != nil {
                t.Errorf("Got err putting block %q: %q, expected nil", TestBlock, err)
        }
 
-       err = v.Put(context.Background(), testHash, testData)
+       err = v.BlockWrite(context.Background(), testHash, testData)
        if err != nil {
                t.Errorf("Got err putting block second time %q: %q, expected nil", TestBlock, err)
        }
@@ -248,23 +173,23 @@ func (s *genericVolumeSuite) testPutBlockWithDifferentContent(t TB, factory Test
        v := s.newVolume(t, factory)
        defer v.Teardown()
 
-       v.PutRaw(testHash, testDataA)
+       v.BlockWrite(context.Background(), testHash, testDataA)
 
-       putErr := v.Put(context.Background(), testHash, testDataB)
-       buf := make([]byte, BlockSize)
-       n, getErr := v.Get(context.Background(), testHash, buf)
+       putErr := v.BlockWrite(context.Background(), testHash, testDataB)
+       buf := &brbuffer{}
+       getErr := v.BlockRead(context.Background(), testHash, buf)
        if putErr == nil {
                // Put must not return a nil error unless it has
                // overwritten the existing data.
-               if bytes.Compare(buf[:n], testDataB) != 0 {
-                       t.Errorf("Put succeeded but Get returned %+q, expected %+q", buf[:n], testDataB)
+               if buf.String() != string(testDataB) {
+                       t.Errorf("Put succeeded but Get returned %+q, expected %+q", buf, testDataB)
                }
        } else {
                // It is permissible for Put to fail, but it must
                // leave us with either the original data, the new
                // data, or nothing at all.
-               if getErr == nil && bytes.Compare(buf[:n], testDataA) != 0 && bytes.Compare(buf[:n], testDataB) != 0 {
-                       t.Errorf("Put failed but Get returned %+q, which is neither %+q nor %+q", buf[:n], testDataA, testDataB)
+               if getErr == nil && buf.String() != string(testDataA) && buf.String() != string(testDataB) {
+                       t.Errorf("Put failed but Get returned %+q, which is neither %+q nor %+q", buf, testDataA, testDataB)
                }
        }
 }
@@ -276,46 +201,48 @@ func (s *genericVolumeSuite) testPutMultipleBlocks(t TB, factory TestableVolumeF
        v := s.newVolume(t, factory)
        defer v.Teardown()
 
-       err := v.Put(context.Background(), TestHash, TestBlock)
+       err := v.BlockWrite(context.Background(), TestHash, TestBlock)
        if err != nil {
                t.Errorf("Got err putting block %q: %q, expected nil", TestBlock, err)
        }
 
-       err = v.Put(context.Background(), TestHash2, TestBlock2)
+       err = v.BlockWrite(context.Background(), TestHash2, TestBlock2)
        if err != nil {
                t.Errorf("Got err putting block %q: %q, expected nil", TestBlock2, err)
        }
 
-       err = v.Put(context.Background(), TestHash3, TestBlock3)
+       err = v.BlockWrite(context.Background(), TestHash3, TestBlock3)
        if err != nil {
                t.Errorf("Got err putting block %q: %q, expected nil", TestBlock3, err)
        }
 
-       data := make([]byte, BlockSize)
-       n, err := v.Get(context.Background(), TestHash, data)
+       buf := &brbuffer{}
+       err = v.BlockRead(context.Background(), TestHash, buf)
        if err != nil {
                t.Error(err)
        } else {
-               if bytes.Compare(data[:n], TestBlock) != 0 {
-                       t.Errorf("Block present, but got %+q, expected %+q", data[:n], TestBlock)
+               if bytes.Compare(buf.Bytes(), TestBlock) != 0 {
+                       t.Errorf("Block present, but got %+q, expected %+q", buf, TestBlock)
                }
        }
 
-       n, err = v.Get(context.Background(), TestHash2, data)
+       buf.Reset()
+       err = v.BlockRead(context.Background(), TestHash2, buf)
        if err != nil {
                t.Error(err)
        } else {
-               if bytes.Compare(data[:n], TestBlock2) != 0 {
-                       t.Errorf("Block present, but got %+q, expected %+q", data[:n], TestBlock2)
+               if bytes.Compare(buf.Bytes(), TestBlock2) != 0 {
+                       t.Errorf("Block present, but got %+q, expected %+q", buf, TestBlock2)
                }
        }
 
-       n, err = v.Get(context.Background(), TestHash3, data)
+       buf.Reset()
+       err = v.BlockRead(context.Background(), TestHash3, buf)
        if err != nil {
                t.Error(err)
        } else {
-               if bytes.Compare(data[:n], TestBlock3) != 0 {
-                       t.Errorf("Block present, but to %+q, expected %+q", data[:n], TestBlock3)
+               if bytes.Compare(buf.Bytes(), TestBlock3) != 0 {
+                       t.Errorf("Block present, but to %+q, expected %+q", buf, TestBlock3)
                }
        }
 }
@@ -328,13 +255,13 @@ func (s *genericVolumeSuite) testPutAndTouch(t TB, factory TestableVolumeFactory
        v := s.newVolume(t, factory)
        defer v.Teardown()
 
-       if err := v.Put(context.Background(), TestHash, TestBlock); err != nil {
+       if err := v.BlockWrite(context.Background(), TestHash, TestBlock); err != nil {
                t.Error(err)
        }
 
        // We'll verify { t0 < threshold < t1 }, where t0 is the
-       // existing block's timestamp on disk before Put() and t1 is
-       // its timestamp after Put().
+       // existing block's timestamp on disk before BlockWrite() and t1 is
+       // its timestamp after BlockWrite().
        threshold := time.Now().Add(-time.Second)
 
        // Set the stored block's mtime far enough in the past that we
@@ -348,7 +275,7 @@ func (s *genericVolumeSuite) testPutAndTouch(t TB, factory TestableVolumeFactory
        }
 
        // Write the same block again.
-       if err := v.Put(context.Background(), TestHash, TestBlock); err != nil {
+       if err := v.BlockWrite(context.Background(), TestHash, TestBlock); err != nil {
                t.Error(err)
        }
 
@@ -367,7 +294,7 @@ func (s *genericVolumeSuite) testTouchNoSuchBlock(t TB, factory TestableVolumeFa
        v := s.newVolume(t, factory)
        defer v.Teardown()
 
-       if err := v.Touch(TestHash); err == nil {
+       if err := v.BlockTouch(TestHash); err == nil {
                t.Error("Expected error when attempted to touch a non-existing block")
        }
 }
@@ -384,12 +311,12 @@ func (s *genericVolumeSuite) testMtimeNoSuchBlock(t TB, factory TestableVolumeFa
        }
 }
 
-// Put a few blocks and invoke IndexTo with:
+// Put a few blocks and invoke Index with:
 // * no prefix
 // * with a prefix
 // * with no such prefix
 // Test should pass for both writable and read-only volumes
-func (s *genericVolumeSuite) testIndexTo(t TB, factory TestableVolumeFactory) {
+func (s *genericVolumeSuite) testIndex(t TB, factory TestableVolumeFactory) {
        s.setup(t)
        v := s.newVolume(t, factory)
        defer v.Teardown()
@@ -400,9 +327,9 @@ func (s *genericVolumeSuite) testIndexTo(t TB, factory TestableVolumeFactory) {
        minMtime := time.Now().UTC().UnixNano()
        minMtime -= minMtime % 1e9
 
-       v.PutRaw(TestHash, TestBlock)
-       v.PutRaw(TestHash2, TestBlock2)
-       v.PutRaw(TestHash3, TestBlock3)
+       v.BlockWrite(context.Background(), TestHash, TestBlock)
+       v.BlockWrite(context.Background(), TestHash2, TestBlock2)
+       v.BlockWrite(context.Background(), TestHash3, TestBlock3)
 
        maxMtime := time.Now().UTC().UnixNano()
        if maxMtime%1e9 > 0 {
@@ -412,13 +339,13 @@ func (s *genericVolumeSuite) testIndexTo(t TB, factory TestableVolumeFactory) {
 
        // Blocks whose names aren't Keep hashes should be omitted from
        // index
-       v.PutRaw("fffffffffnotreallyahashfffffffff", nil)
-       v.PutRaw("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", nil)
-       v.PutRaw("f0000000000000000000000000000000f", nil)
-       v.PutRaw("f00", nil)
+       v.BlockWrite(context.Background(), "fffffffffnotreallyahashfffffffff", nil)
+       v.BlockWrite(context.Background(), "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", nil)
+       v.BlockWrite(context.Background(), "f0000000000000000000000000000000f", nil)
+       v.BlockWrite(context.Background(), "f00", nil)
 
        buf := new(bytes.Buffer)
-       v.IndexTo("", buf)
+       v.Index(context.Background(), "", buf)
        indexRows := strings.Split(string(buf.Bytes()), "\n")
        sort.Strings(indexRows)
        sortedIndex := strings.Join(indexRows, "\n")
@@ -441,7 +368,7 @@ func (s *genericVolumeSuite) testIndexTo(t TB, factory TestableVolumeFactory) {
 
        for _, prefix := range []string{"f", "f15", "f15ac"} {
                buf = new(bytes.Buffer)
-               v.IndexTo(prefix, buf)
+               v.Index(context.Background(), prefix, buf)
 
                m, err := regexp.MatchString(`^`+TestHash2+`\+\d+ \d+\n$`, string(buf.Bytes()))
                if err != nil {
@@ -453,11 +380,11 @@ func (s *genericVolumeSuite) testIndexTo(t TB, factory TestableVolumeFactory) {
 
        for _, prefix := range []string{"zero", "zip", "zilch"} {
                buf = new(bytes.Buffer)
-               err := v.IndexTo(prefix, buf)
+               err := v.Index(context.Background(), prefix, buf)
                if err != nil {
-                       t.Errorf("Got error on IndexTo with no such prefix %v", err.Error())
+                       t.Errorf("Got error on Index with no such prefix %v", err.Error())
                } else if buf.Len() != 0 {
-                       t.Errorf("Expected empty list for IndexTo with no such prefix %s", prefix)
+                       t.Errorf("Expected empty list for Index with no such prefix %s", prefix)
                }
        }
 }
@@ -471,17 +398,17 @@ func (s *genericVolumeSuite) testDeleteNewBlock(t TB, factory TestableVolumeFact
        v := s.newVolume(t, factory)
        defer v.Teardown()
 
-       v.Put(context.Background(), TestHash, TestBlock)
+       v.BlockWrite(context.Background(), TestHash, TestBlock)
 
-       if err := v.Trash(TestHash); err != nil {
+       if err := v.BlockTrash(TestHash); err != nil {
                t.Error(err)
        }
-       data := make([]byte, BlockSize)
-       n, err := v.Get(context.Background(), TestHash, data)
+       buf := &brbuffer{}
+       err := v.BlockRead(context.Background(), TestHash, buf)
        if err != nil {
                t.Error(err)
-       } else if bytes.Compare(data[:n], TestBlock) != 0 {
-               t.Errorf("Got data %+q, expected %+q", data[:n], TestBlock)
+       } else if buf.String() != string(TestBlock) {
+               t.Errorf("Got data %+q, expected %+q", buf.String(), TestBlock)
        }
 }
 
@@ -494,36 +421,30 @@ func (s *genericVolumeSuite) testDeleteOldBlock(t TB, factory TestableVolumeFact
        v := s.newVolume(t, factory)
        defer v.Teardown()
 
-       v.Put(context.Background(), TestHash, TestBlock)
+       v.BlockWrite(context.Background(), TestHash, TestBlock)
        v.TouchWithDate(TestHash, time.Now().Add(-2*s.cluster.Collections.BlobSigningTTL.Duration()))
 
-       if err := v.Trash(TestHash); err != nil {
+       if err := v.BlockTrash(TestHash); err != nil {
                t.Error(err)
        }
-       data := make([]byte, BlockSize)
-       if _, err := v.Get(context.Background(), TestHash, data); err == nil || !os.IsNotExist(err) {
+       if err := v.BlockRead(context.Background(), TestHash, brdiscard); err == nil || !os.IsNotExist(err) {
                t.Errorf("os.IsNotExist(%v) should have been true", err)
        }
 
        _, err := v.Mtime(TestHash)
        if err == nil || !os.IsNotExist(err) {
-               t.Fatalf("os.IsNotExist(%v) should have been true", err)
-       }
-
-       err = v.Compare(context.Background(), TestHash, TestBlock)
-       if err == nil || !os.IsNotExist(err) {
-               t.Fatalf("os.IsNotExist(%v) should have been true", err)
+               t.Errorf("os.IsNotExist(%v) should have been true", err)
        }
 
        indexBuf := new(bytes.Buffer)
-       v.IndexTo("", indexBuf)
+       v.Index(context.Background(), "", indexBuf)
        if strings.Contains(string(indexBuf.Bytes()), TestHash) {
-               t.Fatalf("Found trashed block in IndexTo")
+               t.Errorf("Found trashed block in Index")
        }
 
-       err = v.Touch(TestHash)
+       err = v.BlockTouch(TestHash)
        if err == nil || !os.IsNotExist(err) {
-               t.Fatalf("os.IsNotExist(%v) should have been true", err)
+               t.Errorf("os.IsNotExist(%v) should have been true", err)
        }
 }
 
@@ -534,33 +455,11 @@ func (s *genericVolumeSuite) testDeleteNoSuchBlock(t TB, factory TestableVolumeF
        v := s.newVolume(t, factory)
        defer v.Teardown()
 
-       if err := v.Trash(TestHash2); err == nil {
+       if err := v.BlockTrash(TestHash2); err == nil {
                t.Errorf("Expected error when attempting to delete a non-existing block")
        }
 }
 
-// Invoke Status and verify that VolumeStatus is returned
-// Test should pass for both writable and read-only volumes
-func (s *genericVolumeSuite) testStatus(t TB, factory TestableVolumeFactory) {
-       s.setup(t)
-       v := s.newVolume(t, factory)
-       defer v.Teardown()
-
-       // Get node status and make a basic sanity check.
-       status := v.Status()
-       if status.DeviceNum == 0 {
-               t.Errorf("uninitialized device_num in %v", status)
-       }
-
-       if status.BytesFree == 0 {
-               t.Errorf("uninitialized bytes_free in %v", status)
-       }
-
-       if status.BytesUsed == 0 {
-               t.Errorf("uninitialized bytes_used in %v", status)
-       }
-}
-
 func getValueFrom(cv *prometheus.CounterVec, lbls prometheus.Labels) float64 {
        c, _ := cv.GetMetricWith(lbls)
        pb := &dto.Metric{}
@@ -575,7 +474,7 @@ func (s *genericVolumeSuite) testMetrics(t TB, readonly bool, factory TestableVo
        v := s.newVolume(t, factory)
        defer v.Teardown()
 
-       opsC, _, ioC := s.metrics.getCounterVecsFor(prometheus.Labels{"device_id": v.GetDeviceID()})
+       opsC, _, ioC := s.metrics.getCounterVecsFor(prometheus.Labels{"device_id": v.DeviceID()})
 
        if ioC == nil {
                t.Error("ioBytes CounterVec is nil")
@@ -600,7 +499,7 @@ func (s *genericVolumeSuite) testMetrics(t TB, readonly bool, factory TestableVo
 
        // Test Put if volume is writable
        if !readonly {
-               err = v.Put(context.Background(), TestHash, TestBlock)
+               err = v.BlockWrite(context.Background(), TestHash, TestBlock)
                if err != nil {
                        t.Errorf("Got err putting block %q: %q, expected nil", TestBlock, err)
                }
@@ -614,13 +513,12 @@ func (s *genericVolumeSuite) testMetrics(t TB, readonly bool, factory TestableVo
                        t.Error("ioBytes{direction=out} counter shouldn't be zero")
                }
        } else {
-               v.PutRaw(TestHash, TestBlock)
+               v.BlockWrite(context.Background(), TestHash, TestBlock)
        }
 
-       buf := make([]byte, BlockSize)
-       _, err = v.Get(context.Background(), TestHash, buf)
+       err = v.BlockRead(context.Background(), TestHash, brdiscard)
        if err != nil {
-               t.Fatal(err)
+               t.Error(err)
        }
 
        // Check that the operations counter increased
@@ -634,63 +532,6 @@ func (s *genericVolumeSuite) testMetrics(t TB, readonly bool, factory TestableVo
        }
 }
 
-// Invoke String for the volume; expect non-empty result
-// Test should pass for both writable and read-only volumes
-func (s *genericVolumeSuite) testString(t TB, factory TestableVolumeFactory) {
-       s.setup(t)
-       v := s.newVolume(t, factory)
-       defer v.Teardown()
-
-       if id := v.String(); len(id) == 0 {
-               t.Error("Got empty string for v.String()")
-       }
-}
-
-// Putting, updating, touching, and deleting blocks from a read-only volume result in error.
-// Test is intended for only read-only volumes
-func (s *genericVolumeSuite) testUpdateReadOnly(t TB, factory TestableVolumeFactory) {
-       s.setup(t)
-       v := s.newVolume(t, factory)
-       defer v.Teardown()
-
-       v.PutRaw(TestHash, TestBlock)
-       buf := make([]byte, BlockSize)
-
-       // Get from read-only volume should succeed
-       _, err := v.Get(context.Background(), TestHash, buf)
-       if err != nil {
-               t.Errorf("got err %v, expected nil", err)
-       }
-
-       // Put a new block to read-only volume should result in error
-       err = v.Put(context.Background(), TestHash2, TestBlock2)
-       if err == nil {
-               t.Errorf("Expected error when putting block in a read-only volume")
-       }
-       _, err = v.Get(context.Background(), TestHash2, buf)
-       if err == nil {
-               t.Errorf("Expected error when getting block whose put in read-only volume failed")
-       }
-
-       // Touch a block in read-only volume should result in error
-       err = v.Touch(TestHash)
-       if err == nil {
-               t.Errorf("Expected error when touching block in a read-only volume")
-       }
-
-       // Delete a block from a read-only volume should result in error
-       err = v.Trash(TestHash)
-       if err == nil {
-               t.Errorf("Expected error when deleting block from a read-only volume")
-       }
-
-       // Overwriting an existing block in read-only volume should result in error
-       err = v.Put(context.Background(), TestHash, TestBlock)
-       if err == nil {
-               t.Errorf("Expected error when putting block in a read-only volume")
-       }
-}
-
 // Launch concurrent Gets
 // Test should pass for both writable and read-only volumes
 func (s *genericVolumeSuite) testGetConcurrent(t TB, factory TestableVolumeFactory) {
@@ -698,43 +539,43 @@ func (s *genericVolumeSuite) testGetConcurrent(t TB, factory TestableVolumeFacto
        v := s.newVolume(t, factory)
        defer v.Teardown()
 
-       v.PutRaw(TestHash, TestBlock)
-       v.PutRaw(TestHash2, TestBlock2)
-       v.PutRaw(TestHash3, TestBlock3)
+       v.BlockWrite(context.Background(), TestHash, TestBlock)
+       v.BlockWrite(context.Background(), TestHash2, TestBlock2)
+       v.BlockWrite(context.Background(), TestHash3, TestBlock3)
 
        sem := make(chan int)
        go func() {
-               buf := make([]byte, BlockSize)
-               n, err := v.Get(context.Background(), TestHash, buf)
+               buf := &brbuffer{}
+               err := v.BlockRead(context.Background(), TestHash, buf)
                if err != nil {
                        t.Errorf("err1: %v", err)
                }
-               if bytes.Compare(buf[:n], TestBlock) != 0 {
-                       t.Errorf("buf should be %s, is %s", string(TestBlock), string(buf[:n]))
+               if buf.String() != string(TestBlock) {
+                       t.Errorf("buf should be %s, is %s", TestBlock, buf)
                }
                sem <- 1
        }()
 
        go func() {
-               buf := make([]byte, BlockSize)
-               n, err := v.Get(context.Background(), TestHash2, buf)
+               buf := &brbuffer{}
+               err := v.BlockRead(context.Background(), TestHash2, buf)
                if err != nil {
                        t.Errorf("err2: %v", err)
                }
-               if bytes.Compare(buf[:n], TestBlock2) != 0 {
-                       t.Errorf("buf should be %s, is %s", string(TestBlock2), string(buf[:n]))
+               if buf.String() != string(TestBlock2) {
+                       t.Errorf("buf should be %s, is %s", TestBlock2, buf)
                }
                sem <- 1
        }()
 
        go func() {
-               buf := make([]byte, BlockSize)
-               n, err := v.Get(context.Background(), TestHash3, buf)
+               buf := &brbuffer{}
+               err := v.BlockRead(context.Background(), TestHash3, buf)
                if err != nil {
                        t.Errorf("err3: %v", err)
                }
-               if bytes.Compare(buf[:n], TestBlock3) != 0 {
-                       t.Errorf("buf should be %s, is %s", string(TestBlock3), string(buf[:n]))
+               if buf.String() != string(TestBlock3) {
+                       t.Errorf("buf should be %s, is %s", TestBlock3, buf)
                }
                sem <- 1
        }()
@@ -752,60 +593,38 @@ func (s *genericVolumeSuite) testPutConcurrent(t TB, factory TestableVolumeFacto
        v := s.newVolume(t, factory)
        defer v.Teardown()
 
-       sem := make(chan int)
-       go func(sem chan int) {
-               err := v.Put(context.Background(), TestHash, TestBlock)
+       blks := []struct {
+               hash string
+               data []byte
+       }{
+               {hash: TestHash, data: TestBlock},
+               {hash: TestHash2, data: TestBlock2},
+               {hash: TestHash3, data: TestBlock3},
+       }
+
+       var wg sync.WaitGroup
+       for _, blk := range blks {
+               blk := blk
+               wg.Add(1)
+               go func() {
+                       defer wg.Done()
+                       err := v.BlockWrite(context.Background(), blk.hash, blk.data)
+                       if err != nil {
+                               t.Errorf("%s: %v", blk.hash, err)
+                       }
+               }()
+       }
+       wg.Wait()
+
+       // Check that we actually wrote the blocks.
+       for _, blk := range blks {
+               buf := &brbuffer{}
+               err := v.BlockRead(context.Background(), blk.hash, buf)
                if err != nil {
-                       t.Errorf("err1: %v", err)
+                       t.Errorf("get %s: %v", blk.hash, err)
+               } else if buf.String() != string(blk.data) {
+                       t.Errorf("get %s: expected %s, got %s", blk.hash, blk.data, buf)
                }
-               sem <- 1
-       }(sem)
-
-       go func(sem chan int) {
-               err := v.Put(context.Background(), TestHash2, TestBlock2)
-               if err != nil {
-                       t.Errorf("err2: %v", err)
-               }
-               sem <- 1
-       }(sem)
-
-       go func(sem chan int) {
-               err := v.Put(context.Background(), TestHash3, TestBlock3)
-               if err != nil {
-                       t.Errorf("err3: %v", err)
-               }
-               sem <- 1
-       }(sem)
-
-       // Wait for all goroutines to finish
-       for done := 0; done < 3; done++ {
-               <-sem
-       }
-
-       // Double check that we actually wrote the blocks we expected to write.
-       buf := make([]byte, BlockSize)
-       n, err := v.Get(context.Background(), TestHash, buf)
-       if err != nil {
-               t.Errorf("Get #1: %v", err)
-       }
-       if bytes.Compare(buf[:n], TestBlock) != 0 {
-               t.Errorf("Get #1: expected %s, got %s", string(TestBlock), string(buf[:n]))
-       }
-
-       n, err = v.Get(context.Background(), TestHash2, buf)
-       if err != nil {
-               t.Errorf("Get #2: %v", err)
-       }
-       if bytes.Compare(buf[:n], TestBlock2) != 0 {
-               t.Errorf("Get #2: expected %s, got %s", string(TestBlock2), string(buf[:n]))
-       }
-
-       n, err = v.Get(context.Background(), TestHash3, buf)
-       if err != nil {
-               t.Errorf("Get #3: %v", err)
-       }
-       if bytes.Compare(buf[:n], TestBlock3) != 0 {
-               t.Errorf("Get #3: expected %s, got %s", string(TestBlock3), string(buf[:n]))
        }
 }
 
@@ -819,17 +638,18 @@ func (s *genericVolumeSuite) testPutFullBlock(t TB, factory TestableVolumeFactor
        wdata[0] = 'a'
        wdata[BlockSize-1] = 'z'
        hash := fmt.Sprintf("%x", md5.Sum(wdata))
-       err := v.Put(context.Background(), hash, wdata)
+       err := v.BlockWrite(context.Background(), hash, wdata)
        if err != nil {
-               t.Fatal(err)
+               t.Error(err)
        }
-       buf := make([]byte, BlockSize)
-       n, err := v.Get(context.Background(), hash, buf)
+
+       buf := &brbuffer{}
+       err = v.BlockRead(context.Background(), hash, buf)
        if err != nil {
                t.Error(err)
        }
-       if bytes.Compare(buf[:n], wdata) != 0 {
-               t.Error("buf %+q != wdata %+q", buf[:n], wdata)
+       if buf.String() != string(wdata) {
+               t.Errorf("buf (len %d) != wdata (len %d)", buf.Len(), len(wdata))
        }
 }
 
@@ -844,48 +664,44 @@ func (s *genericVolumeSuite) testTrashUntrash(t TB, readonly bool, factory Testa
        defer v.Teardown()
 
        // put block and backdate it
-       v.PutRaw(TestHash, TestBlock)
+       v.BlockWrite(context.Background(), TestHash, TestBlock)
        v.TouchWithDate(TestHash, time.Now().Add(-2*s.cluster.Collections.BlobSigningTTL.Duration()))
 
-       buf := make([]byte, BlockSize)
-       n, err := v.Get(context.Background(), TestHash, buf)
+       buf := &brbuffer{}
+       err := v.BlockRead(context.Background(), TestHash, buf)
        if err != nil {
-               t.Fatal(err)
+               t.Error(err)
        }
-       if bytes.Compare(buf[:n], TestBlock) != 0 {
-               t.Errorf("Got data %+q, expected %+q", buf[:n], TestBlock)
+       if buf.String() != string(TestBlock) {
+               t.Errorf("Got data %+q, expected %+q", buf, TestBlock)
        }
 
        // Trash
-       err = v.Trash(TestHash)
-       if readonly {
-               if err != MethodDisabledError {
-                       t.Fatal(err)
-               }
-       } else if err != nil {
-               if err != ErrNotImplemented {
-                       t.Fatal(err)
-               }
-       } else {
-               _, err = v.Get(context.Background(), TestHash, buf)
-               if err == nil || !os.IsNotExist(err) {
-                       t.Errorf("os.IsNotExist(%v) should have been true", err)
-               }
+       err = v.BlockTrash(TestHash)
+       if err != nil {
+               t.Error(err)
+               return
+       }
+       buf.Reset()
+       err = v.BlockRead(context.Background(), TestHash, buf)
+       if err == nil || !os.IsNotExist(err) {
+               t.Errorf("os.IsNotExist(%v) should have been true", err)
+       }
 
-               // Untrash
-               err = v.Untrash(TestHash)
-               if err != nil {
-                       t.Fatal(err)
-               }
+       // Untrash
+       err = v.BlockUntrash(TestHash)
+       if err != nil {
+               t.Error(err)
        }
 
        // Get the block - after trash and untrash sequence
-       n, err = v.Get(context.Background(), TestHash, buf)
+       buf.Reset()
+       err = v.BlockRead(context.Background(), TestHash, buf)
        if err != nil {
-               t.Fatal(err)
+               t.Error(err)
        }
-       if bytes.Compare(buf[:n], TestBlock) != 0 {
-               t.Errorf("Got data %+q, expected %+q", buf[:n], TestBlock)
+       if buf.String() != string(TestBlock) {
+               t.Errorf("Got data %+q, expected %+q", buf, TestBlock)
        }
 }
 
@@ -895,13 +711,13 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
        defer v.Teardown()
 
        checkGet := func() error {
-               buf := make([]byte, BlockSize)
-               n, err := v.Get(context.Background(), TestHash, buf)
+               buf := &brbuffer{}
+               err := v.BlockRead(context.Background(), TestHash, buf)
                if err != nil {
                        return err
                }
-               if bytes.Compare(buf[:n], TestBlock) != 0 {
-                       t.Fatalf("Got data %+q, expected %+q", buf[:n], TestBlock)
+               if buf.String() != string(TestBlock) {
+                       t.Errorf("Got data %+q, expected %+q", buf, TestBlock)
                }
 
                _, err = v.Mtime(TestHash)
@@ -909,13 +725,8 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
                        return err
                }
 
-               err = v.Compare(context.Background(), TestHash, TestBlock)
-               if err != nil {
-                       return err
-               }
-
                indexBuf := new(bytes.Buffer)
-               v.IndexTo("", indexBuf)
+               v.Index(context.Background(), "", indexBuf)
                if !strings.Contains(string(indexBuf.Bytes()), TestHash) {
                        return os.ErrNotExist
                }
@@ -927,50 +738,47 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 
        s.cluster.Collections.BlobTrashLifetime.Set("1h")
 
-       v.PutRaw(TestHash, TestBlock)
+       v.BlockWrite(context.Background(), TestHash, TestBlock)
        v.TouchWithDate(TestHash, time.Now().Add(-2*s.cluster.Collections.BlobSigningTTL.Duration()))
 
        err := checkGet()
        if err != nil {
-               t.Fatal(err)
+               t.Error(err)
        }
 
        // Trash the block
-       err = v.Trash(TestHash)
-       if err == MethodDisabledError || err == ErrNotImplemented {
-               // Skip the trash tests for read-only volumes, and
-               // volume types that don't support
-               // BlobTrashLifetime>0.
-               return
+       err = v.BlockTrash(TestHash)
+       if err != nil {
+               t.Error(err)
        }
 
        err = checkGet()
        if err == nil || !os.IsNotExist(err) {
-               t.Fatalf("os.IsNotExist(%v) should have been true", err)
+               t.Errorf("os.IsNotExist(%v) should have been true", err)
        }
 
-       err = v.Touch(TestHash)
+       err = v.BlockTouch(TestHash)
        if err == nil || !os.IsNotExist(err) {
-               t.Fatalf("os.IsNotExist(%v) should have been true", err)
+               t.Errorf("os.IsNotExist(%v) should have been true", err)
        }
 
        v.EmptyTrash()
 
        // Even after emptying the trash, we can untrash our block
        // because the deadline hasn't been reached.
-       err = v.Untrash(TestHash)
+       err = v.BlockUntrash(TestHash)
        if err != nil {
-               t.Fatal(err)
+               t.Error(err)
        }
 
        err = checkGet()
        if err != nil {
-               t.Fatal(err)
+               t.Error(err)
        }
 
-       err = v.Touch(TestHash)
+       err = v.BlockTouch(TestHash)
        if err != nil {
-               t.Fatal(err)
+               t.Error(err)
        }
 
        // Because we Touch'ed, need to backdate again for next set of tests
@@ -979,16 +787,16 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
        // If the only block in the trash has already been untrashed,
        // most volumes will fail a subsequent Untrash with a 404, but
        // it's also acceptable for Untrash to succeed.
-       err = v.Untrash(TestHash)
+       err = v.BlockUntrash(TestHash)
        if err != nil && !os.IsNotExist(err) {
-               t.Fatalf("Expected success or os.IsNotExist(), but got: %v", err)
+               t.Errorf("Expected success or os.IsNotExist(), but got: %v", err)
        }
 
        // The additional Untrash should not interfere with our
        // already-untrashed copy.
        err = checkGet()
        if err != nil {
-               t.Fatal(err)
+               t.Error(err)
        }
 
        // Untrash might have updated the timestamp, so backdate again
@@ -998,74 +806,74 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 
        s.cluster.Collections.BlobTrashLifetime.Set("1ns")
 
-       err = v.Trash(TestHash)
+       err = v.BlockTrash(TestHash)
        if err != nil {
-               t.Fatal(err)
+               t.Error(err)
        }
        err = checkGet()
        if err == nil || !os.IsNotExist(err) {
-               t.Fatalf("os.IsNotExist(%v) should have been true", err)
+               t.Errorf("os.IsNotExist(%v) should have been true", err)
        }
 
        // Even though 1ns has passed, we can untrash because we
        // haven't called EmptyTrash yet.
-       err = v.Untrash(TestHash)
+       err = v.BlockUntrash(TestHash)
        if err != nil {
-               t.Fatal(err)
+               t.Error(err)
        }
        err = checkGet()
        if err != nil {
-               t.Fatal(err)
+               t.Error(err)
        }
 
        // Trash it again, and this time call EmptyTrash so it really
        // goes away.
        // (In Azure volumes, un/trash changes Mtime, so first backdate again)
        v.TouchWithDate(TestHash, time.Now().Add(-2*s.cluster.Collections.BlobSigningTTL.Duration()))
-       _ = v.Trash(TestHash)
+       _ = v.BlockTrash(TestHash)
        err = checkGet()
        if err == nil || !os.IsNotExist(err) {
-               t.Fatalf("os.IsNotExist(%v) should have been true", err)
+               t.Errorf("os.IsNotExist(%v) should have been true", err)
        }
        v.EmptyTrash()
 
        // Untrash won't find it
-       err = v.Untrash(TestHash)
+       err = v.BlockUntrash(TestHash)
        if err == nil || !os.IsNotExist(err) {
-               t.Fatalf("os.IsNotExist(%v) should have been true", err)
+               t.Errorf("os.IsNotExist(%v) should have been true", err)
        }
 
        // Get block won't find it
        err = checkGet()
        if err == nil || !os.IsNotExist(err) {
-               t.Fatalf("os.IsNotExist(%v) should have been true", err)
+               t.Errorf("os.IsNotExist(%v) should have been true", err)
        }
 
        // Third set: If the same data block gets written again after
        // being trashed, and then the trash gets emptied, the newer
        // un-trashed copy doesn't get deleted along with it.
 
-       v.PutRaw(TestHash, TestBlock)
+       v.BlockWrite(context.Background(), TestHash, TestBlock)
        v.TouchWithDate(TestHash, time.Now().Add(-2*s.cluster.Collections.BlobSigningTTL.Duration()))
 
        s.cluster.Collections.BlobTrashLifetime.Set("1ns")
-       err = v.Trash(TestHash)
+       err = v.BlockTrash(TestHash)
        if err != nil {
-               t.Fatal(err)
+               t.Error(err)
        }
        err = checkGet()
        if err == nil || !os.IsNotExist(err) {
-               t.Fatalf("os.IsNotExist(%v) should have been true", err)
+               t.Errorf("os.IsNotExist(%v) should have been true", err)
        }
 
-       v.PutRaw(TestHash, TestBlock)
+       v.BlockWrite(context.Background(), TestHash, TestBlock)
        v.TouchWithDate(TestHash, time.Now().Add(-2*s.cluster.Collections.BlobSigningTTL.Duration()))
 
        // EmptyTrash should not delete the untrashed copy.
        v.EmptyTrash()
        err = checkGet()
        if err != nil {
-               t.Fatal(err)
+               t.Error(err)
        }
 
        // Fourth set: If the same data block gets trashed twice with
@@ -1073,33 +881,33 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
        // at intermediate time B (A < B < C), it is still possible to
        // untrash the block whose deadline is "C".
 
-       v.PutRaw(TestHash, TestBlock)
+       v.BlockWrite(context.Background(), TestHash, TestBlock)
        v.TouchWithDate(TestHash, time.Now().Add(-2*s.cluster.Collections.BlobSigningTTL.Duration()))
 
        s.cluster.Collections.BlobTrashLifetime.Set("1ns")
-       err = v.Trash(TestHash)
+       err = v.BlockTrash(TestHash)
        if err != nil {
-               t.Fatal(err)
+               t.Error(err)
        }
 
-       v.PutRaw(TestHash, TestBlock)
+       v.BlockWrite(context.Background(), TestHash, TestBlock)
        v.TouchWithDate(TestHash, time.Now().Add(-2*s.cluster.Collections.BlobSigningTTL.Duration()))
 
        s.cluster.Collections.BlobTrashLifetime.Set("1h")
-       err = v.Trash(TestHash)
+       err = v.BlockTrash(TestHash)
        if err != nil {
-               t.Fatal(err)
+               t.Error(err)
        }
 
        // EmptyTrash should not prevent us from recovering the
        // time.Hour ("C") trash
        v.EmptyTrash()
-       err = v.Untrash(TestHash)
+       err = v.BlockUntrash(TestHash)
        if err != nil {
-               t.Fatal(err)
+               t.Error(err)
        }
        err = checkGet()
        if err != nil {
-               t.Fatal(err)
+               t.Error(err)
        }
 }
index 950b3989aa0f6a72e20553f8505f6575a91b39c4..f64041b04852e7fa9ca71235b2716d84843771f3 100644 (file)
@@ -5,25 +5,13 @@
 package keepstore
 
 import (
-       "bytes"
-       "context"
-       "crypto/md5"
-       "errors"
-       "fmt"
-       "io"
-       "os"
-       "strings"
        "sync"
        "time"
-
-       "git.arvados.org/arvados.git/sdk/go/arvados"
-       "github.com/sirupsen/logrus"
 )
 
 var (
-       TestBlock       = []byte("The quick brown fox jumps over the lazy dog.")
-       TestHash        = "e4d909c290d0fb1ca068ffaddf22cbd0"
-       TestHashPutResp = "e4d909c290d0fb1ca068ffaddf22cbd0+44\n"
+       TestBlock = []byte("The quick brown fox jumps over the lazy dog.")
+       TestHash  = "e4d909c290d0fb1ca068ffaddf22cbd0"
 
        TestBlock2 = []byte("Pack my box with five dozen liquor jugs.")
        TestHash2  = "f15ac516f788aec4f30932ffb6395c39"
@@ -31,10 +19,6 @@ var (
        TestBlock3 = []byte("Now is the time for all good men to come to the aid of their country.")
        TestHash3  = "eed29bbffbc2dbe5e5ee0bb71888e61f"
 
-       // BadBlock is used to test collisions and corruption.
-       // It must not match any test hashes.
-       BadBlock = []byte("The magic words are squeamish ossifrage.")
-
        EmptyHash  = "d41d8cd98f00b204e9800998ecf8427e"
        EmptyBlock = []byte("")
 )
@@ -43,230 +27,64 @@ var (
 // underlying Volume, in order to test behavior in cases that are
 // impractical to achieve with a sequence of normal Volume operations.
 type TestableVolume interface {
-       Volume
-
-       // [Over]write content for a locator with the given data,
-       // bypassing all constraints like readonly and serialize.
-       PutRaw(locator string, data []byte)
+       volume
 
        // Returns the strings that a driver uses to record read/write operations.
        ReadWriteOperationLabelValues() (r, w string)
 
        // Specify the value Mtime() should return, until the next
-       // call to Touch, TouchWithDate, or Put.
-       TouchWithDate(locator string, lastPut time.Time)
+       // call to BlockTouch, TouchWithDate, or BlockWrite.
+       TouchWithDate(locator string, lastBlockWrite time.Time)
 
        // Clean up, delete temporary files.
        Teardown()
 }
 
-func init() {
-       driver["mock"] = newMockVolume
-}
-
-// MockVolumes are test doubles for Volumes, used to test handlers.
-type MockVolume struct {
-       Store      map[string][]byte
-       Timestamps map[string]time.Time
-
-       // Bad volumes return an error for every operation.
-       Bad            bool
-       BadVolumeError error
-
-       // Touchable volumes' Touch() method succeeds for a locator
-       // that has been Put().
-       Touchable bool
-
-       // Gate is a "starting gate", allowing test cases to pause
-       // volume operations long enough to inspect state. Every
-       // operation (except Status) starts by receiving from
-       // Gate. Sending one value unblocks one operation; closing the
-       // channel unblocks all operations. By default, Gate is a
-       // closed channel, so all operations proceed without
-       // blocking. See trash_worker_test.go for an example.
-       Gate chan struct{} `json:"-"`
-
-       cluster *arvados.Cluster
-       volume  arvados.Volume
-       logger  logrus.FieldLogger
-       metrics *volumeMetricsVecs
-       called  map[string]int
-       mutex   sync.Mutex
-}
-
-// newMockVolume returns a non-Bad, non-Readonly, Touchable mock
-// volume.
-func newMockVolume(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (Volume, error) {
-       gate := make(chan struct{})
-       close(gate)
-       return &MockVolume{
-               Store:      make(map[string][]byte),
-               Timestamps: make(map[string]time.Time),
-               Bad:        false,
-               Touchable:  true,
-               called:     map[string]int{},
-               Gate:       gate,
-               cluster:    cluster,
-               volume:     volume,
-               logger:     logger,
-               metrics:    metrics,
-       }, nil
+// brbuffer is like bytes.Buffer, but it implements io.WriterAt.
+// Convenient for testing (volume)BlockRead implementations.
+type brbuffer struct {
+       mtx sync.Mutex
+       buf []byte
 }
 
-// CallCount returns how many times the named method has been called.
-func (v *MockVolume) CallCount(method string) int {
-       v.mutex.Lock()
-       defer v.mutex.Unlock()
-       c, ok := v.called[method]
-       if !ok {
-               return 0
+func (b *brbuffer) WriteAt(p []byte, offset int64) (int, error) {
+       b.mtx.Lock()
+       defer b.mtx.Unlock()
+       if short := int(offset) + len(p) - len(b.buf); short > 0 {
+               b.buf = append(b.buf, make([]byte, short)...)
        }
-       return c
+       return copy(b.buf[offset:], p), nil
 }
 
-func (v *MockVolume) gotCall(method string) {
-       v.mutex.Lock()
-       defer v.mutex.Unlock()
-       if _, ok := v.called[method]; !ok {
-               v.called[method] = 1
-       } else {
-               v.called[method]++
-       }
+func (b *brbuffer) Bytes() []byte {
+       b.mtx.Lock()
+       defer b.mtx.Unlock()
+       return b.buf
 }
 
-func (v *MockVolume) Compare(ctx context.Context, loc string, buf []byte) error {
-       v.gotCall("Compare")
-       <-v.Gate
-       if v.Bad {
-               return v.BadVolumeError
-       } else if block, ok := v.Store[loc]; ok {
-               if fmt.Sprintf("%x", md5.Sum(block)) != loc {
-                       return DiskHashError
-               }
-               if bytes.Compare(buf, block) != 0 {
-                       return CollisionError
-               }
-               return nil
-       } else {
-               return os.ErrNotExist
-       }
+func (b *brbuffer) String() string {
+       b.mtx.Lock()
+       defer b.mtx.Unlock()
+       return string(b.buf)
 }
 
-func (v *MockVolume) Get(ctx context.Context, loc string, buf []byte) (int, error) {
-       v.gotCall("Get")
-       <-v.Gate
-       if v.Bad {
-               return 0, v.BadVolumeError
-       } else if block, ok := v.Store[loc]; ok {
-               copy(buf[:len(block)], block)
-               return len(block), nil
-       }
-       return 0, os.ErrNotExist
+func (b *brbuffer) Len() int {
+       b.mtx.Lock()
+       defer b.mtx.Unlock()
+       return len(b.buf)
 }
 
-func (v *MockVolume) Put(ctx context.Context, loc string, block []byte) error {
-       v.gotCall("Put")
-       <-v.Gate
-       if v.Bad {
-               return v.BadVolumeError
-       }
-       if v.volume.ReadOnly {
-               return MethodDisabledError
-       }
-       v.Store[loc] = block
-       return v.Touch(loc)
+func (b *brbuffer) Reset() {
+       b.mtx.Lock()
+       defer b.mtx.Unlock()
+       b.buf = nil
 }
 
-func (v *MockVolume) Touch(loc string) error {
-       return v.TouchWithDate(loc, time.Now())
-}
+// brdiscarder is like io.Discard, but it implements
+// io.WriterAt. Convenient for testing (volume)BlockRead
+// implementations when the output is not checked.
+type brdiscarder struct{}
 
-func (v *MockVolume) TouchWithDate(loc string, t time.Time) error {
-       v.gotCall("Touch")
-       <-v.Gate
-       if v.volume.ReadOnly {
-               return MethodDisabledError
-       }
-       if _, exists := v.Store[loc]; !exists {
-               return os.ErrNotExist
-       }
-       if v.Touchable {
-               v.Timestamps[loc] = t
-               return nil
-       }
-       return errors.New("Touch failed")
-}
+func (brdiscarder) WriteAt(p []byte, offset int64) (int, error) { return len(p), nil }
 
-func (v *MockVolume) Mtime(loc string) (time.Time, error) {
-       v.gotCall("Mtime")
-       <-v.Gate
-       var mtime time.Time
-       var err error
-       if v.Bad {
-               err = v.BadVolumeError
-       } else if t, ok := v.Timestamps[loc]; ok {
-               mtime = t
-       } else {
-               err = os.ErrNotExist
-       }
-       return mtime, err
-}
-
-func (v *MockVolume) IndexTo(prefix string, w io.Writer) error {
-       v.gotCall("IndexTo")
-       <-v.Gate
-       for loc, block := range v.Store {
-               if !IsValidLocator(loc) || !strings.HasPrefix(loc, prefix) {
-                       continue
-               }
-               _, err := fmt.Fprintf(w, "%s+%d %d\n",
-                       loc, len(block), 123456789)
-               if err != nil {
-                       return err
-               }
-       }
-       return nil
-}
-
-func (v *MockVolume) Trash(loc string) error {
-       v.gotCall("Delete")
-       <-v.Gate
-       if v.volume.ReadOnly {
-               return MethodDisabledError
-       }
-       if _, ok := v.Store[loc]; ok {
-               if time.Since(v.Timestamps[loc]) < time.Duration(v.cluster.Collections.BlobSigningTTL) {
-                       return nil
-               }
-               delete(v.Store, loc)
-               return nil
-       }
-       return os.ErrNotExist
-}
-
-func (v *MockVolume) GetDeviceID() string {
-       return "mock-device-id"
-}
-
-func (v *MockVolume) Untrash(loc string) error {
-       return nil
-}
-
-func (v *MockVolume) Status() *VolumeStatus {
-       var used uint64
-       for _, block := range v.Store {
-               used = used + uint64(len(block))
-       }
-       return &VolumeStatus{"/bogo", 123, 1000000 - used, used}
-}
-
-func (v *MockVolume) String() string {
-       return "[MockVolume]"
-}
-
-func (v *MockVolume) EmptyTrash() {
-}
-
-func (v *MockVolume) GetStorageClasses() []string {
-       return nil
-}
+var brdiscard = brdiscarder{}
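
For readers of the tests above, a brief sketch of how these helpers behave: brbuffer accepts WriteAt calls in any order, padding with zeros as needed, and brdiscard is handed to BlockRead when only the returned error matters. The snippet is illustrative only.

	// Sketch, not part of the suite.
	b := &brbuffer{}
	b.WriteAt([]byte("world"), 6) // buffer grows to 11 bytes; bytes 0-5 stay zero
	b.WriteAt([]byte("hello "), 0)
	// b.String() == "hello world", b.Len() == 11

	// When the content doesn't need to be checked (v is whatever volume
	// is under test):
	//   err := v.BlockRead(context.Background(), TestHash, brdiscard)
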
diff --git a/services/keepstore/work_queue.go b/services/keepstore/work_queue.go
deleted file mode 100644 (file)
index be3d118..0000000
+++ /dev/null
@@ -1,208 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-/* A WorkQueue is an asynchronous thread-safe queue manager.  It
-   provides a channel from which items can be read off the queue, and
-   permits replacing the contents of the queue at any time.
-
-   The overall work flow for a WorkQueue is as follows:
-
-     1. A WorkQueue is created with NewWorkQueue().  This
-        function instantiates a new WorkQueue and starts a manager
-        goroutine.  The manager listens on an input channel
-        (manager.newlist) and an output channel (manager.NextItem).
-
-     2. The manager first waits for a new list of requests on the
-        newlist channel.  When another goroutine calls
-        manager.ReplaceQueue(lst), it sends lst over the newlist
-        channel to the manager.  The manager goroutine now has
-        ownership of the list.
-
-     3. Once the manager has this initial list, it listens on both the
-        input and output channels for one of the following to happen:
-
-          a. A worker attempts to read an item from the NextItem
-             channel.  The manager sends the next item from the list
-             over this channel to the worker, and loops.
-
-          b. New data is sent to the manager on the newlist channel.
-             This happens when another goroutine calls
-             manager.ReplaceItem() with a new list.  The manager
-             discards the current list, replaces it with the new one,
-             and begins looping again.
-
-          c. The input channel is closed.  The manager closes its
-             output channel (signalling any workers to quit) and
-             terminates.
-
-   Tasks currently handled by WorkQueue:
-     * the pull list
-     * the trash list
-
-   Example usage:
-
-        // Any kind of user-defined type can be used with the
-        // WorkQueue.
-               type FrobRequest struct {
-                       frob string
-               }
-
-               // Make a work list.
-               froblist := NewWorkQueue()
-
-               // Start a concurrent worker to read items from the NextItem
-               // channel until it is closed, deleting each one.
-               go func(list WorkQueue) {
-                       for i := range list.NextItem {
-                               req := i.(FrobRequest)
-                               frob.Run(req)
-                       }
-               }(froblist)
-
-               // Set up a HTTP handler for PUT /frob
-               router.HandleFunc(`/frob`,
-                       func(w http.ResponseWriter, req *http.Request) {
-                               // Parse the request body into a list.List
-                               // of FrobRequests, and give this list to the
-                               // frob manager.
-                               newfrobs := parseBody(req.Body)
-                               froblist.ReplaceQueue(newfrobs)
-                       }).Methods("PUT")
-
-   Methods available on a WorkQueue:
-
-               ReplaceQueue(list)
-                       Replaces the current item list with a new one.  The list
-            manager discards any unprocessed items on the existing
-            list and replaces it with the new one. If the worker is
-            processing a list item when ReplaceQueue is called, it
-            finishes processing before receiving items from the new
-            list.
-               Close()
-                       Shuts down the manager goroutine. When Close is called,
-                       the manager closes the NextItem channel.
-*/
-
-import "container/list"
-
-// WorkQueue definition
-type WorkQueue struct {
-       getStatus chan WorkQueueStatus
-       newlist   chan *list.List
-       // Workers get work items by reading from this channel.
-       NextItem <-chan interface{}
-       // Each worker must send struct{}{} to DoneItem exactly once
-       // for each work item received from NextItem, when it stops
-       // working on that item (regardless of whether the work was
-       // successful).
-       DoneItem chan<- struct{}
-}
-
-// WorkQueueStatus reflects the queue status.
-type WorkQueueStatus struct {
-       InProgress int
-       Queued     int
-}
-
-// NewWorkQueue returns a new empty WorkQueue.
-func NewWorkQueue() *WorkQueue {
-       nextItem := make(chan interface{})
-       reportDone := make(chan struct{})
-       newList := make(chan *list.List)
-       b := WorkQueue{
-               getStatus: make(chan WorkQueueStatus),
-               newlist:   newList,
-               NextItem:  nextItem,
-               DoneItem:  reportDone,
-       }
-       go func() {
-               // Read new work lists from the newlist channel.
-               // Reply to "status" and "get next item" queries by
-               // sending to the getStatus and nextItem channels
-               // respectively. Return when the newlist channel
-               // closes.
-
-               todo := &list.List{}
-               status := WorkQueueStatus{}
-
-               // When we're done, close the output channel; workers will
-               // shut down next time they ask for new work.
-               defer close(nextItem)
-               defer close(b.getStatus)
-
-               // nextChan and nextVal are both nil when we have
-               // nothing to send; otherwise they are, respectively,
-               // the nextItem channel and the next work item to send
-               // to it.
-               var nextChan chan interface{}
-               var nextVal interface{}
-
-               for newList != nil || status.InProgress > 0 {
-                       select {
-                       case p, ok := <-newList:
-                               if !ok {
-                                       // Closed, stop receiving
-                                       newList = nil
-                               }
-                               todo = p
-                               if todo == nil {
-                                       todo = &list.List{}
-                               }
-                               status.Queued = todo.Len()
-                               if status.Queued == 0 {
-                                       // Stop sending work
-                                       nextChan = nil
-                                       nextVal = nil
-                               } else {
-                                       nextChan = nextItem
-                                       nextVal = todo.Front().Value
-                               }
-                       case nextChan <- nextVal:
-                               todo.Remove(todo.Front())
-                               status.InProgress++
-                               status.Queued--
-                               if status.Queued == 0 {
-                                       // Stop sending work
-                                       nextChan = nil
-                                       nextVal = nil
-                               } else {
-                                       nextVal = todo.Front().Value
-                               }
-                       case <-reportDone:
-                               status.InProgress--
-                       case b.getStatus <- status:
-                       }
-               }
-       }()
-       return &b
-}
-
-// ReplaceQueue abandons any work items left in the existing queue,
-// and starts giving workers items from the given list. After giving
-// it to ReplaceQueue, the caller must not read or write the given
-// list.
-func (b *WorkQueue) ReplaceQueue(list *list.List) {
-       b.newlist <- list
-}
-
-// Close shuts down the manager and terminates the goroutine, which
-// abandons any pending requests, but allows any pull request already
-// in progress to continue.
-//
-// After Close, Status will return correct values, NextItem will be
-// closed, and ReplaceQueue will panic.
-func (b *WorkQueue) Close() {
-       close(b.newlist)
-}
-
-// Status returns an up-to-date WorkQueueStatus reflecting the current
-// queue status.
-func (b *WorkQueue) Status() WorkQueueStatus {
-       // If the channel is closed, we get the nil value of
-       // WorkQueueStatus, which is an accurate description of a
-       // finished queue.
-       return <-b.getStatus
-}
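
For orientation, here is a minimal sketch of how a caller would drive the API documented in the removed file: one worker goroutine drains NextItem and acknowledges each item on DoneItem, while a producer swaps in lists with ReplaceQueue. The helper name and item values are made up for illustration; the sketch assumes it sits in the same package as the removed code.

```
package keepstore

import (
	"container/list"
	"fmt"
	"time"
)

// exampleWorkQueueUsage is a hypothetical caller, not part of the
// removed file; it only exercises the API documented above.
func exampleWorkQueueUsage() {
	q := NewWorkQueue()

	// Worker: read items until NextItem closes, acknowledging each
	// one on DoneItem exactly once.
	workerDone := make(chan struct{})
	go func() {
		defer close(workerDone)
		for item := range q.NextItem {
			fmt.Println("processing", item)
			q.DoneItem <- struct{}{}
		}
	}()

	// Producer: hand the manager a fresh list; unprocessed items from
	// any previous list would be discarded.
	todo := list.New()
	for _, n := range []int{1, 2, 3} {
		todo.PushBack(n)
	}
	q.ReplaceQueue(todo)

	// Wait until everything dispatched has been acknowledged, then
	// shut down: Close() closes NextItem, which ends the worker loop.
	for s := q.Status(); s.Queued > 0 || s.InProgress > 0; s = q.Status() {
		time.Sleep(time.Millisecond)
	}
	q.Close()
	<-workerDone
}
```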
diff --git a/services/keepstore/work_queue_test.go b/services/keepstore/work_queue_test.go
deleted file mode 100644 (file)
index 254f96c..0000000
+++ /dev/null
@@ -1,244 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-import (
-       "container/list"
-       "runtime"
-       "testing"
-       "time"
-)
-
-type fatalfer interface {
-       Fatalf(string, ...interface{})
-}
-
-func makeTestWorkList(ary []interface{}) *list.List {
-       l := list.New()
-       for _, n := range ary {
-               l.PushBack(n)
-       }
-       return l
-}
-
-func expectChannelEmpty(t fatalfer, c <-chan interface{}) {
-       select {
-       case item, ok := <-c:
-               if ok {
-                       t.Fatalf("Received value (%+v) from channel that we expected to be empty", item)
-               }
-       default:
-       }
-}
-
-func expectChannelNotEmpty(t fatalfer, c <-chan interface{}) interface{} {
-       select {
-       case item, ok := <-c:
-               if !ok {
-                       t.Fatalf("expected data on a closed channel")
-               }
-               return item
-       case <-time.After(time.Second):
-               t.Fatalf("expected data on an empty channel")
-               return nil
-       }
-}
-
-func expectChannelClosedWithin(t fatalfer, timeout time.Duration, c <-chan interface{}) {
-       select {
-       case received, ok := <-c:
-               if ok {
-                       t.Fatalf("Expected channel to be closed, but received %+v instead", received)
-               }
-       case <-time.After(timeout):
-               t.Fatalf("Expected channel to be closed, but it is still open after %v", timeout)
-       }
-}
-
-func doWorkItems(t fatalfer, q *WorkQueue, expected []interface{}) {
-       for i := range expected {
-               actual, ok := <-q.NextItem
-               if !ok {
-                       t.Fatalf("Expected %+v but channel was closed after receiving %+v as expected.", expected, expected[:i])
-               }
-               q.DoneItem <- struct{}{}
-               if actual.(int) != expected[i] {
-                       t.Fatalf("Expected %+v but received %+v after receiving %+v as expected.", expected[i], actual, expected[:i])
-               }
-       }
-}
-
-func expectEqualWithin(t fatalfer, timeout time.Duration, expect interface{}, f func() interface{}) {
-       ok := make(chan struct{})
-       giveup := false
-       go func() {
-               for f() != expect && !giveup {
-                       time.Sleep(time.Millisecond)
-               }
-               close(ok)
-       }()
-       select {
-       case <-ok:
-       case <-time.After(timeout):
-               giveup = true
-               _, file, line, _ := runtime.Caller(1)
-               t.Fatalf("Still getting %+v, timed out waiting for %+v\n%s:%d", f(), expect, file, line)
-       }
-}
-
-func expectQueued(t fatalfer, b *WorkQueue, expectQueued int) {
-       if l := b.Status().Queued; l != expectQueued {
-               t.Fatalf("Got Queued==%d, expected %d", l, expectQueued)
-       }
-}
-
-func TestWorkQueueDoneness(t *testing.T) {
-       b := NewWorkQueue()
-       defer b.Close()
-       b.ReplaceQueue(makeTestWorkList([]interface{}{1, 2, 3}))
-       expectQueued(t, b, 3)
-       gate := make(chan struct{})
-       go func() {
-               <-gate
-               for range b.NextItem {
-                       <-gate
-                       time.Sleep(time.Millisecond)
-                       b.DoneItem <- struct{}{}
-               }
-       }()
-       expectEqualWithin(t, time.Second, 0, func() interface{} { return b.Status().InProgress })
-       b.ReplaceQueue(makeTestWorkList([]interface{}{4, 5, 6}))
-       for i := 1; i <= 3; i++ {
-               gate <- struct{}{}
-               expectEqualWithin(t, time.Second, 3-i, func() interface{} { return b.Status().Queued })
-               expectEqualWithin(t, time.Second, 1, func() interface{} { return b.Status().InProgress })
-       }
-       close(gate)
-       expectEqualWithin(t, time.Second, 0, func() interface{} { return b.Status().InProgress })
-       expectChannelEmpty(t, b.NextItem)
-}
-
-// Create a WorkQueue, generate a list for it, and instantiate a worker.
-func TestWorkQueueReadWrite(t *testing.T) {
-       var input = []interface{}{1, 1, 2, 3, 5, 8, 13, 21, 34}
-
-       b := NewWorkQueue()
-       expectQueued(t, b, 0)
-
-       b.ReplaceQueue(makeTestWorkList(input))
-       expectQueued(t, b, len(input))
-
-       doWorkItems(t, b, input)
-       expectChannelEmpty(t, b.NextItem)
-       b.Close()
-}
-
-// Start a worker before the list has any input.
-func TestWorkQueueEarlyRead(t *testing.T) {
-       var input = []interface{}{1, 1, 2, 3, 5, 8, 13, 21, 34}
-
-       b := NewWorkQueue()
-       defer b.Close()
-
-       // First, demonstrate that nothing is available on the NextItem
-       // channel.
-       expectChannelEmpty(t, b.NextItem)
-
-       // Start a reader in a goroutine. The reader will block until the
-       // block work list has been initialized.
-       //
-       done := make(chan int)
-       go func() {
-               doWorkItems(t, b, input)
-               done <- 1
-       }()
-
-       // Feed the blocklist a new worklist, and wait for the worker to
-       // finish.
-       b.ReplaceQueue(makeTestWorkList(input))
-       <-done
-       expectQueued(t, b, 0)
-}
-
-// After Close(), NextItem closes, work finishes, then stats return zero.
-func TestWorkQueueClose(t *testing.T) {
-       b := NewWorkQueue()
-       input := []interface{}{1, 2, 3, 4, 5, 6, 7, 8}
-       mark := make(chan struct{})
-       go func() {
-               <-b.NextItem
-               mark <- struct{}{}
-               <-mark
-               b.DoneItem <- struct{}{}
-       }()
-       b.ReplaceQueue(makeTestWorkList(input))
-       // Wait for worker to take item 1
-       <-mark
-       b.Close()
-       expectEqualWithin(t, time.Second, 1, func() interface{} { return b.Status().InProgress })
-       // Tell worker to report done
-       mark <- struct{}{}
-       expectEqualWithin(t, time.Second, 0, func() interface{} { return b.Status().InProgress })
-       expectChannelClosedWithin(t, time.Second, b.NextItem)
-}
-
-// Show that a reader may block when the manager's list is exhausted,
-// and that the reader resumes automatically when new data is
-// available.
-func TestWorkQueueReaderBlocks(t *testing.T) {
-       var (
-               inputBeforeBlock = []interface{}{1, 2, 3, 4, 5}
-               inputAfterBlock  = []interface{}{6, 7, 8, 9, 10}
-       )
-
-       b := NewWorkQueue()
-       defer b.Close()
-       sendmore := make(chan int)
-       done := make(chan int)
-       go func() {
-               doWorkItems(t, b, inputBeforeBlock)
-
-               // Confirm that the channel is empty, so a subsequent read
-               // on it will block.
-               expectChannelEmpty(t, b.NextItem)
-
-               // Signal that we're ready for more input.
-               sendmore <- 1
-               doWorkItems(t, b, inputAfterBlock)
-               done <- 1
-       }()
-
-       // Write a slice of the first five elements and wait for the
-       // reader to signal that it's ready for us to send more input.
-       b.ReplaceQueue(makeTestWorkList(inputBeforeBlock))
-       <-sendmore
-
-       b.ReplaceQueue(makeTestWorkList(inputAfterBlock))
-
-       // Wait for the reader to complete.
-       <-done
-}
-
-// Replace one active work list with another.
-func TestWorkQueueReplaceQueue(t *testing.T) {
-       var firstInput = []interface{}{1, 1, 2, 3, 5, 8, 13, 21, 34}
-       var replaceInput = []interface{}{1, 4, 9, 16, 25, 36, 49, 64, 81}
-
-       b := NewWorkQueue()
-       b.ReplaceQueue(makeTestWorkList(firstInput))
-
-       // Read just the first five elements from the work list.
-       // Confirm that the channel is not empty.
-       doWorkItems(t, b, firstInput[0:5])
-       expectChannelNotEmpty(t, b.NextItem)
-
-       // Replace the work list and read five more elements.
-       // The old list should have been discarded and all new
-       // elements come from the new list.
-       b.ReplaceQueue(makeTestWorkList(replaceInput))
-       doWorkItems(t, b, replaceInput[0:5])
-
-       b.Close()
-}
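
Both keepstore files above disappear with this merge. The one idiom worth noting from the removed manager goroutine is "a nil channel disables a select case": the sender only offers work while something is queued. A stripped-down, hypothetical sketch of that pattern follows; the names in, out and queue are illustrative, not taken from the removed code.

```
package main

import "fmt"

func main() {
	in := make(chan int)
	out := make(chan int)

	// Manager: buffer values from "in" and offer them on "out". The
	// send case is disabled by setting sendCh to nil whenever nothing
	// is queued, the same trick the removed manager goroutine used
	// with its nextChan/nextVal pair.
	go func(in <-chan int) {
		var queue []int
		var sendCh chan<- int // nil: the send case never fires
		var next int
		for in != nil || sendCh != nil {
			select {
			case v, ok := <-in:
				if !ok {
					in = nil // stop receiving; drain what is queued
					break
				}
				queue = append(queue, v)
			case sendCh <- next:
				queue = queue[1:]
			}
			if len(queue) > 0 {
				sendCh, next = out, queue[0]
			} else {
				sendCh, next = nil, 0
			}
		}
		close(out)
	}(in)

	// Producer: feed a few values, then close the input.
	go func() {
		for _, v := range []int{1, 2, 3} {
			in <- v
		}
		close(in)
	}()

	// Consumer: reads until the manager closes "out".
	for v := range out {
		fmt.Println("got", v)
	}
}
```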
index 0d402a63c14465417727843f9239d395c3a8787b..1a68d6fd77c7ca8b52007ce6b0f174ef97947425 100644 (file)
@@ -9,6 +9,9 @@ SHELL := /bin/bash
 GOPATH?=~/go
 APP_NAME?=arvados-workbench2
 
+# Cypress test file that can be passed to the integration-test target
+SPECFILE?=ALL
+
 # VERSION uses all the above to produce X.Y.Z.timestamp
 # something in the lines of 1.2.0.20180612145021, this will be the package version
 # it can be overwritten when invoking make as in make packages VERSION=1.2.0
@@ -21,7 +24,7 @@ GIT_COMMIT?=$(shell git rev-parse --short HEAD)
 # changes in the package. (i.e. example config files externally added
 ITERATION?=1
 
-TARGETS?=centos7 rocky8 debian10 debian11 ubuntu1804 ubuntu2004
+TARGETS?=rocky8 debian11 debian12 ubuntu2004 ubuntu2204
 
 DESCRIPTION=Arvados Workbench2 - Arvados is a free and open source platform for big data science.
 MAINTAINER=Arvados Package Maintainers <packaging@arvados.org>
@@ -82,7 +85,11 @@ unit-tests: yarn-install
 
 integration-tests: yarn-install check-arvados-directory
        yarn run cypress install
+ifeq ($(SPECFILE), ALL)
        $(WORKSPACE)/tools/run-integration-tests.sh -a $(ARVADOS_DIRECTORY)
+else
+       $(WORKSPACE)/tools/run-integration-tests.sh -a $(ARVADOS_DIRECTORY) -- --spec $(SPECFILE)
+endif
 
 integration-tests-in-docker: workbench2-build-image check-arvados-directory
        docker run -ti --rm \
@@ -92,7 +99,7 @@ integration-tests-in-docker: workbench2-build-image check-arvados-directory
                -v $(ARVADOS_DIRECTORY):/usr/src/arvados \
                -w /usr/src/arvados/services/workbench2 \
                workbench2-build \
-               make arvados-server-install integration-tests
+               make arvados-server-install integration-tests SPECFILE=$(SPECFILE)
 
 unit-tests-in-docker: workbench2-build-image check-arvados-directory
        docker run -ti --rm \
@@ -187,4 +194,5 @@ packages-in-docker: check-arvados-directory workbench2-build-image
                sh -c 'git config --global --add safe.directory /tmp/workbench2 && make packages'
 
 workbench2-build-image:
-       (cd docker && docker build -t workbench2-build .)
+       docker inspect workbench2-build &> /dev/null || \
+               docker build -t workbench2-build -f docker/Dockerfile ${ARVADOS_DIRECTORY}
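
With the new SPECFILE variable (default ALL, meaning the whole suite), a single Cypress spec can be run either natively or inside the build container; anything else is forwarded to run-integration-tests.sh as a --spec argument. A hypothetical invocation using one of the spec files renamed later in this merge; the path is relative to services/workbench2 and may need adjusting to where the script resolves it:

```
make integration-tests SPECFILE=cypress/e2e/collection.cy.js
make integration-tests-in-docker SPECFILE=cypress/e2e/collection.cy.js
```

The workbench2-build-image target also now skips the docker build entirely when an image named workbench2-build already exists.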
index 4ec4bd1cf8418b02b62bd31f5058aba22d27536f..9aa788a5b05a0463bb6e5d48d83f5510dccddb05 100644 (file)
@@ -49,8 +49,7 @@ make integration-tests-in-docker
 
 ```
 xhost +local:root
-ARVADOS_DIR=/path/to/arvados
-docker run -ti -v$PWD:$PWD -v$ARVADOS_DIR:/usr/src/arvados -w$PWD --env="DISPLAY" --volume="/tmp/.X11-unix:/tmp/.X11-unix:rw" workbench2-build /bin/bash
+docker run -ti -v$PWD:$PWD -v$(realpath ../..):/usr/src/arvados -w$PWD --env="DISPLAY" --volume="/tmp/.X11-unix:/tmp/.X11-unix:rw" workbench2-build /bin/bash
 (inside container)
 yarn run cypress install
 tools/run-integration-tests.sh -i -a /usr/src/arvados
diff --git a/services/workbench2/cypress.config.ts b/services/workbench2/cypress.config.ts
new file mode 100644 (file)
index 0000000..d5698b0
--- /dev/null
@@ -0,0 +1,25 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+import { defineConfig } from 'cypress'
+
+export default defineConfig({
+  chromeWebSecurity: false,
+  viewportWidth: 1920,
+  viewportHeight: 1080,
+  downloadsFolder: 'cypress/downloads',
+  videoCompression: false,
+  e2e: {
+    // We've imported your old cypress plugins here.
+    // You may want to clean this up later by importing these.
+    setupNodeEvents(on, config) {
+      return require('./cypress/plugins/index.js')(on, config)
+    },
+    baseUrl: 'https://localhost:3000/',
+    experimentalRunAllSpecs: true,
+    // The 2 options below make Electron crash a lot less and Firefox behave better
+    experimentalMemoryManagement: true,
+    numTestsKeptInMemory: 0,
+  },
+})
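
The e2e block above bridges the Cypress 10+ configuration format back to the pre-existing plugins file via setupNodeEvents. That plugins file is not part of this diff; for reference, a legacy-style cypress/plugins/index.js compatible with this bridge typically has the following shape (illustrative sketch only, the real file in this repository may register other handlers):

```
// cypress/plugins/index.js (legacy plugin entry point, sketch only)
module.exports = (on, config) => {
  on('before:browser:launch', (browser = {}, launchOptions) => {
    // adjust launch options here if needed
    return launchOptions;
  });
  return config;
};
```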
diff --git a/services/workbench2/cypress.json b/services/workbench2/cypress.json
deleted file mode 100644 (file)
index 1a4ed40..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-{
-    "baseUrl": "https://localhost:3000/",
-    "chromeWebSecurity": false,
-    "viewportWidth": 1920,
-    "viewportHeight": 1080,
-    "downloadsFolder": "cypress/downloads",
-    "videoCompression": false
-}
diff --git a/services/workbench2/cypress/e2e/banner-tooltip.cy.js b/services/workbench2/cypress/e2e/banner-tooltip.cy.js
new file mode 100644 (file)
index 0000000..0a434ec
--- /dev/null
@@ -0,0 +1,100 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+describe('Banner / tooltip tests', function () {
+    let activeUser;
+    let adminUser;
+    let collectionUUID;
+
+    before(function () {
+        // Only set up common users once. These aren't set up as aliases because
+        // aliases are cleaned up after every test. Also it doesn't make sense
+        // to set the same users on beforeEach() over and over again, so we
+        // separate a little from Cypress' 'Best Practices' here.
+        cy.getUser('admin', 'Admin', 'User', true, true)
+            .as('adminUser').then(function () {
+                adminUser = this.adminUser;
+            });
+        cy.getUser('collectionuser1', 'Collection', 'User', false, true)
+            .as('activeUser').then(function () {
+                activeUser = this.activeUser;
+            });
+    });
+
+    beforeEach(function () {
+        cy.on('uncaught:exception', (err, runnable, promise) => {
+            Cypress.log({ message: `Application Error: ${err}`});
+            if (promise) {
+                return false;
+            }
+        });
+
+        cy.createCollection(adminUser.token, {
+            name: `BannerTooltipTest${Math.floor(Math.random() * 999999)}`,
+            owner_uuid: adminUser.user.uuid,
+        }).as('bannerCollection');
+
+        cy.getAll('@bannerCollection').then(function ([bannerCollection]) {
+            collectionUUID=bannerCollection.uuid;
+
+            cy.loginAs(adminUser);
+
+            cy.goToPath(`/collections/${bannerCollection.uuid}`);
+
+            cy.get('[data-cy=upload-button]').click();
+
+            cy.fixture('files/banner.html').as('banner');
+            cy.fixture('files/tooltips.txt').as('tooltips');
+
+            cy.getAll('@banner', '@tooltips').then(([banner, tooltips]) => {
+                cy.get('[data-cy=drag-and-drop]').upload(banner, 'banner.html', false);
+                cy.get('[data-cy=drag-and-drop]').upload(tooltips, 'tooltips.json', false);
+            });
+
+            cy.get('[data-cy=form-submit-btn]').click();
+            cy.get('[data-cy=form-submit-btn]').should('not.exist');
+            cy.get('[data-cy=collection-files-right-panel]')
+                .should('contain', 'banner.html');
+            cy.get('[data-cy=collection-files-right-panel]')
+                .should('contain', 'tooltips.json');
+
+            cy.intercept({ method: 'GET', url: '**/arvados/v1/config?nocache=*' }, (req) => {
+                req.reply((res) => {
+                    res.body.Workbench.BannerUUID = collectionUUID;
+                });
+            });
+        });
+    });
+
+    it('should re-show the banner', () => {
+        cy.loginAs(adminUser);
+
+        cy.get('[data-cy=confirmation-dialog-ok-btn]').click();
+
+        cy.get('[title=Notifications]').click();
+        cy.get('li').contains('Restore Banner').click();
+
+        cy.get('[data-cy=confirmation-dialog-ok-btn]').should('be.visible');
+    });
+
+
+    it('should show tooltips and remove tooltips as localStorage key is present', () => {
+        cy.loginAs(adminUser);
+
+        cy.get('[data-cy=side-panel-tree]').then(($el) => {
+            const el = $el.get(0) //native DOM element
+            expect(el._tippy).to.exist;
+        });
+
+        cy.get('[data-cy=confirmation-dialog-ok-btn]').click();
+
+        cy.get('[title=Notifications]').click();
+        cy.get('li').contains('Disable tooltips').click();
+
+        cy.get('[data-cy=side-panel-tree]').then(($el) => {
+            const el = $el.get(0) //native DOM element
+            expect(el._tippy).to.be.undefined;
+        });
+    });
+});
\ No newline at end of file
similarity index 99%
rename from services/workbench2/cypress/integration/collection.spec.js
rename to services/workbench2/cypress/e2e/collection.cy.js
index 54c570f7c4453fdafa3fe5bd4cd27795eadcb1e1..c5edf0e4f2d64edadbcab474ed747e9d54380b48 100644 (file)
@@ -27,11 +27,6 @@ describe("Collection panel tests", function () {
         downloadsFolder = Cypress.config("downloadsFolder");
     });
 
-    beforeEach(function () {
-        cy.clearCookies();
-        cy.clearLocalStorage();
-    });
-
     it('shows the appropriate buttons in the toolbar', () => {
 
         const msButtonTooltips = [
@@ -63,6 +58,7 @@ describe("Collection panel tests", function () {
             cy.get("[data-cy=side-panel-tree]").contains("Home Projects").click();
             cy.waitForDom()
             cy.get('[data-cy=data-table-row]').contains(name).should('exist').parent().parent().parent().parent().click()
+            cy.waitForDom()
             cy.get('[data-cy=multiselect-button]').should('have.length', msButtonTooltips.length)
             for (let i = 0; i < msButtonTooltips.length; i++) {
                 cy.get('[data-cy=multiselect-button]').eq(i).trigger('mouseover');
@@ -164,8 +160,6 @@ describe("Collection panel tests", function () {
         cy.get("[data-cy=form-dialog]").should("exist").and("contain", "Collection with the same name already exists");
     });
 
-    
-
     it("uses the property editor (from edit dialog) with vocabulary terms", function () {
         cy.createCollection(adminUser.token, {
             name: `Test collection ${Math.floor(Math.random() * 999999)}`,
@@ -209,8 +203,6 @@ describe("Collection panel tests", function () {
             });
     });
 
-    
-
     it("uses the editor (from details panel) with vocabulary terms", function () {
         cy.createCollection(adminUser.token, {
             name: `Test collection ${Math.floor(Math.random() * 999999)}`,
@@ -1319,8 +1311,7 @@ describe("Collection panel tests", function () {
 
                     cy.get("[data-cy=form-submit-btn]").click();
 
-                    cy.get("button[aria-label=Remove]").should("exist");
-                    cy.get("button[aria-label=Remove]").click({ multiple: true, force: true });
+                    cy.get("button[aria-label=Remove]").should("exist").click({ multiple: true});
 
                     cy.get("[data-cy=form-submit-btn]").should("not.exist");
 
similarity index 99%
rename from services/workbench2/cypress/integration/create-workflow.spec.js
rename to services/workbench2/cypress/e2e/create-workflow.cy.js
index e6469039348338873aef4df1337556ffe3397cd5..c8acc6734888a41da01a1fe4c21e3680a8bb0944 100644 (file)
@@ -19,11 +19,6 @@ describe('Create workflow tests', function () {
             );
     });
 
-    beforeEach(function () {
-        cy.clearCookies();
-        cy.clearLocalStorage();
-    });
-
     it('can create project with nested data', function () {
         cy.createGroup(adminUser.token, {
             group_class: "project",
similarity index 97%
rename from services/workbench2/cypress/integration/delete-multiple-files.spec.js
rename to services/workbench2/cypress/e2e/delete-multiple-files.cy.js
index b506fb3d6874b25e9fd16085d5da090e22b48be1..8086dd125dc70d798cf9d3f2cf594a0fa971541b 100644 (file)
@@ -19,11 +19,6 @@ describe('Multi-file deletion tests', function () {
             );
     });
 
-    beforeEach(function () {
-        cy.clearCookies();
-        cy.clearLocalStorage();
-    });
-
     it('deletes all files from root dir', function () {
         cy.createCollection(adminUser.token, {
             name: `Test collection ${Math.floor(Math.random() * 999999)}`,
similarity index 99%
rename from services/workbench2/cypress/integration/favorites.spec.js
rename to services/workbench2/cypress/e2e/favorites.cy.js
index db9a0d5f394072736dbff8c1af730182eacc6ee4..2898c22cba2c354080c5daa9226ddab61def0671 100644 (file)
@@ -21,11 +21,6 @@ describe('Favorites tests', function () {
             });
     });
 
-    beforeEach(function () {
-        cy.clearCookies()
-        cy.clearLocalStorage()
-    });
-
     it('creates and removes a public favorite', function () {
         cy.loginAs(adminUser);
 
similarity index 98%
rename from services/workbench2/cypress/integration/group-manage.spec.js
rename to services/workbench2/cypress/e2e/group-manage.cy.js
index c4731bb3c6bf01bdde33ccdb62cc57579c4531bc..4af9b40d804f541a96d3537d3983c0fb23c64fea 100644 (file)
@@ -36,7 +36,7 @@ describe('Group manage tests', function() {
         );
     });
 
-    it('creates a new group', function() {
+    it('creates a new group, adds users to it and changes permission level', function() {
         cy.loginAs(activeUser);
 
         // Navigate to Groups
@@ -59,9 +59,7 @@ describe('Group manage tests', function() {
         cy.get('[data-cy=groups-panel-data-explorer]').contains(groupName).click();
         cy.get('[data-cy=group-members-data-explorer]').contains(activeUser.user.full_name);
         cy.get('[data-cy=group-members-data-explorer]').contains(userThree.user.full_name);
-    });
 
-    it('adds users to the group', function() {
         // Add other user to the group
         cy.get('[data-cy=group-member-add]').click();
         cy.get('.sharing-dialog')
@@ -93,9 +91,7 @@ describe('Group manage tests', function() {
             .within(() => {
                 cy.contains('Manage');
             });
-    });
 
-    it('changes permission level of a member', function() {
         // Test change permission level
         cy.get('[data-cy=group-members-data-explorer]')
             .contains(otherUser.user.full_name)
@@ -213,6 +209,8 @@ describe('Group manage tests', function() {
     });
 
     it('removes users from the group', function() {
+        cy.loginAs(activeUser);
+
         cy.get('[data-cy=side-panel-tree]').contains('Groups').click();
         cy.get('[data-cy=groups-panel-data-explorer]').contains(groupName).click();
 
@@ -266,6 +264,8 @@ describe('Group manage tests', function() {
     });
 
     it('deletes the group', function() {
+        cy.loginAs(adminUser);
+
         // Navigate to Groups
         cy.get('[data-cy=side-panel-tree]').contains('Groups').click();
 
similarity index 98%
rename from services/workbench2/cypress/integration/login.spec.js
rename to services/workbench2/cypress/e2e/login.cy.js
index 79f73670a34b055141974ba7da8bec0eef1ea22c..6f2c91c37e27c82b91e5e6d2311e04efa26d0c19 100644 (file)
@@ -33,11 +33,6 @@ describe('Login tests', function() {
         randomUser.password = 'topsecret';
     })
 
-    beforeEach(function() {
-        cy.clearCookies()
-        cy.clearLocalStorage()
-    })
-
     it('shows login page on first visit', function() {
         cy.visit('/')
         cy.get('div#root').should('contain', 'Please log in')
similarity index 91%
rename from services/workbench2/cypress/integration/multiselect-toolbar.spec.js
rename to services/workbench2/cypress/e2e/multiselect-toolbar.cy.js
index ef503f7ef628874af301821f23a1ffc1385e39e0..ce3551bbd1e49936074e6166c734eadc1b38d5d9 100644 (file)
@@ -23,11 +23,6 @@ describe('Multiselect Toolbar Tests', () => {
             });
     });
 
-    beforeEach(function () {
-        cy.clearCookies();
-        cy.clearLocalStorage();
-    });
-
     it('exists in DOM in neutral state', () => {
         cy.loginAs(activeUser);
         cy.get('[data-cy=multiselect-toolbar]').should('exist');
similarity index 87%
rename from services/workbench2/cypress/integration/page-not-found.spec.js
rename to services/workbench2/cypress/e2e/page-not-found.cy.js
index 6eab27c827dc3b1d0ffbc6b4cd22ce0f79e9b6fb..7cffd1079be8f7909b436c60355f6c494f5040a4 100644 (file)
@@ -13,11 +13,6 @@ describe('Page not found tests', function() {
         );
     });
 
-    beforeEach(function() {
-        cy.clearCookies()
-        cy.clearLocalStorage()
-    });
-
     it('shows not found page', function() {
         // when
         cy.loginAs(adminUser);
@@ -34,8 +29,6 @@ describe('Page not found tests', function() {
         [
             '/projects/zzzzz-j7d0g-nonexistingproj',
             '/projects/zzzzz-tpzed-nonexistinguser',
-            '/processes/zzzzz-xvhdp-nonexistingproc',
-            '/collections/zzzzz-4zz18-nonexistingcoll'
         ].forEach(function(path) {
             // Using the slower loginAs() method to avoid bumping into dialog
             // dismissal issues that are not related to this test.
@@ -45,6 +38,17 @@ describe('Page not found tests', function() {
             cy.goToPath(path);
 
             // then
+            cy.get('[data-cy=default-view]').should('exist');
+        });
+
+        [
+            '/processes/zzzzz-xvhdp-nonexistingproc',
+            '/collections/zzzzz-4zz18-nonexistingcoll'
+        ].forEach(function(path) {
+            cy.loginAs(adminUser);
+
+            cy.goToPath(path);
+
             cy.get('[data-cy=not-found-view]').should('exist');
         });
     });
similarity index 92%
rename from services/workbench2/cypress/integration/process.spec.js
rename to services/workbench2/cypress/e2e/process.cy.js
index 7b4ccf9bd558ef19209cb97444d095a0851e7620..ca13e9f9e0b380313e849b5ee9560aa9526554ba 100644 (file)
@@ -25,11 +25,6 @@ describe("Process tests", function () {
             });
     });
 
-    beforeEach(function () {
-        cy.clearCookies();
-        cy.clearLocalStorage();
-    });
-
     function setupDockerImage(image_name) {
         // Create a collection that will be used as a docker image for the tests.
         cy.createCollection(adminUser.token, {
@@ -106,7 +101,7 @@ describe("Process tests", function () {
                 'Share',
                 'View details',
             ];
-    
+
             createContainerRequest(
                 activeUser,
                 `test_container_request ${Math.floor(Math.random() * 999999)}`,
@@ -123,6 +118,7 @@ describe("Process tests", function () {
                 cy.get("[data-cy=side-panel-tree]").contains("Home Projects").click();
                 cy.waitForDom()
                 cy.get('[data-cy=data-table-row]').contains(containerRequest.name).should('exist').parent().parent().parent().parent().click()
+                cy.waitForDom()
                 cy.get('[data-cy=multiselect-button]').should('have.length', msButtonTooltips.length)
                 for (let i = 0; i < msButtonTooltips.length; i++) {
                     cy.get('[data-cy=multiselect-button]').eq(i).trigger('mouseover');
@@ -336,6 +332,7 @@ describe("Process tests", function () {
             createContainerRequest(activeUser, crUncommitted, "arvados/jobs", ["echo", "hello world"], false, "Uncommitted").then(function (
                 containerRequest
             ) {
+                cy.loginAs(activeUser);
                 // Navigate to process and verify run / cancel button
                 cy.goToPath(`/processes/${containerRequest.uuid}`);
                 cy.waitForDom();
@@ -578,23 +575,23 @@ describe("Process tests", function () {
 
                 cy.getAll("@node-info", "@stdout", "@stderr").then(() => {
                     // Verify sorted main logs
-                    cy.get("[data-cy=process-logs] pre", { timeout: 7000 }).eq(0).should("contain", "2023-07-18T20:14:48.128642814Z first");
-                    cy.get("[data-cy=process-logs] pre").eq(1).should("contain", "2023-07-18T20:14:48.528642814Z second");
-                    cy.get("[data-cy=process-logs] pre").eq(2).should("contain", "2023-07-18T20:14:49.128642814Z third");
+                    cy.get("[data-cy=process-logs] span > p", { timeout: 7000 }).eq(0).should("contain", "2023-07-18T20:14:48.128642814Z first");
+                    cy.get("[data-cy=process-logs] span > p").eq(1).should("contain", "2023-07-18T20:14:48.528642814Z second");
+                    cy.get("[data-cy=process-logs] span > p").eq(2).should("contain", "2023-07-18T20:14:49.128642814Z third");
 
                     // Switch to All logs
                     cy.get("[data-cy=process-logs-filter]").click();
                     cy.get("body").contains("li", "All logs").click();
                     // Verify non-sorted lines were preserved
-                    cy.get("[data-cy=process-logs] pre").eq(0).should("contain", "3: nodeinfo 1");
-                    cy.get("[data-cy=process-logs] pre").eq(1).should("contain", "2: nodeinfo 2");
-                    cy.get("[data-cy=process-logs] pre").eq(2).should("contain", "1: nodeinfo 3");
-                    cy.get("[data-cy=process-logs] pre").eq(3).should("contain", "2: nodeinfo 4");
-                    cy.get("[data-cy=process-logs] pre").eq(4).should("contain", "3: nodeinfo 5");
+                    cy.get("[data-cy=process-logs] span > p").eq(0).should("contain", "3: nodeinfo 1");
+                    cy.get("[data-cy=process-logs] span > p").eq(1).should("contain", "2: nodeinfo 2");
+                    cy.get("[data-cy=process-logs] span > p").eq(2).should("contain", "1: nodeinfo 3");
+                    cy.get("[data-cy=process-logs] span > p").eq(3).should("contain", "2: nodeinfo 4");
+                    cy.get("[data-cy=process-logs] span > p").eq(4).should("contain", "3: nodeinfo 5");
                     // Verify sorted logs
-                    cy.get("[data-cy=process-logs] pre").eq(5).should("contain", "2023-07-18T20:14:48.128642814Z first");
-                    cy.get("[data-cy=process-logs] pre").eq(6).should("contain", "2023-07-18T20:14:48.528642814Z second");
-                    cy.get("[data-cy=process-logs] pre").eq(7).should("contain", "2023-07-18T20:14:49.128642814Z third");
+                    cy.get("[data-cy=process-logs] span > p").eq(5).should("contain", "2023-07-18T20:14:48.128642814Z first");
+                    cy.get("[data-cy=process-logs] span > p").eq(6).should("contain", "2023-07-18T20:14:48.528642814Z second");
+                    cy.get("[data-cy=process-logs] span > p").eq(7).should("contain", "2023-07-18T20:14:49.128642814Z third");
                 });
             });
         });
@@ -632,16 +629,16 @@ describe("Process tests", function () {
                     cy.get("[data-cy=process-logs-filter]").click();
                     cy.get("body").contains("li", "All logs").click();
                     // Verify sorted logs
-                    cy.get("[data-cy=process-logs] pre").eq(0).should("contain", "2023-07-18T20:14:46.000000000Z A out 1");
-                    cy.get("[data-cy=process-logs] pre").eq(1).should("contain", "2023-07-18T20:14:47.000000000Z Z err 1");
-                    cy.get("[data-cy=process-logs] pre").eq(2).should("contain", "2023-07-18T20:14:48.128642814Z B err 2");
-                    cy.get("[data-cy=process-logs] pre").eq(3).should("contain", "2023-07-18T20:14:48.128642814Z C err 3");
-                    cy.get("[data-cy=process-logs] pre").eq(4).should("contain", "2023-07-18T20:14:48.128642814Z Y err 4");
-                    cy.get("[data-cy=process-logs] pre").eq(5).should("contain", "2023-07-18T20:14:48.128642814Z Z err 5");
-                    cy.get("[data-cy=process-logs] pre").eq(6).should("contain", "2023-07-18T20:14:48.128642814Z A err 6");
-                    cy.get("[data-cy=process-logs] pre").eq(7).should("contain", "2023-07-18T20:14:48.128642814Z A out 2");
-                    cy.get("[data-cy=process-logs] pre").eq(8).should("contain", "2023-07-18T20:14:48.128642814Z X out 3");
-                    cy.get("[data-cy=process-logs] pre").eq(9).should("contain", "2023-07-18T20:14:48.128642814Z A out 4");
+                    cy.get("[data-cy=process-logs] span > p").eq(0).should("contain", "2023-07-18T20:14:46.000000000Z A out 1");
+                    cy.get("[data-cy=process-logs] span > p").eq(1).should("contain", "2023-07-18T20:14:47.000000000Z Z err 1");
+                    cy.get("[data-cy=process-logs] span > p").eq(2).should("contain", "2023-07-18T20:14:48.128642814Z B err 2");
+                    cy.get("[data-cy=process-logs] span > p").eq(3).should("contain", "2023-07-18T20:14:48.128642814Z C err 3");
+                    cy.get("[data-cy=process-logs] span > p").eq(4).should("contain", "2023-07-18T20:14:48.128642814Z Y err 4");
+                    cy.get("[data-cy=process-logs] span > p").eq(5).should("contain", "2023-07-18T20:14:48.128642814Z Z err 5");
+                    cy.get("[data-cy=process-logs] span > p").eq(6).should("contain", "2023-07-18T20:14:48.128642814Z A err 6");
+                    cy.get("[data-cy=process-logs] span > p").eq(7).should("contain", "2023-07-18T20:14:48.128642814Z A out 2");
+                    cy.get("[data-cy=process-logs] span > p").eq(8).should("contain", "2023-07-18T20:14:48.128642814Z X out 3");
+                    cy.get("[data-cy=process-logs] span > p").eq(9).should("contain", "2023-07-18T20:14:48.128642814Z A out 4");
                 });
             });
         });
@@ -665,15 +662,60 @@ describe("Process tests", function () {
 
                 cy.getAll("@stdout").then(() => {
                     // Verify first 64KB and snipline
-                    cy.get("[data-cy=process-logs] pre", { timeout: 7000 })
+                    cy.get("[data-cy=process-logs] span > p", { timeout: 7000 })
                         .eq(0)
                         .should("contain", "X".repeat(63999) + "_\n" + SNIPLINE);
                     // Verify last 64KB
-                    cy.get("[data-cy=process-logs] pre")
+                    cy.get("[data-cy=process-logs] span > p")
                         .eq(1)
                         .should("contain", "_" + "X".repeat(63999));
                     // Verify none of the Os got through
-                    cy.get("[data-cy=process-logs] pre").should("not.contain", "O");
+                    cy.get("[data-cy=process-logs] span > p").should("not.contain", "O");
+                });
+            });
+        });
+
+        it("correctly break long lines when no obvious line separation exists", function () {
+            function randomString(length) {
+                const chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789';
+                let res = '';
+                for (let i = 0; i < length; i++) {
+                    res += chars.charAt(Math.floor(Math.random() * chars.length));
+                }
+                return res;
+            }
+
+            const logLinesQty = 10;
+            const logLines = [];
+            for (let i = 0; i < logLinesQty; i++) {
+                const length = Math.floor(Math.random() * 500) + 500;
+                logLines.push(randomString(length));
+            }
+
+            createContainerRequest(activeUser, "test_container_request", "arvados/jobs", ["echo", "hello world"], false, "Committed").then(function (
+                containerRequest
+            ) {
+                cy.appendLog(adminUser.token, containerRequest.uuid, "stdout.txt", logLines).as("stdoutLogs");
+
+                cy.getAll("@stdoutLogs").then(function () {
+                    cy.loginAs(activeUser);
+                    cy.goToPath(`/processes/${containerRequest.uuid}`);
+                    // Select 'stdout' log filter
+                    cy.get("[data-cy=process-logs-filter]").click();
+                    cy.get("body").contains("li", "stdout").click();
+                    cy.get("[data-cy=process-logs] span > p")
+                        .should('have.length', logLinesQty)
+                        .each($p => {
+                            expect($p.text().length).to.be.greaterThan(499);
+
+                            // This looks like an ugly hack, but I was not able
+                            // to get [client|scroll]Width attributes through
+                            // the usual Cypress methods.
+                            const parentClientWidth = $p[0].parentElement.clientWidth;
+                            const parentScrollWidth = $p[0].parentElement.scrollWidth
+                            // Scrollbar should not be visible
+                            expect(parentClientWidth).to.be.eq(parentScrollWidth);
+                        });
                 });
             });
         });
@@ -1363,7 +1405,7 @@ describe("Process tests", function () {
             cy.getAll("@containerRequest", "@testOutputCollection").then(function ([containerRequest, testOutputCollection]) {
                 cy.goToPath(`/processes/${containerRequest.uuid}`);
                 cy.get("[data-cy=process-io-card] h6")
-                    .contains("Inputs")
+                    .contains("Input Parameters")
                     .parents("[data-cy=process-io-card]")
                     .within(() => {
                         verifyIOParameter("input_file", null, "Label Description", "input1.tar", "00000000000000000000000000000000+01");
@@ -1398,7 +1440,7 @@ describe("Process tests", function () {
                         verifyIOParameter("input_file_url", null, null, "http://example.com/index.html");
                     });
                 cy.get("[data-cy=process-io-card] h6")
-                    .contains("Outputs")
+                    .contains("Output Parameters")
                     .parents("[data-cy=process-io-card]")
                     .within(ctx => {
                         cy.get(ctx).scrollIntoView();
@@ -1498,7 +1540,7 @@ describe("Process tests", function () {
                 cy.waitForDom();
 
                 cy.get("[data-cy=process-io-card] h6")
-                    .contains("Inputs")
+                    .contains("Input Parameters")
                     .parents("[data-cy=process-io-card]")
                     .within(() => {
                         cy.wait(2000);
@@ -1508,7 +1550,7 @@ describe("Process tests", function () {
                         });
                     });
                 cy.get("[data-cy=process-io-card] h6")
-                    .contains("Outputs")
+                    .contains("Output Parameters")
                     .parents("[data-cy=process-io-card]")
                     .within(() => {
                         cy.get("tbody tr").each(item => {
similarity index 91%
rename from services/workbench2/cypress/integration/project.spec.js
rename to services/workbench2/cypress/e2e/project.cy.js
index e6185c108e94c454970ad2605d83f3bc8b4637a2..4aeb59bc75ef4d8cfe79a6e187e3e12411fdcf5e 100644 (file)
@@ -23,11 +23,6 @@ describe("Project tests", function () {
             });
     });
 
-    beforeEach(function () {
-        cy.clearCookies();
-        cy.clearLocalStorage();
-    });
-
     it("creates a new project with multiple properties", function () {
         const projName = `Test project (${Math.floor(999999 * Math.random())})`;
         cy.loginAs(activeUser);
@@ -44,20 +39,22 @@ describe("Project tests", function () {
         cy.get("[data-cy=form-dialog]").should("not.contain", "Color: Magenta");
         cy.get("[data-cy=resource-properties-form]").within(() => {
             cy.get("[data-cy=property-field-key]").within(() => {
-                cy.get("input").type("Color");
+                cy.get("input").type("Color").blur();
             });
             cy.get("[data-cy=property-field-value]").within(() => {
-                cy.get("input").type("Magenta");
+                cy.get("input").type("Magenta").blur();
             });
-            cy.root().submit();
+            cy.get("[data-cy=property-add-btn]").click();
+
             cy.get("[data-cy=property-field-value]").within(() => {
-                cy.get("input").type("Pink");
+                cy.get("input").type("Pink").blur();
             });
-            cy.root().submit();
+            cy.get("[data-cy=property-add-btn]").click();
+
             cy.get("[data-cy=property-field-value]").within(() => {
-                cy.get("input").type("Yellow");
+                cy.get("input").type("Yellow").blur();
             });
-            cy.root().submit();
+            cy.get("[data-cy=property-add-btn]").click();
         });
         // Confirm proper vocabulary labels are displayed on the UI.
         cy.get("[data-cy=form-dialog]").should("contain", "Color: Magenta");
@@ -100,14 +97,14 @@ describe("Project tests", function () {
         // Add another property
         cy.get("[data-cy=resource-properties-form]").within(() => {
             cy.get("[data-cy=property-field-key]").within(() => {
-                cy.get("input").type("Animal");
+                cy.get("input").type("Animal").blur();
             });
             cy.get("[data-cy=property-field-value]").within(() => {
-                cy.get("input").type("Dog");
+                cy.get("input").type("Dog").blur();
             });
-            cy.root().submit();
+            cy.get("[data-cy=property-add-btn]").click();
         });
-        cy.get("[data-cy=form-submit-btn]").click({ force: true });
+        cy.get("[data-cy=form-submit-btn]").click();
         // Reopen edit via breadcrumbs and verify properties
         cy.get("[data-cy=breadcrumbs]").contains(projName).rightclick();
         cy.get("[data-cy=context-menu]").contains("Edit").click();
@@ -180,6 +177,45 @@ describe("Project tests", function () {
         verifyProjectDescription(projName, null);
     });
 
+    it("creates a project from the context menu in the correct subfolder", function () {
+        const parentProjName = `Test project (${Math.floor(999999 * Math.random())})`;
+        const childProjName = `Test project (${Math.floor(999999 * Math.random())})`;
+        cy.loginAs(activeUser);
+
+        // Create project
+        cy.get("[data-cy=side-panel-button]").click();
+        cy.get("[data-cy=side-panel-new-project]").click();
+        cy.get("[data-cy=form-dialog]")
+            .should("contain", "New Project")
+            .within(() => {
+                cy.get("[data-cy=name-field]").within(() => {
+                    cy.get("input").type(parentProjName);
+                });
+            });
+        cy.get("[data-cy=form-submit-btn]").click();
+        cy.get("[data-cy=form-dialog]").should("not.exist");
+        cy.go('back')
+
+        // Create subproject from context menu
+        cy.get("[data-cy=project-panel] tbody tr").contains(parentProjName).rightclick({ force: true });
+        cy.get("[data-cy=context-menu]").contains("New project").click();
+        cy.get("[data-cy=form-dialog]")
+            .should("contain", "New Project")
+            .within(() => {
+                cy.get("[data-cy=name-field]").within(() => {
+                    cy.get("input").type(childProjName);
+                });
+            });
+        cy.get("[data-cy=form-submit-btn]").click();
+        cy.get("[data-cy=form-dialog]").should("not.exist");
+
+        // open details panel and check 'owner' field
+        cy.get("[data-cy=additional-info-icon]").click();
+        cy.waitForDom();
+        cy.get("[data-cy=details-panel-owner]").contains(parentProjName).should("be.visible")
+        cy.get("[data-cy=additional-info-icon]").click();
+    });
+
     it('shows the appropriate buttons in the multiselect toolbar', () => {
 
         const msButtonTooltips = [
@@ -213,6 +249,7 @@ describe("Project tests", function () {
         cy.go('back')
 
         cy.get('[data-cy=data-table-row]').contains(projName).should('exist').parent().parent().parent().click()
+        cy.waitForDom()
         cy.get('[data-cy=multiselect-button]').should('have.length', msButtonTooltips.length)
         for (let i = 0; i < msButtonTooltips.length; i++) {
             cy.get('[data-cy=multiselect-button]').eq(i).trigger('mouseover');
@@ -577,7 +614,9 @@ describe("Project tests", function () {
         });
     });
 
-    it("copies project URL to clipboard", () => {
+    // The following test is enabled on Electron only, as Chromium and Firefox
+    // require permissions to access the clipboard.
+    it("copies project URL to clipboard", { browser: 'electron' }, () => {
         const projectName = `Test project (${Math.floor(999999 * Math.random())})`;
 
         cy.loginAs(activeUser);
similarity index 98%
rename from services/workbench2/cypress/integration/search.spec.js
rename to services/workbench2/cypress/e2e/search.cy.js
index d8aa35d3d2d4b6398282350f9d68e88ccb5a2030..1bf2b5c3efb4b83a8bdd8324f751ffb67d7b53c5 100644 (file)
@@ -23,11 +23,6 @@ describe("Search tests", function () {
             });
     });
 
-    beforeEach(function () {
-        cy.clearCookies();
-        cy.clearLocalStorage();
-    });
-
     it("can search for old collection versions", function () {
         const colName = `Versioned Collection ${Math.floor(Math.random() * Math.floor(999999))}`;
         let colUuid = "";
@@ -165,7 +160,9 @@ describe("Search tests", function () {
         });
     });
 
-    it("shows search context menu", function () {
+    // The following test is enabled on Electron only, as Chromium and Firefox
+    // require permissions to access the clipboard.
+    it("shows search context menu", { browser: 'electron' } , function () {
         const colName = `Home Collection ${Math.floor(Math.random() * Math.floor(999999))}`;
         const federatedColName = `Federated Collection ${Math.floor(Math.random() * Math.floor(999999))}`;
         const federatedColUuid = "xxxxx-4zz18-000000000000000";
similarity index 98%
rename from services/workbench2/cypress/integration/sharing.spec.js
rename to services/workbench2/cypress/e2e/sharing.cy.js
index f742d09062a4f0b95fe8be8946fb20464a0b8ef7..05a7d470bf6e1b1d7ed048dc27c46c60d27cd9aa 100644 (file)
@@ -21,11 +21,6 @@ describe('Sharing tests', function () {
             });
     })
 
-    beforeEach(function () {
-        cy.clearCookies()
-        cy.clearLocalStorage()
-    });
-
     it('can create and delete sharing URLs on collections', () => {
         const collName = 'shared-collection ' + new Date().getTime();
         cy.createCollection(adminUser.token, {
similarity index 98%
rename from services/workbench2/cypress/integration/side-panel.spec.js
rename to services/workbench2/cypress/e2e/side-panel.cy.js
index d6ac754d0a4d0da815b51c7ee4ef7564b43b1e9d..6d6b19bfab2dd523bbfb5d2198f10950c1c57f24 100644 (file)
@@ -23,11 +23,6 @@ describe('Side panel tests', function() {
         );
     })
 
-    beforeEach(function() {
-        cy.clearCookies()
-        cy.clearLocalStorage()
-    })
-
     it('enables the +NEW side panel button on users home project', function() {
         cy.loginAs(activeUser);
         cy.get('[data-cy=side-panel-button]')
similarity index 99%
rename from services/workbench2/cypress/integration/workflow.spec.js
rename to services/workbench2/cypress/e2e/workflow.cy.js
index 844e87d8deb64b1969014296330a17312623051f..c6c49ee34325b294b2f0e17bcfe09058cf562401 100644 (file)
@@ -217,13 +217,13 @@ describe('Registered workflow panel tests', function() {
                     cy.get('[data-cy=registered-workflow-info-panel')
                         .should('contain', 'gitCommit: 9b091ed7e0bef98b3312e9478c52b89ba25792de')
 
-                    cy.get('[data-cy=process-io-card] h6').contains('Inputs')
+                    cy.get('[data-cy=process-io-card] h6').contains('Input Parameters')
                         .parents('[data-cy=process-io-card]').within(() => {
                             verifyIOParameter('file1', null, '', '', '');
                             verifyIOParameter('numbering', null, '', '', '');
                             verifyIOParameter('args.py', null, '', 'args.py', 'de738550734533c5027997c87dc5488e+53');
                         });
-                    cy.get('[data-cy=process-io-card] h6').contains('Outputs')
+                    cy.get('[data-cy=process-io-card] h6').contains('Output Parameters')
                         .parents('[data-cy=process-io-card]').within(() => {
                             verifyIOParameter('args', null, '', '', '');
                         });
diff --git a/services/workbench2/cypress/integration/banner-tooltip.spec.js b/services/workbench2/cypress/integration/banner-tooltip.spec.js
deleted file mode 100644 (file)
index 295bc38..0000000
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-describe('Banner / tooltip tests', function () {
-    let activeUser;
-    let adminUser;
-    let collectionUUID;
-
-    before(function () {
-        // Only set up common users once. These aren't set up as aliases because
-        // aliases are cleaned up after every test. Also it doesn't make sense
-        // to set the same users on beforeEach() over and over again, so we
-        // separate a little from Cypress' 'Best Practices' here.
-        cy.getUser('admin', 'Admin', 'User', true, true)
-            .as('adminUser').then(function () {
-                adminUser = this.adminUser;
-            }
-            );
-        cy.getUser('collectionuser1', 'Collection', 'User', false, true)
-            .as('activeUser').then(function () {
-                activeUser = this.activeUser;
-            });
-            cy.on('uncaught:exception', (err, runnable) => {console.error(err)});
-    });
-
-    beforeEach(function () {
-        cy.clearCookies();
-        cy.clearLocalStorage();
-    });
-
-    it('should re-show the banner', () => {
-        setupTheEnvironment();
-
-        cy.loginAs(adminUser);
-
-        cy.wait(2000);
-
-        cy.get('[data-cy=confirmation-dialog-ok-btn]').click();
-
-        cy.get('[title=Notifications]').click();
-        cy.get('li').contains('Restore Banner').click();
-
-        cy.wait(2000);
-
-        cy.get('[data-cy=confirmation-dialog-ok-btn]').should('be.visible');
-    });
-
-
-    it('should show tooltips and remove tooltips as localStorage key is present', () => {
-        setupTheEnvironment();
-
-        cy.loginAs(adminUser);
-
-        cy.wait(2000);
-
-        cy.get('[data-cy=side-panel-tree]').then(($el) => {
-            const el = $el.get(0) //native DOM element
-            expect(el._tippy).to.exist;
-        });
-
-        cy.wait(2000);
-
-        cy.get('[data-cy=confirmation-dialog-ok-btn]').click();
-
-        cy.get('[title=Notifications]').click();
-        cy.get('li').contains('Disable tooltips').click();
-
-        cy.get('[data-cy=side-panel-tree]').then(($el) => {
-            const el = $el.get(0) //native DOM element
-            expect(el._tippy).to.be.undefined;
-        });
-    });
-
-    const setupTheEnvironment = () => {
-            cy.createCollection(adminUser.token, {
-                name: `BannerTooltipTest${Math.floor(Math.random() * 999999)}`,
-                owner_uuid: adminUser.user.uuid,
-            }).as('bannerCollection');
-
-            cy.getAll('@bannerCollection')
-                .then(function ([bannerCollection]) {
-
-                    collectionUUID=bannerCollection.uuid;
-
-                    cy.loginAs(adminUser);
-
-                    cy.goToPath(`/collections/${bannerCollection.uuid}`);
-
-                    cy.get('[data-cy=upload-button]').click();
-
-                    cy.fixture('files/banner.html').as('banner');
-                    cy.fixture('files/tooltips.txt').as('tooltips');
-
-                    cy.getAll('@banner', '@tooltips')
-                        .then(([banner, tooltips]) => {
-                            cy.get('[data-cy=drag-and-drop]').upload(banner, 'banner.html', false);
-                            cy.get('[data-cy=drag-and-drop]').upload(tooltips, 'tooltips.json', false);
-                        });
-
-                    cy.get('[data-cy=form-submit-btn]').click();
-                    cy.get('[data-cy=form-submit-btn]').should('not.exist');
-                    cy.get('[data-cy=collection-files-right-panel]')
-                        .contains('banner.html').should('exist');
-                    cy.get('[data-cy=collection-files-right-panel]')
-                        .contains('tooltips.json').should('exist');
-
-                        cy.intercept({ method: 'GET', url: '**/arvados/v1/config?nocache=*' }, (req) => {
-                            req.reply((res) => {
-                                res.body.Workbench.BannerUUID = collectionUUID;
-                            });
-                        });
-                });
-    }
-});
index 8fe0b06aafebb6adb3aca33f94e701b3b515213c..da7300a43014cbefde2f3edd2842fb61d9aa8c87 100644 (file)
@@ -350,8 +350,11 @@ Cypress.Commands.add("updateResource", (token, suffix, uuid, data) => {
 });
 
 Cypress.Commands.add("loginAs", user => {
+    // This shouldn't be necessary unless we need to call loginAs multiple times
+    // in the same test.
     cy.clearCookies();
-    cy.clearLocalStorage();
+    cy.clearAllLocalStorage();
+    cy.clearAllSessionStorage();
     cy.visit(`/token/?api_token=${user.token}`);
     // Use waitUntil to avoid permafail race conditions with window.location being undefined
     cy.waitUntil(() => cy.window().then(win =>
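
All of the beforeEach(clearCookies/clearLocalStorage) blocks deleted from the spec files above become redundant with this change: loginAs() now wipes cookies, localStorage and sessionStorage itself before visiting the token URL. A hypothetical spec fragment showing the resulting pattern, assuming adminUser was populated in a before() hook as in the real specs:

```
describe('Some panel', function () {
    beforeEach(function () {
        // No manual clearCookies()/clearLocalStorage() needed here:
        // loginAs() resets cookies, localStorage and sessionStorage.
        cy.loginAs(adminUser);
    });

    it('renders the side panel', function () {
        cy.get('[data-cy=side-panel-tree]').should('exist');
    });
});
```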
index 4942ca0a5798fa26d637929a5270f6da3a098cd8..fa4266191aa0c3f8998cc92f879ba4195b6aef4a 100644 (file)
@@ -2,10 +2,10 @@
 #
 # SPDX-License-Identifier: AGPL-3.0
 
-FROM node:12.22.3-buster
+FROM node:12.22.12-bullseye
 LABEL maintainer="Arvados Package Maintainers <packaging@arvados.org>"
 
-RUN echo deb http://deb.debian.org/debian buster-backports main >> /etc/apt/sources.list.d/backports.list
+RUN echo deb http://deb.debian.org/debian bullseye-backports main >> /etc/apt/sources.list.d/backports.list
 RUN apt-get update && \
     apt-get -yq --no-install-recommends -o Acquire::Retries=6 install \
     libsecret-1-0 libsecret-1-dev rpm ruby ruby-dev rubygems build-essential \
@@ -13,13 +13,17 @@ RUN apt-get update && \
     apt-get clean
 
 RUN /usr/bin/gem install --no-document fpm
-RUN cd /usr/src && git clone git://git.arvados.org/arvados.git && \
-    cd arvados && \
+WORKDIR /usr/src/arvados
+COPY . .
+RUN cd /usr/src/arvados && \
+    test -d cmd/arvados-server || \
+      (echo "ERROR: build context must be an Arvados repository" && false) && \
     GO_VERSION=$(grep 'goversion =' lib/install/deps.go |awk -F'"' '{print $2}') && \
+    ARCH=$(dpkg --print-architecture) && \
     echo $GO_VERSION && \
     cd /usr/src && \
-    wget https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz && \
-    tar xzf go${GO_VERSION}.linux-amd64.tar.gz && \
+    wget https://golang.org/dl/go${GO_VERSION}.linux-${ARCH}.tar.gz && \
+    tar xzf go${GO_VERSION}.linux-${ARCH}.tar.gz && \
     ln -s /usr/src/go/bin/go /usr/local/bin/go-${GO_VERSION} && \
     ln -s /usr/src/go/bin/gofmt /usr/local/bin/gofmt-${GO_VERSION} && \
     ln -s /usr/local/bin/go-${GO_VERSION} /usr/local/bin/go && \
index c6e2d6bcda6bf81eec36107e42194710fe7d51c5..e02fa6b956df60b7a7a8daeba75fda069c72a90f 100644 (file)
@@ -62,6 +62,7 @@
     "react-dropzone": "5.1.1",
     "react-highlight-words": "0.14.0",
     "react-idle-timer": "4.3.6",
+    "react-loader-spinner": "^6.1.6",
     "react-redux": "5.0.7",
     "react-router": "4.3.1",
     "react-router-dom": "4.3.1",
@@ -85,7 +86,7 @@
     "uuid": "3.3.2"
   },
   "scripts": {
-    "start": "react-scripts start",
+    "start": "BROWSER=none react-scripts start",
     "build": "REACT_APP_VERSION=$VERSION REACT_APP_BUILD_NUMBER=$BUILD_NUMBER REACT_APP_GIT_COMMIT=$GIT_COMMIT react-scripts build",
     "build-local": "react-scripts build",
     "test": "CI=true react-scripts test",
     "@types/sinon": "7.5",
     "@types/uuid": "3.4.4",
     "axios-mock-adapter": "1.17.0",
-    "cypress": "6.3.0",
+    "cypress": "^13.6.6",
     "cypress-wait-until": "^3.0.1",
     "enzyme": "3.11.0",
     "enzyme-adapter-react-16": "1.15.6",
index 27e46d584962c8d3e1cb1ca536b21ab1b4577ecf..ba710bc783e9ca6368c5355d042a3930e677af8b 100644 (file)
@@ -84,7 +84,6 @@ interface DataExplorerDataProps<T> {
     defaultViewIcon?: IconType;
     defaultViewMessages?: string[];
     working?: boolean;
-    currentRefresh?: string;
     currentRoute?: string;
     hideColumnSelector?: boolean;
     paperProps?: PaperProps;
@@ -97,6 +96,7 @@ interface DataExplorerDataProps<T> {
     elementPath?: string;
     isMSToolbarVisible: boolean;
     checkedList: TCheckedList;
+    isNotFound: boolean;
 }
 
 interface DataExplorerActionProps<T> {
@@ -120,52 +120,13 @@ type DataExplorerProps<T> = DataExplorerDataProps<T> & DataExplorerActionProps<T
 
 export const DataExplorer = withStyles(styles)(
     class DataExplorerGeneric<T> extends React.Component<DataExplorerProps<T>> {
-        state = {
-            showLoading: false,
-            prevRefresh: "",
-            prevRoute: "",
-        };
 
         multiSelectToolbarInTitle = !this.props.title && !this.props.progressBar;
 
-        componentDidUpdate(prevProps: DataExplorerProps<T>) {
-            const currentRefresh = this.props.currentRefresh || "";
-            const currentRoute = this.props.currentRoute || "";
-
-            if (currentRoute !== this.state.prevRoute) {
-                // Component already mounted, but the user comes from a route change,
-                // like browsing through a project hierarchy.
-                this.setState({
-                    showLoading: this.props.working,
-                    prevRoute: currentRoute,
-                });
-            }
-
-            if (currentRefresh !== this.state.prevRefresh) {
-                // Component already mounted, but the user just clicked the
-                // refresh button.
-                this.setState({
-                    showLoading: this.props.working,
-                    prevRefresh: currentRefresh,
-                });
-            }
-            if (this.state.showLoading && !this.props.working) {
-                this.setState({
-                    showLoading: false,
-                });
-            }
-        }
-
         componentDidMount() {
             if (this.props.onSetColumns) {
                 this.props.onSetColumns(this.props.columns);
             }
-            // Component just mounted, so we need to show the loading indicator.
-            this.setState({
-                showLoading: this.props.working,
-                prevRefresh: this.props.currentRefresh || "",
-                prevRoute: this.props.currentRoute || "",
-            });
         }
 
         render() {
@@ -207,6 +168,7 @@ export const DataExplorer = withStyles(styles)(
                 toggleMSToolbar,
                 setCheckedListOnStore,
                 checkedList,
+                working,
             } = this.props;
             return (
                 <Paper
@@ -314,7 +276,6 @@ export const DataExplorer = withStyles(styles)(
                                 onFiltersChange={onFiltersChange}
                                 onSortToggle={onSortToggle}
                                 extractKey={extractKey}
-                                working={this.state.showLoading}
                                 defaultViewIcon={defaultViewIcon}
                                 defaultViewMessages={defaultViewMessages}
                                 currentItemUuid={currentItemUuid}
@@ -322,6 +283,8 @@ export const DataExplorer = withStyles(styles)(
                                 toggleMSToolbar={toggleMSToolbar}
                                 setCheckedListOnStore={setCheckedListOnStore}
                                 checkedList={checkedList}
+                                working={working}
+                                isNotFound={this.props.isNotFound}
                             />
                         </Grid>
                         <Grid
index 880868bdf8d54c4d0b24c198b07bcea7a66f3a0a..87d3efc2353904cd1fc2968d6827136308834a7d 100644 (file)
@@ -124,7 +124,9 @@ describe("<DataTable />", () => {
                 setCheckedListOnStore={jest.fn()}
             />
         );
-        expect(dataTable.find(TableBody).find(TableCell).last().key()).toBe("column-1-key");
+        setTimeout(() => {
+            expect(dataTable.find(TableBody).find(TableCell).last().key()).toBe("column-1-key");
+        }, 1000);
     });
 
     it("renders items", () => {
@@ -155,8 +157,10 @@ describe("<DataTable />", () => {
                 setCheckedListOnStore={jest.fn()}
             />
         );
-        expect(dataTable.find(TableBody).find(Typography).last().text()).toBe("item 1");
-        expect(dataTable.find(TableBody).find(Button).last().text()).toBe("item 1");
+        setTimeout(() => {
+            expect(dataTable.find(TableBody).find(Typography).last().text()).toBe("item 1");
+            expect(dataTable.find(TableBody).find(Button).last().text()).toBe("item 1");
+        }, 1000);
     });
 
     it("passes sorting props to <TableSortLabel />", () => {
index d3bbab5077f863a96010f3e248c1fbda232077e8..7b787994577305ea5725b80fcfd942d42f34f360 100644 (file)
@@ -24,11 +24,12 @@ import { DataTableFilters } from "../data-table-filters/data-table-filters-tree"
 import { DataTableMultiselectPopover } from "../data-table-multiselect-popover/data-table-multiselect-popover";
 import { DataTableFiltersPopover } from "../data-table-filters/data-table-filters-popover";
 import { countNodes, getTreeDirty } from "models/tree";
-import { IconType, PendingIcon } from "components/icon/icon";
+import { IconType } from "components/icon/icon";
 import { SvgIconProps } from "@material-ui/core/SvgIcon";
 import ArrowDownwardIcon from "@material-ui/icons/ArrowDownward";
 import { createTree } from "models/tree";
 import { DataTableMultiselectOption } from "../data-table-multiselect-popover/data-table-multiselect-popover";
+import { PendingIcon } from "components/icon/icon";
 
 export type DataColumns<I, R> = Array<DataColumn<I, R>>;
 
@@ -54,6 +55,7 @@ export interface DataTableDataProps<I> {
     toggleMSToolbar: (isVisible: boolean) => void;
     setCheckedListOnStore: (checkedList: TCheckedList) => void;
     checkedList: TCheckedList;
+    isNotFound?: boolean;
 }
 
 type CssRules =
@@ -69,8 +71,7 @@ type CssRules =
     | "tableCell"
     | "arrow"
     | "arrowButton"
-    | "tableCellWorkflows"
-    | "loader";
+    | "tableCellWorkflows";
 
 const styles: StyleRulesCallback<CssRules> = (theme: Theme) => ({
     root: {
@@ -83,11 +84,6 @@ const styles: StyleRulesCallback<CssRules> = (theme: Theme) => ({
     tableBody: {
         background: theme.palette.background.paper,
     },
-    loader: {
-        left: "50%",
-        marginLeft: "-84px",
-        position: "absolute",
-    },
     noItemsInfo: {
         textAlign: "center",
         padding: theme.spacing.unit,
@@ -146,6 +142,7 @@ export type TCheckedList = Record<string, boolean>;
 
 type DataTableState = {
     isSelected: boolean;
+    isLoaded: boolean;
 };
 
 type DataTableProps<T> = DataTableDataProps<T> & WithStyles<CssRules>;
@@ -154,6 +151,7 @@ export const DataTable = withStyles(styles)(
     class Component<T> extends React.Component<DataTableProps<T>> {
         state: DataTableState = {
             isSelected: false,
+            isLoaded: false,
         };
 
         componentDidMount(): void {
@@ -171,6 +169,12 @@ export const DataTable = withStyles(styles)(
             if (prevProps.currentRoute !== this.props.currentRoute) {
                 this.initializeCheckedList([])
             }
+            if(prevProps.working === true && this.props.working === false) {
+                this.setState({ isLoaded: true });
+            }
+            if((this.props.items.length > 0) && !this.state.isLoaded) {
+                this.setState({ isLoaded: true });
+            }
         }
 
         componentWillUnmount(): void {
@@ -291,7 +295,8 @@ export const DataTable = withStyles(styles)(
         };
 
         render() {
-            const { items, classes, working, columns } = this.props;
+            const { items, classes, columns, isNotFound } = this.props;
+            const { isLoaded } = this.state;
             if (columns[0].name === this.checkBoxColumn.name) columns.shift();
             columns.unshift(this.checkBoxColumn);
             return (
@@ -301,31 +306,43 @@ export const DataTable = withStyles(styles)(
                             <TableHead>
                                 <TableRow>{this.mapVisibleColumns(this.renderHeadCell)}</TableRow>
                             </TableHead>
-                            <TableBody className={classes.tableBody}>{!working && items.map(this.renderBodyRow)}</TableBody>
+                            <TableBody className={classes.tableBody}>{(isLoaded && !isNotFound) && items.map(this.renderBodyRow)}</TableBody>
                         </Table>
-                        {!!working && (
-                            <div className={classes.loader}>
-                                <DataTableDefaultView
-                                    icon={PendingIcon}
-                                    messages={["Loading data, please wait."]}
-                                />
-                            </div>
-                        )}
-                        {items.length === 0 && !working && this.renderNoItemsPlaceholder(this.props.columns)}
+                        {(!isLoaded || isNotFound || items.length === 0) && this.renderNoItemsPlaceholder(this.props.columns)}
                     </div>
                 </div>
             );
         }
 
         renderNoItemsPlaceholder = (columns: DataColumns<T, any>) => {
+            const { isLoaded } = this.state;
+            const { working, isNotFound } = this.props;
             const dirty = columns.some(column => getTreeDirty("")(column.filters));
-            return (
-                <DataTableDefaultView
-                    icon={this.props.defaultViewIcon}
-                    messages={this.props.defaultViewMessages}
-                    filtersApplied={dirty}
-                />
-            );
+            if (isNotFound && isLoaded) {
+                return (
+                    <DataTableDefaultView
+                        icon={this.props.defaultViewIcon}
+                        messages={["No items found"]}
+                    />
+                );
+            } else if (isLoaded === false || working === true) {
+                return (
+                    <DataTableDefaultView
+                        icon={PendingIcon}
+                        messages={["Loading data, please wait"]}
+                    />
+                );
+            } else {
+                // isLoaded && !working && !isNotFound
+                return (
+                    <DataTableDefaultView
+                        icon={this.props.defaultViewIcon}
+                        messages={this.props.defaultViewMessages}
+                        filtersApplied={dirty}
+                    />
+                );
+            }
         };
 
         renderHeadCell = (column: DataColumn<T, any>, index: number) => {
index 5acea6193be906ddb9ec3c865f1d9e80ea068b2f..588dcfa5cce9c7f589a97772ac5c9425d08c4656 100644 (file)
@@ -41,6 +41,7 @@ export const DefaultView = withStyles(styles)(
             {Icon && <Icon className={classnames([classes.icon, classIcon])} />}
             {messages.map((msg: string, index: number) => {
                 return <Typography key={index}
+                    data-cy='default-view'
                     className={classnames([classes.message, classMessage])}>{msg}</Typography>;
             })}
         </Typography>
index 92d31b0b8e278b924bfc7fb2d25a61f5a497de1d..d965b60f5b591b6f29fb98534643943001be1179 100644 (file)
@@ -25,7 +25,8 @@ const styles: StyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({
     label: {
         boxSizing: 'border-box',
         color: theme.palette.grey["600"],
-        width: '100%'
+        width: '100%',
+        marginTop: "0.4em",
     },
     value: {
         boxSizing: 'border-box',
@@ -115,7 +116,7 @@ interface DetailsAttributeComponentProps {
 
 export const DetailsAttributeComponent = withStyles(styles)(
     (props: DetailsAttributeDataProps & WithStyles<CssRules> & DetailsAttributeComponentProps) =>
-        <Typography component="div" className={props.classes.attribute}>
+        <Typography component="div" className={props.classes.attribute} data-cy={`details-panel-${props.label.toLowerCase()}`}>
             <Typography component="div" className={classnames([props.classes.label, props.classLabel])}>{props.label}</Typography>
             <Typography
                 onClick={props.onValueClick}
@@ -132,4 +133,3 @@ export const DetailsAttributeComponent = withStyles(styles)(
                 </Tooltip>}
             </Typography>
         </Typography>);
-
index 8ec4c59b8781e2b4dc63d04b62bff4f955a815a9..1ba88d25b221ce4ea5567401ebff3f7e4ed2e6d7 100644 (file)
@@ -77,6 +77,7 @@ import NotInterested from "@material-ui/icons/NotInterested";
 import Image from "@material-ui/icons/Image";
 import Stop from "@material-ui/icons/Stop";
 import FileCopy from "@material-ui/icons/FileCopy";
+import ShowChart from "@material-ui/icons/ShowChart";
 
 // Import FontAwesome icons
 import { library } from "@fortawesome/fontawesome-svg-core";
@@ -171,6 +172,13 @@ export const TerminalIcon: IconType = (props: any) => (
     </SvgIcon>
 )
 
+//https://pictogrammers.com/library/mdi/icon/chevron-double-right/
+export const DoubleRightArrows: IconType = (props: any) => (
+    <SvgIcon {...props}>
+        <path d="M5.59,7.41L7,6L13,12L7,18L5.59,16.59L10.17,12L5.59,7.41M11.59,7.41L13,6L19,12L13,18L11.59,16.59L16.17,12L11.59,7.41Z" />
+    </SvgIcon>
+)
+
 export type IconType = React.SFC<{ className?: string; style?: object }>;
 
 export const AddIcon: IconType = props => <Add {...props} />;
@@ -267,3 +275,4 @@ export const StartIcon: IconType = props => <PlayArrow {...props} />;
 export const StopIcon: IconType = props => <Stop {...props} />;
 export const SelectAllIcon: IconType = props => <CheckboxMultipleOutline {...props} />;
 export const SelectNoneIcon: IconType = props => <CheckboxMultipleBlankOutline {...props} />;
+export const ShowChartIcon: IconType = props => <ShowChart {...props} />;
diff --git a/services/workbench2/src/components/loading/inline-pulser.tsx b/services/workbench2/src/components/loading/inline-pulser.tsx
new file mode 100644 (file)
index 0000000..def6b5e
--- /dev/null
@@ -0,0 +1,30 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+import React from 'react';
+import { ThreeDots } from 'react-loader-spinner'
+import { withTheme } from '@material-ui/core';
+import { ArvadosTheme } from 'common/custom-theme';
+
+type ThemeProps = {
+    theme: ArvadosTheme;
+};
+
+type Props = {
+    color?: string;
+    height?: number;
+    width?: number;
+    radius?: number;
+};
+
+export const InlinePulser = withTheme()((props: Props & ThemeProps) => (
+    <ThreeDots
+        visible={true}
+        height={props.height || "30"}
+        width={props.width || "30"}
+        color={props.color || props.theme.customs.colors.greyL}
+        radius={props.radius || "10"}
+        ariaLabel="three-dots-loading"
+    />
+));
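A hedged usage sketch for the new spinner: the MemberCount component below is hypothetical, but mirrors how the memberCount field added further down (undefined for not loaded, null for failed to load) could drive an inline loading indicator:

    import React from 'react';
    import { InlinePulser } from 'components/loading/inline-pulser';

    // Shows the pulsing dots until the value has loaded (undefined),
    // then either the count or a fallback when loading failed (null).
    export const MemberCount = ({ count }: { count?: number | null }) =>
        count === undefined
            ? <InlinePulser />
            : <span>{count === null ? 'Unknown' : count}</span>;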
index 203748d5e0b2c73ff6241b100718f2c01f5e68b2..7e0ca8fd1ffaff3feeb6841c451c969c40a1d28e 100644 (file)
@@ -56,12 +56,12 @@ interface MPVHideablePanelActionProps {
 
 type MPVHideablePanelProps = MPVHideablePanelDataProps & MPVHideablePanelActionProps;
 
-const MPVHideablePanel = ({doHidePanel, doMaximizePanel, doUnMaximizePanel, name, visible, maximized, illuminated, ...props}: MPVHideablePanelProps) =>
+const MPVHideablePanel = ({ doHidePanel, doMaximizePanel, doUnMaximizePanel, name, visible, maximized, illuminated, ...props }: MPVHideablePanelProps) =>
     visible
-    ? <>
-        {React.cloneElement((props.children as ReactElement), { doHidePanel, doMaximizePanel, doUnMaximizePanel, panelName: name, panelMaximized: maximized, panelIlluminated: illuminated, panelRef: props.panelRef })}
-    </>
-    : null;
+        ? <>
+            {React.cloneElement((props.children as ReactElement), { doHidePanel, doMaximizePanel, doUnMaximizePanel, panelName: name, panelMaximized: maximized, panelIlluminated: illuminated, panelRef: props.panelRef })}
+        </>
+        : null;
 
 interface MPVPanelDataProps {
     panelName?: string;
@@ -82,15 +82,15 @@ interface MPVPanelActionProps {
 // Props received by panel implementors
 export type MPVPanelProps = MPVPanelDataProps & MPVPanelActionProps;
 
-type MPVPanelContentProps = {children: ReactElement} & MPVPanelProps & GridProps;
+type MPVPanelContentProps = { children: ReactElement } & MPVPanelProps & GridProps;
 
 // Grid item compatible component for layout and MPV props passing
-export const MPVPanelContent = ({doHidePanel, doMaximizePanel, doUnMaximizePanel, panelName,
+export const MPVPanelContent = ({ doHidePanel, doMaximizePanel, doUnMaximizePanel, panelName,
     panelMaximized, panelIlluminated, panelRef, forwardProps, maxHeight, minHeight,
-    ...props}: MPVPanelContentProps) => {
+    ...props }: MPVPanelContentProps) => {
     useEffect(() => {
         if (panelRef && panelRef.current) {
-            panelRef.current.scrollIntoView({alignToTop: true});
+            panelRef.current.scrollIntoView({ alignToTop: true });
         }
     }, [panelRef]);
 
@@ -98,12 +98,12 @@ export const MPVPanelContent = ({doHidePanel, doMaximizePanel, doUnMaximizePanel
         ? '100%'
         : maxHeight;
 
-    return <Grid item style={{maxHeight: maxH, minHeight}} {...props}>
+    return <Grid item style={{ maxHeight: maxH, minHeight }} {...props}>
         <span ref={panelRef} /> {/* Element to scroll to when the panel is selected */}
-        <Paper style={{height: '100%'}} elevation={panelIlluminated ? 8 : 0}>
-            { forwardProps
+        <Paper style={{ height: '100%' }} elevation={panelIlluminated ? 8 : 0}>
+            {forwardProps
                 ? React.cloneElement(props.children, { doHidePanel, doMaximizePanel, doUnMaximizePanel, panelName, panelMaximized })
-                : props.children }
+                : props.children}
         </Paper>
     </Grid>;
 }
@@ -118,7 +118,7 @@ interface MPVContainerDataProps {
 type MPVContainerProps = MPVContainerDataProps & GridProps;
 
 // Grid container compatible component that also handles panel toggling.
-const MPVContainerComponent = ({children, panelStates, classes, ...props}: MPVContainerProps & WithStyles<CssRules>) => {
+const MPVContainerComponent = ({ children, panelStates, classes, ...props }: MPVContainerProps & WithStyles<CssRules>) => {
     if (children === undefined || children === null || children === {}) {
         children = [];
     } else if (!isArray(children)) {
@@ -126,8 +126,8 @@ const MPVContainerComponent = ({children, panelStates, classes, ...props}: MPVCo
     }
     const initialVisibility = (children as ReactNodeArray).map((_, idx) =>
         !panelStates || // if panelStates wasn't passed, default to all visible panels
-            (panelStates[idx] &&
-                (panelStates[idx].visible || panelStates[idx].visible === undefined)));
+        (panelStates[idx] &&
+            (panelStates[idx].visible || panelStates[idx].visible === undefined)));
     const [panelVisibility, setPanelVisibility] = useState<boolean[]>(initialVisibility);
     const [previousPanelVisibility, setPreviousPanelVisibility] = useState<boolean[]>(initialVisibility);
     const [highlightedPanel, setHighlightedPanel] = useState<number>(-1);
@@ -144,7 +144,7 @@ const MPVContainerComponent = ({children, panelStates, classes, ...props}: MPVCo
                 setPanelVisibility([
                     ...panelVisibility.slice(0, idx),
                     true,
-                    ...panelVisibility.slice(idx+1)
+                    ...panelVisibility.slice(idx + 1)
                 ]);
                 setSelectedPanel(idx);
             };
@@ -153,7 +153,7 @@ const MPVContainerComponent = ({children, panelStates, classes, ...props}: MPVCo
                 setPanelVisibility([
                     ...panelVisibility.slice(0, idx),
                     false,
-                    ...panelVisibility.slice(idx+1)
+                    ...panelVisibility.slice(idx + 1)
                 ])
             };
             const maximizeFn = (idx: number) => () => {
@@ -162,7 +162,7 @@ const MPVContainerComponent = ({children, panelStates, classes, ...props}: MPVCo
                 setPanelVisibility([
                     ...panelVisibility.slice(0, idx).map(() => false),
                     true,
-                    ...panelVisibility.slice(idx+1).map(() => false),
+                    ...panelVisibility.slice(idx + 1).map(() => false),
                 ]);
             };
             const unMaximizeFn = (idx: number) => () => {
@@ -170,14 +170,14 @@ const MPVContainerComponent = ({children, panelStates, classes, ...props}: MPVCo
                 setSelectedPanel(idx);
             }
             const panelName = panelStates === undefined
-                ? `Panel ${idx+1}`
-                : (panelStates[idx] && panelStates[idx].name) || `Panel ${idx+1}`;
+                ? `Panel ${idx + 1}`
+                : (panelStates[idx] && panelStates[idx].name) || `Panel ${idx + 1}`;
             const btnVariant = panelVisibility[idx]
                 ? "contained"
                 : "outlined";
             const btnTooltip = panelVisibility[idx]
                 ? ``
-                :`Open ${panelName} panel`;
+                : `Open ${panelName} panel`;
             const panelIsMaximized = panelVisibility[idx] &&
                 panelVisibility.filter(e => e).length === 1;
 
@@ -193,7 +193,7 @@ const MPVContainerComponent = ({children, panelStates, classes, ...props}: MPVCo
                             setHighlightedPanel(-1);
                         }}
                         onClick={showFn(idx)}>
-                            {panelName}
+                        {panelName}
                     </Button>
                 </Tooltip>
             ];
@@ -211,15 +211,15 @@ const MPVContainerComponent = ({children, panelStates, classes, ...props}: MPVCo
 
     return <Grid container {...props} className={classes.root}>
         <Grid container item direction="row">
-            { buttons.map((tgl, idx) => <Grid item key={idx}>{tgl}</Grid>) }
+            {buttons.map((tgl, idx) => <Grid item key={idx}>{tgl}</Grid>)}
         </Grid>
         <Grid container item {...props} xs className={classes.content}
             onScroll={() => setSelectedPanel(-1)}>
-            { panelVisibility.includes(true)
+            {panelVisibility.includes(true)
                 ? panels
                 : <Grid container item alignItems='center' justify='center'>
                     <DefaultView messages={["All panels are hidden.", "Click on the buttons above to show them."]} icon={InfoIcon} />
-                </Grid> }
+                </Grid>}
         </Grid>
     </Grid>;
 };
index f92c0dcf4eb6147bd0d645532ca55c29e30d73cd..650059316626632d5ed29b0ef68553eed3433502 100644 (file)
@@ -34,10 +34,11 @@ import { getProcess } from "store/processes/process";
 import { Process } from "store/processes/process";
 import { PublicFavoritesState } from "store/public-favorites/public-favorites-reducer";
 import { isExactlyOneSelected } from "store/multiselect/multiselect-actions";
+import { IntersectionObserverWrapper } from "./ms-toolbar-overflow-wrapper";
 
 const WIDTH_TRANSITION = 150
 
-type CssRules = "root" | "transition" | "button" | "iconContainer";
+type CssRules = "root" | "transition" | "button" | "iconContainer" | "icon";
 
 const styles: StyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({
     root: {
@@ -46,39 +47,30 @@ const styles: StyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({
         width: 0,
         height: '2.7rem',
         padding: 0,
-        margin: "1rem auto auto 0.5rem",
+        margin: "1rem auto auto 0.3rem",
         transition: `width ${WIDTH_TRANSITION}ms`,
-        overflowY: 'auto',
-        scrollBehavior: 'smooth',
-        '&::-webkit-scrollbar': {
-            width: 0,
-            height: 2
-        },
-        '&::-webkit-scrollbar-track': {
-            width: 0,
-            height: 2
-        },
-        '&::-webkit-scrollbar-thumb': {
-            backgroundColor: '#757575',
-            borderRadius: 2
-        }
+        overflow: 'hidden',
     },
     transition: {
         display: "flex",
         flexDirection: "row",
-        width: 0,
         height: '2.7rem',
         padding: 0,
-        margin: "1rem auto auto 0.5rem",
+        margin: "1rem auto auto 0.3rem",
         overflow: 'hidden',
         transition: `width ${WIDTH_TRANSITION}ms`,
     },
     button: {
         width: "2.5rem",
         height: "2.5rem ",
+        paddingLeft: 0,
+        border: "1px solid transparent",
     },
     iconContainer: {
-        height: '100%'
+        height: '100%',
+    },
+    icon: {
+        marginLeft: '-0.5rem',
     }
 });
 
@@ -130,47 +122,53 @@ export const MultiselectToolbar = connect(
             <React.Fragment>
                 <Toolbar
                     className={isTransitioning ? classes.transition: classes.root}
-                    style={{ width: `${(actions.length * 2.5) + 1}rem` }}
+                    style={{ width: `${(actions.length * 2.5) + 6}rem`}}
                     data-cy='multiselect-toolbar'
                     >
                     {actions.length ? (
-                        actions.map((action, i) =>{
-                            const { hasAlts, useAlts, name, altName, icon, altIcon } = action;
-                        return hasAlts ? (
-                            <Tooltip
-                                className={classes.button}
-                                title={currentPathIsTrash || (useAlts && useAlts(singleSelectedUuid, iconProps)) ? altName : name}
-                                key={i}
-                                disableFocusListener
-                            >
-                                <span className={classes.iconContainer}>
-                                    <IconButton
-                                        data-cy='multiselect-button'
-                                        disabled={disabledButtons.has(name)}
-                                        onClick={() => props.executeMulti(action, checkedList, iconProps.resources)}
+                        <IntersectionObserverWrapper menuLength={actions.length}>
+                            {actions.map((action, i) =>{
+                                const { hasAlts, useAlts, name, altName, icon, altIcon } = action;
+                            return hasAlts ? (
+                                <Tooltip
+                                    className={classes.button}
+                                    data-targetid={name}
+                                    title={currentPathIsTrash || (useAlts && useAlts(singleSelectedUuid, iconProps)) ? altName : name}
+                                    key={i}
+                                    disableFocusListener
                                     >
-                                        {currentPathIsTrash || (useAlts && useAlts(singleSelectedUuid, iconProps)) ? altIcon && altIcon({}) : icon({})}
-                                    </IconButton>
-                                </span>
-                            </Tooltip>
-                        ) : (
-                            <Tooltip
-                                className={classes.button}
-                                title={action.name}
-                                key={i}
-                                disableFocusListener
-                            >
-                                <span className={classes.iconContainer}>
-                                    <IconButton
-                                        data-cy='multiselect-button'
-                                        onClick={() => props.executeMulti(action, checkedList, iconProps.resources)}
+                                    <span className={classes.iconContainer}>
+                                        <IconButton
+                                            data-cy='multiselect-button'
+                                            disabled={disabledButtons.has(name)}
+                                            onClick={() => props.executeMulti(action, checkedList, iconProps.resources)}
+                                            className={classes.icon}
+                                        >
+                                            {currentPathIsTrash || (useAlts && useAlts(singleSelectedUuid, iconProps)) ? altIcon && altIcon({}) : icon({})}
+                                        </IconButton>
+                                    </span>
+                                </Tooltip>
+                            ) : (
+                                <Tooltip
+                                    className={classes.button}
+                                    data-targetid={name}
+                                    title={action.name}
+                                    key={i}
+                                    disableFocusListener
                                     >
-                                        {action.icon({})}
-                                    </IconButton>
-                                </span>
-                            </Tooltip>
-                        );
-                        })
+                                    <span className={classes.iconContainer}>
+                                        <IconButton
+                                            data-cy='multiselect-button'
+                                            onClick={() => props.executeMulti(action, checkedList, iconProps.resources)}
+                                            className={classes.icon}
+                                        >
+                                            {action.icon({})}
+                                        </IconButton>
+                                    </span>
+                                </Tooltip>
+                            );
+                            })}
+                        </IntersectionObserverWrapper>
                     ) : (
                         <></>
                     )}
diff --git a/services/workbench2/src/components/multiselect-toolbar/ms-toolbar-overflow-menu.tsx b/services/workbench2/src/components/multiselect-toolbar/ms-toolbar-overflow-menu.tsx
new file mode 100644 (file)
index 0000000..9f8ced9
--- /dev/null
@@ -0,0 +1,104 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+import React, { useState, useMemo, ReactElement, JSXElementConstructor } from 'react';
+import { DoubleRightArrows } from 'components/icon/icon';
+import classnames from 'classnames';
+import { IconButton, Menu, MenuItem, StyleRulesCallback, Tooltip, WithStyles, withStyles } from '@material-ui/core';
+import { ArvadosTheme } from 'common/custom-theme';
+
+type CssRules = 'inOverflowMenu' | 'openMenuButton' | 'menu' | 'menuItem' | 'menuElement';
+
+const styles: StyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({
+    inOverflowMenu: {
+        '&:hover': {
+            backgroundColor: 'transparent',
+        },
+    },
+    openMenuButton: {
+        right: '10px',
+    },
+    menu: {
+        marginLeft: 0,
+    },
+    menuItem: {
+        '&:hover': {
+            backgroundColor: 'white',
+        },
+        marginTop: 0,
+        paddingTop: 0,
+        paddingLeft: '1rem',
+        height: '2.5rem',
+    },
+    menuElement: {
+        width: '2rem',
+    }
+});
+
+export type OverflowChild = ReactElement<{ className: string; }, string | JSXElementConstructor<any>>
+
+type OverflowMenuProps = {
+    children: OverflowChild[]
+    className: string
+    visibilityMap: {}
+}
+
+export const OverflowMenu = withStyles(styles)((props: OverflowMenuProps & WithStyles<CssRules>) => {
+    const { children, className, visibilityMap, classes } = props;
+    const [anchorEl, setAnchorEl] = useState(null);
+    const open = Boolean(anchorEl);
+    const handleClick = (event) => {
+        setAnchorEl(event.currentTarget);
+    };
+
+    const handleClose = () => {
+        setAnchorEl(null);
+    };
+
+    const shouldShowMenu = useMemo(() => Object.values(visibilityMap).some((v) => v === false), [visibilityMap]);
+    if (!shouldShowMenu) {
+        return null;
+    }
+    return (
+        <div className={className}>
+            <Tooltip title="More Options" disableFocusListener>
+                <IconButton
+                    aria-label='more'
+                    aria-controls='long-menu'
+                    aria-haspopup='true'
+                    onClick={handleClick}
+                    className={classes.openMenuButton}
+                >
+                        <DoubleRightArrows />
+                </IconButton>
+            </Tooltip>
+            <Menu
+                id='long-menu'
+                anchorEl={anchorEl}
+                keepMounted
+                open={open}
+                onClose={handleClose}
+                disableAutoFocusItem
+                className={classes.menu}
+            >
+                {React.Children.map(children, (child: any) => {
+                    if (!visibilityMap[child.props['data-targetid']]) {
+                        return (
+                            <MenuItem
+                                key={child.props['data-targetid']}
+                                onClick={handleClose}
+                                className={classes.menuItem}
+                            >
+                                {React.cloneElement(child, {
+                                    className: classnames(classes.menuElement),
+                                })}
+                            </MenuItem>
+                        );
+                    }
+                    return null;
+                })}
+            </Menu>
+        </div>
+    );
+});
diff --git a/services/workbench2/src/components/multiselect-toolbar/ms-toolbar-overflow-wrapper.tsx b/services/workbench2/src/components/multiselect-toolbar/ms-toolbar-overflow-wrapper.tsx
new file mode 100644 (file)
index 0000000..32f977e
--- /dev/null
@@ -0,0 +1,135 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+import React, { useState, useRef, useEffect } from 'react';
+import { StyleRulesCallback, WithStyles, withStyles } from '@material-ui/core';
+import classnames from 'classnames';
+import { ArvadosTheme } from 'common/custom-theme';
+import { OverflowMenu, OverflowChild } from './ms-toolbar-overflow-menu';
+
+type CssRules = 'visible' | 'inVisible' | 'toolbarWrapper' | 'overflowStyle';
+
+const styles: StyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({
+    visible: {
+        order: 0,
+        visibility: 'visible',
+        opacity: 1,
+    },
+    inVisible: {
+        order: 100,
+        visibility: 'hidden',
+        pointerEvents: 'none',
+    },
+    toolbarWrapper: {
+        display: 'flex',
+        overflow: 'hidden',
+        padding: '0 0px 0 20px',
+        width: '100%',
+    },
+    overflowStyle: {
+        order: 99,
+        position: 'sticky',
+        right: '-2rem',
+        width: 0,
+    },
+});
+
+type WrapperProps = {
+    children: OverflowChild[];
+    menuLength: number;
+};
+
+export const IntersectionObserverWrapper = withStyles(styles)((props: WrapperProps & WithStyles<CssRules>) => {
+    const { classes, children, menuLength } = props;
+    const lastEntryId = (children[menuLength - 1] as any).props['data-targetid'];
+    const navRef = useRef<any>(null);
+    const [visibilityMap, setVisibilityMap] = useState<Record<string, boolean>>({});
+    const [numHidden, setNumHidden] = useState(() => findNumHidden(visibilityMap));
+
+    const prevNumHidden = useRef(numHidden);
+
+    const handleIntersection = (entries) => {
+        const updatedEntries: Record<string, boolean> = {};
+        entries.forEach((entry) => {
+            const targetid = entry.target.dataset.targetid as string;
+            //if true, the element is visible
+            if (entry.isIntersecting) {
+                updatedEntries[targetid] = true;
+            } else {
+                updatedEntries[targetid] = false;
+            }
+        });
+
+        setVisibilityMap((prev) => ({
+            ...prev,
+            ...updatedEntries,
+            [lastEntryId]: Object.keys(updatedEntries)[0] === lastEntryId,
+        }));
+    };
+
+    //ensures that the last element is always visible if the second to last is visible
+    useEffect(() => {
+        if ((prevNumHidden.current > 1 || prevNumHidden.current === 0) && numHidden === 1) {
+            setVisibilityMap((prev) => ({
+                ...prev,
+                [lastEntryId]: true,
+            }));
+        }
+        prevNumHidden.current = numHidden;
+    }, [numHidden, lastEntryId]);
+
+    useEffect(() => {
+        setNumHidden(findNumHidden(visibilityMap));
+    }, [visibilityMap]);
+
+    useEffect((): any => {
+        setVisibilityMap({});
+        const observer = new IntersectionObserver(handleIntersection, {
+            root: navRef.current,
+            rootMargin: '0px -30px 0px 0px',
+            threshold: 1,
+        });
+        // We are adding observers to child elements of the container div
+        // with ref as navRef. Notice that we are adding observers
+        // only if we have the data attribute targetid on the child element
+        if (navRef.current)
+            Array.from(navRef.current.children).forEach((item: any) => {
+                if (item.dataset.targetid) {
+                    observer.observe(item);
+                }
+            });
+        return () => {
+            observer.disconnect();
+        };
+        // eslint-disable-next-line
+    }, [menuLength]);
+
+    function findNumHidden(visMap: {}) {
+        return Object.values(visMap).filter((x) => x === false).length;
+    }
+
+    return (
+        <div
+            className={classes.toolbarWrapper}
+            ref={navRef}
+        >
+            {React.Children.map(children, (child) => {
+                return React.cloneElement(child, {
+                    className: classnames(child.props.className, {
+                        [classes.visible]: !!visibilityMap[child.props['data-targetid']],
+                        [classes.inVisible]: !visibilityMap[child.props['data-targetid']],
+                    }),
+                });
+            })}
+            {numHidden >= 2 && (
+                <OverflowMenu
+                    visibilityMap={visibilityMap}
+                    className={classes.overflowStyle}
+                >
+                    {children}
+                </OverflowMenu>
+            )}
+        </div>
+    );
+});
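A minimal usage sketch of the wrapper, mirroring how MultiselectToolbar uses it above; the Action type and its fields are placeholders, not the real toolbar action set. Each child must carry data-targetid so the IntersectionObserver can report which buttons still fit, and anything that does not fit is served from the overflow menu:

    import React from 'react';
    import { Tooltip, IconButton } from '@material-ui/core';
    import { IntersectionObserverWrapper } from 'components/multiselect-toolbar/ms-toolbar-overflow-wrapper';

    // Hypothetical action shape, for illustration only.
    type Action = { name: string; icon: React.ReactNode; run: () => void };

    export const ExampleToolbar = ({ actions }: { actions: Action[] }) => (
        <IntersectionObserverWrapper menuLength={actions.length}>
            {actions.map(action => (
                <Tooltip key={action.name} data-targetid={action.name} title={action.name}>
                    <span>
                        <IconButton onClick={action.run}>{action.icon}</IconButton>
                    </span>
                </Tooltip>
            ))}
        </IntersectionObserverWrapper>
    );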
index fbb4f599b60690a5d5e7723f2a1301f25a513487..6d98aed28e3992e334ad08bbc7fc0b0b88bc77c0 100644 (file)
@@ -76,7 +76,7 @@ export const SearchInput = (props: SearchInputProps) => {
     };
 
     return <form onSubmit={handleSubmit}>
-        <FormControl>
+        <FormControl style={{ width: '14rem'}}>
             <InputLabel>{label}</InputLabel>
             <Input
                 type="text"
index b21d879185ace8f4e2c4f66fc5da653d90438ed6..bf932cdddc7ec99d58d9cc081667c26797a4a603 100644 (file)
@@ -12,7 +12,7 @@ import { Dispatch } from "redux";
 import { fetchSubprocessProgress } from "store/subprocess-panel/subprocess-panel-actions";
 import { ProcessStatusFilter } from "store/resource-type-filters/resource-type-filters";
 
-type CssRules = 'progressWrapper' | 'progressStacked' ;
+type CssRules = 'progressWrapper' | 'progressStacked';
 
 const styles: StyleRulesCallback<CssRules> = (theme) => ({
     progressWrapper: {
@@ -55,9 +55,9 @@ const mapDispatchToProps = (dispatch: Dispatch): ProgressBarActionProps => ({
 });
 
 export const SubprocessProgressBar = connect(null, mapDispatchToProps)(withStyles(styles)(
-    ({process, classes, fetchSubprocessProgress}: ProgressBarProps) => {
+    ({ process, classes, fetchSubprocessProgress }: ProgressBarProps) => {
 
-        const [progressData, setProgressData] = useState<ProgressBarData|undefined>(undefined);
+        const [progressData, setProgressData] = useState<ProgressBarData | undefined>(undefined);
         const requestingContainerUuid = process.containerRequest.containerUuid;
         const isRunning = isProcessRunning(process);
 
@@ -72,25 +72,38 @@ export const SubprocessProgressBar = connect(null, mapDispatchToProps)(withStyle
             }
         }, [fetchSubprocessProgress, isRunning, requestingContainerUuid]);
 
+        let tooltip = "";
+        if (progressData) {
+            let total = 0;
+            [ProcessStatusFilter.COMPLETED,
+            ProcessStatusFilter.RUNNING,
+            ProcessStatusFilter.FAILED,
+            ProcessStatusFilter.QUEUED].forEach(psf => {
+                if (progressData[psf] > 0) {
+                    if (tooltip.length > 0) { tooltip += ", "; }
+                    tooltip += `${progressData[psf]} ${psf}`;
+                    total += progressData[psf];
+                }
+            });
+            if (total > 0) {
+                if (tooltip.length > 0) { tooltip += ", "; }
+                tooltip += `${total} Total`;
+            }
+        }
+
         return progressData !== undefined && getStatusTotal(progressData) > 0 ? <div className={classes.progressWrapper}>
-            <CProgressStacked className={classes.progressStacked}>
-                <Tooltip title={`${progressData[ProcessStatusFilter.COMPLETED]} Completed`}>
+            <Tooltip title={tooltip}>
+                <CProgressStacked className={classes.progressStacked}>
                     <CProgress height={10} color="success"
                         value={getStatusPercent(progressData, ProcessStatusFilter.COMPLETED)} />
-                </Tooltip>
-                <Tooltip title={`${progressData[ProcessStatusFilter.RUNNING]} Running`}>
                     <CProgress height={10} color="success" variant="striped"
                         value={getStatusPercent(progressData, ProcessStatusFilter.RUNNING)} />
-                </Tooltip>
-                <Tooltip title={`${progressData[ProcessStatusFilter.FAILED]} Failed`}>
                     <CProgress height={10} color="danger"
                         value={getStatusPercent(progressData, ProcessStatusFilter.FAILED)} />
-                </Tooltip>
-                <Tooltip title={`${progressData[ProcessStatusFilter.QUEUED]} Queued`}>
                     <CProgress height={10} color="secondary" variant="striped"
                         value={getStatusPercent(progressData, ProcessStatusFilter.QUEUED)} />
-                </Tooltip>
-            </CProgressStacked>
+                </CProgressStacked>
+            </Tooltip>
         </div> : <></>;
     }
 ));
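For illustration, a worked example of the combined tooltip string the loop above builds; the sample counts and plain string keys are assumed, the real keys are the ProcessStatusFilter values used in the component:

    // Assumed sample data, for illustration only.
    const progressData: Record<string, number> = { Completed: 3, Running: 1, Failed: 0, Queued: 2 };
    let tooltip = "";
    let total = 0;
    ["Completed", "Running", "Failed", "Queued"].forEach(psf => {
        if (progressData[psf] > 0) {
            if (tooltip.length > 0) { tooltip += ", "; }
            tooltip += `${progressData[psf]} ${psf}`;
            total += progressData[psf];
        }
    });
    if (total > 0) { tooltip += `, ${total} Total`; }
    // tooltip === "3 Completed, 1 Running, 2 Queued, 6 Total"

Zero buckets are skipped, so a run with no failures never mentions "Failed".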
index 0932b3c95e63665e3cbe68e1c416a12e8803a470..078e2a27fa0a98bf3ef231630a1ad22b36d18a53 100644 (file)
@@ -18,6 +18,8 @@ export interface GroupResource extends TrashableResource, ResourceWithProperties
     ensure_unique_name: boolean;
     canWrite: boolean;
     canManage: boolean;
+    // Optional local-only field, undefined for not loaded, null for failed to load
+    memberCount?: number | null;
 }
 
 export enum GroupClass {
index 955d9689afc7f02eb471d0e965ad376cc9af16a4..079cf11e71f6997db4f6c04c1114a0dc3b11d6dc 100644 (file)
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-import { DataExplorerMiddlewareService, dataExplorerToListParams, getDataExplorerColumnFilters, getOrder } from "store/data-explorer/data-explorer-middleware-service";
+import { getDataExplorerColumnFilters } from "store/data-explorer/data-explorer-middleware-service";
 import { RootState } from "../store";
 import { ServiceRepository } from "services/services";
-import { FilterBuilder, joinFilters } from "services/api/filter-builder";
+import { joinFilters } from "services/api/filter-builder";
 import { allProcessesPanelActions } from "./all-processes-panel-action";
 import { Dispatch, MiddlewareAPI } from "redux";
-import { resourcesActions } from "store/resources/resources-actions";
-import { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';
-import { progressIndicatorActions } from 'store/progress-indicator/progress-indicator-actions';
-import { getDataExplorer, DataExplorer } from "store/data-explorer/data-explorer-reducer";
-import { loadMissingProcessesInformation } from "store/project-panel/project-panel-middleware-service";
+import { DataExplorer } from "store/data-explorer/data-explorer-reducer";
 import { DataColumns } from "components/data-table/data-table";
 import {
-    ProcessStatusFilter,
-    buildProcessStatusFilters,
     serializeOnlyProcessTypeFilters
 } from "../resource-type-filters/resource-type-filters";
 import { AllProcessesPanelColumnNames } from "views/all-processes-panel/all-processes-panel";
-import { containerRequestFieldsNoMounts, ContainerRequestResource } from "models/container-request";
+import { ProcessesMiddlewareService } from "store/processes/processes-middleware-service";
+import { ContainerRequestResource } from 'models/container-request';
 
-export class AllProcessesPanelMiddlewareService extends DataExplorerMiddlewareService {
-    constructor(private services: ServiceRepository, id: string) {
-        super(id);
+export class AllProcessesPanelMiddlewareService extends ProcessesMiddlewareService {
+    constructor(services: ServiceRepository, id: string) {
+        super(services, allProcessesPanelActions, id);
     }
 
-    async requestItems(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {
-        const dataExplorer = getDataExplorer(api.getState().dataExplorer, this.getId());
-        if (!dataExplorer) {
-            api.dispatch(allProcessesPanelDataExplorerIsNotSet());
-        } else {
-            try {
-                if (!background) { api.dispatch(progressIndicatorActions.START_WORKING(this.getId())); }
-                const processItems = await this.services.containerRequestService.list(
-                    {
-                        ...getParams(dataExplorer),
-                        // Omit mounts when viewing all process panel
-                        select: containerRequestFieldsNoMounts,
-                    });
+    getFilters(api: MiddlewareAPI<Dispatch, RootState>, dataExplorer: DataExplorer): string | null {
+        const sup = super.getFilters(api, dataExplorer);
+        if (sup === null) { return null; }
+        const columns = dataExplorer.columns as DataColumns<string, ContainerRequestResource>;
 
-                if (!background) { api.dispatch(progressIndicatorActions.PERSIST_STOP_WORKING(this.getId())); }
-                api.dispatch(resourcesActions.SET_RESOURCES(processItems.items));
-                await api.dispatch<any>(loadMissingProcessesInformation(processItems.items));
-                api.dispatch(allProcessesPanelActions.SET_ITEMS({
-                    items: processItems.items.map((resource: any) => resource.uuid),
-                    itemsAvailable: processItems.itemsAvailable,
-                    page: Math.floor(processItems.offset / processItems.limit),
-                    rowsPerPage: processItems.limit
-                }));
-            } catch {
-                if (!background) { api.dispatch(progressIndicatorActions.PERSIST_STOP_WORKING(this.getId())); }
-                api.dispatch(allProcessesPanelActions.SET_ITEMS({
-                    items: [],
-                    itemsAvailable: 0,
-                    page: 0,
-                    rowsPerPage: dataExplorer.rowsPerPage
-                }));
-                api.dispatch(couldNotFetchAllProcessesListing());
-            }
-        }
+        const typeFilters = serializeOnlyProcessTypeFilters(getDataExplorerColumnFilters(columns, AllProcessesPanelColumnNames.TYPE));
+        return joinFilters(sup, typeFilters);
     }
 }
-
-const getParams = (dataExplorer: DataExplorer) => ({
-    ...dataExplorerToListParams(dataExplorer),
-    order: getOrder<ContainerRequestResource>(dataExplorer),
-    filters: getFilters(dataExplorer)
-});
-
-const getFilters = (dataExplorer: DataExplorer) => {
-    const columns = dataExplorer.columns as DataColumns<string, ContainerRequestResource>;
-    const statusColumnFilters = getDataExplorerColumnFilters(columns, 'Status');
-    const activeStatusFilter = Object.keys(statusColumnFilters).find(
-        filterName => statusColumnFilters[filterName].selected
-    ) || ProcessStatusFilter.ALL;
-
-    const nameFilter = new FilterBuilder().addILike("name", dataExplorer.searchValue).getFilters();
-    const statusFilter = buildProcessStatusFilters(new FilterBuilder(), activeStatusFilter).getFilters();
-    const typeFilters = serializeOnlyProcessTypeFilters(getDataExplorerColumnFilters(columns, AllProcessesPanelColumnNames.TYPE));
-
-    return joinFilters(
-        nameFilter,
-        statusFilter,
-        typeFilters
-    );
-};
-
-const allProcessesPanelDataExplorerIsNotSet = () =>
-    snackbarActions.OPEN_SNACKBAR({
-        message: 'All Processes panel is not ready.',
-        kind: SnackbarKind.ERROR
-    });
-
-const couldNotFetchAllProcessesListing = () =>
-    snackbarActions.OPEN_SNACKBAR({
-        message: 'Could not fetch All Processes listing.',
-        kind: SnackbarKind.ERROR
-    });
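A minimal sketch of the same refactoring pattern applied to a hypothetical panel, assuming ProcessesMiddlewareService exposes getFilters(api, dataExplorer) the way it is used above; the base class takes care of listing, pagination, ordering, and dispatch, so a subclass only composes its extra filters:

    import { Dispatch, MiddlewareAPI } from "redux";
    import { RootState } from "store/store";
    import { ServiceRepository } from "services/services";
    import { DataExplorer } from "store/data-explorer/data-explorer-reducer";
    import { FilterBuilder, joinFilters } from "services/api/filter-builder";
    import { ProcessesMiddlewareService } from "store/processes/processes-middleware-service";
    // Placeholder: a bindDataExplorerActions() result for this hypothetical panel.
    import { myPanelActions } from "./my-panel-action";

    export class MyPanelMiddlewareService extends ProcessesMiddlewareService {
        constructor(services: ServiceRepository, id: string) {
            super(services, myPanelActions, id);
        }

        getFilters(api: MiddlewareAPI<Dispatch, RootState>, dataExplorer: DataExplorer): string | null {
            const sup = super.getFilters(api, dataExplorer);
            if (sup === null) { return null; }
            // Example extra constraint: only finished container requests.
            const stateFilter = new FilterBuilder().addEqual("state", "Final").getFilters();
            return joinFilters(sup, stateFilter);
        }
    }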
index ea050e609f558a91decb73ac7badd65fe18f7d3f..a330b97426e332dd1663942dd22cc89b0046cf12 100644 (file)
@@ -29,6 +29,7 @@ export const dataExplorerActions = unionize({
     SET_EXPLORER_SEARCH_VALUE: ofType<{ id: string; searchValue: string }>(),
     RESET_EXPLORER_SEARCH_VALUE: ofType<{ id: string }>(),
     SET_REQUEST_STATE: ofType<{ id: string; requestState: DataTableRequestState }>(),
+    SET_IS_NOT_FOUND: ofType<{ id: string; isNotFound: boolean }>(),
 });
 
 export type DataExplorerAction = UnionOf<typeof dataExplorerActions>;
@@ -51,4 +52,7 @@ export const bindDataExplorerActions = (id: string) => ({
     SET_EXPLORER_SEARCH_VALUE: (payload: { searchValue: string }) => dataExplorerActions.SET_EXPLORER_SEARCH_VALUE({ ...payload, id }),
     RESET_EXPLORER_SEARCH_VALUE: () => dataExplorerActions.RESET_EXPLORER_SEARCH_VALUE({ id }),
     SET_REQUEST_STATE: (payload: { requestState: DataTableRequestState }) => dataExplorerActions.SET_REQUEST_STATE({ ...payload, id }),
+    SET_IS_NOT_FOUND: (payload: { isNotFound: boolean }) => dataExplorerActions.SET_IS_NOT_FOUND({ ...payload, id }),
 });
+
+export type BoundDataExplorerActions = ReturnType<typeof bindDataExplorerActions>;
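A short sketch of how the new action is meant to be used; the import path and panel id are assumptions based on the files shown here:

    // Import path assumed from the action/reducer layout above.
    import { bindDataExplorerActions } from "store/data-explorer/data-explorer-action";

    const myPanelActions = bindDataExplorerActions("myPanel");
    // In a panel middleware service, after a listing request comes back empty or 404s,
    // flag the explorer so DataExplorer passes isNotFound down and DataTable renders
    // the "No items found" placeholder:
    //     api.dispatch(myPanelActions.SET_IS_NOT_FOUND({ isNotFound: true }));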
index a0a7eb6400b1160f0702d2e4243b94912c85bfa1..2bc8caad365cbaa14232d5eba7536454bc9b609a 100644 (file)
@@ -30,6 +30,7 @@ export interface DataExplorer {
     searchValue: string;
     working?: boolean;
     requestState: DataTableRequestState;
+    isNotFound: boolean;
 }
 
 export const initialDataExplorer: DataExplorer = {
@@ -42,6 +43,7 @@ export const initialDataExplorer: DataExplorer = {
     rowsPerPageOptions: [10, 20, 50, 100, 200, 500],
     searchValue: '',
     requestState: DataTableRequestState.IDLE,
+    isNotFound: false,
 };
 
 export type DataExplorerState = Record<string, DataExplorer>;
@@ -119,6 +121,9 @@ export const dataExplorerReducer = (
         TOGGLE_COLUMN: ({ id, columnName }) =>
             update(state, id, mapColumns(toggleColumn(columnName))),
 
+        SET_IS_NOT_FOUND: ({ id, isNotFound }) =>
+            update(state, id, (explorer) => ({ ...explorer, isNotFound })),
+
         default: () => state,
     });
 };
index 0229834c3bc148ee6c48e64fdca945e11716b80c..a15690b3f4569fb6ac4e50389606e16c800e1d51 100644 (file)
@@ -77,7 +77,6 @@ export class FavoritePanelMiddlewareService extends DataExplorerMiddlewareServic
                     response.items.push(it);
                 });
 
-                api.dispatch(progressIndicatorActions.PERSIST_STOP_WORKING(this.getId()));
                 api.dispatch(resourcesActions.SET_RESOURCES(response.items));
                 await api.dispatch<any>(loadMissingProcessesInformation(response.items));
                 api.dispatch(favoritePanelActions.SET_ITEMS({
@@ -87,8 +86,8 @@ export class FavoritePanelMiddlewareService extends DataExplorerMiddlewareServic
                     rowsPerPage: response.limit
                 }));
                 api.dispatch<any>(updateFavorites(response.items.map((item: any) => item.uuid)));
-            } catch (e) {
                 api.dispatch(progressIndicatorActions.PERSIST_STOP_WORKING(this.getId()));
+            } catch (e) {
                 api.dispatch(favoritePanelActions.SET_ITEMS({
                     items: [],
                     itemsAvailable: 0,
@@ -96,6 +95,7 @@ export class FavoritePanelMiddlewareService extends DataExplorerMiddlewareServic
                     rowsPerPage: dataExplorer.rowsPerPage
                 }));
                 api.dispatch(couldNotFetchFavoritesContents());
+                api.dispatch(progressIndicatorActions.PERSIST_STOP_WORKING(this.getId()));
             }
         }
     }
diff --git a/services/workbench2/src/store/group-details-panel/group-details-panel-members-middleware-service.test.js b/services/workbench2/src/store/group-details-panel/group-details-panel-members-middleware-service.test.js
new file mode 100644 (file)
index 0000000..d386ed3
--- /dev/null
@@ -0,0 +1,28 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+import { initialDataExplorer } from '../data-explorer/data-explorer-reducer'
+import { getParams } from './group-details-panel-members-middleware-service'
+
+describe('group-details-panel-members-middleware', () => {
+    describe('getParams', () => {
+        it('should paginate', () => {
+            // given
+            const dataExplorer = initialDataExplorer;
+            let params = getParams(dataExplorer, 'uuid');
+
+            // expect
+            expect(params.offset).toBe(0);
+            expect(params.limit).toBe(50);
+
+            // when
+            dataExplorer.page = 1;
+            params = getParams(dataExplorer, 'uuid');
+
+            // expect
+            expect(params.offset).toBe(50);
+            expect(params.limit).toBe(50);
+        });
+    })
+})
index 507b4eb30fb2aaa1fdd5d38add18f7b56b03c32a..6f95ca4ee231acecd177083aab7992866d852063 100644 (file)
@@ -3,11 +3,11 @@
 // SPDX-License-Identifier: AGPL-3.0
 
 import { Dispatch, MiddlewareAPI } from "redux";
-import { DataExplorerMiddlewareService, listResultsToDataExplorerItemsMeta } from "store/data-explorer/data-explorer-middleware-service";
+import { DataExplorerMiddlewareService, dataExplorerToListParams, listResultsToDataExplorerItemsMeta } from "store/data-explorer/data-explorer-middleware-service";
 import { RootState } from "store/store";
 import { ServiceRepository } from "services/services";
 import { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';
-import { getDataExplorer } from "store/data-explorer/data-explorer-reducer";
+import { DataExplorer, getDataExplorer } from "store/data-explorer/data-explorer-reducer";
 import { FilterBuilder } from 'services/api/filter-builder';
 import { updateResources } from 'store/resources/resources-actions';
 import { getCurrentGroupDetailsPanelUuid, GroupMembersPanelActions } from 'store/group-details-panel/group-details-panel-actions';
@@ -33,12 +33,7 @@ export class GroupDetailsPanelMembersMiddlewareService extends DataExplorerMiddl
                 const groupResource = await this.services.groupsService.get(groupUuid);
                 api.dispatch(updateResources([groupResource]));
 
-                const permissionsIn = await this.services.permissionService.list({
-                    filters: new FilterBuilder()
-                        .addEqual('head_uuid', groupUuid)
-                        .addEqual('link_class', LinkClass.PERMISSION)
-                        .getFilters()
-                });
+                const permissionsIn = await this.services.permissionService.list(getParams(dataExplorer, groupUuid));
                 api.dispatch(updateResources(permissionsIn.items));
 
                 api.dispatch(GroupMembersPanelActions.SET_ITEMS({
@@ -74,6 +69,20 @@ export class GroupDetailsPanelMembersMiddlewareService extends DataExplorerMiddl
     }
 }
 
+export const getParams = (dataExplorer: DataExplorer, groupUuid: string) => ({
+    ...dataExplorerToListParams(dataExplorer),
+    filters: getFilters(groupUuid),
+});
+
+export const getFilters = (groupUuid: string) => {
+    const filters = new FilterBuilder()
+        .addEqual('head_uuid', groupUuid)
+        .addEqual('link_class', LinkClass.PERMISSION)
+        .getFilters();
+
+    return filters;
+};
+
 const couldNotFetchGroupDetailsContents = () =>
     snackbarActions.OPEN_SNACKBAR({
         message: 'Could not fetch group members.',
diff --git a/services/workbench2/src/store/group-details-panel/group-details-panel-permissions-middleware-service.test.js b/services/workbench2/src/store/group-details-panel/group-details-panel-permissions-middleware-service.test.js
new file mode 100644 (file)
index 0000000..1e5e3b1
--- /dev/null
@@ -0,0 +1,28 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+import { initialDataExplorer } from '../data-explorer/data-explorer-reducer'
+import { getParams } from './group-details-panel-permissions-middleware-service'
+
+describe('group-details-panel-permissions-middleware', () => {
+    describe('getParams', () => {
+        it('should paginate', () => {
+            // given
+            const dataExplorer = initialDataExplorer;
+            let params = getParams(dataExplorer, 'uuid');
+
+            // expect
+            expect(params.offset).toBe(0);
+            expect(params.limit).toBe(50);
+
+            // when
+            dataExplorer.page = 1;
+            params = getParams(dataExplorer, 'uuid');
+
+            // expect
+            expect(params.offset).toBe(50);
+            expect(params.limit).toBe(50);
+        });
+    })
+})
index 85beecd726b8ac980a537b5b9ab3db0915e19ae1..c6cb05f87bf9c2e8b21462f8e0154cef29d305c5 100644 (file)
@@ -3,11 +3,11 @@
 // SPDX-License-Identifier: AGPL-3.0
 
 import { Dispatch, MiddlewareAPI } from "redux";
-import { DataExplorerMiddlewareService, listResultsToDataExplorerItemsMeta } from "store/data-explorer/data-explorer-middleware-service";
+import { DataExplorerMiddlewareService, dataExplorerToListParams, listResultsToDataExplorerItemsMeta } from "store/data-explorer/data-explorer-middleware-service";
 import { RootState } from "store/store";
 import { ServiceRepository } from "services/services";
 import { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';
-import { getDataExplorer } from "store/data-explorer/data-explorer-reducer";
+import { DataExplorer, getDataExplorer } from "store/data-explorer/data-explorer-reducer";
 import { FilterBuilder } from 'services/api/filter-builder';
 import { updateResources } from 'store/resources/resources-actions';
 import { getCurrentGroupDetailsPanelUuid, GroupPermissionsPanelActions } from 'store/group-details-panel/group-details-panel-actions';
@@ -27,12 +27,7 @@ export class GroupDetailsPanelPermissionsMiddlewareService extends DataExplorerM
             // No-op if data explorer is not set since refresh may be triggered from elsewhere
         } else {
             try {
-                const permissionsOut = await this.services.permissionService.list({
-                    filters: new FilterBuilder()
-                        .addEqual('tail_uuid', groupUuid)
-                        .addEqual('link_class', LinkClass.PERMISSION)
-                        .getFilters()
-                });
+                const permissionsOut = await this.services.permissionService.list(getParams(dataExplorer, groupUuid));
                 api.dispatch(updateResources(permissionsOut.items));
 
                 api.dispatch(GroupPermissionsPanelActions.SET_ITEMS({
@@ -76,6 +71,20 @@ export class GroupDetailsPanelPermissionsMiddlewareService extends DataExplorerM
     }
 }
 
+export const getParams = (dataExplorer: DataExplorer, groupUuid: string) => ({
+    ...dataExplorerToListParams(dataExplorer),
+    filters: getFilters(groupUuid),
+});
+
+export const getFilters = (groupUuid: string) => {
+    const filters = new FilterBuilder()
+        .addEqual('tail_uuid', groupUuid)
+        .addEqual('link_class', LinkClass.PERMISSION)
+        .getFilters();
+
+    return filters;
+};
+
 const couldNotFetchGroupDetailsContents = () =>
     snackbarActions.OPEN_SNACKBAR({
         message: 'Could not fetch group permissions.',
diff --git a/services/workbench2/src/store/groups-panel/groups-panel-middleware-service.test.ts b/services/workbench2/src/store/groups-panel/groups-panel-middleware-service.test.ts
new file mode 100644 (file)
index 0000000..42d88a9
--- /dev/null
@@ -0,0 +1,160 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+import Axios, { AxiosInstance, AxiosResponse } from "axios";
+import { mockConfig } from "common/config";
+import { createBrowserHistory } from "history";
+import { GroupsPanelMiddlewareService } from "./groups-panel-middleware-service";
+import { dataExplorerMiddleware } from "store/data-explorer/data-explorer-middleware";
+import { Dispatch, MiddlewareAPI } from "redux";
+import { DataColumns } from "components/data-table/data-table";
+import { dataExplorerActions } from "store/data-explorer/data-explorer-action";
+import { SortDirection } from "components/data-table/data-column";
+import { createTree } from 'models/tree';
+import { DataTableFilterItem } from "components/data-table-filters/data-table-filters-tree";
+import { GROUPS_PANEL_ID } from "./groups-panel-actions";
+import { RootState, RootStore, configureStore } from "store/store";
+import { ServiceRepository, createServices } from "services/services";
+import { ApiActions } from "services/api/api-actions";
+import { ListResults } from "services/common-service/common-service";
+import { GroupResource } from "models/group";
+import { getResource } from "store/resources/resources";
+
+describe("GroupsPanelMiddlewareService", () => {
+    let axiosInst: AxiosInstance;
+    let store: RootStore;
+    let services: ServiceRepository;
+    const config: any = {};
+    const actions: ApiActions = {
+        progressFn: (id: string, working: boolean) => { },
+        errorFn: (id: string, message: string) => { }
+    };
+
+    beforeEach(() => {
+        axiosInst = Axios.create({ headers: {} });
+        services = createServices(mockConfig({}), actions, axiosInst);
+        store = configureStore(createBrowserHistory(), services, config);
+    });
+
+    it("requests group member counts and updates resource store", async () => {
+        // Given
+        const fakeUuid = "zzzzz-j7d0g-000000000000000";
+        axiosInst.get = jest.fn((url: string) => {
+            if (url === '/groups') {
+                return Promise.resolve(
+                    { data: {
+                        kind: "",
+                        offset: 0,
+                        limit: 100,
+                        items: [{
+                            can_manage: true,
+                            can_write: true,
+                            created_at: "2023-11-15T20:57:01.723043000Z",
+                            delete_at: null,
+                            description: null,
+                            etag: "0000000000000000000000000",
+                            frozen_by_uuid: null,
+                            group_class: "role",
+                            href: `/groups/${fakeUuid}`,
+                            is_trashed: false,
+                            kind: "arvados#group",
+                            modified_at: "2023-11-15T20:57:01.719986000Z",
+                            modified_by_client_uuid: null,
+                            modified_by_user_uuid: "zzzzz-tpzed-000000000000000",
+                            name: "Test Group",
+                            owner_uuid: "zzzzz-tpzed-000000000000000",
+                            properties: {},
+                            trash_at: null,
+                            uuid: fakeUuid,
+                            writable_by: [
+                                "zzzzz-tpzed-000000000000000",
+                            ]
+                        }],
+                        items_available: 1,
+                    }} as AxiosResponse);
+            } else if (url === '/links') {
+                return Promise.resolve(
+                    { data: {
+                        items: [],
+                        items_available: 234,
+                        kind: "arvados#linkList",
+                        limit: 0,
+                        offset: 0
+                    }} as AxiosResponse);
+            } else {
+                return Promise.resolve(
+                    { data: {}} as AxiosResponse);
+            }
+        });
+
+        // When
+        await store.dispatch(dataExplorerActions.REQUEST_ITEMS({id: GROUPS_PANEL_ID}));
+        // Wait for async fetching of group count promises to resolve
+        await new Promise(setImmediate);
+
+        // Expect
+        expect(axiosInst.get).toHaveBeenCalledTimes(2);
+        expect(axiosInst.get).toHaveBeenCalledWith('/groups', expect.anything());
+        expect(axiosInst.get).toHaveBeenCalledWith('/links', expect.anything());
+        const group = getResource<GroupResource>(fakeUuid)(store.getState().resources);
+        expect(group?.memberCount).toBe(234);
+    });
+
+    it('requests group member count and stores null on failure', async () => {
+        // Given
+        const fakeUuid = "zzzzz-j7d0g-000000000000000";
+        axiosInst.get = jest.fn((url: string) => {
+            if (url === '/groups') {
+                return Promise.resolve(
+                    { data: {
+                        kind: "",
+                        offset: 0,
+                        limit: 100,
+                        items: [{
+                            can_manage: true,
+                            can_write: true,
+                            created_at: "2023-11-15T20:57:01.723043000Z",
+                            delete_at: null,
+                            description: null,
+                            etag: "0000000000000000000000000",
+                            frozen_by_uuid: null,
+                            group_class: "role",
+                            href: `/groups/${fakeUuid}`,
+                            is_trashed: false,
+                            kind: "arvados#group",
+                            modified_at: "2023-11-15T20:57:01.719986000Z",
+                            modified_by_client_uuid: null,
+                            modified_by_user_uuid: "zzzzz-tpzed-000000000000000",
+                            name: "Test Group",
+                            owner_uuid: "zzzzz-tpzed-000000000000000",
+                            properties: {},
+                            trash_at: null,
+                            uuid: fakeUuid,
+                            writable_by: [
+                                "zzzzz-tpzed-000000000000000",
+                            ]
+                        }],
+                        items_available: 1,
+                    }} as AxiosResponse);
+            } else if (url === '/links') {
+                return Promise.reject();
+            } else {
+                return Promise.resolve({ data: {}} as AxiosResponse);
+            }
+        });
+
+        // When
+        await store.dispatch(dataExplorerActions.REQUEST_ITEMS({id: GROUPS_PANEL_ID}));
+        // Wait for async fetching of group count promises to resolve
+        await new Promise(setImmediate);
+
+        // Expect
+        expect(axiosInst.get).toHaveBeenCalledTimes(2);
+        expect(axiosInst.get).toHaveBeenCalledWith('/groups', expect.anything());
+        expect(axiosInst.get).toHaveBeenCalledWith('/links', expect.anything());
+        const group = getResource<GroupResource>(fakeUuid)(store.getState().resources);
+        expect(group?.memberCount).toBe(null);
+    });
+
+});
index 7d7803f59e6943efd042a0da166c38ccc473bdda..fdfaf1c2cdc0e5910b5b050f55f56145bea83224 100644 (file)
@@ -40,25 +40,40 @@ export class GroupsPanelMiddlewareService extends DataExplorerMiddlewareService
                     .addEqual('group_class', GroupClass.ROLE)
                     .addILike('name', dataExplorer.searchValue)
                     .getFilters();
-                const response = await this.services.groupsService
+                const groups = await this.services.groupsService
                     .list({
                         ...dataExplorerToListParams(dataExplorer),
                         filters,
                         order: order.getOrder(),
                     });
-                api.dispatch(updateResources(response.items));
+                api.dispatch(updateResources(groups.items));
                 api.dispatch(GroupsPanelActions.SET_ITEMS({
-                    ...listResultsToDataExplorerItemsMeta(response),
-                    items: response.items.map(item => item.uuid),
+                    ...listResultsToDataExplorerItemsMeta(groups),
+                    items: groups.items.map(item => item.uuid),
                 }));
-                const permissions = await this.services.permissionService.list({
-                    filters: new FilterBuilder()
-                        .addIn('head_uuid', response.items.map(item => item.uuid))
-                        .getFilters()
-                });
-                api.dispatch(updateResources(permissions.items));
+
+                // Get group member count
+                groups.items.map(group => (
+                    this.services.permissionService.list({
+                        limit: 0,
+                        filters: new FilterBuilder()
+                            .addEqual('head_uuid', group.uuid)
+                            .getFilters()
+                    }).then(members => {
+                        api.dispatch(updateResources([{
+                            ...group,
+                            memberCount: members.itemsAvailable,
+                        } as GroupResource]));
+                    }).catch(e => {
+                        // In case of error, store null to stop spinners and show failure icon
+                        api.dispatch(updateResources([{
+                            ...group,
+                            memberCount: null,
+                        } as GroupResource]));
+                    })
+                ));
             } catch (e) {
-                api.dispatch(couldNotFetchFavoritesContents());
+                api.dispatch(couldNotFetchGroupList());
             } finally {
                 api.dispatch(progressIndicatorActions.STOP_WORKING(this.getId()));
             }
@@ -72,7 +87,7 @@ const groupsPanelDataExplorerIsNotSet = () =>
         kind: SnackbarKind.ERROR
     });
 
-const couldNotFetchFavoritesContents = () =>
+const couldNotFetchGroupList = () =>
     snackbarActions.OPEN_SNACKBAR({
         message: 'Could not fetch groups.',
         kind: SnackbarKind.ERROR
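
The member-count fetch added above issues one permission listing per group with limit: 0, so no items are returned but items_available still comes back and is stored as memberCount (null on failure, which the column renderer shows as an error icon). A hedged sketch of that count-only query in isolation (permissionService and groupUuid are placeholders):

    import { FilterBuilder } from 'services/api/filter-builder';

    // Count-only query: limit 0 skips the items but still reports itemsAvailable.
    const countGroupMembers = async (
        permissionService: { list: (args: any) => Promise<{ itemsAvailable: number }> },
        groupUuid: string,
    ): Promise<number> =>
        (await permissionService.list({
            limit: 0,
            filters: new FilterBuilder().addEqual('head_uuid', groupUuid).getFilters(),
        })).itemsAvailable;
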
index 9bcb46d018d0dea2a20071464719eb79322ea4e9..b488932f0bbbcfa1272fcc666ddaf751e727054a 100644 (file)
@@ -9,7 +9,7 @@ type MultiselectToolbarState = {
     isVisible: boolean;
     checkedList: TCheckedList;
     selectedUuid: string;
-    disabledButtons: string[]
+    disabledButtons: string[];
 };
 
 const multiselectToolbarInitialState = {
index 2111afdb2fc89d05eaba56ad46add0c43beccf19..82c267c7a06411c4371586b4075716cd4afa6aad 100644 (file)
@@ -18,9 +18,10 @@ import { ContainerRequestResource } from "models/container-request";
 import { CommandOutputParameter } from "cwlts/mappings/v1.0/CommandOutputParameter";
 import { CommandInputParameter, getIOParamId, WorkflowInputsData } from "models/workflow";
 import { getIOParamDisplayValue, ProcessIOParameter } from "views/process-panel/process-io-card";
-import { OutputDetails, NodeInstanceType, NodeInfo } from "./process-panel";
+import { OutputDetails, NodeInstanceType, NodeInfo, UsageReport } from "./process-panel";
 import { AuthState } from "store/auth/auth-reducer";
 import { ContextMenuResource } from "store/context-menu/context-menu-actions";
+import { OutputDataUpdate } from "./process-panel-reducer";
 
 export const processPanelActions = unionize({
     RESET_PROCESS_PANEL: ofType<{}>(),
@@ -29,10 +30,11 @@ export const processPanelActions = unionize({
     TOGGLE_PROCESS_PANEL_FILTER: ofType<string>(),
     SET_INPUT_RAW: ofType<WorkflowInputsData | null>(),
     SET_INPUT_PARAMS: ofType<ProcessIOParameter[] | null>(),
-    SET_OUTPUT_RAW: ofType<OutputDetails | null>(),
+    SET_OUTPUT_DATA: ofType<OutputDataUpdate | null>(),
     SET_OUTPUT_DEFINITIONS: ofType<CommandOutputParameter[]>(),
     SET_OUTPUT_PARAMS: ofType<ProcessIOParameter[] | null>(),
     SET_NODE_INFO: ofType<NodeInfo>(),
+    SET_USAGE_REPORT: ofType<UsageReport>(),
 });
 
 export type ProcessPanelAction = UnionOf<typeof processPanelActions>;
@@ -71,10 +73,13 @@ export const loadInputs =
 
 export const loadOutputs =
     (containerRequest: ContainerRequestResource) => async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {
-        const noOutputs = { rawOutputs: {} };
+        const noOutputs: OutputDetails = { raw: {} };
 
         if (!containerRequest.outputUuid) {
-            dispatch<ProcessPanelAction>(processPanelActions.SET_OUTPUT_RAW({ uuid: containerRequest.uuid, outputRaw: noOutputs }));
+            dispatch<ProcessPanelAction>(processPanelActions.SET_OUTPUT_DATA({
+                uuid: containerRequest.uuid,
+                payload: noOutputs
+            }));
             return;
         }
         try {
@@ -86,9 +91,12 @@ export const loadOutputs =
             // If has propsOutput, skip fetching cwl.output.json
             if (propsOutputs !== undefined) {
                 dispatch<ProcessPanelAction>(
-                    processPanelActions.SET_OUTPUT_RAW({
-                        rawOutputs: propsOutputs,
-                        pdh: collection.portableDataHash,
+                    processPanelActions.SET_OUTPUT_DATA({
+                        uuid: containerRequest.uuid,
+                        payload: {
+                            raw: propsOutputs,
+                            pdh: collection.portableDataHash,
+                        },
                     })
                 );
             } else {
@@ -97,17 +105,20 @@ export const loadOutputs =
                 let outputData = outputFile ? await services.collectionService.getFileContents(outputFile) : undefined;
                 if (outputData && (outputData = JSON.parse(outputData)) && collection.portableDataHash) {
                     dispatch<ProcessPanelAction>(
-                        processPanelActions.SET_OUTPUT_RAW({
+                        processPanelActions.SET_OUTPUT_DATA({
                             uuid: containerRequest.uuid,
-                            outputRaw: { rawOutputs: outputData, pdh: collection.portableDataHash },
+                            payload: {
+                                raw: outputData,
+                                pdh: collection.portableDataHash,
+                            },
                         })
                     );
                 } else {
-                    dispatch<ProcessPanelAction>(processPanelActions.SET_OUTPUT_RAW({ uuid: containerRequest.uuid, outputRaw: noOutputs }));
+                    dispatch<ProcessPanelAction>(processPanelActions.SET_OUTPUT_DATA({ uuid: containerRequest.uuid, payload: noOutputs }));
                 }
             }
         } catch {
-            dispatch<ProcessPanelAction>(processPanelActions.SET_OUTPUT_RAW({ uuid: containerRequest.uuid, outputRaw: noOutputs }));
+            dispatch<ProcessPanelAction>(processPanelActions.SET_OUTPUT_DATA({ uuid: containerRequest.uuid, payload: noOutputs }));
         }
     };
 
@@ -135,8 +146,12 @@ export const loadNodeJson =
             } else {
                 dispatch<ProcessPanelAction>(processPanelActions.SET_NODE_INFO(noLog));
             }
+
+            const usageReportFile = files.find(file => file.name === "usage_report.html") as CollectionFile | null;
+            dispatch<ProcessPanelAction>(processPanelActions.SET_USAGE_REPORT({ usageReport: usageReportFile }));
         } catch {
             dispatch<ProcessPanelAction>(processPanelActions.SET_NODE_INFO(noLog));
+            dispatch<ProcessPanelAction>(processPanelActions.SET_USAGE_REPORT({ usageReport: null }));
         }
     };
 
@@ -149,11 +164,11 @@ export const loadOutputDefinitions =
 
 export const updateOutputParams = () => async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {
     const outputDefinitions = getState().processPanel.outputDefinitions;
-    const outputRaw = getState().processPanel.outputRaw;
+    const outputData = getState().processPanel.outputData;
 
-    if (outputRaw && outputRaw.rawOutputs) {
+    if (outputData && outputData.raw) {
         dispatch<ProcessPanelAction>(
-            processPanelActions.SET_OUTPUT_PARAMS(formatOutputData(outputDefinitions, outputRaw.rawOutputs, outputRaw.pdh, getState().auth))
+            processPanelActions.SET_OUTPUT_PARAMS(formatOutputData(outputDefinitions, outputData.raw, outputData.pdh, getState().auth))
         );
     }
 };
index ea6de66db415294d183fb208af0b4ee7f68acfc9..ab95b6ac32aeab1b1e3d416335e23c2ce09bf36e 100644 (file)
@@ -2,7 +2,7 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-import { ProcessPanel } from "store/process-panel/process-panel";
+import { OutputDetails, ProcessPanel } from "store/process-panel/process-panel";
 import { ProcessPanelAction, processPanelActions } from "store/process-panel/process-panel-actions";
 
 const initialState: ProcessPanel = {
@@ -10,10 +10,16 @@ const initialState: ProcessPanel = {
     filters: {},
     inputRaw: null,
     inputParams: null,
-    outputRaw: null,
+    outputData: null,
     nodeInfo: null,
     outputDefinitions: [],
     outputParams: null,
+    usageReport: null,
+};
+
+export type OutputDataUpdate = {
+    uuid: string;
+    payload: OutputDetails;
 };
 
 export const processPanelReducer = (state = initialState, action: ProcessPanelAction): ProcessPanel =>
@@ -49,12 +55,12 @@ export const processPanelReducer = (state = initialState, action: ProcessPanelAc
                 return state;
             }
         },
-        SET_OUTPUT_RAW: (data: any) => {
+        SET_OUTPUT_DATA: (update: OutputDataUpdate) => {
             //never set output to {} unless initializing
-            if (state.outputRaw?.rawOutputs && Object.keys(state.outputRaw?.rawOutputs).length && state.containerRequestUuid === data.uuid) {
+            if (state.outputData?.raw && Object.keys(state.outputData?.raw).length && state.containerRequestUuid === update.uuid) {
                 return state;
             }
-            return { ...state, outputRaw: data.outputRaw };
+            return { ...state, outputData: update.payload };
         },
         SET_NODE_INFO: ({ nodeInfo }) => {
             return { ...state, nodeInfo };
@@ -70,5 +76,8 @@ export const processPanelReducer = (state = initialState, action: ProcessPanelAc
         SET_OUTPUT_PARAMS: outputParams => {
             return { ...state, outputParams };
         },
+        SET_USAGE_REPORT: ({ usageReport }) => {
+            return { ...state, usageReport };
+        },
         default: () => state,
     });
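
With the rename from SET_OUTPUT_RAW to SET_OUTPUT_DATA, callers now dispatch an OutputDataUpdate of shape { uuid, payload }, where payload carries the raw outputs and the output collection's portable data hash. A hedged dispatch example (the uuid and pdh values are placeholders):

    import { Dispatch } from 'redux';
    import { processPanelActions } from 'store/process-panel/process-panel-actions';

    // Illustrative only; not part of this commit.
    const setOutput = (dispatch: Dispatch<any>) =>
        dispatch(processPanelActions.SET_OUTPUT_DATA({
            uuid: 'zzzzz-xvhdp-000000000000000',          // container request uuid (placeholder)
            payload: {
                raw: { result: 'value' },                  // parsed cwl.output.json contents
                pdh: 'd41d8cd98f00b204e9800998ecf8427e+0', // output collection portable data hash (placeholder)
            },
        }));
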
index 1ec60ff54c27f69b3fad5ee0494aa53ad3a693d2..12a46a272c08939c6e35ce07e6a0a579e0a2105d 100644 (file)
@@ -7,9 +7,10 @@ import { RouterState } from "react-router-redux";
 import { matchProcessRoute } from "routes/routes";
 import { ProcessIOParameter } from "views/process-panel/process-io-card";
 import { CommandOutputParameter } from 'cwlts/mappings/v1.0/CommandOutputParameter';
+import { CollectionFile } from 'models/collection-file';
 
 export type OutputDetails = {
-    rawOutputs?: any;
+    raw?: any;
     pdh?: string;
 }
 
@@ -36,15 +37,20 @@ export interface NodeInfo {
     nodeInfo: NodeInstanceType | null;
 };
 
+export interface UsageReport {
+    usageReport: CollectionFile | null;
+};
+
 export interface ProcessPanel {
     containerRequestUuid: string;
     filters: { [status: string]: boolean };
     inputRaw: WorkflowInputsData | null;
     inputParams: ProcessIOParameter[] | null;
-    outputRaw: OutputDetails | null;
+    outputData: OutputDetails | null;
     outputDefinitions: CommandOutputParameter[];
     outputParams: ProcessIOParameter[] | null;
     nodeInfo: NodeInstanceType | null;
+    usageReport: CollectionFile | null;
 }
 
 export const getProcessPanelCurrentUuid = (router: RouterState) => {
diff --git a/services/workbench2/src/store/processes/processes-middleware-service.ts b/services/workbench2/src/store/processes/processes-middleware-service.ts
new file mode 100644 (file)
index 0000000..3154e1a
--- /dev/null
@@ -0,0 +1,95 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+import { ServiceRepository } from 'services/services';
+import { MiddlewareAPI, Dispatch } from 'redux';
+import {
+    DataExplorerMiddlewareService, dataExplorerToListParams, listResultsToDataExplorerItemsMeta, getDataExplorerColumnFilters, getOrder
+} from 'store/data-explorer/data-explorer-middleware-service';
+import { RootState } from 'store/store';
+import { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';
+import { DataExplorer, getDataExplorer } from 'store/data-explorer/data-explorer-reducer';
+import { BoundDataExplorerActions } from 'store/data-explorer/data-explorer-action';
+import { updateResources } from 'store/resources/resources-actions';
+import { ListArguments } from 'services/common-service/common-service';
+import { ProcessResource } from 'models/process';
+import { FilterBuilder, joinFilters } from 'services/api/filter-builder';
+import { DataColumns } from 'components/data-table/data-table';
+import { ProcessStatusFilter, buildProcessStatusFilters } from '../resource-type-filters/resource-type-filters';
+import { ContainerRequestResource, containerRequestFieldsNoMounts } from 'models/container-request';
+import { progressIndicatorActions } from '../progress-indicator/progress-indicator-actions';
+import { loadMissingProcessesInformation } from '../project-panel/project-panel-middleware-service';
+
+export class ProcessesMiddlewareService extends DataExplorerMiddlewareService {
+    constructor(private services: ServiceRepository, private actions: BoundDataExplorerActions, id: string) {
+        super(id);
+    }
+
+    getFilters(api: MiddlewareAPI<Dispatch, RootState>, dataExplorer: DataExplorer): string | null {
+        const columns = dataExplorer.columns as DataColumns<string, ContainerRequestResource>;
+        const statusColumnFilters = getDataExplorerColumnFilters(columns, 'Status');
+        const activeStatusFilter = Object.keys(statusColumnFilters).find(
+            filterName => statusColumnFilters[filterName].selected
+        ) || ProcessStatusFilter.ALL;
+
+        const nameFilter = new FilterBuilder().addILike("name", dataExplorer.searchValue).getFilters();
+        const statusFilter = buildProcessStatusFilters(new FilterBuilder(), activeStatusFilter).getFilters();
+
+        return joinFilters(
+            nameFilter,
+            statusFilter,
+        );
+    }
+
+    getParams(api: MiddlewareAPI<Dispatch, RootState>, dataExplorer: DataExplorer): ListArguments | null {
+        const filters = this.getFilters(api, dataExplorer)
+        if (filters === null) {
+            return null;
+        }
+        return {
+            ...dataExplorerToListParams(dataExplorer),
+            order: getOrder<ProcessResource>(dataExplorer),
+            filters
+        };
+    }
+
+    async requestItems(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {
+        const state = api.getState();
+        const dataExplorer = getDataExplorer(state.dataExplorer, this.getId());
+
+        try {
+            if (!background) { api.dispatch(progressIndicatorActions.START_WORKING(this.getId())); }
+
+            const params = this.getParams(api, dataExplorer);
+
+            if (params !== null) {
+                const containerRequests = await this.services.containerRequestService.list(
+                    {
+                        ...this.getParams(api, dataExplorer),
+                        select: containerRequestFieldsNoMounts
+                    });
+                api.dispatch(updateResources(containerRequests.items));
+                await api.dispatch<any>(loadMissingProcessesInformation(containerRequests.items));
+                api.dispatch(this.actions.SET_ITEMS({
+                    ...listResultsToDataExplorerItemsMeta(containerRequests),
+                    items: containerRequests.items.map(resource => resource.uuid),
+                }));
+            } else {
+                api.dispatch(this.actions.SET_ITEMS({
+                    itemsAvailable: 0,
+                    page: 0,
+                    rowsPerPage: dataExplorer.rowsPerPage,
+                    items: [],
+                }));
+            }
+            if (!background) { api.dispatch(progressIndicatorActions.PERSIST_STOP_WORKING(this.getId())); }
+        } catch {
+            api.dispatch(snackbarActions.OPEN_SNACKBAR({
+                message: 'Could not fetch process list.',
+                kind: SnackbarKind.ERROR
+            }));
+            if (!background) { api.dispatch(progressIndicatorActions.PERSIST_STOP_WORKING(this.getId())); }
+        }
+    }
+}
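
The new ProcessesMiddlewareService factors the shared container-request fetching out of the individual panels; a subclass only supplies its bound data-explorer actions and narrows getFilters (see the subprocess and workflow panels further down). A hedged sketch of such a subclass (the panel id and the extra owner filter are hypothetical):

    import { Dispatch, MiddlewareAPI } from 'redux';
    import { RootState } from 'store/store';
    import { ServiceRepository } from 'services/services';
    import { DataExplorer } from 'store/data-explorer/data-explorer-reducer';
    import { bindDataExplorerActions } from 'store/data-explorer/data-explorer-action';
    import { FilterBuilder, joinFilters } from 'services/api/filter-builder';
    import { ProcessesMiddlewareService } from 'store/processes/processes-middleware-service';

    const MY_PROCESSES_PANEL_ID = 'myProcessesPanel';                   // hypothetical panel id
    const myProcessesPanelActions = bindDataExplorerActions(MY_PROCESSES_PANEL_ID);

    class MyProcessesPanelService extends ProcessesMiddlewareService {
        constructor(services: ServiceRepository) {
            super(services, myProcessesPanelActions, MY_PROCESSES_PANEL_ID);
        }

        getFilters(api: MiddlewareAPI<Dispatch, RootState>, dataExplorer: DataExplorer): string | null {
            const base = super.getFilters(api, dataExplorer);
            if (base === null) { return null; }
            // Hypothetical extra constraint layered on top of the shared name/status filters:
            const ownedByMe = new FilterBuilder()
                .addEqual('owner_uuid', 'zzzzz-tpzed-000000000000000')  // placeholder user uuid
                .getFilters();
            return joinFilters(base, ownedByMe);
        }
    }
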
index 366e15ae04759e2a774563d8307fa8c4449eb8fe..89f0576d8660a45f80aa197bbcb85adc83da0645 100644 (file)
@@ -37,6 +37,7 @@ import { defaultCollectionSelectedFields } from "models/collection";
 import { containerRequestFieldsNoMounts } from "models/container-request";
 import { MultiSelectMenuActionNames } from "views-components/multiselect-toolbar/ms-menu-actions";
 import { removeDisabledButton } from "store/multiselect/multiselect-actions";
+import { dataExplorerActions } from "store/data-explorer/data-explorer-action";
 
 export class ProjectPanelMiddlewareService extends DataExplorerMiddlewareService {
     constructor(private services: ServiceRepository, id: string) {
@@ -54,6 +55,7 @@ export class ProjectPanelMiddlewareService extends DataExplorerMiddlewareService
             api.dispatch(projectPanelDataExplorerIsNotSet());
         } else {
             try {
+                api.dispatch<any>(dataExplorerActions.SET_IS_NOT_FOUND({ id: this.id, isNotFound: false }));
                 if (!background) { api.dispatch(progressIndicatorActions.START_WORKING(this.getId())); }
                 const response = await this.services.groupsService.contents(projectUuid, getParams(dataExplorer, !!isProjectTrashed));
                 const resourceUuids = response.items.map(item => item.uuid);
@@ -72,7 +74,7 @@ export class ProjectPanelMiddlewareService extends DataExplorerMiddlewareService
                     })
                 );
                 if (e.status === 404) {
-                    // It'll just show up as not found
+                    api.dispatch<any>(dataExplorerActions.SET_IS_NOT_FOUND({ id: this.id, isNotFound: true}));
                 }
                 else {
                     api.dispatch(couldNotFetchProjectContents());
index e1448f640b98b14287557514a0ecc96f4883e99b..791d7e8a4a3df120d1a63cb09c516e29d785ce9c 100644 (file)
@@ -93,6 +93,30 @@ export const getInitialResourceTypeFilters = pipe(
 
 );
 
+// Using pipe() with more than 7 arguments makes the return type be 'any',
+// causing compile issues.
+export const getInitialSearchTypeFilters = pipe(
+    (): DataTableFilters => createTree<DataTableFilterItem>(),
+    pipe(
+        initFilter(ObjectTypeFilter.PROJECT, '', true, true),
+        initFilter(GroupTypeFilter.PROJECT, ObjectTypeFilter.PROJECT),
+        initFilter(GroupTypeFilter.FILTER_GROUP, ObjectTypeFilter.PROJECT),
+    ),
+    pipe(
+        initFilter(ObjectTypeFilter.WORKFLOW, '', false, true),
+        initFilter(ProcessTypeFilter.MAIN_PROCESS, ObjectTypeFilter.WORKFLOW, false),
+        initFilter(ProcessTypeFilter.CHILD_PROCESS, ObjectTypeFilter.WORKFLOW, false),
+        initFilter(ObjectTypeFilter.DEFINITION, ObjectTypeFilter.WORKFLOW, false),
+    ),
+    pipe(
+        initFilter(ObjectTypeFilter.COLLECTION, '', true, true),
+        initFilter(CollectionTypeFilter.GENERAL_COLLECTION, ObjectTypeFilter.COLLECTION),
+        initFilter(CollectionTypeFilter.OUTPUT_COLLECTION, ObjectTypeFilter.COLLECTION),
+        initFilter(CollectionTypeFilter.INTERMEDIATE_COLLECTION, ObjectTypeFilter.COLLECTION, false),
+        initFilter(CollectionTypeFilter.LOG_COLLECTION, ObjectTypeFilter.COLLECTION, false),
+    ),
+);
+
 export const getInitialProcessTypeFilters = pipe(
     (): DataTableFilters => createTree<DataTableFilterItem>(),
     initFilter(ProcessTypeFilter.MAIN_PROCESS),
index af40e86ade09bfc30698eded0bb1b8802dfac577..396c2dfaaecf9253fbe63373e997981d6b81dad9 100644 (file)
@@ -38,7 +38,7 @@ export const searchBarActions = unionize({
     SET_SELECTED_ITEM: ofType<string>(),
     MOVE_UP: ofType<{}>(),
     MOVE_DOWN: ofType<{}>(),
-    SELECT_FIRST_ITEM: ofType<{}>()
+    SELECT_FIRST_ITEM: ofType<{}>(),
 });
 
 export type SearchBarActions = UnionOf<typeof searchBarActions>;
index 5e16c9a0855b24f89014e55551f144e455970476..05b75bf99c4a981f7fda8410d8c73fc74dd3c121 100644 (file)
@@ -42,7 +42,7 @@ const initialState: SearchBar = {
     selectedItem: {
         id: '',
         query: ''
-    }
+    },
 };
 
 const makeSelectedItem = (id: string, query?: string): SearchBarSelectedItem => ({ id, query: query ? query : id });
index 00a69cd2e308f733b617ec5a5b35dadebe42c01c..dab83e0114d431b8e15cc6a6e68078f4d7696e0d 100644 (file)
@@ -17,7 +17,7 @@ import { searchResultsPanelActions } from 'store/search-results-panel/search-res
 import {
     getSearchSessions,
     queryToFilters,
-    getAdvancedDataFromQuery
+    getAdvancedDataFromQuery,
 } from 'store/search-bar/search-bar-actions';
 import { getSortColumn } from "store/data-explorer/data-explorer-reducer";
 import { FilterBuilder, joinFilters } from 'services/api/filter-builder';
@@ -26,6 +26,8 @@ import { serializeResourceTypeFilters } from 'store//resource-type-filters/resou
 import { ProjectPanelColumnNames } from 'views/project-panel/project-panel';
 import { ResourceKind } from 'models/resource';
 import { ContainerRequestResource } from 'models/container-request';
+import { progressIndicatorActions } from 'store/progress-indicator/progress-indicator-actions';
+import { dataExplorerActions } from 'store/data-explorer/data-explorer-action';
 
 export class SearchResultsMiddlewareService extends DataExplorerMiddlewareService {
     constructor(private services: ServiceRepository, id: string) {
@@ -55,12 +57,24 @@ export class SearchResultsMiddlewareService extends DataExplorerMiddlewareServic
             api.dispatch(setItems(initial));
         }
 
+        const numberOfSessions = sessions.length;
+        let numberOfResolvedResponses = 0;
+        let totalNumItemsAvailable = 0;
+        api.dispatch(progressIndicatorActions.START_WORKING(this.id))
+        api.dispatch(dataExplorerActions.SET_IS_NOT_FOUND({ id: this.id, isNotFound: false }));
+
         sessions.forEach(session => {
             const params = getParams(dataExplorer, searchValue, session.apiRevision);
             this.services.groupsService.contents('', params, session)
                 .then((response) => {
                     api.dispatch(updateResources(response.items));
                     api.dispatch(appendItems(response));
+                    numberOfResolvedResponses++;
+                    totalNumItemsAvailable += response.itemsAvailable;
+                    if (numberOfResolvedResponses === numberOfSessions) {
+                        api.dispatch(progressIndicatorActions.STOP_WORKING(this.id))
+                        if(totalNumItemsAvailable === 0) api.dispatch(dataExplorerActions.SET_IS_NOT_FOUND({ id: this.id, isNotFound: true }))
+                    }
                     // Request all containers for process status to be available
                     const containerRequests = response.items.filter((item) => item.kind === ResourceKind.CONTAINER_REQUEST) as ContainerRequestResource[];
                     const containerUuids = containerRequests.map(container => container.containerUuid).filter(uuid => uuid !== null) as string[];
@@ -73,10 +87,11 @@ export class SearchResultsMiddlewareService extends DataExplorerMiddlewareServic
                         .then((containers) => {
                             api.dispatch(updateResources(containers.items));
                         });
-                }).catch(() => {
-                    api.dispatch(couldNotFetchSearchResults(session.clusterId));
-                });
-        }
+                    }).catch(() => {
+                        api.dispatch(couldNotFetchSearchResults(session.clusterId));
+                        api.dispatch(progressIndicatorActions.STOP_WORKING(this.id))
+                    });
+            }
         );
     }
 }
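
The counter-based bookkeeping added here stops the shared spinner only after every cluster session has answered, and flags the panel as "not found" only when all sessions together returned zero matches. The pattern in isolation, as a hedged sketch:

    // Multi-session completion tracking, as used above (illustrative only).
    const trackSessions = (totalSessions: number, onAllDone: (totalItems: number) => void) => {
        let resolved = 0;
        let totalItems = 0;
        return (itemsAvailableFromSession: number) => {
            resolved += 1;
            totalItems += itemsAvailableFromSession;
            if (resolved === totalSessions) { onAllDone(totalItems); }
        };
    };

    // Usage: call the returned function once per resolved session response.
    const onSessionDone = trackSessions(3, total => console.log('all sessions done,', total, 'items'));
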
index daa9812e729900fd23fcb2bd04966f6997e764ae..ee861f18be4c7eb4253dc9a6005128bced3df6b5 100644 (file)
@@ -20,9 +20,11 @@ import { collectionPanelFilesReducer } from "./collection-panel/collection-panel
 import { dataExplorerMiddleware } from "./data-explorer/data-explorer-middleware";
 import { FAVORITE_PANEL_ID } from "./favorite-panel/favorite-panel-action";
 import { PROJECT_PANEL_ID } from "./project-panel/project-panel-action";
+import { WORKFLOW_PROCESSES_PANEL_ID } from "./workflow-panel/workflow-panel-actions";
 import { ProjectPanelMiddlewareService } from "./project-panel/project-panel-middleware-service";
 import { FavoritePanelMiddlewareService } from "./favorite-panel/favorite-panel-middleware-service";
 import { AllProcessesPanelMiddlewareService } from "./all-processes-panel/all-processes-panel-middleware-service";
+import { WorkflowProcessesMiddlewareService } from "./workflow-panel/workflow-middleware-service";
 import { collectionPanelReducer } from "./collection-panel/collection-panel-reducer";
 import { dialogReducer } from "./dialog/dialog-reducer";
 import { ServiceRepository } from "services/services";
@@ -96,6 +98,7 @@ export function configureStore(history: History, services: ServiceRepository, co
     const projectPanelMiddleware = dataExplorerMiddleware(new ProjectPanelMiddlewareService(services, PROJECT_PANEL_ID));
     const favoritePanelMiddleware = dataExplorerMiddleware(new FavoritePanelMiddlewareService(services, FAVORITE_PANEL_ID));
     const allProcessessPanelMiddleware = dataExplorerMiddleware(new AllProcessesPanelMiddlewareService(services, ALL_PROCESSES_PANEL_ID));
+    const workflowProcessessPanelMiddleware = dataExplorerMiddleware(new WorkflowProcessesMiddlewareService(services, WORKFLOW_PROCESSES_PANEL_ID));
     const trashPanelMiddleware = dataExplorerMiddleware(new TrashPanelMiddlewareService(services, TRASH_PANEL_ID));
     const searchResultsPanelMiddleware = dataExplorerMiddleware(new SearchResultsMiddlewareService(services, SEARCH_RESULTS_PANEL_ID));
     const sharedWithMePanelMiddleware = dataExplorerMiddleware(new SharedWithMeMiddlewareService(services, SHARED_WITH_ME_PANEL_ID));
@@ -152,6 +155,7 @@ export function configureStore(history: History, services: ServiceRepository, co
         collectionsContentAddress,
         subprocessMiddleware,
         treePickerSearchMiddleware,
+        workflowProcessessPanelMiddleware
     ];
 
     const reduceMiddlewaresFn: (a: Middleware[], b: MiddlewareListReducer) => Middleware[] = (a, b) => b(a, services);
index 5124c8346a6951fe656cf0ae255038a7cfc344bd..0ac5df6a0f52438820623ee3cdbc09d1b99aeee4 100644 (file)
@@ -2,99 +2,32 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-import { ServiceRepository } from 'services/services';
-import { MiddlewareAPI, Dispatch } from 'redux';
-import {
-    DataExplorerMiddlewareService, dataExplorerToListParams, listResultsToDataExplorerItemsMeta, getDataExplorerColumnFilters, getOrder
-} from 'store/data-explorer/data-explorer-middleware-service';
-import { RootState } from 'store/store';
-import { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';
-import { DataExplorer, getDataExplorer } from 'store/data-explorer/data-explorer-reducer';
-import { updateResources } from 'store/resources/resources-actions';
-import { ListResults } from 'services/common-service/common-service';
-import { ProcessResource } from 'models/process';
-import { FilterBuilder, joinFilters } from 'services/api/filter-builder';
+import { RootState } from "../store";
+import { ServiceRepository } from "services/services";
+import { FilterBuilder, joinFilters } from "services/api/filter-builder";
+import { Dispatch, MiddlewareAPI } from "redux";
+import { DataExplorer } from "store/data-explorer/data-explorer-reducer";
+import { ProcessesMiddlewareService } from "store/processes/processes-middleware-service";
 import { subprocessPanelActions } from './subprocess-panel-actions';
-import { DataColumns } from 'components/data-table/data-table';
-import { ProcessStatusFilter, buildProcessStatusFilters } from '../resource-type-filters/resource-type-filters';
-import { ContainerRequestResource, containerRequestFieldsNoMounts } from 'models/container-request';
-import { progressIndicatorActions } from '../progress-indicator/progress-indicator-actions';
-import { loadMissingProcessesInformation } from '../project-panel/project-panel-middleware-service';
+import { getProcess } from "store/processes/process";
 
-export class SubprocessMiddlewareService extends DataExplorerMiddlewareService {
-    constructor(private services: ServiceRepository, id: string) {
-        super(id);
+export class SubprocessMiddlewareService extends ProcessesMiddlewareService {
+    constructor(services: ServiceRepository, id: string) {
+        super(services, subprocessPanelActions, id);
     }
 
-    async requestItems(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {
+    getFilters(api: MiddlewareAPI<Dispatch, RootState>, dataExplorer: DataExplorer): string | null {
         const state = api.getState();
         const parentContainerRequestUuid = state.processPanel.containerRequestUuid;
-        if (parentContainerRequestUuid === "") { return; }
-        const dataExplorer = getDataExplorer(state.dataExplorer, this.getId());
+        if (!parentContainerRequestUuid) { return null; }
 
-        try {
-            if (!background) { api.dispatch(progressIndicatorActions.START_WORKING(this.getId())); }
-            const parentContainerRequest = await this.services.containerRequestService.get(parentContainerRequestUuid);
-            if (parentContainerRequest.containerUuid) {
-                const containerRequests = await this.services.containerRequestService.list(
-                    {
-                        ...getParams(dataExplorer, parentContainerRequest),
-                        select: containerRequestFieldsNoMounts
-                    });
-                api.dispatch(updateResources(containerRequests.items));
-                await api.dispatch<any>(loadMissingProcessesInformation(containerRequests.items));
-                // Populate the actual user view
-                api.dispatch(setItems(containerRequests));
-            }
-            if (!background) { api.dispatch(progressIndicatorActions.PERSIST_STOP_WORKING(this.getId())); }
-        } catch {
-            if (!background) { api.dispatch(progressIndicatorActions.PERSIST_STOP_WORKING(this.getId())); }
-            api.dispatch(couldNotFetchSubprocesses());
-        }
-    }
-}
-
-export const getParams = (
-    dataExplorer: DataExplorer,
-    parentContainerRequest: ContainerRequestResource) => ({
-        ...dataExplorerToListParams(dataExplorer),
-        order: getOrder<ProcessResource>(dataExplorer),
-        filters: getFilters(dataExplorer, parentContainerRequest)
-    });
-
-export const getFilters = (
-    dataExplorer: DataExplorer,
-    parentContainerRequest: ContainerRequestResource) => {
-    const columns = dataExplorer.columns as DataColumns<string, ProcessResource>;
-    const statusColumnFilters = getDataExplorerColumnFilters(columns, 'Status');
-    const activeStatusFilter = Object.keys(statusColumnFilters).find(
-        filterName => statusColumnFilters[filterName].selected
-    ) || ProcessStatusFilter.ALL;
-
-    // Get all the subprocess' container requests and containers.
-    const fb = new FilterBuilder().addEqual('requesting_container_uuid', parentContainerRequest.containerUuid);
-    const statusFilters = buildProcessStatusFilters(fb, activeStatusFilter).getFilters();
+        const process = getProcess(parentContainerRequestUuid)(state.resources);
+        if (!process?.container) { return null; }
 
-    const nameFilters = dataExplorer.searchValue
-        ? new FilterBuilder()
-            .addILike("name", dataExplorer.searchValue)
-            .getFilters()
-        : '';
+        const requesting_container = new FilterBuilder().addEqual('requesting_container_uuid', process.container.uuid).getFilters();
+        const sup = super.getFilters(api, dataExplorer);
+        if (sup === null) { return null; }
 
-    return joinFilters(
-        nameFilters,
-        statusFilters
-    );
-};
-
-export const setItems = (listResults: ListResults<ProcessResource>) =>
-    subprocessPanelActions.SET_ITEMS({
-        ...listResultsToDataExplorerItemsMeta(listResults),
-        items: listResults.items.map(resource => resource.uuid),
-    });
-
-const couldNotFetchSubprocesses = () =>
-    snackbarActions.OPEN_SNACKBAR({
-        message: 'Could not fetch subprocesses.',
-        kind: SnackbarKind.ERROR
-    });
+        return joinFilters(sup, requesting_container);
+    }
+}
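
After this rewrite the subprocess panel derives its extra filter from the parent process already held in the resources store, matching container requests whose requesting_container_uuid equals the parent's container uuid; the result is joined onto the shared name/status filters from the base class. A hedged, standalone illustration of that extra filter (the uuid is a placeholder):

    import { FilterBuilder } from 'services/api/filter-builder';

    const parentContainerUuid = 'zzzzz-dz642-000000000000000';   // placeholder container uuid
    const subprocessFilter = new FilterBuilder()
        .addEqual('requesting_container_uuid', parentContainerUuid)
        .getFilters();
    // joinFilters(super.getFilters(...), subprocessFilter) narrows the shared
    // filters to just this parent's child processes.
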
index ed05c0b172db3af96fa75fb2203d11dc2b30016f..42a1033dde6597ec5d5e493c991dd60503dbc8d2 100644 (file)
@@ -15,7 +15,7 @@ import {
     initSidePanelTree,
     loadSidePanelTreeProjects,
     SidePanelTreeCategory,
-    SIDE_PANEL_TREE, 
+    SIDE_PANEL_TREE,
 } from "store/side-panel-tree/side-panel-tree-actions";
 import { updateResources } from "store/resources/resources-actions";
 import { projectPanelColumns } from "views/project-panel/project-panel";
@@ -103,6 +103,8 @@ import { userProfileGroupsColumns } from "views/user-profile-panel/user-profile-
 import { selectedToArray, selectedToKindSet } from "components/multiselect-toolbar/MultiselectToolbar";
 import { deselectOne } from "store/multiselect/multiselect-actions";
 import { treePickerActions } from "store/tree-picker/tree-picker-actions";
+import { workflowProcessesPanelColumns } from "views/workflow-panel/workflow-processes-panel-root";
+import { workflowProcessesPanelActions } from "store/workflow-panel/workflow-panel-actions";
 
 export const WORKBENCH_LOADING_SCREEN = "workbenchLoadingScreen";
 
@@ -190,6 +192,7 @@ export const loadWorkbench = () => async (dispatch: Dispatch, getState: () => Ro
             })
         );
         dispatch(subprocessPanelActions.SET_COLUMNS({ columns: subprocessPanelColumns }));
+        dispatch(workflowProcessesPanelActions.SET_COLUMNS({ columns: workflowProcessesPanelColumns }));
 
         if (services.linkAccountService.getAccountToLink()) {
             dispatch(linkAccountPanelActions.HAS_SESSION_DATA());
@@ -590,6 +593,7 @@ export const loadRegisteredWorkflow = (uuid: string) =>
                 await dispatch<any>(finishLoadingProject(workflow.ownerUuid));
                 await dispatch<any>(activateSidePanelTreeItem(workflow.ownerUuid));
                 dispatch<any>(breadcrumbfunc(workflow.ownerUuid));
+                dispatch(workflowProcessesPanelActions.REQUEST_ITEMS());
             }
         }
     });
@@ -753,7 +757,7 @@ export const loadVirtualMachines = handleFirstTimeLoad(async (dispatch: Dispatch
 export const loadVirtualMachinesAdmin = handleFirstTimeLoad(async (dispatch: Dispatch<any>) => {
     await dispatch(loadVirtualMachinesPanel());
     dispatch(setVirtualMachinesAdminBreadcrumbs());
-    dispatch(treePickerActions.DEACTIVATE_TREE_PICKER_NODE({pickerId: SIDE_PANEL_TREE} ))
+    dispatch(treePickerActions.DEACTIVATE_TREE_PICKER_NODE({ pickerId: SIDE_PANEL_TREE }))
 });
 
 export const loadRepositories = handleFirstTimeLoad(async (dispatch: Dispatch<any>) => {
index 587f02246cb62979e48c2939ffc2f0996d04aada..aa34218942f014d45defbd2c13f5f8fe078613b0 100644 (file)
@@ -13,6 +13,10 @@ import { FilterBuilder } from 'services/api/filter-builder';
 import { WorkflowResource } from 'models/workflow';
 import { ListResults } from 'services/common-service/common-service';
 import { workflowPanelActions } from 'store/workflow-panel/workflow-panel-actions';
+import { matchRegisteredWorkflowRoute } from 'routes/routes';
+import { ProcessesMiddlewareService } from "store/processes/processes-middleware-service";
+import { workflowProcessesPanelActions } from "./workflow-panel-actions";
+import { joinFilters } from "services/api/filter-builder";
 
 export class WorkflowMiddlewareService extends DataExplorerMiddlewareService {
     constructor(private services: ServiceRepository, id: string) {
@@ -56,3 +60,27 @@ const couldNotFetchWorkflows = () =>
         message: 'Could not fetch workflows.',
         kind: SnackbarKind.ERROR
     });
+
+
+export class WorkflowProcessesMiddlewareService extends ProcessesMiddlewareService {
+    constructor(services: ServiceRepository, id: string) {
+        super(services, workflowProcessesPanelActions, id);
+    }
+
+    getFilters(api: MiddlewareAPI<Dispatch, RootState>, dataExplorer: DataExplorer): string | null {
+        const state = api.getState();
+
+        if (!state.router.location) { return null; }
+
+        const registeredWorkflowMatch = matchRegisteredWorkflowRoute(state.router.location.pathname);
+        if (!registeredWorkflowMatch) { return null; }
+
+        const workflow_uuid = registeredWorkflowMatch.params.id;
+
+        const requesting_container = new FilterBuilder().addEqual('properties.template_uuid', workflow_uuid).getFilters();
+        const sup = super.getFilters(api, dataExplorer);
+        if (sup === null) { return null; }
+
+        return joinFilters(sup, requesting_container);
+    }
+}
index d8c3b6514135414404e6b1132be5d9302483173e..37b96bd9b0a9bb0607869553c69898585a4871f8 100644 (file)
@@ -30,6 +30,9 @@ const UUID_PREFIX_PROPERTY_NAME = 'uuidPrefix';
 const WORKFLOW_PANEL_DETAILS_UUID = 'workflowPanelDetailsUuid';
 export const workflowPanelActions = bindDataExplorerActions(WORKFLOW_PANEL_ID);
 
+export const WORKFLOW_PROCESSES_PANEL_ID = "workflowProcessesPanel";
+export const workflowProcessesPanelActions = bindDataExplorerActions(WORKFLOW_PROCESSES_PANEL_ID);
+
 export const loadWorkflowPanel = () =>
     async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {
         dispatch(workflowPanelActions.REQUEST_ITEMS());
@@ -48,9 +51,10 @@ export const openRunProcess = (workflowUuid: string, ownerUuid?: string, name?:
     async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {
         const response = await services.workflowService.list();
         dispatch(runProcessPanelActions.SET_WORKFLOWS(response.items));
-
+        
         const workflows = getState().runProcessPanel.searchWorkflows;
-        const workflow = workflows.find(workflow => workflow.uuid === workflowUuid);
+        const listedWorkflow = workflows.find(workflow => workflow.uuid === workflowUuid);
+        const workflow = listedWorkflow || await services.workflowService.get(workflowUuid);
         if (workflow) {
             dispatch<any>(navigateToRunProcess);
             dispatch<any>(goToStep(1));
index 2706315179b718124d61bb0eaf3b1bb708c13607..c722e61076088a830a91c54d837b589108580b93 100644 (file)
@@ -118,8 +118,8 @@ export const freezeProjectAction = {
 export const newProjectAction: any = {
     icon: NewProjectIcon,
     name: "New project",
-    execute: (dispatch, resource): void => {
-        dispatch(openProjectCreateDialog(resource.uuid));
+    execute: (dispatch, resources): void => {
+        dispatch(openProjectCreateDialog(resources[0].uuid));
     },
 };
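
Context-menu actions driven by the multiselect toolbar now receive the full array of selected resources rather than a single resource, so single-target actions such as "New project" act on the first entry. A hedged sketch of the updated signature (types simplified; openProjectCreateDialog comes from the surrounding module's imports):

    import { Dispatch } from 'redux';

    type SelectedResource = { uuid: string };

    const newProjectActionSketch = {
        name: 'New project',
        // Receives every checked resource; single-target actions use the first one.
        execute: (dispatch: Dispatch<any>, resources: SelectedResource[]): void => {
            dispatch(openProjectCreateDialog(resources[0].uuid));
        },
    };
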
 
index 9b11f2ad3a703d83c22f6eb53bc7b702b5ac6ad3..643949a20e0545169dac1aa6359705ccc6b86919 100644 (file)
@@ -11,7 +11,6 @@ import { dataExplorerActions } from "store/data-explorer/data-explorer-action";
 import { DataColumn } from "components/data-table/data-column";
 import { DataColumns, TCheckedList } from "components/data-table/data-table";
 import { DataTableFilters } from "components/data-table-filters/data-table-filters-tree";
-import { LAST_REFRESH_TIMESTAMP } from "components/refresh-button/refresh-button";
 import { toggleMSToolbar, setCheckedListOnStore } from "store/multiselect/multiselect-actions";
 
 interface Props {
@@ -20,13 +19,13 @@ interface Props {
     onContextMenu?: (event: React.MouseEvent<HTMLElement>, item: any, isAdmin?: boolean) => void;
     onRowDoubleClick: (item: any) => void;
     extractKey?: (item: any) => React.Key;
+    working?: boolean;
 }
 
 const mapStateToProps = ({ progressIndicator, dataExplorer, router, multiselect, detailsPanel, properties}: RootState, { id }: Props) => {
-    const progress = progressIndicator.find(p => p.id === id);
+    const working = !!progressIndicator.some(p => p.id === id && p.working);
     const dataExplorerState = getDataExplorer(dataExplorer, id);
     const currentRoute = router.location ? router.location.pathname : "";
-    const currentRefresh = localStorage.getItem(LAST_REFRESH_TIMESTAMP) || "";
     const isDetailsResourceChecked = multiselect.checkedList[detailsPanel.resourceUuid]
     const isOnlyOneSelected = Object.values(multiselect.checkedList).filter(x => x === true).length === 1;
     const currentItemUuid =
@@ -34,13 +33,12 @@ const mapStateToProps = ({ progressIndicator, dataExplorer, router, multiselect,
     const isMSToolbarVisible = multiselect.isVisible;
     return {
         ...dataExplorerState,
-        working: !!progress?.working,
-        currentRefresh: currentRefresh,
         currentRoute: currentRoute,
         paperKey: currentRoute,
         currentItemUuid,
         isMSToolbarVisible,
         checkedList: multiselect.checkedList,
+        working,
     };
 };
 
index ac8729aa3d32b7ff4dfe0fde5ff05171344dccd2..eb33d12301f5b7a9a58d026a31389f4edfce7695 100644 (file)
@@ -4,7 +4,7 @@
 
 import React from 'react';
 import { mount, configure } from 'enzyme';
-import { ProcessStatus, ResourceFileSize } from './renderers';
+import { GroupMembersCount, ProcessStatus, ResourceFileSize } from './renderers';
 import Adapter from "enzyme-adapter-react-16";
 import { Provider } from 'react-redux';
 import configureMockStore from 'redux-mock-store'
@@ -12,6 +12,10 @@ import { ResourceKind } from '../../models/resource';
 import { ContainerRequestState as CR } from '../../models/container-request';
 import { ContainerState as C } from '../../models/container';
 import { ProcessStatus as PS } from '../../store/processes/process';
+import { MuiThemeProvider } from '@material-ui/core';
+import { CustomTheme } from 'common/custom-theme';
+import { InlinePulser} from 'components/loading/inline-pulser';
+import { ErrorIcon } from "components/icon/icon";
 
 const middlewares = [];
 const mockStore = configureMockStore(middlewares);
@@ -19,7 +23,7 @@ const mockStore = configureMockStore(middlewares);
 configure({ adapter: new Adapter() });
 
 describe('renderers', () => {
-    let props = null;
+    let props: any = null;
 
     describe('ProcessStatus', () => {
         props = {
@@ -161,4 +165,90 @@ describe('renderers', () => {
             expect(wrapper2.text()).toContain('');
         });
     });
+
+    describe('GroupMembersCount', () => {
+        let fakeGroup;
+        beforeEach(() => {
+            props = {
+                uuid: 'zzzzz-j7d0g-000000000000000',
+            };
+            fakeGroup = {
+                "canManage": true,
+                "canWrite": true,
+                "createdAt": "2020-09-24T22:52:57.546521000Z",
+                "deleteAt": null,
+                "description": "Test Group",
+                "etag": "0000000000000000000000000",
+                "frozenByUuid": null,
+                "groupClass": "role",
+                "href": `/groups/${props.uuid}`,
+                "isTrashed": false,
+                "kind": ResourceKind.GROUP,
+                "modifiedAt": "2020-09-24T22:52:57.545669000Z",
+                "modifiedByClientUuid": null,
+                "modifiedByUserUuid": "zzzzz-tpzed-000000000000000",
+                "name": "System group",
+                "ownerUuid": "zzzzz-tpzed-000000000000000",
+                "properties": {},
+                "trashAt": null,
+                "uuid": props.uuid,
+                "writableBy": [
+                    "zzzzz-tpzed-000000000000000",
+                ]
+            };
+        });
+
+        it('shows loading group count when no memberCount', () => {
+            // Given
+            const store = mockStore({resources: {
+                [props.uuid]: fakeGroup,
+            }});
+
+            const wrapper = mount(<Provider store={store}>
+                <MuiThemeProvider theme={CustomTheme}>
+                    <GroupMembersCount {...props} />
+                </MuiThemeProvider>
+            </Provider>);
+
+            expect(wrapper.find(InlinePulser)).toHaveLength(1);
+        });
+
+        it('shows group count when memberCount present', () => {
+            // Given
+            const store = mockStore({resources: {
+                [props.uuid]: {
+                    ...fakeGroup,
+                    "memberCount": 765,
+                }
+            }});
+
+            const wrapper = mount(<Provider store={store}>
+                <MuiThemeProvider theme={CustomTheme}>
+                    <GroupMembersCount {...props} />
+                </MuiThemeProvider>
+            </Provider>);
+
+            expect(wrapper.text()).toBe("765");
+        });
+
+        it('shows group count error icon when memberCount is null', () => {
+            // Given
+            const store = mockStore({resources: {
+                [props.uuid]: {
+                    ...fakeGroup,
+                    "memberCount": null,
+                }
+            }});
+
+            const wrapper = mount(<Provider store={store}>
+                <MuiThemeProvider theme={CustomTheme}>
+                    <GroupMembersCount {...props} />
+                </MuiThemeProvider>
+            </Provider>);
+
+            expect(wrapper.find(ErrorIcon)).toHaveLength(1);
+        });
+
+    });
+
 });
index 56926b513db459dbe818130828761c514ee6fbe9..4ecbc7e10b23a832f327d7800d20a2552e1a1824 100644 (file)
@@ -3,7 +3,7 @@
 // SPDX-License-Identifier: AGPL-3.0
 
 import React from "react";
-import { Grid, Typography, withStyles, Tooltip, IconButton, Checkbox, Chip } from "@material-ui/core";
+import { Grid, Typography, withStyles, Tooltip, IconButton, Checkbox, Chip, withTheme } from "@material-ui/core";
 import { FavoriteStar, PublicFavoriteStar } from "../favorite-star/favorite-star";
 import { Resource, ResourceKind, TrashableResource } from "models/resource";
 import {
@@ -21,6 +21,7 @@ import {
     ActiveIcon,
     SetupIcon,
     InactiveIcon,
+    ErrorIcon,
 } from "components/icon/icon";
 import { formatDate, formatFileSize, formatTime } from "common/formatters";
 import { resourceLabel } from "common/labels";
@@ -53,6 +54,7 @@ import { VirtualMachinesResource } from "models/virtual-machines";
 import { CopyToClipboardSnackbar } from "components/copy-to-clipboard-snackbar/copy-to-clipboard-snackbar";
 import { ProjectResource } from "models/project";
 import { ProcessResource } from "models/process";
+import { InlinePulser } from "components/loading/inline-pulser";
 
 const renderName = (dispatch: Dispatch, item: GroupContentsResource) => {
     const navFunc = "groupClass" in item && item.groupClass === GroupClass.ROLE ? navigateToGroupDetails : navigateTo;
@@ -526,9 +528,9 @@ const renderResourceLink = (dispatch: Dispatch, item: Resource ) => {
             onClick={() => {
                 item.kind === ResourceKind.GROUP && (item as GroupResource).groupClass === "role"
                     ? dispatch<any>(navigateToGroupDetails(item.uuid))
-                    : item.kind === ResourceKind.USER 
+                    : item.kind === ResourceKind.USER
                     ? dispatch<any>(navigateToUserProfile(item.uuid))
-                    : dispatch<any>(navigateTo(item.uuid)); 
+                    : dispatch<any>(navigateTo(item.uuid));
             }}
         >
             {resourceLabel(item.kind, item && item.kind === ResourceKind.GROUP ? (item as GroupResource).groupClass || "" : "")}:{" "}
@@ -1135,3 +1137,30 @@ export const ContainerRunTime = connect((state: RootState, props: { uuid: string
         }
     }
 );
+
+export const GroupMembersCount = connect(
+    (state: RootState, props: { uuid: string }) => {
+        const group = getResource<GroupResource>(props.uuid)(state.resources);
+
+        return {
+            value: group?.memberCount,
+        };
+
+    }
+)(withTheme()((props: {value: number | null | undefined, theme:ArvadosTheme}) => {
+    if (props.value === undefined) {
+        // Loading
+        return <Typography component={"div"}>
+            <InlinePulser />
+        </Typography>;
+    } else if (props.value === null) {
+        // Error
+        return <Typography>
+            <Tooltip title="Failed to load member count">
+                <ErrorIcon style={{color: props.theme.customs.colors.greyL}}/>
+            </Tooltip>
+        </Typography>;
+    } else {
+        return <Typography children={props.value} />;
+    }
+}));
index 0ccb0502cb28faabe774b4f7b4aba64c2a7b1313..88360ebcec9b43b77cc53520239a28830374b83a 100644 (file)
@@ -31,7 +31,7 @@ import { createTree } from "models/tree";
 import { getInitialProcessStatusFilters, getInitialProcessTypeFilters } from "store/resource-type-filters/resource-type-filters";
 import { getProcess } from "store/processes/process";
 import { ResourcesState } from "store/resources/resources";
-import { toggleOne } from "store/multiselect/multiselect-actions";
+import { toggleOne, deselectAllOthers } from "store/multiselect/multiselect-actions";
 
 type CssRules = "toolbar" | "button" | "root";
 
@@ -145,6 +145,7 @@ export const AllProcessesPanel = withStyles(styles)(
 
             handleRowClick = (uuid: string) => {
                 this.props.dispatch<any>(toggleOne(uuid))
+                this.props.dispatch<any>(deselectAllOthers(uuid))
                 this.props.dispatch<any>(loadDetailsPanel(uuid));
             };
 
index 33acad50c6cab50c457c699cd38806618e264253..86c85b5c97f4c909b0c7c8464f1a3dcc0af9180a 100644 (file)
@@ -4,23 +4,21 @@
 
 import React from 'react';
 import { connect } from 'react-redux';
-import { Grid, Button, Typography, StyleRulesCallback, WithStyles, withStyles } from "@material-ui/core";
+import { Grid, Button, StyleRulesCallback, WithStyles, withStyles } from "@material-ui/core";
 import { DataExplorer } from "views-components/data-explorer/data-explorer";
 import { DataColumns } from 'components/data-table/data-table';
 import { SortDirection } from 'components/data-table/data-column';
-import { ResourceUuid } from 'views-components/data-explorer/renderers';
+import { GroupMembersCount, ResourceUuid } from 'views-components/data-explorer/renderers';
 import { AddIcon } from 'components/icon/icon';
 import { ResourceName } from 'views-components/data-explorer/renderers';
 import { createTree } from 'models/tree';
 import { GROUPS_PANEL_ID, openCreateGroupDialog } from 'store/groups-panel/groups-panel-actions';
 import { noop } from 'lodash/fp';
 import { ContextMenuKind } from 'views-components/context-menu/context-menu';
-import { getResource, ResourcesState, filterResources } from 'store/resources/resources';
+import { getResource, ResourcesState } from 'store/resources/resources';
 import { GroupResource } from 'models/group';
 import { RootState } from 'store/store';
 import { openContextMenu } from 'store/context-menu/context-menu-actions';
-import { ResourceKind } from 'models/resource';
-import { LinkClass, LinkResource } from 'models/link';
 import { ArvadosTheme } from 'common/custom-theme';
 
 type CssRules = "root";
@@ -122,20 +120,3 @@ export const GroupsPanel = withStyles(styles)(connect(
             }
         }
     }));
-
-
-const GroupMembersCount = connect(
-    (state: RootState, props: { uuid: string }) => {
-
-        const permissions = filterResources((resource: LinkResource) =>
-            resource.kind === ResourceKind.LINK &&
-            resource.linkClass === LinkClass.PERMISSION &&
-            resource.headUuid === props.uuid
-        )(state.resources);
-
-        return {
-            children: permissions.length,
-        };
-
-    }
-)((props: {children: number}) => (<Typography children={props.children} />));
diff --git a/services/workbench2/src/views/process-panel/process-io-card.test.tsx b/services/workbench2/src/views/process-panel/process-io-card.test.tsx
new file mode 100644 (file)
index 0000000..292f6cc
--- /dev/null
@@ -0,0 +1,238 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+import React from 'react';
+import { mount, configure } from 'enzyme';
+import { combineReducers, createStore } from "redux";
+import { CircularProgress, MuiThemeProvider, Tab, TableBody } from "@material-ui/core";
+import { CustomTheme } from 'common/custom-theme';
+import Adapter from "enzyme-adapter-react-16";
+import { Provider } from 'react-redux';
+import { ProcessIOCard, ProcessIOCardType } from './process-io-card';
+import { DefaultView } from "components/default-view/default-view";
+import { DefaultCodeSnippet } from "components/default-code-snippet/default-code-snippet";
+import { ProcessOutputCollectionFiles } from './process-output-collection-files';
+import { MemoryRouter } from 'react-router-dom';
+
+
+jest.mock('views/process-panel/process-output-collection-files');
+configure({ adapter: new Adapter() });
+
+describe('renderers', () => {
+    let store;
+
+    describe('ProcessStatus', () => {
+
+        beforeEach(() => {
+            store = createStore(combineReducers({
+                auth: (state: any = {}, action: any) => state,
+            }));
+        });
+
+        it('shows main process input loading when raw or params null', () => {
+            // when
+            let panel = mount(
+                <Provider store={store}>
+                    <MuiThemeProvider theme={CustomTheme}>
+                        <ProcessIOCard
+                            label={ProcessIOCardType.INPUT}
+                            process={false} // Treat as a main process, no requestingContainerUuid
+                            params={null}
+                            raw={{}}
+                        />
+                    </MuiThemeProvider>
+                </Provider>
+                );
+
+            // then
+            expect(panel.find(Tab).exists()).toBeFalsy();
+            expect(panel.find(CircularProgress).exists()).toBeTruthy();
+
+            // when
+            panel = mount(
+                <Provider store={store}>
+                    <MuiThemeProvider theme={CustomTheme}>
+                        <ProcessIOCard
+                            label={ProcessIOCardType.INPUT}
+                            process={false} // Treat as a main process, no requestingContainerUuid
+                            params={[]}
+                            raw={null}
+                        />
+                    </MuiThemeProvider>
+                </Provider>
+                );
+
+            // then
+            expect(panel.find(Tab).exists()).toBeFalsy();
+            expect(panel.find(CircularProgress).exists()).toBeTruthy();
+        });
+
+        it('shows main process empty params and raw', () => {
+            // when
+            let panel = mount(
+                <Provider store={store}>
+                    <MuiThemeProvider theme={CustomTheme}>
+                        <ProcessIOCard
+                            label={ProcessIOCardType.INPUT}
+                            process={false} // Treat as a main process, no requestingContainerUuid
+                            params={[]}
+                            raw={{}}
+                        />
+                    </MuiThemeProvider>
+                </Provider>
+                );
+
+            // then
+            expect(panel.find(CircularProgress).exists()).toBeFalsy();
+            expect(panel.find(Tab).exists()).toBeFalsy();
+            expect(panel.find(DefaultView).text()).toEqual('No parameters found');
+        });
+
+        it('shows main process with raw', () => {
+            // when
+            const raw = {some: 'data'};
+            let panel = mount(
+                <Provider store={store}>
+                    <MuiThemeProvider theme={CustomTheme}>
+                        <ProcessIOCard
+                            label={ProcessIOCardType.INPUT}
+                            process={false} // Treat as a main process, no requestingContainerUuid
+                            params={[]}
+                            raw={raw}
+                        />
+                    </MuiThemeProvider>
+                </Provider>
+                );
+
+            // then
+            expect(panel.find(CircularProgress).exists()).toBeFalsy();
+            expect(panel.find(Tab).length).toBe(1);
+            expect(panel.find(DefaultCodeSnippet).text()).toContain(JSON.stringify(raw, null, 2));
+        });
+
+        it('shows main process with params', () => {
+            // when
+            const parameters = [{id: 'someId', label: 'someLabel', value: [{display: 'someValue'}]}];
+            let panel = mount(
+                <Provider store={store}>
+                    <MuiThemeProvider theme={CustomTheme}>
+                        <ProcessIOCard
+                            label={ProcessIOCardType.INPUT}
+                            process={false} // Treat as a main process, no requestingContainerUuid
+                            params={parameters}
+                            raw={{}}
+                        />
+                    </MuiThemeProvider>
+                </Provider>
+                );
+
+            // then
+            expect(panel.find(CircularProgress).exists()).toBeFalsy();
+            expect(panel.find(Tab).length).toBe(2); // Empty raw is shown if parameters are present
+            expect(panel.find(TableBody).text()).toContain('someId');
+            expect(panel.find(TableBody).text()).toContain('someLabel');
+            expect(panel.find(TableBody).text()).toContain('someValue');
+        });
+
+        // Subprocess
+
+        it('shows subprocess loading', () => {
+            // when
+            const subprocess = {containerRequest: {requestingContainerUuid: 'xyz'}};
+            let panel = mount(
+                <Provider store={store}>
+                    <MuiThemeProvider theme={CustomTheme}>
+                        <ProcessIOCard
+                            label={ProcessIOCardType.INPUT}
+                            process={subprocess} // Treat as a subprocess without outputUuid
+                            params={null}
+                            raw={null}
+                        />
+                    </MuiThemeProvider>
+                </Provider>
+                );
+
+            // then
+            expect(panel.find(Tab).exists()).toBeFalsy();
+            expect(panel.find(CircularProgress).exists()).toBeTruthy();
+        });
+
+        it('shows subprocess mounts', () => {
+            // when
+            const subprocess = {containerRequest: {requestingContainerUuid: 'xyz'}};
+            const sampleMount = {path: '/', pdh: 'abcdef12abcdef12abcdef12abcdef12+0'};
+            let panel = mount(
+                <Provider store={store}>
+                    <MemoryRouter>
+                        <MuiThemeProvider theme={CustomTheme}>
+                            <ProcessIOCard
+                                label={ProcessIOCardType.INPUT}
+                                process={subprocess} // Treat as a subprocess without outputUuid
+                                params={null}
+                                raw={null}
+                                mounts={[sampleMount]}
+                            />
+                        </MuiThemeProvider>
+                    </MemoryRouter>
+                </Provider>
+                );
+
+            // then
+            expect(panel.find(CircularProgress).exists()).toBeFalsy();
+            expect(panel.find(Tab).length).toBe(1); // Empty raw is hidden in subprocesses
+            expect(panel.find(TableBody).text()).toContain(sampleMount.pdh);
+
+        });
+
+        it('shows subprocess output collection', () => {
+            // when
+            const subprocess = {containerRequest: {requestingContainerUuid: 'xyz'}};
+            const outputCollection = '123456789';
+            let panel = mount(
+                <Provider store={store}>
+                    <MuiThemeProvider theme={CustomTheme}>
+                        <ProcessIOCard
+                            label={ProcessIOCardType.OUTPUT}
+                            process={subprocess} // Treat as a subprocess with outputUuid
+                            outputUuid={outputCollection}
+                            params={null}
+                            raw={null}
+                        />
+                    </MuiThemeProvider>
+                </Provider>
+                );
+
+            // then
+            expect(panel.find(CircularProgress).exists()).toBeFalsy();
+            expect(panel.find(Tab).length).toBe(1); // Unloaded raw is hidden in subprocesses
+            expect(panel.find(ProcessOutputCollectionFiles).prop('currentItemUuid')).toBe(outputCollection);
+        });
+
+        it('shows empty subprocess raw', () => {
+            // when
+            const subprocess = {containerRequest: {requestingContainerUuid: 'xyz'}};
+            const outputCollection = '123456789';
+            let panel = mount(
+                <Provider store={store}>
+                    <MuiThemeProvider theme={CustomTheme}>
+                        <ProcessIOCard
+                            label={ProcessIOCardType.OUTPUT}
+                            process={subprocess} // Treat as a subprocess with outputUuid
+                            outputUuid={outputCollection}
+                            params={null}
+                            raw={{}}
+                        />
+                    </MuiThemeProvider>
+                </Provider>
+                );
+
+            // then
+            expect(panel.find(CircularProgress).exists()).toBeFalsy();
+            expect(panel.find(Tab).length).toBe(2); // Empty raw is visible in subprocesses
+            expect(panel.find(Tab).first().text()).toBe('Collection');
+            expect(panel.find(ProcessOutputCollectionFiles).prop('currentItemUuid')).toBe(outputCollection);
+        });
+
+    });
+});
index b5afbf6545ed19f2eb84156f02534c3fa09ab3f8..da4d150a299fe9d9193fa9b0ffa6f42cf14863f1 100644 (file)
@@ -89,7 +89,8 @@ type CssRules =
     | "symmetricTabs"
     | "imagePlaceholder"
     | "rowWithPreview"
-    | "labelColumn";
+    | "labelColumn"
+    | "primaryRow";
 
 const styles: StyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({
     card: {
@@ -123,7 +124,7 @@ const styles: StyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({
     },
     tableWrapper: {
         height: "auto",
-        maxHeight: `calc(100% - ${theme.spacing.unit * 4.5}px)`,
+        maxHeight: `calc(100% - ${theme.spacing.unit * 3}px)`,
         overflow: "auto",
     },
     tableRoot: {
@@ -173,10 +174,10 @@ const styles: StyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({
         paddingLeft: "20px",
     },
     secondaryRow: {
-        height: "29px",
+        height: "24px",
         verticalAlign: "top",
         position: "relative",
-        top: "-9px",
+        top: "-4px",
     },
     emptyValue: {
         color: theme.customs.colors.grey700,
@@ -184,7 +185,10 @@ const styles: StyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({
     noBorderRow: {
         "& td": {
             borderBottom: "none",
+            paddingTop: "2px",
+            paddingBottom: "2px",
         },
+        height: "24px",
     },
     symmetricTabs: {
         "& button": {
@@ -206,11 +210,18 @@ const styles: StyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({
     labelColumn: {
         minWidth: "120px",
     },
+    primaryRow: {
+        height: "24px",
+        "& td": {
+            paddingTop: "2px",
+            paddingBottom: "2px",
+        },
+    },
 });
 
 export enum ProcessIOCardType {
-    INPUT = "Inputs",
-    OUTPUT = "Outputs",
+    INPUT = "Input Parameters",
+    OUTPUT = "Output Parameters",
 }
 export interface ProcessIOCardDataProps {
     process?: Process;
@@ -219,7 +230,7 @@ export interface ProcessIOCardDataProps {
     raw: any;
     mounts?: InputCollectionMount[];
     outputUuid?: string;
-    showParams?: boolean;
+    forceShowParams?: boolean;
 }
 
 export interface ProcessIOCardActionProps {
@@ -251,7 +262,7 @@ export const ProcessIOCard = withStyles(styles)(
             panelName,
             process,
             navigateTo,
-            showParams,
+            forceShowParams,
         }: ProcessIOCardProps) => {
             const [mainProcTabState, setMainProcTabState] = useState(0);
             const [subProcTabState, setSubProcTabState] = useState(0);
@@ -266,14 +277,20 @@ export const ProcessIOCard = withStyles(styles)(
 
             const PanelIcon = label === ProcessIOCardType.INPUT ? InputIcon : OutputIcon;
             const mainProcess = !(process && process!.containerRequest.requestingContainerUuid);
+            const showParamTable = mainProcess || forceShowParams;
 
             const loading = raw === null || raw === undefined || params === null;
+
             const hasRaw = !!(raw && Object.keys(raw).length > 0);
             const hasParams = !!(params && params.length > 0);
+            // isRawLoaded allows subprocess panel to display raw even if it's {}
+            const isRawLoaded = !!(raw && Object.keys(raw).length >= 0);
 
             // Subprocess
             const hasInputMounts = !!(label === ProcessIOCardType.INPUT && mounts && mounts.length);
             const hasOutputCollecton = !!(label === ProcessIOCardType.OUTPUT && outputUuid);
+            // Subprocess should not show loading if hasOutputCollection or hasInputMounts
+            const subProcessLoading = loading && !hasOutputCollecton && !hasInputMounts;
 
             return (
                 <Card
@@ -350,7 +367,7 @@ export const ProcessIOCard = withStyles(styles)(
                         }
                     />
                     <CardContent className={classes.content}>
-                        {mainProcess || showParams ? (
+                        {showParamTable ? (
                             <>
                                 {/* raw is undefined until params are loaded */}
                                 {loading && (
@@ -364,9 +381,9 @@ export const ProcessIOCard = withStyles(styles)(
                                     </Grid>
                                 )}
                                 {/* Once loaded, either raw or params may still be empty
-                                 *   Raw when all params are empty
-                                 *   Params when raw is provided by containerRequest properties but workflow mount is absent for preview
-                                 */}
+                                 *   Raw when all params are empty
+                                 *   Params when raw is provided by containerRequest properties but workflow mount is absent for preview
+                                 */}
                                 {!loading && (hasRaw || hasParams) && (
                                     <>
                                         <Tabs
@@ -377,14 +394,15 @@ export const ProcessIOCard = withStyles(styles)(
                                         >
                                             {/* params will be empty on processes without workflow definitions in mounts, so we only show raw */}
                                             {hasParams && <Tab label="Parameters" />}
-                                            {!showParams && <Tab label="JSON" />}
+                                            {!forceShowParams && <Tab label="JSON" />}
+                                            {hasOutputCollecton && <Tab label="Collection" />}
                                         </Tabs>
                                         {mainProcTabState === 0 && params && hasParams && (
                                             <div className={classes.tableWrapper}>
                                                 <ProcessIOPreview
                                                     data={params}
                                                     showImagePreview={showImagePreview}
-                                                    valueLabel={showParams ? "Default value" : "Value"}
+                                                    valueLabel={forceShowParams ? "Default value" : "Value"}
                                                 />
                                             </div>
                                         )}
@@ -393,6 +411,28 @@ export const ProcessIOCard = withStyles(styles)(
                                                 <ProcessIORaw data={raw} />
                                             </div>
                                         )}
+                                        {mainProcTabState === 2 && hasOutputCollecton && (
+                                            <>
+                                                {outputUuid && (
+                                                    <Typography className={classes.collectionLink}>
+                                                        Output Collection:{" "}
+                                                        <MuiLink
+                                                            className={classes.keepLink}
+                                                            onClick={() => {
+                                                                navigateTo(outputUuid || "");
+                                                            }}
+                                                        >
+                                                            {outputUuid}
+                                                        </MuiLink>
+                                                    </Typography>
+                                                )}
+                                                <ProcessOutputCollectionFiles
+                                                    isWritable={false}
+                                                    currentItemUuid={outputUuid}
+                                                />
+                                            </>
+                                        )}
+
                                     </>
                                 )}
                                 {!loading && !hasRaw && !hasParams && (
@@ -409,7 +449,7 @@ export const ProcessIOCard = withStyles(styles)(
                         ) : (
                             // Subprocess
                             <>
-                                {loading && (
+                                {subProcessLoading ? (
                                     <Grid
                                         container
                                         item
@@ -418,8 +458,7 @@ export const ProcessIOCard = withStyles(styles)(
                                     >
                                         <CircularProgress />
                                     </Grid>
-                                )}
-                                {!loading && (hasInputMounts || hasOutputCollecton || hasRaw) ? (
+                                ) : !subProcessLoading && (hasInputMounts || hasOutputCollecton || isRawLoaded) ? (
                                     <>
                                         <Tabs
                                             value={subProcTabState}
@@ -429,7 +468,7 @@ export const ProcessIOCard = withStyles(styles)(
                                         >
                                             {hasInputMounts && <Tab label="Collections" />}
                                             {hasOutputCollecton && <Tab label="Collection" />}
-                                            <Tab label="JSON" />
+                                            {isRawLoaded && <Tab label="JSON" />}
                                         </Tabs>
                                         <div className={classes.tableWrapper}>
                                             {subProcTabState === 0 && hasInputMounts && <ProcessInputMounts mounts={mounts || []} />}
@@ -454,7 +493,7 @@ export const ProcessIOCard = withStyles(styles)(
                                                     />
                                                 </>
                                             )}
-                                            {(subProcTabState === 1 || (!hasInputMounts && !hasOutputCollecton)) && (
+                                            {isRawLoaded && (subProcTabState === 1 || (!hasInputMounts && !hasOutputCollecton)) && (
                                                 <div className={classes.tableWrapper}>
                                                     <ProcessIORaw data={raw} />
                                                 </div>
@@ -523,6 +562,7 @@ const ProcessIOPreview = memo(
                         const rest = param.value.slice(1);
                         const mainRowClasses = {
                             [classes.noBorderRow]: rest.length > 0,
+                            [classes.primaryRow]: true
                         };
 
                         return (
@@ -549,6 +589,7 @@ const ProcessIOPreview = memo(
                                     const rowClasses = {
                                         [classes.noBorderRow]: i < rest.length - 1,
                                         [classes.secondaryRow]: val.secondary,
+                                        [classes.primaryRow]: !val.secondary,
                                     };
                                     return (
                                         <TableRow
index 50d343d6223c31f0c4a5998298d415ccaaa688bd..091078c4521c0615071ebf217c7baa629975aa49 100644 (file)
@@ -20,7 +20,7 @@ import classNames from 'classnames';
 import { FederationConfig, getNavUrl } from 'routes/routes';
 import { RootState } from 'store/store';
 
-type CssRules = 'root' | 'wordWrap' | 'logText';
+type CssRules = 'root' | 'wordWrapOn' | 'wordWrapOff' | 'logText';
 
 const styles: StyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({
     root: {
@@ -35,8 +35,11 @@ const styles: StyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({
     logText: {
         padding: `0 ${theme.spacing.unit*0.5}px`,
     },
-    wordWrap: {
-        whiteSpace: 'pre-wrap',
+    wordWrapOn: {
+        overflowWrap: 'anywhere',
+    },
+    wordWrapOff: {
+        whiteSpace: 'nowrap',
     },
 });
 
@@ -119,8 +122,8 @@ export const ProcessLogCodeSnippet = withStyles(styles)(connect(mapStateToProps)
                     }
                 }}>
                 { lines.map((line: string, index: number) =>
-                <Typography key={index} component="pre"
-                    className={classNames(classes.logText, wordWrap ? classes.wordWrap : undefined)}>
+                <Typography key={index} component="span"
+                    className={classNames(classes.logText, wordWrap ? classes.wordWrapOn : classes.wordWrapOff)}>
                     {renderLinks(fontSize, auth, dispatch)(line)}
                 </Typography>
                 ) }
index c972c0a6cf9ebf130463c72b39ee69b750970945..2a9b3882e86bec1592764ad46f837938c5eb3aa5 100644 (file)
@@ -24,6 +24,7 @@ import { ProcessCmdCard } from "./process-cmd-card";
 import { ContainerRequestResource } from "models/container-request";
 import { OutputDetails, NodeInstanceType } from "store/process-panel/process-panel";
 import { NotFoundView } from 'views/not-found-panel/not-found-panel';
+import { CollectionFile } from 'models/collection-file';
 
 type CssRules = "root";
 
@@ -41,10 +42,11 @@ export interface ProcessPanelRootDataProps {
     auth: AuthState;
     inputRaw: WorkflowInputsData | null;
     inputParams: ProcessIOParameter[] | null;
-    outputRaw: OutputDetails | null;
+    outputData: OutputDetails | null;
     outputDefinitions: CommandOutputParameter[];
     outputParams: ProcessIOParameter[] | null;
     nodeInfo: NodeInstanceType | null;
+    usageReport: string | null;
 }
 
 export interface ProcessPanelRootActionProps {
@@ -68,12 +70,12 @@ export type ProcessPanelRootProps = ProcessPanelRootDataProps & ProcessPanelRoot
 
 const panelsData: MPVPanelState[] = [
     { name: "Details" },
-    { name: "Command" },
     { name: "Logs", visible: true },
-    { name: "Inputs" },
+    { name: "Subprocesses" },
     { name: "Outputs" },
+    { name: "Inputs" },
+    { name: "Command" },
     { name: "Resources" },
-    { name: "Subprocesses" },
 ];
 
 export const ProcessPanelRoot = withStyles(styles)(
@@ -83,10 +85,11 @@ export const ProcessPanelRoot = withStyles(styles)(
         processLogsPanel,
         inputRaw,
         inputParams,
-        outputRaw,
+        outputData,
         outputDefinitions,
         outputParams,
         nodeInfo,
+        usageReport,
         loadInputs,
         loadOutputs,
         loadNodeJson,
@@ -112,10 +115,12 @@ export const ProcessPanelRoot = withStyles(styles)(
             }
         }, [containerRequest, loadInputs, loadOutputs, loadOutputDefinitions, loadNodeJson]);
 
+        const maxHeight = "100%";
+
         // Trigger processing output params when raw or definitions change
         React.useEffect(() => {
             updateOutputParams();
-        }, [outputRaw, outputDefinitions, updateOutputParams]);
+        }, [outputData, outputDefinitions, updateOutputParams]);
 
         return process ? (
             <MPVContainer
@@ -137,19 +142,11 @@ export const ProcessPanelRoot = withStyles(styles)(
                         resumeOnHoldWorkflow={props.resumeOnHoldWorkflow}
                     />
                 </MPVPanelContent>
-                <MPVPanelContent
-                    forwardProps
-                    xs="auto"
-                    data-cy="process-cmd">
-                    <ProcessCmdCard
-                        onCopy={props.onCopyToClipboard}
-                        process={process}
-                    />
-                </MPVPanelContent>
                 <MPVPanelContent
                     forwardProps
                     xs
-                    minHeight="50%"
+                    minHeight={maxHeight}
+                    maxHeight={maxHeight}
                     data-cy="process-logs">
                     <ProcessLogsCard
                         onCopy={props.onCopyToClipboard}
@@ -168,7 +165,27 @@ export const ProcessPanelRoot = withStyles(styles)(
                 <MPVPanelContent
                     forwardProps
                     xs
-                    maxHeight="50%"
+                    maxHeight={maxHeight}
+                    data-cy="process-children">
+                    <SubprocessPanel process={process} />
+                </MPVPanelContent>
+                <MPVPanelContent
+                    forwardProps
+                    xs
+                    maxHeight={maxHeight}
+                    data-cy="process-outputs">
+                    <ProcessIOCard
+                        label={ProcessIOCardType.OUTPUT}
+                        process={process}
+                        params={outputParams}
+                        raw={outputData?.raw}
+                        outputUuid={outputUuid || ""}
+                    />
+                </MPVPanelContent>
+                <MPVPanelContent
+                    forwardProps
+                    xs
+                    maxHeight={maxHeight}
                     data-cy="process-inputs">
                     <ProcessIOCard
                         label={ProcessIOCardType.INPUT}
@@ -180,15 +197,11 @@ export const ProcessPanelRoot = withStyles(styles)(
                 </MPVPanelContent>
                 <MPVPanelContent
                     forwardProps
-                    xs
-                    maxHeight="50%"
-                    data-cy="process-outputs">
-                    <ProcessIOCard
-                        label={ProcessIOCardType.OUTPUT}
+                    xs="auto"
+                    data-cy="process-cmd">
+                    <ProcessCmdCard
+                        onCopy={props.onCopyToClipboard}
                         process={process}
-                        params={outputParams}
-                        raw={outputRaw?.rawOutputs}
-                        outputUuid={outputUuid || ""}
                     />
                 </MPVPanelContent>
                 <MPVPanelContent
@@ -198,15 +211,9 @@ export const ProcessPanelRoot = withStyles(styles)(
                     <ProcessResourceCard
                         process={process}
                         nodeInfo={nodeInfo}
+                        usageReport={usageReport}
                     />
                 </MPVPanelContent>
-                <MPVPanelContent
-                    forwardProps
-                    xs
-                    maxHeight="50%"
-                    data-cy="process-children">
-                    <SubprocessPanel process={process} />
-                </MPVPanelContent>
             </MPVContainer>
         ) : (
             <NotFoundView
index 4a6b5fd33344600e1a5e6af1d71e4ecbd09b0a29..f305290cc0e83b27cb53f0bc3f65caa1349a5fc9 100644 (file)
@@ -21,22 +21,29 @@ import {
 import { cancelRunningWorkflow, resumeOnHoldWorkflow, startWorkflow } from "store/processes/processes-actions";
 import { navigateToLogCollection, pollProcessLogs, setProcessLogsPanelFilter } from "store/process-logs-panel/process-logs-panel-actions";
 import { snackbarActions, SnackbarKind } from "store/snackbar/snackbar-actions";
+import { getInlineFileUrl } from "views-components/context-menu/actions/helpers";
 
 const mapStateToProps = ({ router, auth, resources, processPanel, processLogsPanel }: RootState): ProcessPanelRootDataProps => {
     const uuid = getProcessPanelCurrentUuid(router) || "";
     const subprocesses = getSubprocesses(uuid)(resources);
+    const process = getProcess(uuid)(resources);
     return {
-        process: getProcess(uuid)(resources),
+        process,
         subprocesses: subprocesses.filter(subprocess => processPanel.filters[getProcessStatus(subprocess)]),
         filters: getFilters(processPanel, subprocesses),
         processLogsPanel: processLogsPanel,
         auth: auth,
         inputRaw: processPanel.inputRaw,
         inputParams: processPanel.inputParams,
-        outputRaw: processPanel.outputRaw,
+        outputData: processPanel.outputData,
         outputDefinitions: processPanel.outputDefinitions,
         outputParams: processPanel.outputParams,
         nodeInfo: processPanel.nodeInfo,
+        usageReport: (process || null) && processPanel.usageReport && getInlineFileUrl(
+            `${auth.config.keepWebServiceUrl}${processPanel.usageReport.url}?api_token=${auth.apiToken}`,
+            auth.config.keepWebServiceUrl,
+            auth.config.keepWebInlineServiceUrl
+        ),
     };
 };
 
index 4e849173fb3f5b17655a8de0e542b1e692255337..d1492ddbf5912238e76d1fbc5b8aeeb181d7d1fe 100644 (file)
@@ -14,6 +14,8 @@ import {
     Tooltip,
     Typography,
     Grid,
+    Link,
+    Button
 } from '@material-ui/core';
 import { ArvadosTheme } from 'common/custom-theme';
 import {
@@ -21,6 +23,7 @@ import {
     MaximizeIcon,
     ResourceIcon,
     UnMaximizeIcon,
+    ShowChartIcon,
 } from 'components/icon/icon';
 import { MPVPanelProps } from 'components/multi-panel-view/multi-panel-view';
 import { connect } from 'react-redux';
@@ -33,9 +36,10 @@ import { MountKind } from 'models/mount-types';
 interface ProcessResourceCardDataProps {
     process: Process;
     nodeInfo: NodeInstanceType | null;
+    usageReport: string | null;
 }
 
-type CssRules = "card" | "header" | "title" | "avatar" | "iconHeader" | "content" | "sectionH3";
+type CssRules = "card" | "header" | "title" | "avatar" | "iconHeader" | "content" | "sectionH3" | "reportButton";
 
 const styles: StyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({
     card: {
@@ -64,13 +68,15 @@ const styles: StyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({
         color: theme.customs.colors.greyD,
         fontSize: "0.8125rem",
         textTransform: "uppercase",
+    },
+    reportButton: {
     }
 });
 
 type ProcessResourceCardProps = ProcessResourceCardDataProps & WithStyles<CssRules> & MPVPanelProps;
 
 export const ProcessResourceCard = withStyles(styles)(connect()(
-    ({ classes, nodeInfo, doHidePanel, doMaximizePanel, doUnMaximizePanel, panelMaximized, panelName, process, }: ProcessResourceCardProps) => {
+    ({ classes, nodeInfo, usageReport, doHidePanel, doMaximizePanel, doUnMaximizePanel, panelMaximized, panelName, process, }: ProcessResourceCardProps) => {
         let diskRequest = 0;
         if (process.container?.mounts) {
             for (const mnt in process.container.mounts) {
@@ -96,6 +102,7 @@ export const ProcessResourceCard = withStyles(styles)(connect()(
                 }
                 action={
                     <div>
+                        {usageReport && <Link href={usageReport} className={classes.reportButton} target="_blank"><ShowChartIcon /> Resource usage report</Link>}
                         {doUnMaximizePanel && panelMaximized &&
                             <Tooltip title={`Unmaximize ${panelName || 'panel'}`} disableFocusListener>
                                 <IconButton onClick={doUnMaximizePanel}><UnMaximizeIcon /></IconButton>
index 83b6c8ba47472b2fa230ebf6afe8d5e8af1b61a5..2ddfca8178577e8402041bfbb3807968ea7a91af 100644 (file)
@@ -51,10 +51,9 @@ import { GroupClass, GroupResource } from 'models/group';
 import { CollectionResource } from 'models/collection';
 import { resourceIsFrozen } from 'common/frozen-resources';
 import { ProjectResource } from 'models/project';
-import { NotFoundView } from 'views/not-found-panel/not-found-panel';
 import { deselectAllOthers, toggleOne } from 'store/multiselect/multiselect-actions';
 
-type CssRules = 'root' | 'button';
+type CssRules = 'root' | 'button' ;
 
 const styles: StyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({
     root: {
@@ -244,6 +243,7 @@ interface ProjectPanelDataProps {
     isAdmin: boolean;
     userUuid: string;
     dataExplorerItems: any;
+    working: boolean;
 }
 
 type ProjectPanelProps = ProjectPanelDataProps & DispatchProp & WithStyles<CssRules> & RouteComponentProps<{ id: string }>;
@@ -262,26 +262,20 @@ const mapStateToProps = (state: RootState) => {
 export const ProjectPanel = withStyles(styles)(
     connect(mapStateToProps)(
         class extends React.Component<ProjectPanelProps> {
+
             render() {
                 const { classes } = this.props;
-
-                return this.props.project ?
-                    <div data-cy='project-panel' className={classes.root}>
-                        <DataExplorer
-                            id={PROJECT_PANEL_ID}
-                            onRowClick={this.handleRowClick}
-                            onRowDoubleClick={this.handleRowDoubleClick}
-                            onContextMenu={this.handleContextMenu}
-                            contextMenuColumn={true}
-                            defaultViewIcon={ProjectIcon}
-                            defaultViewMessages={DEFAULT_VIEW_MESSAGES}
-                        />
-                    </div>
-                    :
-                    <NotFoundView
-                        icon={ProjectIcon}
-                        messages={["Project not found"]}
+                return <div data-cy='project-panel' className={classes.root}>
+                    <DataExplorer
+                        id={PROJECT_PANEL_ID}
+                        onRowClick={this.handleRowClick}
+                        onRowDoubleClick={this.handleRowDoubleClick}
+                        onContextMenu={this.handleContextMenu}
+                        contextMenuColumn={true}
+                        defaultViewIcon={ProjectIcon}
+                        defaultViewMessages={DEFAULT_VIEW_MESSAGES}
                     />
+                </div>
             }
 
             isCurrentItemChild = (resource: Resource) => {
index d9b9002e3ea1a33d2d5b8f668e0108138d7e59c9..e9693b50e5917ddfb8542ec26e42fc7cbb004c21 100644 (file)
@@ -21,7 +21,7 @@ import {
 } from 'views-components/data-explorer/renderers';
 import servicesProvider from 'common/service-provider';
 import { createTree } from 'models/tree';
-import { getInitialResourceTypeFilters } from 'store/resource-type-filters/resource-type-filters';
+import { getInitialSearchTypeFilters } from 'store/resource-type-filters/resource-type-filters';
 import { SearchResultsPanelProps } from "./search-results-panel";
 import { Routes } from 'routes/routes';
 import { Link } from 'react-router-dom';
@@ -69,7 +69,7 @@ export const searchResultsPanelColumns: DataColumns<string, GroupContentsResourc
         name: SearchResultsPanelColumnNames.NAME,
         selected: true,
         configurable: true,
-        sort: {direction: SortDirection.NONE, field: "name"},
+        sort: { direction: SortDirection.NONE, field: "name" },
         filters: createTree(),
         render: (uuid: string) => <ResourceName uuid={uuid} />
     },
@@ -84,7 +84,7 @@ export const searchResultsPanelColumns: DataColumns<string, GroupContentsResourc
         name: SearchResultsPanelColumnNames.TYPE,
         selected: true,
         configurable: true,
-        filters: getInitialResourceTypeFilters(),
+        filters: getInitialSearchTypeFilters(),
         render: (uuid: string) => <ResourceType uuid={uuid} />,
     },
     {
@@ -105,7 +105,7 @@ export const searchResultsPanelColumns: DataColumns<string, GroupContentsResourc
         name: SearchResultsPanelColumnNames.LAST_MODIFIED,
         selected: true,
         configurable: true,
-        sort: {direction: SortDirection.DESC, field: "modifiedAt"},
+        sort: { direction: SortDirection.DESC, field: "modifiedAt" },
         filters: createTree(),
         render: uuid => <ResourceLastModifiedDate uuid={uuid} />
     }
@@ -130,8 +130,8 @@ export const SearchResultsPanelView = withStyles(styles, { withTheme: true })(
                         const clusterId = searchUuid.split('-')[0];
                         const serviceType = camelCase(itemKind?.replace('arvados#', ''));
                         const service = Object.values(servicesProvider.getServices())
-                            .filter(({resourceType}) => !!resourceType)
-                            .find(({resourceType}) => camelCase(resourceType).indexOf(serviceType) > -1);
+                            .filter(({ resourceType }) => !!resourceType)
+                            .find(({ resourceType }) => camelCase(resourceType).indexOf(serviceType) > -1);
                         const sessions = getSearchSessions(clusterId, props.sessions);
 
                         if (sessions.length > 0) {
@@ -150,35 +150,35 @@ export const SearchResultsPanelView = withStyles(styles, { withTheme: true })(
                 }
             })();
 
-        // eslint-disable-next-line react-hooks/exhaustive-deps
+            // eslint-disable-next-line react-hooks/exhaustive-deps
         }, [selectedItem]);
 
         const onItemClick = useCallback((uuid) => {
             setSelectedItem(uuid);
             props.onItemClick(uuid);
-        // eslint-disable-next-line react-hooks/exhaustive-deps
-        },[props.onItemClick]);
+            // eslint-disable-next-line react-hooks/exhaustive-deps
+        }, [props.onItemClick]);
 
         return <span data-cy='search-results' className={props.classes.searchResults}>
             <DataExplorer
-            id={SEARCH_RESULTS_PANEL_ID}
-            onRowClick={onItemClick}
-            onRowDoubleClick={props.onItemDoubleClick}
-            onContextMenu={props.onContextMenu}
-            contextMenuColumn={false}
-            elementPath={`/ ${itemPath.reverse().join(' / ')}`}
-            hideSearchInput
-            title={
-                <div>
-                    {loggedIn.length === 1 ?
-                        <span>Searching local cluster <ResourceCluster uuid={props.localCluster} /></span>
-                        : <span>Searching clusters: {loggedIn.map((ss) => <span key={ss.clusterId}>
-                            <a href={props.remoteHostsConfig[ss.clusterId] && props.remoteHostsConfig[ss.clusterId].workbench2Url} style={{ textDecoration: 'none' }}> <ResourceCluster uuid={ss.clusterId} /></a>
-                        </span>)}</span>}
-                    {loggedIn.length === 1 && props.localCluster !== homeCluster ?
-                        <span>To search multiple clusters, <a href={props.remoteHostsConfig[homeCluster] && props.remoteHostsConfig[homeCluster].workbench2Url}> start from your home Workbench.</a></span>
-                        : <span style={{ marginLeft: "2em" }}>Use <Link to={Routes.SITE_MANAGER} >Site Manager</Link> to manage which clusters will be searched.</span>}
-                </div >
-            }
-        /></span>;
+                id={SEARCH_RESULTS_PANEL_ID}
+                onRowClick={onItemClick}
+                onRowDoubleClick={props.onItemDoubleClick}
+                onContextMenu={props.onContextMenu}
+                contextMenuColumn={false}
+                elementPath={`/ ${itemPath.reverse().join(' / ')}`}
+                hideSearchInput
+                title={
+                    <div>
+                        {loggedIn.length === 1 ?
+                            <span>Searching local cluster <ResourceCluster uuid={props.localCluster} /></span>
+                            : <span>Searching clusters: {loggedIn.map((ss) => <span key={ss.clusterId}>
+                                <a href={props.remoteHostsConfig[ss.clusterId] && props.remoteHostsConfig[ss.clusterId].workbench2Url} style={{ textDecoration: 'none' }}> <ResourceCluster uuid={ss.clusterId} /></a>
+                            </span>)}</span>}
+                        {loggedIn.length === 1 && props.localCluster !== homeCluster ?
+                            <span>To search multiple clusters, <a href={props.remoteHostsConfig[homeCluster] && props.remoteHostsConfig[homeCluster].workbench2Url}> start from your home Workbench.</a></span>
+                            : <span style={{ marginLeft: "2em" }}>Use <Link to={Routes.SITE_MANAGER} >Site Manager</Link> to manage which clusters will be searched.</span>}
+                    </div >
+                }
+            /></span>;
     });
index 50192e543dbe7f3cf7a3368a9743cd5a278b394d..aa4c1b29d430538d4fdab7fcf574e1e14030f74e 100644 (file)
@@ -27,6 +27,7 @@ import { openContextMenu, resourceUuidToContextMenuKind } from 'store/context-me
 import { MPVContainer, MPVPanelContent, MPVPanelState } from 'components/multi-panel-view/multi-panel-view';
 import { ProcessIOCard, ProcessIOCardType } from 'views/process-panel/process-io-card';
 import { NotFoundView } from 'views/not-found-panel/not-found-panel';
+import { WorkflowProcessesPanel } from './workflow-processes-panel';
 
 type CssRules = 'root'
     | 'button'
@@ -135,9 +136,10 @@ export const RegisteredWorkflowPanel = withStyles(styles)(connect(
                 const { classes, item, inputParams, outputParams, workflowCollection } = this.props;
                 const panelsData: MPVPanelState[] = [
                     { name: "Details" },
-                    { name: "Inputs" },
+                    { name: "Runs" },
                     { name: "Outputs" },
-                    { name: "Files" },
+                    { name: "Inputs" },
+                    { name: "Definition" },
                 ];
                 return item
                     ? <MPVContainer className={classes.root} spacing={8} direction="column" justify-content="flex-start" wrap="nowrap" panelStates={panelsData}>
@@ -179,24 +181,28 @@ export const RegisteredWorkflowPanel = withStyles(styles)(connect(
                                 </CardContent>
                             </Card>
                         </MPVPanelContent>
-                        <MPVPanelContent forwardProps xs data-cy="process-inputs">
+                        <MPVPanelContent forwardProps xs maxHeight="100%">
+                            <WorkflowProcessesPanel />
+                        </MPVPanelContent>
+                        <MPVPanelContent forwardProps xs data-cy="process-outputs" maxHeight="100%">
                             <ProcessIOCard
-                                label={ProcessIOCardType.INPUT}
-                                params={inputParams}
+                                label={ProcessIOCardType.OUTPUT}
+                                params={outputParams}
                                 raw={{}}
-                                showParams={true}
+                                forceShowParams={true}
                             />
                         </MPVPanelContent>
-                        <MPVPanelContent forwardProps xs data-cy="process-outputs">
+                        <MPVPanelContent forwardProps xs data-cy="process-inputs" maxHeight="100%">
                             <ProcessIOCard
-                                label={ProcessIOCardType.OUTPUT}
-                                params={outputParams}
+                                label={ProcessIOCardType.INPUT}
+                                params={inputParams}
                                 raw={{}}
-                                showParams={true}
+                                forceShowParams={true}
                             />
                         </MPVPanelContent>
-                        <MPVPanelContent xs>
+                        <MPVPanelContent xs maxHeight="100%">
                             <Card className={classes.filesCard}>
+                                <CardHeader title="Workflow Definition" />
                                 <ProcessOutputCollectionFiles isWritable={false} currentItemUuid={workflowCollection} />
                             </Card>
                         </MPVPanelContent>
diff --git a/services/workbench2/src/views/workflow-panel/workflow-processes-panel-root.tsx b/services/workbench2/src/views/workflow-panel/workflow-processes-panel-root.tsx
new file mode 100644 (file)
index 0000000..64f24a2
--- /dev/null
@@ -0,0 +1,126 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+import React from 'react';
+import { DataExplorer } from "views-components/data-explorer/data-explorer";
+import { DataColumns } from 'components/data-table/data-table';
+import { DataTableFilterItem } from 'components/data-table-filters/data-table-filters';
+import { ContainerRequestState } from 'models/container-request';
+import { SortDirection } from 'components/data-table/data-column';
+import { ResourceKind } from 'models/resource';
+import { ResourceCreatedAtDate, ProcessStatus, ContainerRunTime } from 'views-components/data-explorer/renderers';
+import { ProcessIcon } from 'components/icon/icon';
+import { ResourceName } from 'views-components/data-explorer/renderers';
+import { WORKFLOW_PROCESSES_PANEL_ID } from 'store/workflow-panel/workflow-panel-actions';
+import { createTree } from 'models/tree';
+import { getInitialProcessStatusFilters } from 'store/resource-type-filters/resource-type-filters';
+import { ResourcesState } from 'store/resources/resources';
+import { MPVPanelProps } from 'components/multi-panel-view/multi-panel-view';
+import { StyleRulesCallback, Typography, WithStyles, withStyles } from '@material-ui/core';
+import { ArvadosTheme } from 'common/custom-theme';
+import { ProcessResource } from 'models/process';
+
+type CssRules = 'iconHeader' | 'cardHeader';
+
+const styles: StyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({
+    iconHeader: {
+        fontSize: '1.875rem',
+        color: theme.customs.colors.greyL,
+        marginRight: theme.spacing.unit * 2,
+    },
+    cardHeader: {
+        display: 'flex',
+    },
+});
+
+export enum WorkflowProcessesPanelColumnNames {
+    NAME = "Name",
+    STATUS = "Status",
+    CREATED_AT = "Created At",
+    RUNTIME = "Run Time"
+}
+
+export interface WorkflowProcessesPanelFilter extends DataTableFilterItem {
+    type: ResourceKind | ContainerRequestState;
+}
+
+export const workflowProcessesPanelColumns: DataColumns<string, ProcessResource> = [
+    {
+        name: WorkflowProcessesPanelColumnNames.NAME,
+        selected: true,
+        configurable: true,
+        sort: { direction: SortDirection.NONE, field: "name" },
+        filters: createTree(),
+        render: uuid => <ResourceName uuid={uuid} />
+    },
+    {
+        name: WorkflowProcessesPanelColumnNames.STATUS,
+        selected: true,
+        configurable: true,
+        mutuallyExclusiveFilters: true,
+        filters: getInitialProcessStatusFilters(),
+        render: uuid => <ProcessStatus uuid={uuid} />,
+    },
+    {
+        name: WorkflowProcessesPanelColumnNames.CREATED_AT,
+        selected: true,
+        configurable: true,
+        sort: { direction: SortDirection.DESC, field: "createdAt" },
+        filters: createTree(),
+        render: uuid => <ResourceCreatedAtDate uuid={uuid} />
+    },
+    {
+        name: WorkflowProcessesPanelColumnNames.RUNTIME,
+        selected: true,
+        configurable: true,
+        filters: createTree(),
+        render: uuid => <ContainerRunTime uuid={uuid} />
+    }
+];
+
+export interface WorkflowProcessesPanelDataProps {
+    resources: ResourcesState;
+}
+
+export interface WorkflowProcessesPanelActionProps {
+    onItemClick: (item: string) => void;
+    onContextMenu: (event: React.MouseEvent<HTMLElement>, item: string, resources: ResourcesState) => void;
+    onItemDoubleClick: (item: string) => void;
+}
+
+type WorkflowProcessesPanelProps = WorkflowProcessesPanelActionProps & WorkflowProcessesPanelDataProps;
+
+const DEFAULT_VIEW_MESSAGES = [
+    'No processes available for listing.',
+    'The current workflow may not have any processes, or none match the current filtering.'
+];
+
+type WorkflowProcessesTitleProps = WithStyles<CssRules>;
+
+const WorkflowProcessesTitle = withStyles(styles)(
+    ({ classes }: WorkflowProcessesTitleProps) =>
+        <div className={classes.cardHeader}>
+            <ProcessIcon className={classes.iconHeader} />
+            <Typography noWrap variant='h6' color='inherit'>
+                Run History
+            </Typography>
+        </div>
+);
+
+export const WorkflowProcessesPanelRoot = (props: WorkflowProcessesPanelProps & MPVPanelProps) => {
+    return <DataExplorer
+        id={WORKFLOW_PROCESSES_PANEL_ID}
+        onRowClick={props.onItemClick}
+        onRowDoubleClick={props.onItemDoubleClick}
+        onContextMenu={(event, item) => props.onContextMenu(event, item, props.resources)}
+        contextMenuColumn={true}
+        defaultViewIcon={ProcessIcon}
+        defaultViewMessages={DEFAULT_VIEW_MESSAGES}
+        doHidePanel={props.doHidePanel}
+        doMaximizePanel={props.doMaximizePanel}
+        doUnMaximizePanel={props.doUnMaximizePanel}
+        panelMaximized={props.panelMaximized}
+        panelName={props.panelName}
+        title={<WorkflowProcessesTitle />} />;
+};
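
Each entry in the workflowProcessesPanelColumns array above is a plain object: display metadata (name, sort, filters) plus a render callback that receives the row UUID and returns a React element. The sketch below is illustrative only and not part of this change set; it shows how one more column could follow the same shape, rendering the raw UUID so it needs nothing beyond what the new file already imports.

    // Illustrative sketch only -- not part of this commit.
    import React from 'react';
    import { Typography } from '@material-ui/core';
    import { createTree } from 'models/tree';

    // A minimal extra column in the same DataColumns shape used above:
    // unsortable, unfiltered, and rendered from the row UUID alone.
    const uuidColumn = {
        name: "UUID",
        selected: false,
        configurable: true,
        filters: createTree(),
        render: (uuid: string) => <Typography noWrap>{uuid}</Typography>,
    };

    // It would then be appended to the exported column set, for example:
    //     workflowProcessesPanelColumns.push(uuidColumn);
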
diff --git a/services/workbench2/src/views/workflow-panel/workflow-processes-panel.tsx b/services/workbench2/src/views/workflow-panel/workflow-processes-panel.tsx
new file mode 100644 (file)
index 0000000..48077f9
--- /dev/null
@@ -0,0 +1,36 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+import { Dispatch } from "redux";
+import { connect } from "react-redux";
+import { openProcessContextMenu } from "store/context-menu/context-menu-actions";
+import { WorkflowProcessesPanelRoot, WorkflowProcessesPanelActionProps, WorkflowProcessesPanelDataProps } from "views/workflow-panel/workflow-processes-panel-root";
+import { RootState } from "store/store";
+import { navigateTo } from "store/navigation/navigation-action";
+import { loadDetailsPanel } from "store/details-panel/details-panel-action";
+import { getProcess } from "store/processes/process";
+import { toggleOne, deselectAllOthers } from 'store/multiselect/multiselect-actions';
+
+const mapDispatchToProps = (dispatch: Dispatch): WorkflowProcessesPanelActionProps => ({
+    onContextMenu: (event, resourceUuid, resources) => {
+        const process = getProcess(resourceUuid)(resources);
+        if (process) {
+            dispatch<any>(openProcessContextMenu(event, process));
+        }
+    },
+    onItemClick: (uuid: string) => {
+        dispatch<any>(toggleOne(uuid))
+        dispatch<any>(deselectAllOthers(uuid))
+        dispatch<any>(loadDetailsPanel(uuid));
+    },
+    onItemDoubleClick: uuid => {
+        dispatch<any>(navigateTo(uuid));
+    },
+});
+
+const mapStateToProps = (state: RootState): WorkflowProcessesPanelDataProps => ({
+    resources: state.resources,
+});
+
+export const WorkflowProcessesPanel = connect(mapStateToProps, mapDispatchToProps)(WorkflowProcessesPanelRoot);
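
The connected component above follows the usual container/presentational split: mapStateToProps injects the resources cache and mapDispatchToProps supplies the three row handlers, so the registered-workflow view can mount it with no props (as in the earlier hunk). A minimal sketch of rendering it in isolation, assuming an already-configured workbench2 Redux store, might look like this; it is illustrative only and not part of this change set.

    // Illustrative sketch only -- assumes an already-configured workbench2 store.
    import React from 'react';
    import { Provider } from 'react-redux';
    import { WorkflowProcessesPanel } from 'views/workflow-panel/workflow-processes-panel';

    // The connected panel reads everything it needs from the Redux store in
    // context, so it takes no props of its own.
    export const WorkflowProcessesPanelPreview = ({ store }: { store: any }) => (
        <Provider store={store}>
            <WorkflowProcessesPanel />
        </Provider>
    );
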
index bb3ca955a0ef5ff2667f5d2cceda58fe8d731b02..7c0e2e6aef70e2c11609be2e81a27143266d808b 100644 (file)
@@ -1646,6 +1646,13 @@ __metadata:
   languageName: node
   linkType: hard
 
+"@colors/colors@npm:1.5.0":
+  version: 1.5.0
+  resolution: "@colors/colors@npm:1.5.0"
+  checksum: d64d5260bed1d5012ae3fc617d38d1afc0329fec05342f4e6b838f46998855ba56e0a73833f4a80fa8378c84810da254f76a8a19c39d038260dc06dc4e007425
+  languageName: node
+  linkType: hard
+
 "@coreui/coreui@npm:^4.3.2":
   version: 4.3.2
   resolution: "@coreui/coreui@npm:4.3.2"
@@ -1682,21 +1689,9 @@ __metadata:
   languageName: node
   linkType: hard
 
-"@cypress/listr-verbose-renderer@npm:^0.4.1":
-  version: 0.4.1
-  resolution: "@cypress/listr-verbose-renderer@npm:0.4.1"
-  dependencies:
-    chalk: ^1.1.3
-    cli-cursor: ^1.0.2
-    date-fns: ^1.27.2
-    figures: ^1.7.0
-  checksum: 0169c2b30fd4623a7b2ff8354fe72583fbecc774f36321cd45bb84fb30859426093cb298f95ab71cae707792dc04fe2fa77cd57e66cfbdba9c8006b6b888c4a3
-  languageName: node
-  linkType: hard
-
-"@cypress/request@npm:^2.88.5":
-  version: 2.88.5
-  resolution: "@cypress/request@npm:2.88.5"
+"@cypress/request@npm:^3.0.0":
+  version: 3.0.1
+  resolution: "@cypress/request@npm:3.0.1"
   dependencies:
     aws-sign2: ~0.7.0
     aws4: ^1.8.0
@@ -1705,20 +1700,18 @@ __metadata:
     extend: ~3.0.2
     forever-agent: ~0.6.1
     form-data: ~2.3.2
-    har-validator: ~5.1.3
-    http-signature: ~1.2.0
+    http-signature: ~1.3.6
     is-typedarray: ~1.0.0
     isstream: ~0.1.2
     json-stringify-safe: ~5.0.1
     mime-types: ~2.1.19
-    oauth-sign: ~0.9.0
     performance-now: ^2.1.0
-    qs: ~6.5.2
+    qs: 6.10.4
     safe-buffer: ^5.1.2
-    tough-cookie: ~2.5.0
+    tough-cookie: ^4.1.3
     tunnel-agent: ^0.6.0
-    uuid: ^3.3.2
-  checksum: a605f8a623f4665402768f4d7730315a420967d41c44194eeb2a946ce0b74ce3eb8205a73b0cab879fcf65870dbb1189ac60ea67d163c7acd64228e39e65611a
+    uuid: ^8.3.2
+  checksum: 7175522ebdbe30e3c37973e204c437c23ce659e58d5939466615bddcd58d778f3a8ea40f087b965ae8b8138ea8d102b729c6eb18c6324f121f3778f4a2e8e727
   languageName: node
   linkType: hard
 
@@ -1750,6 +1743,29 @@ __metadata:
   languageName: node
   linkType: hard
 
+"@emotion/is-prop-valid@npm:1.2.1":
+  version: 1.2.1
+  resolution: "@emotion/is-prop-valid@npm:1.2.1"
+  dependencies:
+    "@emotion/memoize": ^0.8.1
+  checksum: 8f42dc573a3fad79b021479becb639b8fe3b60bdd1081a775d32388bca418ee53074c7602a4c845c5f75fa6831eb1cbdc4d208cc0299f57014ed3a02abcad16a
+  languageName: node
+  linkType: hard
+
+"@emotion/memoize@npm:^0.8.1":
+  version: 0.8.1
+  resolution: "@emotion/memoize@npm:0.8.1"
+  checksum: a19cc01a29fcc97514948eaab4dc34d8272e934466ed87c07f157887406bc318000c69ae6f813a9001c6a225364df04249842a50e692ef7a9873335fbcc141b0
+  languageName: node
+  linkType: hard
+
+"@emotion/unitless@npm:0.8.0":
+  version: 0.8.0
+  resolution: "@emotion/unitless@npm:0.8.0"
+  checksum: 176141117ed23c0eb6e53a054a69c63e17ae532ec4210907a20b2208f91771821835f1c63dd2ec63e30e22fcc984026d7f933773ee6526dd038e0850919fae7a
+  languageName: node
+  linkType: hard
+
 "@fortawesome/fontawesome-common-types@npm:^0.2.28":
   version: 0.2.35
   resolution: "@fortawesome/fontawesome-common-types@npm:0.2.35"
@@ -2252,20 +2268,6 @@ __metadata:
   languageName: node
   linkType: hard
 
-"@samverschueren/stream-to-observable@npm:^0.3.0":
-  version: 0.3.1
-  resolution: "@samverschueren/stream-to-observable@npm:0.3.1"
-  dependencies:
-    any-observable: ^0.3.0
-  peerDependenciesMeta:
-    rxjs:
-      optional: true
-    zen-observable:
-      optional: true
-  checksum: 8ec6d43370f419975295f306699f87989dd64a099a29cf62ddacbbbe32df634f87451504d340e15321e74b0a3ca8a9b447736472f792102e234faa207395e6c9
-  languageName: node
-  linkType: hard
-
 "@sinonjs/commons@npm:^1, @sinonjs/commons@npm:^1.3.0, @sinonjs/commons@npm:^1.4.0, @sinonjs/commons@npm:^1.7.0":
   version: 1.8.3
   resolution: "@sinonjs/commons@npm:1.8.3"
@@ -2928,10 +2930,10 @@ __metadata:
   languageName: node
   linkType: hard
 
-"@types/sinonjs__fake-timers@npm:^6.0.1":
-  version: 6.0.2
-  resolution: "@types/sinonjs__fake-timers@npm:6.0.2"
-  checksum: fe62eec9cffa05aa159a036f671c8ba4117c6abe186d574c92e573117075825a756c56a4fe955bd874b77f6054fa25c1420eb22619312c43412f75f3d95f885f
+"@types/sinonjs__fake-timers@npm:8.1.1":
+  version: 8.1.1
+  resolution: "@types/sinonjs__fake-timers@npm:8.1.1"
+  checksum: ca09d54d47091d87020824a73f026300fa06b17cd9f2f9b9387f28b549364b141ef194ee28db762f6588de71d8febcd17f753163cb7ea116b8387c18e80ebd5c
   languageName: node
   linkType: hard
 
@@ -2949,6 +2951,13 @@ __metadata:
   languageName: node
   linkType: hard
 
+"@types/stylis@npm:4.2.0":
+  version: 4.2.0
+  resolution: "@types/stylis@npm:4.2.0"
+  checksum: 02a47584acd2fcb664f7d8270a69686c83752bdfb855f804015d33116a2b09c0b2ac535213a4a7b6d3a78b2915b22b4024cce067ae979beee0e4f8f5fdbc26a9
+  languageName: node
+  linkType: hard
+
 "@types/trusted-types@npm:*":
   version: 2.0.4
   resolution: "@types/trusted-types@npm:2.0.4"
@@ -2999,6 +3008,15 @@ __metadata:
   languageName: node
   linkType: hard
 
+"@types/yauzl@npm:^2.9.1":
+  version: 2.10.3
+  resolution: "@types/yauzl@npm:2.10.3"
+  dependencies:
+    "@types/node": "*"
+  checksum: 5ee966ea7bd6b2802f31ad4281c92c4c0b6dfa593c378a2582c58541fa113bec3d70eb0696b34ad95e8e6861a884cba6c3e351285816693ed176222f840a8c08
+  languageName: node
+  linkType: hard
+
 "@typescript-eslint/eslint-plugin@npm:^2.10.0":
   version: 4.28.0
   resolution: "@typescript-eslint/eslint-plugin@npm:4.28.0"
@@ -3506,6 +3524,13 @@ __metadata:
   languageName: node
   linkType: hard
 
+"ansi-colors@npm:^4.1.1":
+  version: 4.1.3
+  resolution: "ansi-colors@npm:4.1.3"
+  checksum: a9c2ec842038a1fabc7db9ece7d3177e2fe1c5dc6f0c51ecfbf5f39911427b89c00b5dc6b8bd95f82a26e9b16aaae2e83d45f060e98070ce4d1333038edceb0e
+  languageName: node
+  linkType: hard
+
 "ansi-escapes@npm:^3.0.0":
   version: 3.2.0
   resolution: "ansi-escapes@npm:3.2.0"
@@ -3513,7 +3538,7 @@ __metadata:
   languageName: node
   linkType: hard
 
-"ansi-escapes@npm:^4.2.1":
+"ansi-escapes@npm:^4.2.1, ansi-escapes@npm:^4.3.0":
   version: 4.3.2
   resolution: "ansi-escapes@npm:4.3.2"
   dependencies:
@@ -3584,13 +3609,6 @@ __metadata:
   languageName: node
   linkType: hard
 
-"any-observable@npm:^0.3.0":
-  version: 0.3.0
-  resolution: "any-observable@npm:0.3.0"
-  checksum: e715563ebb520ef4b2688c69512bc17e73dc8d5fb9fd29f50dea417cd4e5c8d05d27205461fa22bfd07b9a32134fc8fa88059a16adf52bb5968ccbf338ec4c7f
-  languageName: node
-  linkType: hard
-
 "anymatch@npm:^2.0.0":
   version: 2.0.0
   resolution: "anymatch@npm:2.0.0"
@@ -3625,7 +3643,7 @@ __metadata:
   languageName: node
   linkType: hard
 
-"arch@npm:^2.1.2":
+"arch@npm:^2.2.0":
   version: 2.2.0
   resolution: "arch@npm:2.2.0"
   checksum: e21b7635029fe8e9cdd5a026f9a6c659103e63fff423834323cdf836a1bb240a72d0c39ca8c470f84643385cf581bd8eda2cad8bf493e27e54bd9783abe9101f
@@ -3869,7 +3887,7 @@ __metadata:
     caniuse-lite: 1.0.30001299
     classnames: 2.2.6
     cwlts: 1.15.29
-    cypress: 6.3.0
+    cypress: ^13.6.6
     cypress-wait-until: ^3.0.1
     date-fns: ^2.28.0
     debounce: 1.2.0
@@ -3905,6 +3923,7 @@ __metadata:
     react-dropzone: 5.1.1
     react-highlight-words: 0.14.0
     react-idle-timer: 4.3.6
+    react-loader-spinner: ^6.1.6
     react-redux: 5.0.7
     react-router: 4.3.1
     react-router-dom: 4.3.1
@@ -4003,6 +4022,13 @@ __metadata:
   languageName: node
   linkType: hard
 
+"astral-regex@npm:^2.0.0":
+  version: 2.0.0
+  resolution: "astral-regex@npm:2.0.0"
+  checksum: 876231688c66400473ba505731df37ea436e574dd524520294cc3bbc54ea40334865e01fa0d074d74d036ee874ee7e62f486ea38bc421ee8e6a871c06f011766
+  languageName: node
+  linkType: hard
+
 "async-each@npm:^1.0.1":
   version: 1.0.3
   resolution: "async-each@npm:1.0.3"
@@ -4496,7 +4522,7 @@ __metadata:
   languageName: node
   linkType: hard
 
-"base64-js@npm:^1.0.2":
+"base64-js@npm:^1.0.2, base64-js@npm:^1.3.1":
   version: 1.5.1
   resolution: "base64-js@npm:1.5.1"
   checksum: 669632eb3745404c2f822a18fc3a0122d2f9a7a13f7fb8b5823ee19d1d2ff9ee5b52c53367176ea4ad093c332fd5ab4bd0ebae5a8e27917a4105a4cfc86b1005
@@ -4564,7 +4590,7 @@ __metadata:
   languageName: node
   linkType: hard
 
-"blob-util@npm:2.0.2":
+"blob-util@npm:^2.0.2":
   version: 2.0.2
   resolution: "blob-util@npm:2.0.2"
   checksum: d543e6b92e4ca715ca33c78e89a07a2290d43e5b2bc897d7ec588c5c7bbf59df93e45225ac0c9258aa6ce4320358990f99c9288f1c48280f8ec5d7a2e088d19b
@@ -4865,6 +4891,16 @@ __metadata:
   languageName: node
   linkType: hard
 
+"buffer@npm:^5.7.1":
+  version: 5.7.1
+  resolution: "buffer@npm:5.7.1"
+  dependencies:
+    base64-js: ^1.3.1
+    ieee754: ^1.1.13
+  checksum: e2cf8429e1c4c7b8cbd30834ac09bd61da46ce35f5c22a78e6c2f04497d6d25541b16881e30a019c6fd3154150650ccee27a308eff3e26229d788bbdeb08ab84
+  languageName: node
+  linkType: hard
+
 "builtin-modules@npm:^1.1.1":
   version: 1.1.1
   resolution: "builtin-modules@npm:1.1.1"
@@ -5119,6 +5155,13 @@ __metadata:
   languageName: node
   linkType: hard
 
+"camelize@npm:^1.0.0":
+  version: 1.0.1
+  resolution: "camelize@npm:1.0.1"
+  checksum: 91d8611d09af725e422a23993890d22b2b72b4cabf7239651856950c76b4bf53fe0d0da7c5e4db05180e898e4e647220e78c9fbc976113bd96d603d1fcbfcb99
+  languageName: node
+  linkType: hard
+
 "caniuse-api@npm:^3.0.0":
   version: 3.0.0
   resolution: "caniuse-api@npm:3.0.0"
@@ -5139,9 +5182,9 @@ __metadata:
   linkType: hard
 
 "caniuse-lite@npm:^1.0.0, caniuse-lite@npm:^1.0.30000981, caniuse-lite@npm:^1.0.30001035, caniuse-lite@npm:^1.0.30001109, caniuse-lite@npm:^1.0.30001541":
-  version: 1.0.30001570
-  resolution: "caniuse-lite@npm:1.0.30001570"
-  checksum: 460be2c7a9b1c8a83b6aae4226661c276d9dada6c84209dee547699cf4b28030b9d1fc29ddd7626acee77412b6401993878ea0ef3eadbf3a63ded9034896ae20
+  version: 1.0.30001593
+  resolution: "caniuse-lite@npm:1.0.30001593"
+  checksum: 3e2b19075563c3222101c8d5e6ab2f6e1ba99c3ad03b8d2449f9ee7ed03e9d3dac0b1fb24c129e9a5d89fdde4abb97392280c0abb113c0c60250a2b49f378c60
   languageName: node
   linkType: hard
 
@@ -5179,7 +5222,7 @@ __metadata:
   languageName: node
   linkType: hard
 
-"chalk@npm:^1.0.0, chalk@npm:^1.1.3":
+"chalk@npm:^1.1.3":
   version: 1.1.3
   resolution: "chalk@npm:1.1.3"
   dependencies:
@@ -5321,6 +5364,13 @@ __metadata:
   languageName: node
   linkType: hard
 
+"ci-info@npm:^3.2.0":
+  version: 3.9.0
+  resolution: "ci-info@npm:3.9.0"
+  checksum: 6b19dc9b2966d1f8c2041a838217299718f15d6c4b63ae36e4674edd2bee48f780e94761286a56aa59eb305a85fbea4ddffb7630ec063e7ec7e7e5ad42549a87
+  languageName: node
+  linkType: hard
+
 "cipher-base@npm:^1.0.0, cipher-base@npm:^1.0.1, cipher-base@npm:^1.0.3":
   version: 1.0.4
   resolution: "cipher-base@npm:1.0.4"
@@ -5380,24 +5430,6 @@ __metadata:
   languageName: node
   linkType: hard
 
-"cli-cursor@npm:^1.0.2":
-  version: 1.0.2
-  resolution: "cli-cursor@npm:1.0.2"
-  dependencies:
-    restore-cursor: ^1.0.1
-  checksum: e3b4400d5e925ed11c7596f82e80e170693f69ac6f0f21da2a400043c37548dd780f985a1a5ef1ffb038e36fc6711d1d4f066b104eed851ae76e34bd883cf2bf
-  languageName: node
-  linkType: hard
-
-"cli-cursor@npm:^2.0.0, cli-cursor@npm:^2.1.0":
-  version: 2.1.0
-  resolution: "cli-cursor@npm:2.1.0"
-  dependencies:
-    restore-cursor: ^2.0.0
-  checksum: d88e97bfdac01046a3ffe7d49f06757b3126559d7e44aa2122637eb179284dc6cd49fca2fac4f67c19faaf7e6dab716b6fe1dfcd309977407d8c7578ec2d044d
-  languageName: node
-  linkType: hard
-
 "cli-cursor@npm:^3.1.0":
   version: 3.1.0
   resolution: "cli-cursor@npm:3.1.0"
@@ -5407,27 +5439,26 @@ __metadata:
   languageName: node
   linkType: hard
 
-"cli-table3@npm:~0.6.0":
-  version: 0.6.0
-  resolution: "cli-table3@npm:0.6.0"
+"cli-table3@npm:~0.6.1":
+  version: 0.6.3
+  resolution: "cli-table3@npm:0.6.3"
   dependencies:
-    colors: ^1.1.2
-    object-assign: ^4.1.0
+    "@colors/colors": 1.5.0
     string-width: ^4.2.0
   dependenciesMeta:
-    colors:
+    "@colors/colors":
       optional: true
-  checksum: 98682a2d3eef5ad07d34a08f90398d0640004e28ecf8eb59006436f11ed7b4d453db09f46c2ea880618fbd61fee66321b3b3ee1b20276bc708b6baf6f9663d75
+  checksum: 09897f68467973f827c04e7eaadf13b55f8aec49ecd6647cc276386ea660059322e2dd8020a8b6b84d422dbdd619597046fa89cbbbdc95b2cea149a2df7c096c
   languageName: node
   linkType: hard
 
-"cli-truncate@npm:^0.2.1":
-  version: 0.2.1
-  resolution: "cli-truncate@npm:0.2.1"
+"cli-truncate@npm:^2.1.0":
+  version: 2.1.0
+  resolution: "cli-truncate@npm:2.1.0"
   dependencies:
-    slice-ansi: 0.0.4
-    string-width: ^1.0.1
-  checksum: c2e4b8d95275d8c772ced60977341e87530b81a1160b0e26a252a6c39b794fdf7a1236bf5bc7150558f759deb960cbabc0f993964327bde80790bcd330b698a0
+    slice-ansi: ^3.0.0
+    string-width: ^4.2.0
+  checksum: bf1e4e6195392dc718bf9cd71f317b6300dc4a9191d052f31046b8773230ece4fa09458813bf0e3455a5e68c0690d2ea2c197d14a8b85a7b5e01c97f4b5feb5d
   languageName: node
   linkType: hard
 
@@ -5623,10 +5654,10 @@ __metadata:
   languageName: node
   linkType: hard
 
-"colors@npm:^1.1.2":
-  version: 1.4.0
-  resolution: "colors@npm:1.4.0"
-  checksum: 98aa2c2418ad87dedf25d781be69dc5fc5908e279d9d30c34d8b702e586a0474605b3a189511482b9d5ed0d20c867515d22749537f7bc546256c6014f3ebdcec
+"colorette@npm:^2.0.16":
+  version: 2.0.20
+  resolution: "colorette@npm:2.0.20"
+  checksum: 0c016fea2b91b733eb9f4bcdb580018f52c0bc0979443dad930e5037a968237ac53d9beb98e218d2e9235834f8eebce7f8e080422d6194e957454255bde71d3d
   languageName: node
   linkType: hard
 
@@ -5653,10 +5684,10 @@ __metadata:
   languageName: node
   linkType: hard
 
-"commander@npm:^5.1.0":
-  version: 5.1.0
-  resolution: "commander@npm:5.1.0"
-  checksum: 0b7fec1712fbcc6230fcb161d8d73b4730fa91a21dc089515489402ad78810547683f058e2a9835929c212fead1d6a6ade70db28bbb03edbc2829a9ab7d69447
+"commander@npm:^6.2.1":
+  version: 6.2.1
+  resolution: "commander@npm:6.2.1"
+  checksum: d7090410c0de6bc5c67d3ca41c41760d6d268f3c799e530aafb73b7437d1826bbf0d2a3edac33f8b57cc9887b4a986dce307fa5557e109be40eadb7c43b21742
   languageName: node
   linkType: hard
 
@@ -5721,7 +5752,7 @@ __metadata:
   languageName: node
   linkType: hard
 
-"concat-stream@npm:^1.5.0, concat-stream@npm:^1.6.2":
+"concat-stream@npm:^1.5.0":
   version: 1.6.2
   resolution: "concat-stream@npm:1.6.2"
   dependencies:
@@ -6041,6 +6072,13 @@ __metadata:
   languageName: node
   linkType: hard
 
+"css-color-keywords@npm:^1.0.0":
+  version: 1.0.0
+  resolution: "css-color-keywords@npm:1.0.0"
+  checksum: 8f125e3ad477bd03c77b533044bd9e8a6f7c0da52d49bbc0bbe38327b3829d6ba04d368ca49dd9ff3b667d2fc8f1698d891c198bbf8feade1a5501bf5a296408
+  languageName: node
+  linkType: hard
+
 "css-color-names@npm:0.0.4, css-color-names@npm:^0.0.4":
   version: 0.0.4
   resolution: "css-color-names@npm:0.0.4"
@@ -6135,6 +6173,17 @@ __metadata:
   languageName: node
   linkType: hard
 
+"css-to-react-native@npm:3.2.0":
+  version: 3.2.0
+  resolution: "css-to-react-native@npm:3.2.0"
+  dependencies:
+    camelize: ^1.0.0
+    css-color-keywords: ^1.0.0
+    postcss-value-parser: ^4.0.2
+  checksum: 263be65e805aef02c3f20c064665c998a8c35293e1505dbe6e3054fb186b01a9897ac6cf121f9840e5a9dfe3fb3994f6fcd0af84a865f1df78ba5bf89e77adce
+  languageName: node
+  linkType: hard
+
 "css-tree@npm:1.0.0-alpha.37":
   version: 1.0.0-alpha.37
   resolution: "css-tree@npm:1.0.0-alpha.37"
@@ -6320,6 +6369,13 @@ __metadata:
   languageName: node
   linkType: hard
 
+"csstype@npm:3.1.2":
+  version: 3.1.2
+  resolution: "csstype@npm:3.1.2"
+  checksum: e1a52e6c25c1314d6beef5168da704ab29c5186b877c07d822bd0806717d9a265e8493a2e35ca7e68d0f5d472d43fac1cdce70fd79fd0853dff81f3028d857b5
+  languageName: node
+  linkType: hard
+
 "csstype@npm:^2.0.0, csstype@npm:^2.5.2":
   version: 2.6.17
   resolution: "csstype@npm:2.6.17"
@@ -6367,51 +6423,55 @@ __metadata:
   languageName: node
   linkType: hard
 
-"cypress@npm:6.3.0":
-  version: 6.3.0
-  resolution: "cypress@npm:6.3.0"
+"cypress@npm:^13.6.6":
+  version: 13.6.6
+  resolution: "cypress@npm:13.6.6"
   dependencies:
-    "@cypress/listr-verbose-renderer": ^0.4.1
-    "@cypress/request": ^2.88.5
+    "@cypress/request": ^3.0.0
     "@cypress/xvfb": ^1.2.4
-    "@types/sinonjs__fake-timers": ^6.0.1
+    "@types/sinonjs__fake-timers": 8.1.1
     "@types/sizzle": ^2.3.2
-    arch: ^2.1.2
-    blob-util: 2.0.2
+    arch: ^2.2.0
+    blob-util: ^2.0.2
     bluebird: ^3.7.2
+    buffer: ^5.7.1
     cachedir: ^2.3.0
     chalk: ^4.1.0
     check-more-types: ^2.24.0
-    cli-table3: ~0.6.0
-    commander: ^5.1.0
+    cli-cursor: ^3.1.0
+    cli-table3: ~0.6.1
+    commander: ^6.2.1
     common-tags: ^1.8.0
-    debug: ^4.1.1
-    eventemitter2: ^6.4.2
-    execa: ^4.0.2
+    dayjs: ^1.10.4
+    debug: ^4.3.4
+    enquirer: ^2.3.6
+    eventemitter2: 6.4.7
+    execa: 4.1.0
     executable: ^4.1.1
-    extract-zip: ^1.7.0
-    fs-extra: ^9.0.1
+    extract-zip: 2.0.1
+    figures: ^3.2.0
+    fs-extra: ^9.1.0
     getos: ^3.2.1
-    is-ci: ^2.0.0
-    is-installed-globally: ^0.3.2
+    is-ci: ^3.0.1
+    is-installed-globally: ~0.4.0
     lazy-ass: ^1.6.0
-    listr: ^0.14.3
-    lodash: ^4.17.19
+    listr2: ^3.8.3
+    lodash: ^4.17.21
     log-symbols: ^4.0.0
-    minimist: ^1.2.5
-    moment: ^2.27.0
+    minimist: ^1.2.8
     ospath: ^1.2.2
-    pretty-bytes: ^5.4.1
-    ramda: ~0.26.1
+    pretty-bytes: ^5.6.0
+    process: ^0.11.10
+    proxy-from-env: 1.0.0
     request-progress: ^3.0.0
-    supports-color: ^7.2.0
+    semver: ^7.5.3
+    supports-color: ^8.1.1
     tmp: ~0.2.1
     untildify: ^4.0.0
-    url: ^0.11.0
     yauzl: ^2.10.0
   bin:
     cypress: bin/cypress
-  checksum: beaf86d7b88828569105e9458e23d7e52821424fcf231d4155a10ac5ed26568239157b24465f7cb8d6a66e708f4cc81b511689d05c5a4377f694b3c3e82676dd
+  checksum: 8a7db7d2941ea9fd698b9311b4f23fb6491038fe57e4c19b29a1ee58a25f9d98646674f876c1068a97428c2e81548bb0dd8701cd08e84c6b17ed75f9c2266908
   languageName: node
   linkType: hard
 
@@ -6452,13 +6512,6 @@ __metadata:
   languageName: node
   linkType: hard
 
-"date-fns@npm:^1.27.2":
-  version: 1.30.1
-  resolution: "date-fns@npm:1.30.1"
-  checksum: 86b1f3269cbb1f3ee5ac9959775ea6600436f4ee2b78430cd427b41a0c9fabf740b1a5d401c085f3003539a6f4755c7c56c19fbd70ce11f6f673f6bc8075b710
-  languageName: node
-  linkType: hard
-
 "date-fns@npm:^2.28.0":
   version: 2.28.0
   resolution: "date-fns@npm:2.28.0"
@@ -6466,6 +6519,13 @@ __metadata:
   languageName: node
   linkType: hard
 
+"dayjs@npm:^1.10.4":
+  version: 1.11.10
+  resolution: "dayjs@npm:1.11.10"
+  checksum: a6b5a3813b8884f5cd557e2e6b7fa569f4c5d0c97aca9558e38534af4f2d60daafd3ff8c2000fed3435cfcec9e805bcebd99f90130c6d1c5ef524084ced588c4
+  languageName: node
+  linkType: hard
+
 "debounce@npm:1.2.0":
   version: 1.2.0
   resolution: "debounce@npm:1.2.0"
@@ -6522,7 +6582,7 @@ __metadata:
   languageName: node
   linkType: hard
 
-"debug@npm:^4.3.3":
+"debug@npm:^4.3.3, debug@npm:^4.3.4":
   version: 4.3.4
   resolution: "debug@npm:4.3.4"
   dependencies:
@@ -7148,13 +7208,6 @@ __metadata:
   languageName: node
   linkType: hard
 
-"elegant-spinner@npm:^1.0.1":
-  version: 1.0.1
-  resolution: "elegant-spinner@npm:1.0.1"
-  checksum: d6a773d950c5d403b5f0fa402787e37dde99989ab6c943558fe8491cf7cd0df0e2747a9ff4d391d5a5f20a447cc9e9a63bdc956354ba47bea462f1603a5b04fe
-  languageName: node
-  linkType: hard
-
 "elliptic@npm:6.5.4, elliptic@npm:^6.5.3":
   version: 6.5.4
   resolution: "elliptic@npm:6.5.4"
@@ -7234,6 +7287,16 @@ __metadata:
   languageName: node
   linkType: hard
 
+"enquirer@npm:^2.3.6":
+  version: 2.4.1
+  resolution: "enquirer@npm:2.4.1"
+  dependencies:
+    ansi-colors: ^4.1.1
+    strip-ansi: ^6.0.1
+  checksum: f080f11a74209647dbf347a7c6a83c8a47ae1ebf1e75073a808bc1088eb780aa54075bfecd1bcdb3e3c724520edb8e6ee05da031529436b421b71066fcc48cb5
+  languageName: node
+  linkType: hard
+
 "entities@npm:^2.0.0":
   version: 2.2.0
   resolution: "entities@npm:2.2.0"
@@ -7787,10 +7850,10 @@ __metadata:
   languageName: node
   linkType: hard
 
-"eventemitter2@npm:^6.4.2":
-  version: 6.4.4
-  resolution: "eventemitter2@npm:6.4.4"
-  checksum: b5e707039973d5a770bc4c64255604df66df3a1f63389dccb7118af163b9f790ca7596463d7868426339301ad9de5ef1c3f4a9c7ac3b93874c5ca792916dede1
+"eventemitter2@npm:6.4.7":
+  version: 6.4.7
+  resolution: "eventemitter2@npm:6.4.7"
+  checksum: 1b36a77e139d6965ebf3a36c01fa00c089ae6b80faa1911e52888f40b3a7057b36a2cc45dcd1ad87cda3798fe7b97a0aabcbb8175a8b96092a23bb7d0f039e66
   languageName: node
   linkType: hard
 
@@ -7833,22 +7896,7 @@ __metadata:
   languageName: node
   linkType: hard
 
-"execa@npm:^1.0.0":
-  version: 1.0.0
-  resolution: "execa@npm:1.0.0"
-  dependencies:
-    cross-spawn: ^6.0.0
-    get-stream: ^4.0.0
-    is-stream: ^1.1.0
-    npm-run-path: ^2.0.0
-    p-finally: ^1.0.0
-    signal-exit: ^3.0.0
-    strip-eof: ^1.0.0
-  checksum: ddf1342c1c7d02dd93b41364cd847640f6163350d9439071abf70bf4ceb1b9b2b2e37f54babb1d8dc1df8e0d8def32d0e81e74a2e62c3e1d70c303eb4c306bc4
-  languageName: node
-  linkType: hard
-
-"execa@npm:^4.0.2":
+"execa@npm:4.1.0":
   version: 4.1.0
   resolution: "execa@npm:4.1.0"
   dependencies:
@@ -7865,6 +7913,21 @@ __metadata:
   languageName: node
   linkType: hard
 
+"execa@npm:^1.0.0":
+  version: 1.0.0
+  resolution: "execa@npm:1.0.0"
+  dependencies:
+    cross-spawn: ^6.0.0
+    get-stream: ^4.0.0
+    is-stream: ^1.1.0
+    npm-run-path: ^2.0.0
+    p-finally: ^1.0.0
+    signal-exit: ^3.0.0
+    strip-eof: ^1.0.0
+  checksum: ddf1342c1c7d02dd93b41364cd847640f6163350d9439071abf70bf4ceb1b9b2b2e37f54babb1d8dc1df8e0d8def32d0e81e74a2e62c3e1d70c303eb4c306bc4
+  languageName: node
+  linkType: hard
+
 "executable@npm:^4.1.1":
   version: 4.1.1
   resolution: "executable@npm:4.1.1"
@@ -7874,13 +7937,6 @@ __metadata:
   languageName: node
   linkType: hard
 
-"exit-hook@npm:^1.0.0":
-  version: 1.1.1
-  resolution: "exit-hook@npm:1.1.1"
-  checksum: 1b4f16da7c202cd336ca07acb052922639182b4e2f1ad4007ed481bb774ce93469f505dec1371d9cd580ac54146a9fd260f053b0e4a48fa87c49fa3dc4a3f144
-  languageName: node
-  linkType: hard
-
 "exit@npm:^0.1.2":
   version: 0.1.2
   resolution: "exit@npm:0.1.2"
@@ -8017,17 +8073,20 @@ __metadata:
   languageName: node
   linkType: hard
 
-"extract-zip@npm:^1.7.0":
-  version: 1.7.0
-  resolution: "extract-zip@npm:1.7.0"
+"extract-zip@npm:2.0.1":
+  version: 2.0.1
+  resolution: "extract-zip@npm:2.0.1"
   dependencies:
-    concat-stream: ^1.6.2
-    debug: ^2.6.9
-    mkdirp: ^0.5.4
+    "@types/yauzl": ^2.9.1
+    debug: ^4.1.1
+    get-stream: ^5.1.0
     yauzl: ^2.10.0
+  dependenciesMeta:
+    "@types/yauzl":
+      optional: true
   bin:
     extract-zip: cli.js
-  checksum: 011bab660d738614555773d381a6ba4815d98c1cfcdcdf027e154ebcc9fc8c9ef637b3ea5c9b2144013100071ee41722ed041fc9aacc60f6198ef747cac0c073
+  checksum: 8cbda9debdd6d6980819cc69734d874ddd71051c9fe5bde1ef307ebcedfe949ba57b004894b585f758b7c9eeeea0e3d87f2dda89b7d25320459c2c9643ebb635
   languageName: node
   linkType: hard
 
@@ -8183,26 +8242,7 @@ __metadata:
   languageName: node
   linkType: hard
 
-"figures@npm:^1.7.0":
-  version: 1.7.0
-  resolution: "figures@npm:1.7.0"
-  dependencies:
-    escape-string-regexp: ^1.0.5
-    object-assign: ^4.1.0
-  checksum: d77206deba991a7977f864b8c8edf9b8b43b441be005482db04b0526e36263adbdb22c1c6d2df15a1ad78d12029bd1aa41ccebcb5d425e1f2cf629c6daaa8e10
-  languageName: node
-  linkType: hard
-
-"figures@npm:^2.0.0":
-  version: 2.0.0
-  resolution: "figures@npm:2.0.0"
-  dependencies:
-    escape-string-regexp: ^1.0.5
-  checksum: 081beb16ea57d1716f8447c694f637668322398b57017b20929376aaf5def9823b35245b734cdd87e4832dc96e9c6f46274833cada77bfe15e5f980fea1fd21f
-  languageName: node
-  linkType: hard
-
-"figures@npm:^3.0.0":
+"figures@npm:^3.0.0, figures@npm:^3.2.0":
   version: 3.2.0
   resolution: "figures@npm:3.2.0"
   dependencies:
@@ -8537,7 +8577,7 @@ __metadata:
   languageName: node
   linkType: hard
 
-"fs-extra@npm:^9.0.1":
+"fs-extra@npm:^9.1.0":
   version: 9.1.0
   resolution: "fs-extra@npm:9.1.0"
   dependencies:
@@ -8779,7 +8819,7 @@ __metadata:
   languageName: node
   linkType: hard
 
-"get-stream@npm:^5.0.0":
+"get-stream@npm:^5.0.0, get-stream@npm:^5.1.0":
   version: 5.2.0
   resolution: "get-stream@npm:5.2.0"
   dependencies:
@@ -8866,12 +8906,12 @@ __metadata:
   languageName: node
   linkType: hard
 
-"global-dirs@npm:^2.0.1":
-  version: 2.1.0
-  resolution: "global-dirs@npm:2.1.0"
+"global-dirs@npm:^3.0.0":
+  version: 3.0.1
+  resolution: "global-dirs@npm:3.0.1"
   dependencies:
-    ini: 1.3.7
-  checksum: f80b74032c0359a6af7f37d153b8ced67710135ed7ab45b03efe688f5792ef859b660561beeb79ecce3106071c2547196c0971dfecdb2332139892129487233d
+    ini: 2.0.0
+  checksum: 70147b80261601fd40ac02a104581432325c1c47329706acd773f3a6ce99bb36d1d996038c85ccacd482ad22258ec233c586b6a91535b1a116b89663d49d6438
   languageName: node
   linkType: hard
 
@@ -9462,6 +9502,17 @@ __metadata:
   languageName: node
   linkType: hard
 
+"http-signature@npm:~1.3.6":
+  version: 1.3.6
+  resolution: "http-signature@npm:1.3.6"
+  dependencies:
+    assert-plus: ^1.0.0
+    jsprim: ^2.0.2
+    sshpk: ^1.14.1
+  checksum: 10be2af4764e71fee0281392937050201ee576ac755c543f570d6d87134ce5e858663fe999a7adb3e4e368e1e356d0d7fec6b9542295b875726ff615188e7a0c
+  languageName: node
+  linkType: hard
+
 "https-browserify@npm:^1.0.0":
   version: 1.0.0
   resolution: "https-browserify@npm:1.0.0"
@@ -9538,7 +9589,7 @@ __metadata:
   languageName: node
   linkType: hard
 
-"ieee754@npm:^1.1.4":
+"ieee754@npm:^1.1.13, ieee754@npm:^1.1.4":
   version: 1.2.1
   resolution: "ieee754@npm:1.2.1"
   checksum: 5144c0c9815e54ada181d80a0b810221a253562422e7c6c3a60b1901154184f49326ec239d618c416c1c5945a2e197107aee8d986a3dd836b53dffefd99b5e7e
@@ -9683,13 +9734,6 @@ __metadata:
   languageName: node
   linkType: hard
 
-"indent-string@npm:^3.0.0":
-  version: 3.2.0
-  resolution: "indent-string@npm:3.2.0"
-  checksum: a0b72603bba6c985d367fda3a25aad16423d2056b22a7e83ee2dd9ce0ce3d03d1e078644b679087aa7edf1cfb457f0d96d9eeadc0b12f38582088cc00e995d2f
-  languageName: node
-  linkType: hard
-
 "indent-string@npm:^4.0.0":
   version: 4.0.0
   resolution: "indent-string@npm:4.0.0"
@@ -9742,10 +9786,10 @@ __metadata:
   languageName: node
   linkType: hard
 
-"ini@npm:1.3.7":
-  version: 1.3.7
-  resolution: "ini@npm:1.3.7"
-  checksum: f8f3801e8eb039f9e03cdc27ceb494a7ac6e6ca7b2dd8394a9ef97ed5ae66930fadefd5ec908e41e4b103d3c9063b5788d47de5e8e892083c7a67b489f3b962d
+"ini@npm:2.0.0":
+  version: 2.0.0
+  resolution: "ini@npm:2.0.0"
+  checksum: e7aadc5fb2e4aefc666d74ee2160c073995a4061556b1b5b4241ecb19ad609243b9cceafe91bae49c219519394bbd31512516cb22a3b1ca6e66d869e0447e84e
   languageName: node
   linkType: hard
 
@@ -9977,6 +10021,17 @@ __metadata:
   languageName: node
   linkType: hard
 
+"is-ci@npm:^3.0.1":
+  version: 3.0.1
+  resolution: "is-ci@npm:3.0.1"
+  dependencies:
+    ci-info: ^3.2.0
+  bin:
+    is-ci: bin.js
+  checksum: 192c66dc7826d58f803ecae624860dccf1899fc1f3ac5505284c0a5cf5f889046ffeb958fa651e5725d5705c5bcb14f055b79150ea5fcad7456a9569de60260e
+  languageName: node
+  linkType: hard
+
 "is-color-stop@npm:^1.0.0":
   version: 1.1.0
   resolution: "is-color-stop@npm:1.1.0"
@@ -10166,13 +10221,13 @@ __metadata:
   languageName: node
   linkType: hard
 
-"is-installed-globally@npm:^0.3.2":
-  version: 0.3.2
-  resolution: "is-installed-globally@npm:0.3.2"
+"is-installed-globally@npm:~0.4.0":
+  version: 0.4.0
+  resolution: "is-installed-globally@npm:0.4.0"
   dependencies:
-    global-dirs: ^2.0.1
-    is-path-inside: ^3.0.1
-  checksum: 7f7489ae3026cc3b9f61426108d5911c864ac545bc90ef46e2eda4461c34a1f287a64f765895893398f0769235c59e63f25283c939c661bfe9be5250b1ed99cb
+    global-dirs: ^3.0.0
+    is-path-inside: ^3.0.2
+  checksum: 3359840d5982d22e9b350034237b2cda2a12bac1b48a721912e1ab8e0631dd07d45a2797a120b7b87552759a65ba03e819f1bd63f2d7ab8657ec0b44ee0bf399
   languageName: node
   linkType: hard
 
@@ -10227,15 +10282,6 @@ __metadata:
   languageName: node
   linkType: hard
 
-"is-observable@npm:^1.1.0":
-  version: 1.1.0
-  resolution: "is-observable@npm:1.1.0"
-  dependencies:
-    symbol-observable: ^1.1.0
-  checksum: ab3d7e740915e6b53a81d96ce7d581f4dd26dacceb95278b74e7bf3123221073ea02cde810f864cff94ed5c394f18248deefd6a8f2d40137d868130eb5be6f85
-  languageName: node
-  linkType: hard
-
 "is-path-cwd@npm:^2.0.0":
   version: 2.2.0
   resolution: "is-path-cwd@npm:2.2.0"
@@ -10261,7 +10307,7 @@ __metadata:
   languageName: node
   linkType: hard
 
-"is-path-inside@npm:^3.0.1":
+"is-path-inside@npm:^3.0.2":
   version: 3.0.3
   resolution: "is-path-inside@npm:3.0.3"
   checksum: abd50f06186a052b349c15e55b182326f1936c89a78bf6c8f2b707412517c097ce04bc49a0ca221787bc44e1049f51f09a2ffb63d22899051988d3a618ba13e9
@@ -11278,6 +11324,18 @@ __metadata:
   languageName: node
   linkType: hard
 
+"jsprim@npm:^2.0.2":
+  version: 2.0.2
+  resolution: "jsprim@npm:2.0.2"
+  dependencies:
+    assert-plus: 1.0.0
+    extsprintf: 1.3.0
+    json-schema: 0.4.0
+    verror: 1.10.0
+  checksum: d175f6b1991e160cb0aa39bc857da780e035611986b5492f32395411879fdaf4e513d98677f08f7352dac93a16b66b8361c674b86a3fa406e2e7af6b26321838
+  languageName: node
+  linkType: hard
+
 "jss-camel-case@npm:^6.0.0":
   version: 6.1.0
   resolution: "jss-camel-case@npm:6.1.0"
@@ -11529,57 +11587,24 @@ __metadata:
   languageName: node
   linkType: hard
 
-"listr-silent-renderer@npm:^1.1.1":
-  version: 1.1.1
-  resolution: "listr-silent-renderer@npm:1.1.1"
-  checksum: 81982612e4d207be2e69c4dcf2a6e0aaa6080e41bfe0b73e8d0b040dcdb79874248b1040558793a2f0fcc9c2252ec8af47379650f59bf2a7656c11cd5a48c948
-  languageName: node
-  linkType: hard
-
-"listr-update-renderer@npm:^0.5.0":
-  version: 0.5.0
-  resolution: "listr-update-renderer@npm:0.5.0"
+"listr2@npm:^3.8.3":
+  version: 3.14.0
+  resolution: "listr2@npm:3.14.0"
   dependencies:
-    chalk: ^1.1.3
-    cli-truncate: ^0.2.1
-    elegant-spinner: ^1.0.1
-    figures: ^1.7.0
-    indent-string: ^3.0.0
-    log-symbols: ^1.0.2
-    log-update: ^2.3.0
-    strip-ansi: ^3.0.1
+    cli-truncate: ^2.1.0
+    colorette: ^2.0.16
+    log-update: ^4.0.0
+    p-map: ^4.0.0
+    rfdc: ^1.3.0
+    rxjs: ^7.5.1
+    through: ^2.3.8
+    wrap-ansi: ^7.0.0
   peerDependencies:
-    listr: ^0.14.2
-  checksum: 2dddc763837a9086a684545ee9049fcb102d423b0c840ad929471ab461075ed78d5c79f1e8334cd7a76aa9076e7631c04a38733bb4d88c23ca6082c087335864
-  languageName: node
-  linkType: hard
-
-"listr-verbose-renderer@npm:^0.5.0":
-  version: 0.5.0
-  resolution: "listr-verbose-renderer@npm:0.5.0"
-  dependencies:
-    chalk: ^2.4.1
-    cli-cursor: ^2.1.0
-    date-fns: ^1.27.2
-    figures: ^2.0.0
-  checksum: 3e504be729f9dd15b40db743e403673b76331774411dbc29d6f48136f6ba8bc1dee645a4e621c1cb781e6e69a58b78cb9aa8c153c7ceccfe4e4ea74d563bca3a
-  languageName: node
-  linkType: hard
-
-"listr@npm:^0.14.3":
-  version: 0.14.3
-  resolution: "listr@npm:0.14.3"
-  dependencies:
-    "@samverschueren/stream-to-observable": ^0.3.0
-    is-observable: ^1.1.0
-    is-promise: ^2.1.0
-    is-stream: ^1.1.0
-    listr-silent-renderer: ^1.1.1
-    listr-update-renderer: ^0.5.0
-    listr-verbose-renderer: ^0.5.0
-    p-map: ^2.0.0
-    rxjs: ^6.3.3
-  checksum: 932d69430c2bed2f987c53b2ea2070786187de29bc4a9fa8e93fdfdf2390d7c0ff9415eb1b31136f76b134cbb930fb18af039fc341263a02b107abc6d2c31a00
+    enquirer: ">= 2.3.0 < 3"
+  peerDependenciesMeta:
+    enquirer:
+      optional: true
+  checksum: fdb8b2d6bdf5df9371ebd5082bee46c6d0ca3d1e5f2b11fbb5a127839855d5f3da9d4968fce94f0a5ec67cac2459766abbb1faeef621065ebb1829b11ef9476d
   languageName: node
   linkType: hard
 
@@ -11809,15 +11834,6 @@ __metadata:
   languageName: node
   linkType: hard
 
-"log-symbols@npm:^1.0.2":
-  version: 1.0.2
-  resolution: "log-symbols@npm:1.0.2"
-  dependencies:
-    chalk: ^1.0.0
-  checksum: 5214ade9381db5d40528c171fdfd459b75cad7040eb6a347294ae47fa80cfebba4adbc3aa73a1c9da744cbfa240dd93b38f80df8615717affeea6c4bb6b8dfe7
-  languageName: node
-  linkType: hard
-
 "log-symbols@npm:^4.0.0":
   version: 4.1.0
   resolution: "log-symbols@npm:4.1.0"
@@ -11828,14 +11844,15 @@ __metadata:
   languageName: node
   linkType: hard
 
-"log-update@npm:^2.3.0":
-  version: 2.3.0
-  resolution: "log-update@npm:2.3.0"
+"log-update@npm:^4.0.0":
+  version: 4.0.0
+  resolution: "log-update@npm:4.0.0"
   dependencies:
-    ansi-escapes: ^3.0.0
-    cli-cursor: ^2.0.0
-    wrap-ansi: ^3.0.1
-  checksum: 84fd8e93bfc316eb6ca479a37743f2edcb7563fe5b9161205ce2980f0b3c822717b8f8f1871369697fcb0208521d7b8d00750c594edc3f8a8273dd8b48dd14a3
+    ansi-escapes: ^4.3.0
+    cli-cursor: ^3.1.0
+    slice-ansi: ^4.0.0
+    wrap-ansi: ^6.2.0
+  checksum: ae2f85bbabc1906034154fb7d4c4477c79b3e703d22d78adee8b3862fa913942772e7fa11713e3d96fb46de4e3cabefbf5d0a544344f03b58d3c4bff52aa9eb2
   languageName: node
   linkType: hard
 
@@ -12432,7 +12449,7 @@ __metadata:
   languageName: node
   linkType: hard
 
-"minimist@npm:^1.1.1, minimist@npm:^1.1.3, minimist@npm:^1.2.0, minimist@npm:^1.2.5":
+"minimist@npm:^1.1.1, minimist@npm:^1.1.3, minimist@npm:^1.2.0, minimist@npm:^1.2.5, minimist@npm:^1.2.8":
   version: 1.2.8
   resolution: "minimist@npm:1.2.8"
   checksum: 75a6d645fb122dad29c06a7597bddea977258957ed88d7a6df59b5cd3fe4a527e253e9bbf2e783e4b73657f9098b96a5fe96ab8a113655d4109108577ecf85b0
@@ -12602,7 +12619,7 @@ __metadata:
   languageName: node
   linkType: hard
 
-"mkdirp@npm:>=0.5 0, mkdirp@npm:^0.5.1, mkdirp@npm:^0.5.3, mkdirp@npm:^0.5.4, mkdirp@npm:^0.5.5, mkdirp@npm:~0.5.1":
+"mkdirp@npm:>=0.5 0, mkdirp@npm:^0.5.1, mkdirp@npm:^0.5.3, mkdirp@npm:^0.5.5, mkdirp@npm:~0.5.1":
   version: 0.5.5
   resolution: "mkdirp@npm:0.5.5"
   dependencies:
@@ -12622,7 +12639,7 @@ __metadata:
   languageName: node
   linkType: hard
 
-"moment@npm:^2.27.0, moment@npm:^2.29.4":
+"moment@npm:^2.29.4":
   version: 2.29.4
   resolution: "moment@npm:2.29.4"
   checksum: 0ec3f9c2bcba38dc2451b1daed5daded747f17610b92427bebe1d08d48d8b7bdd8d9197500b072d14e326dd0ccf3e326b9e3d07c5895d3d49e39b6803b76e80e
@@ -12722,6 +12739,15 @@ __metadata:
   languageName: node
   linkType: hard
 
+"nanoid@npm:^3.3.6":
+  version: 3.3.7
+  resolution: "nanoid@npm:3.3.7"
+  bin:
+    nanoid: bin/nanoid.cjs
+  checksum: d36c427e530713e4ac6567d488b489a36582ef89da1d6d4e3b87eded11eb10d7042a877958c6f104929809b2ab0bafa17652b076cdf84324aa75b30b722204f2
+  languageName: node
+  linkType: hard
+
 "nanomatch@npm:^1.2.9":
   version: 1.2.13
   resolution: "nanomatch@npm:1.2.13"
@@ -13361,22 +13387,6 @@ __metadata:
   languageName: node
   linkType: hard
 
-"onetime@npm:^1.0.0":
-  version: 1.1.0
-  resolution: "onetime@npm:1.1.0"
-  checksum: 4e9ab082cad172bd69c5f86630f55132c78e89e62b6e7abc5b4df922c3a5a397eeb88ad4810c8493a40a6ea5e54c146810ea8553db609903db3643985b301f67
-  languageName: node
-  linkType: hard
-
-"onetime@npm:^2.0.0":
-  version: 2.0.1
-  resolution: "onetime@npm:2.0.1"
-  dependencies:
-    mimic-fn: ^1.0.0
-  checksum: bb44015ac7a525d0fb43b029a583d4ad359834632b4424ca209b438aacf6d669dda81b5edfbdb42c22636e607b276ba5589f46694a729e3bc27948ce26f4cc1a
-  languageName: node
-  linkType: hard
-
 "onetime@npm:^5.1.0":
   version: 5.1.2
   resolution: "onetime@npm:5.1.2"
@@ -14871,6 +14881,17 @@ __metadata:
   languageName: node
   linkType: hard
 
+"postcss@npm:8.4.31":
+  version: 8.4.31
+  resolution: "postcss@npm:8.4.31"
+  dependencies:
+    nanoid: ^3.3.6
+    picocolors: ^1.0.0
+    source-map-js: ^1.0.2
+  checksum: 1d8611341b073143ad90486fcdfeab49edd243377b1f51834dc4f6d028e82ce5190e4f11bb2633276864503654fb7cab28e67abdc0fbf9d1f88cad4a0ff0beea
+  languageName: node
+  linkType: hard
+
 "postcss@npm:^7, postcss@npm:^7.0.0, postcss@npm:^7.0.1, postcss@npm:^7.0.14, postcss@npm:^7.0.17, postcss@npm:^7.0.2, postcss@npm:^7.0.23, postcss@npm:^7.0.27, postcss@npm:^7.0.32, postcss@npm:^7.0.5, postcss@npm:^7.0.6":
   version: 7.0.39
   resolution: "postcss@npm:7.0.39"
@@ -14895,7 +14916,7 @@ __metadata:
   languageName: node
   linkType: hard
 
-"pretty-bytes@npm:^5.1.0, pretty-bytes@npm:^5.4.1":
+"pretty-bytes@npm:^5.1.0, pretty-bytes@npm:^5.6.0":
   version: 5.6.0
   resolution: "pretty-bytes@npm:5.6.0"
   checksum: 9c082500d1e93434b5b291bd651662936b8bd6204ec9fa17d563116a192d6d86b98f6d328526b4e8d783c07d5499e2614a807520249692da9ec81564b2f439cd
@@ -15052,6 +15073,13 @@ __metadata:
   languageName: node
   linkType: hard
 
+"proxy-from-env@npm:1.0.0":
+  version: 1.0.0
+  resolution: "proxy-from-env@npm:1.0.0"
+  checksum: 292e28d1de0c315958d71d8315eb546dd3cd8c8cbc2dab7c54eeb9f5c17f421771964ad0b5e1f77011bab2305bdae42e1757ce33bdb1ccc3e87732322a8efcf1
+  languageName: node
+  linkType: hard
+
 "prr@npm:~1.0.1":
   version: 1.0.1
   resolution: "prr@npm:1.0.1"
@@ -15066,6 +15094,13 @@ __metadata:
   languageName: node
   linkType: hard
 
+"psl@npm:^1.1.33":
+  version: 1.9.0
+  resolution: "psl@npm:1.9.0"
+  checksum: 20c4277f640c93d393130673f392618e9a8044c6c7bf61c53917a0fddb4952790f5f362c6c730a9c32b124813e173733f9895add8d26f566ed0ea0654b2e711d
+  languageName: node
+  linkType: hard
+
 "public-encrypt@npm:^4.0.0":
   version: 4.0.3
   resolution: "public-encrypt@npm:4.0.3"
@@ -15139,6 +15174,15 @@ __metadata:
   languageName: node
   linkType: hard
 
+"qs@npm:6.10.4":
+  version: 6.10.4
+  resolution: "qs@npm:6.10.4"
+  dependencies:
+    side-channel: ^1.0.4
+  checksum: 31e4fedd759d01eae52dde6692abab175f9af3e639993c5caaa513a2a3607b34d8058d3ae52ceeccf37c3025f22ed5e90e9ddd6c2537e19c0562ddd10dc5b1eb
+  languageName: node
+  linkType: hard
+
 "qs@npm:6.7.0":
   version: 6.7.0
   resolution: "qs@npm:6.7.0"
@@ -15225,13 +15269,6 @@ __metadata:
   languageName: node
   linkType: hard
 
-"ramda@npm:~0.26.1":
-  version: 0.26.1
-  resolution: "ramda@npm:0.26.1"
-  checksum: 19c2730e44c129538151ae034c89be9b2c6a4ccc7c65cff57497418bc532ce09282f98cd927c39b0b03c6bc3f1d1a12d822b7b07f96a1634f4958a6c05b7b384
-  languageName: node
-  linkType: hard
-
 "randexp@npm:0.4.6":
   version: 0.4.6
   resolution: "randexp@npm:0.4.6"
@@ -15450,6 +15487,13 @@ __metadata:
   languageName: node
   linkType: hard
 
+"react-is@npm:^18.2.0":
+  version: 18.2.0
+  resolution: "react-is@npm:18.2.0"
+  checksum: e72d0ba81b5922759e4aff17e0252bd29988f9642ed817f56b25a3e217e13eea8a7f2322af99a06edb779da12d5d636e9fda473d620df9a3da0df2a74141d53e
+  languageName: node
+  linkType: hard
+
 "react-lifecycles-compat@npm:^3.0.2, react-lifecycles-compat@npm:^3.0.4":
   version: 3.0.4
   resolution: "react-lifecycles-compat@npm:3.0.4"
@@ -15457,6 +15501,19 @@ __metadata:
   languageName: node
   linkType: hard
 
+"react-loader-spinner@npm:^6.1.6":
+  version: 6.1.6
+  resolution: "react-loader-spinner@npm:6.1.6"
+  dependencies:
+    react-is: ^18.2.0
+    styled-components: ^6.1.2
+  peerDependencies:
+    react: ^16.0.0 || ^17.0.0 || ^18.0.0
+    react-dom: ^16.0.0 || ^17.0.0 || ^18.0.0
+  checksum: 07fbb2de7aaf9348c4c67116e25100a0a9511e51cf45be69948d618113361059a9a9688d87c142cebd80dcf6832a91f0eee7f4b303d106bd6677c51caa6aa5e3
+  languageName: node
+  linkType: hard
+
 "react-redux@npm:5.0.7":
   version: 5.0.7
   resolution: "react-redux@npm:5.0.7"
@@ -16411,26 +16468,6 @@ __metadata:
   languageName: node
   linkType: hard
 
-"restore-cursor@npm:^1.0.1":
-  version: 1.0.1
-  resolution: "restore-cursor@npm:1.0.1"
-  dependencies:
-    exit-hook: ^1.0.0
-    onetime: ^1.0.0
-  checksum: e40bd1a540d69970341fc734dfada908815a44f91903211f34d32c47da33f6e7824bbc97f6e76aff387137d6b2a1ada3d3d2dc1b654b8accdc8ed5721c46cbfa
-  languageName: node
-  linkType: hard
-
-"restore-cursor@npm:^2.0.0":
-  version: 2.0.0
-  resolution: "restore-cursor@npm:2.0.0"
-  dependencies:
-    onetime: ^2.0.0
-    signal-exit: ^3.0.2
-  checksum: 482e13d02d834b6e5e3aa90304a8b5e840775d6f06916cc92a50038adf9f098dcc72405b567da8a37e137ae40ad3e31896fa3136ae62f7a426c2fbf53d036536
-  languageName: node
-  linkType: hard
-
 "restore-cursor@npm:^3.1.0":
   version: 3.1.0
   resolution: "restore-cursor@npm:3.1.0"
@@ -16479,6 +16516,13 @@ __metadata:
   languageName: node
   linkType: hard
 
+"rfdc@npm:^1.3.0":
+  version: 1.3.1
+  resolution: "rfdc@npm:1.3.1"
+  checksum: d5d1e930aeac7e0e0a485f97db1356e388bdbeff34906d206fe524dd5ada76e95f186944d2e68307183fdc39a54928d4426bbb6734851692cfe9195efba58b79
+  languageName: node
+  linkType: hard
+
 "rgb-regex@npm:^1.0.1":
   version: 1.0.1
   resolution: "rgb-regex@npm:1.0.1"
@@ -16578,7 +16622,7 @@ __metadata:
   languageName: node
   linkType: hard
 
-"rxjs@npm:^6.3.3, rxjs@npm:^6.5.3, rxjs@npm:^6.5.5, rxjs@npm:^6.6.0":
+"rxjs@npm:^6.5.3, rxjs@npm:^6.5.5, rxjs@npm:^6.6.0":
   version: 6.6.7
   resolution: "rxjs@npm:6.6.7"
   dependencies:
@@ -16587,6 +16631,15 @@ __metadata:
   languageName: node
   linkType: hard
 
+"rxjs@npm:^7.5.1":
+  version: 7.8.1
+  resolution: "rxjs@npm:7.8.1"
+  dependencies:
+    tslib: ^2.1.0
+  checksum: de4b53db1063e618ec2eca0f7965d9137cabe98cf6be9272efe6c86b47c17b987383df8574861bcced18ebd590764125a901d5506082be84a8b8e364bf05f119
+  languageName: node
+  linkType: hard
+
 "safe-buffer@npm:5.1.2, safe-buffer@npm:~5.1.0, safe-buffer@npm:~5.1.1":
   version: 5.1.2
   resolution: "safe-buffer@npm:5.1.2"
@@ -16827,6 +16880,17 @@ __metadata:
   languageName: node
   linkType: hard
 
+"semver@npm:^7.5.3":
+  version: 7.6.0
+  resolution: "semver@npm:7.6.0"
+  dependencies:
+    lru-cache: ^6.0.0
+  bin:
+    semver: bin/semver.js
+  checksum: 7427f05b70786c696640edc29fdd4bc33b2acf3bbe1740b955029044f80575fc664e1a512e4113c3af21e767154a94b4aa214bf6cd6e42a1f6dba5914e0b208c
+  languageName: node
+  linkType: hard
+
 "send@npm:0.17.1":
   version: 0.17.1
   resolution: "send@npm:0.17.1"
@@ -16957,7 +17021,7 @@ __metadata:
   languageName: node
   linkType: hard
 
-"shallowequal@npm:^1.0.2":
+"shallowequal@npm:1.1.0, shallowequal@npm:^1.0.2":
   version: 1.1.0
   resolution: "shallowequal@npm:1.1.0"
   checksum: f4c1de0837f106d2dbbfd5d0720a5d059d1c66b42b580965c8f06bb1db684be8783538b684092648c981294bf817869f743a066538771dbecb293df78f765e00
@@ -17094,13 +17158,6 @@ __metadata:
   languageName: node
   linkType: hard
 
-"slice-ansi@npm:0.0.4":
-  version: 0.0.4
-  resolution: "slice-ansi@npm:0.0.4"
-  checksum: 481d969c6aa771b27d7baacd6fe321751a0b9eb410274bda10ca81ea641bbfe747e428025d6d8f15bd635fdcfd57e8b2d54681ee6b0ce0c40f78644b144759e3
-  languageName: node
-  linkType: hard
-
 "slice-ansi@npm:^2.1.0":
   version: 2.1.0
   resolution: "slice-ansi@npm:2.1.0"
@@ -17112,6 +17169,28 @@ __metadata:
   languageName: node
   linkType: hard
 
+"slice-ansi@npm:^3.0.0":
+  version: 3.0.0
+  resolution: "slice-ansi@npm:3.0.0"
+  dependencies:
+    ansi-styles: ^4.0.0
+    astral-regex: ^2.0.0
+    is-fullwidth-code-point: ^3.0.0
+  checksum: 5ec6d022d12e016347e9e3e98a7eb2a592213a43a65f1b61b74d2c78288da0aded781f665807a9f3876b9daa9ad94f64f77d7633a0458876c3a4fdc4eb223f24
+  languageName: node
+  linkType: hard
+
+"slice-ansi@npm:^4.0.0":
+  version: 4.0.0
+  resolution: "slice-ansi@npm:4.0.0"
+  dependencies:
+    ansi-styles: ^4.0.0
+    astral-regex: ^2.0.0
+    is-fullwidth-code-point: ^3.0.0
+  checksum: 4a82d7f085b0e1b070e004941ada3c40d3818563ac44766cca4ceadd2080427d337554f9f99a13aaeb3b4a94d9964d9466c807b3d7b7541d1ec37ee32d308756
+  languageName: node
+  linkType: hard
+
 "smart-buffer@npm:^4.2.0":
   version: 4.2.0
   resolution: "smart-buffer@npm:4.2.0"
@@ -17249,6 +17328,13 @@ __metadata:
   languageName: node
   linkType: hard
 
+"source-map-js@npm:^1.0.2":
+  version: 1.0.2
+  resolution: "source-map-js@npm:1.0.2"
+  checksum: c049a7fc4deb9a7e9b481ae3d424cc793cb4845daa690bc5a05d428bf41bf231ced49b4cf0c9e77f9d42fdb3d20d6187619fc586605f5eabe995a316da8d377c
+  languageName: node
+  linkType: hard
+
 "source-map-resolve@npm:^0.5.0, source-map-resolve@npm:^0.5.2":
   version: 0.5.3
   resolution: "source-map-resolve@npm:0.5.3"
@@ -17402,6 +17488,27 @@ __metadata:
   languageName: node
   linkType: hard
 
+"sshpk@npm:^1.14.1":
+  version: 1.18.0
+  resolution: "sshpk@npm:1.18.0"
+  dependencies:
+    asn1: ~0.2.3
+    assert-plus: ^1.0.0
+    bcrypt-pbkdf: ^1.0.0
+    dashdash: ^1.12.0
+    ecc-jsbn: ~0.1.1
+    getpass: ^0.1.1
+    jsbn: ~0.1.0
+    safer-buffer: ^2.0.2
+    tweetnacl: ~0.14.0
+  bin:
+    sshpk-conv: bin/sshpk-conv
+    sshpk-sign: bin/sshpk-sign
+    sshpk-verify: bin/sshpk-verify
+  checksum: 01d43374eee3a7e37b3b82fdbecd5518cbb2e47ccbed27d2ae30f9753f22bd6ffad31225cb8ef013bc3fb7785e686cea619203ee1439a228f965558c367c3cfa
+  languageName: node
+  linkType: hard
+
 "sshpk@npm:^1.7.0":
   version: 1.16.1
   resolution: "sshpk@npm:1.16.1"
@@ -17605,16 +17712,6 @@ __metadata:
   languageName: node
   linkType: hard
 
-"string-width@npm:^2.1.1":
-  version: 2.1.1
-  resolution: "string-width@npm:2.1.1"
-  dependencies:
-    is-fullwidth-code-point: ^2.0.0
-    strip-ansi: ^4.0.0
-  checksum: d6173abe088c615c8dffaf3861dc5d5906ed3dc2d6fd67ff2bd2e2b5dce7fd683c5240699cf0b1b8aa679a3b3bd6b28b5053c824cb89b813d7f6541d8f89064a
-  languageName: node
-  linkType: hard
-
 "string-width@npm:^3.0.0, string-width@npm:^3.1.0":
   version: 3.1.0
   resolution: "string-width@npm:3.1.0"
@@ -17835,6 +17932,26 @@ __metadata:
   languageName: node
   linkType: hard
 
+"styled-components@npm:^6.1.2":
+  version: 6.1.8
+  resolution: "styled-components@npm:6.1.8"
+  dependencies:
+    "@emotion/is-prop-valid": 1.2.1
+    "@emotion/unitless": 0.8.0
+    "@types/stylis": 4.2.0
+    css-to-react-native: 3.2.0
+    csstype: 3.1.2
+    postcss: 8.4.31
+    shallowequal: 1.1.0
+    stylis: 4.3.1
+    tslib: 2.5.0
+  peerDependencies:
+    react: ">= 16.8.0"
+    react-dom: ">= 16.8.0"
+  checksum: 367858097ca57911cc310ddf95d16fed162fbb1d2f187366b33ce5e6e22c324f9bcc7206686624a3edd15e3e9605875c8c041ac5ffb430bbee98f1ad0be71604
+  languageName: node
+  linkType: hard
+
 "stylehacks@npm:^4.0.0":
   version: 4.0.3
   resolution: "stylehacks@npm:4.0.3"
@@ -17846,6 +17963,13 @@ __metadata:
   languageName: node
   linkType: hard
 
+"stylis@npm:4.3.1":
+  version: 4.3.1
+  resolution: "stylis@npm:4.3.1"
+  checksum: d365f1b008677b2147e8391e9cf20094a4202a5f9789562e7d9d0a3bd6f0b3067d39e8fd17cce5323903a56f6c45388e3d839e9c0bb5a738c91726992b14966d
+  languageName: node
+  linkType: hard
+
 "supports-color@npm:^2.0.0":
   version: 2.0.0
   resolution: "supports-color@npm:2.0.0"
@@ -17871,7 +17995,7 @@ __metadata:
   languageName: node
   linkType: hard
 
-"supports-color@npm:^7.0.0, supports-color@npm:^7.1.0, supports-color@npm:^7.2.0":
+"supports-color@npm:^7.0.0, supports-color@npm:^7.1.0":
   version: 7.2.0
   resolution: "supports-color@npm:7.2.0"
   dependencies:
@@ -17880,6 +18004,15 @@ __metadata:
   languageName: node
   linkType: hard
 
+"supports-color@npm:^8.1.1":
+  version: 8.1.1
+  resolution: "supports-color@npm:8.1.1"
+  dependencies:
+    has-flag: ^4.0.0
+  checksum: c052193a7e43c6cdc741eb7f378df605636e01ad434badf7324f17fb60c69a880d8d8fcdcb562cf94c2350e57b937d7425ab5b8326c67c2adc48f7c87c1db406
+  languageName: node
+  linkType: hard
+
 "svg-parser@npm:^2.0.0":
   version: 2.0.4
   resolution: "svg-parser@npm:2.0.4"
@@ -18058,7 +18191,7 @@ __metadata:
   languageName: node
   linkType: hard
 
-"through@npm:^2.3.6":
+"through@npm:^2.3.6, through@npm:^2.3.8":
   version: 2.3.8
   resolution: "through@npm:2.3.8"
   checksum: a38c3e059853c494af95d50c072b83f8b676a9ba2818dcc5b108ef252230735c54e0185437618596c790bbba8fcdaef5b290405981ffa09dce67b1f1bf190cbd
@@ -18221,6 +18354,18 @@ __metadata:
   languageName: node
   linkType: hard
 
+"tough-cookie@npm:^4.1.3":
+  version: 4.1.3
+  resolution: "tough-cookie@npm:4.1.3"
+  dependencies:
+    psl: ^1.1.33
+    punycode: ^2.1.1
+    universalify: ^0.2.0
+    url-parse: ^1.5.3
+  checksum: c9226afff36492a52118432611af083d1d8493a53ff41ec4ea48e5b583aec744b989e4280bcf476c910ec1525a89a4a0f1cae81c08b18fb2ec3a9b3a72b91dcc
+  languageName: node
+  linkType: hard
+
 "tr46@npm:^1.0.1":
   version: 1.0.1
   resolution: "tr46@npm:1.0.1"
@@ -18297,6 +18442,13 @@ __metadata:
   languageName: node
   linkType: hard
 
+"tslib@npm:2.5.0":
+  version: 2.5.0
+  resolution: "tslib@npm:2.5.0"
+  checksum: ae3ed5f9ce29932d049908ebfdf21b3a003a85653a9a140d614da6b767a93ef94f460e52c3d787f0e4f383546981713f165037dc2274df212ea9f8a4541004e1
+  languageName: node
+  linkType: hard
+
 "tslib@npm:^1.8.0, tslib@npm:^1.8.1, tslib@npm:^1.9.0, tslib@npm:^1.9.3":
   version: 1.14.1
   resolution: "tslib@npm:1.14.1"
@@ -18311,6 +18463,13 @@ __metadata:
   languageName: node
   linkType: hard
 
+"tslib@npm:^2.1.0":
+  version: 2.6.2
+  resolution: "tslib@npm:2.6.2"
+  checksum: 329ea56123005922f39642318e3d1f0f8265d1e7fcb92c633e0809521da75eeaca28d2cf96d7248229deb40e5c19adf408259f4b9640afd20d13aecc1430f3ad
+  languageName: node
+  linkType: hard
+
 "tslint-etc@npm:1.6.0":
   version: 1.6.0
   resolution: "tslint-etc@npm:1.6.0"
@@ -18632,6 +18791,13 @@ __metadata:
   languageName: node
   linkType: hard
 
+"universalify@npm:^0.2.0":
+  version: 0.2.0
+  resolution: "universalify@npm:0.2.0"
+  checksum: e86134cb12919d177c2353196a4cc09981524ee87abf621f7bc8d249dbbbebaec5e7d1314b96061497981350df786e4c5128dbf442eba104d6e765bc260678b5
+  languageName: node
+  linkType: hard
+
 "universalify@npm:^2.0.0":
   version: 2.0.0
   resolution: "universalify@npm:2.0.0"
@@ -18724,7 +18890,7 @@ __metadata:
   languageName: node
   linkType: hard
 
-"url-parse@npm:^1.4.3":
+"url-parse@npm:^1.4.3, url-parse@npm:^1.5.3":
   version: 1.5.10
   resolution: "url-parse@npm:1.5.10"
   dependencies:
@@ -18843,6 +19009,15 @@ __metadata:
   languageName: node
   linkType: hard
 
+"uuid@npm:^8.3.2":
+  version: 8.3.2
+  resolution: "uuid@npm:8.3.2"
+  bin:
+    uuid: dist/bin/uuid
+  checksum: 5575a8a75c13120e2f10e6ddc801b2c7ed7d8f3c8ac22c7ed0c7b2ba6383ec0abda88c905085d630e251719e0777045ae3236f04c812184b7c765f63a70e58df
+  languageName: node
+  linkType: hard
+
 "v8-compile-cache@npm:^2.0.3":
   version: 2.3.0
   resolution: "v8-compile-cache@npm:2.3.0"
@@ -19485,16 +19660,6 @@ __metadata:
   languageName: node
   linkType: hard
 
-"wrap-ansi@npm:^3.0.1":
-  version: 3.0.1
-  resolution: "wrap-ansi@npm:3.0.1"
-  dependencies:
-    string-width: ^2.1.1
-    strip-ansi: ^4.0.0
-  checksum: 1ceed09986d58cf6e0b88ea29084e70ef3463b3b891a04a8dbf245abb1fb678358986bdc43e12bcc92a696ced17327d079bc796f4d709d15aad7b8c1a7e7c83a
-  languageName: node
-  linkType: hard
-
 "wrap-ansi@npm:^5.1.0":
   version: 5.1.0
   resolution: "wrap-ansi@npm:5.1.0"
@@ -19506,6 +19671,17 @@ __metadata:
   languageName: node
   linkType: hard
 
+"wrap-ansi@npm:^6.2.0":
+  version: 6.2.0
+  resolution: "wrap-ansi@npm:6.2.0"
+  dependencies:
+    ansi-styles: ^4.0.0
+    string-width: ^4.1.0
+    strip-ansi: ^6.0.0
+  checksum: 6cd96a410161ff617b63581a08376f0cb9162375adeb7956e10c8cd397821f7eb2a6de24eb22a0b28401300bf228c86e50617cd568209b5f6775b93c97d2fe3a
+  languageName: node
+  linkType: hard
+
 "wrap-ansi@npm:^7.0.0":
   version: 7.0.0
   resolution: "wrap-ansi@npm:7.0.0"
index 778b752fd40a8d740a137c2c19c85c855aef4a0a..d8b240883169e72b6914a5f23ca7d62f8aef9447 100644 (file)
@@ -79,10 +79,10 @@ FROM debian:11
 ENV DEBIAN_FRONTEND noninteractive
 
 # The arvbox-specific dependencies are
-#  gnupg2 runit python3-pip python3-setuptools python3-yaml shellinabox netcat-openbsd less
+#  gnupg2 runit python3-dev python3-venv shellinabox netcat-openbsd less
 RUN apt-get update && \
     apt-get -yq --no-install-recommends -o Acquire::Retries=6 install \
-    gnupg2 runit python3-pip python3-setuptools python3-yaml shellinabox netcat-openbsd less vim-tiny && \
+    gnupg2 runit python3-dev python3-venv shellinabox netcat-openbsd less vim-tiny && \
     apt-get clean
 
 ENV GOPATH /var/lib/gopath
@@ -93,6 +93,17 @@ COPY --from=base $GOPATH/bin/arvados-server $GOPATH/bin/arvados-server
 RUN $GOPATH/bin/arvados-server --version
 RUN $GOPATH/bin/arvados-server install -type test
 
+# Set up a virtualenv for all Python tools in arvbox.
+# This is used mainly by the `sdk` service, but `doc` and internal scripts
+# also rely on it.
+# 1. Install wheel just to modernize the virtualenv.
+# 2. Install setuptools as an sdk build dependency; PyYAML for all tests
+#    and yml_override.py; and pdoc for the doc service.
+# Everything else is installed by the sdk service on boot.
+RUN python3 -m venv /opt/arvados-py \
+ && /opt/arvados-py/bin/pip install --no-cache-dir wheel \
+ && /opt/arvados-py/bin/pip install --no-cache-dir setuptools PyYAML pdoc
+
 RUN /etc/init.d/postgresql start && \
     su postgres -c 'dropuser arvados' && \
     su postgres -c 'createuser -s arvbox' && \
index 9c5df83c0e91b25f523531a5512b7efa00c2370f..54ec9403ad9135179682eca94817b7be6973d6e9 100644 (file)
@@ -6,7 +6,7 @@ export RUBY_VERSION=3.2.2
 export BUNDLER_VERSION=2.4.22
 
 export DEBIAN_FRONTEND=noninteractive
-export PATH=${PATH}:/usr/local/go/bin:/var/lib/arvados/bin:/usr/src/arvados/sdk/cli/binstubs
+export PATH=${PATH}:/usr/local/go/bin:/var/lib/arvados/bin:/opt/arvados-py/bin:/usr/src/arvados/sdk/cli/binstubs
 export npm_config_cache=/var/lib/npm
 export npm_config_cache_min=Infinity
 export R_LIBS=/var/lib/Rlibs
@@ -101,23 +101,21 @@ bundler_binstubs() {
     flock $GEMLOCK $BUNDLER binstubs --all
 }
 
-PYCMD=""
-pip_install() {
-    pushd /var/lib/pip
-    for p in $(ls http*.tar.gz) $(ls http*.tar.bz2) $(ls http*.whl) $(ls http*.zip) ; do
-        if test -f $p ; then
-            ln -sf $p $(echo $p | sed 's/.*%2F\(.*\)/\1/')
-        fi
+# Usage: Pass any number of directories. Relative directories will be taken as
+# relative to /usr/src/arvados. This function will build an sdist from each,
+# then pip install them all in the arvbox virtualenv.
+pip_install_sdist() {
+    local sdist_dir="$(mktemp --directory --tmpdir py_sdist.XXXXXXXX)"
+    trap 'rm -rf "$sdist_dir"' RETURN
+    local src_dir
+    for src_dir in "$@"; do
+        case "$src_dir" in
+            /*) ;;
+            *) src_dir="/usr/src/arvados/$src_dir" ;;
+        esac
+        env -C "$src_dir" /opt/arvados-py/bin/python3 setup.py sdist --dist-dir="$sdist_dir" \
+            || return
     done
-    popd
-
-    if [ "$PYCMD" = "python3" ]; then
-        if ! pip3 install --prefix /usr/local --no-index --find-links /var/lib/pip $1 ; then
-            pip3 install --prefix /usr/local $1
-        fi
-    else
-        if ! pip install --no-index --find-links /var/lib/pip $1 ; then
-            pip install $1
-        fi
-    fi
+    /opt/arvados-py/bin/pip install "$sdist_dir"/* || return
+    return
 }
index 4cafd8c09c2f6fbbd0758b53a9a0a1368765159d..9224b80f52dafa5321d4f85fcd38840f6cfa8c5b 100755 (executable)
@@ -14,6 +14,7 @@ if ! grep "^arvbox:" /etc/passwd >/dev/null 2>/dev/null ; then
     mkdir -p $ARVADOS_CONTAINER_PATH/git \
           /var/lib/passenger /var/lib/gopath \
           /var/lib/pip /var/lib/npm
+    /opt/arvados-py/bin/pip config --site set global.cache-dir /var/lib/pip
 
     if test -z "$ARVBOX_HOME" ; then
         ARVBOX_HOME=$ARVADOS_CONTAINER_PATH
@@ -31,7 +32,7 @@ if ! grep "^arvbox:" /etc/passwd >/dev/null 2>/dev/null ; then
     useradd --groups docker crunch
 
     if [[ "$1" != --no-chown ]] ; then
-        chown arvbox:arvbox -R /usr/local $ARVADOS_CONTAINER_PATH \
+        chown arvbox:arvbox -R /usr/local /opt/arvados-py $ARVADOS_CONTAINER_PATH \
               /var/lib/passenger /var/lib/postgresql \
               /var/lib/nginx /var/log/nginx /etc/ssl/private \
               /var/lib/gopath /var/lib/pip /var/lib/npm
@@ -43,7 +44,7 @@ if ! grep "^arvbox:" /etc/passwd >/dev/null 2>/dev/null ; then
     echo "arvbox    ALL=(crunch) NOPASSWD: ALL" >> /etc/sudoers
 
     cat <<EOF > /etc/profile.d/paths.sh
-export PATH=/var/lib/arvados/bin:/usr/local/bin:/usr/bin:/bin:/usr/src/arvados/sdk/cli/binstubs
+export PATH=/var/lib/arvados/bin:/usr/local/bin:/usr/bin:/bin:/opt/arvados-py/bin:/usr/src/arvados/sdk/cli/binstubs
 export npm_config_cache=/var/lib/npm
 export npm_config_cache_min=Infinity
 export R_LIBS=/var/lib/Rlibs
index ab046b11d42751d3939cc4aaee6cba73f055f598..cb44b984b72d72641551f7c5a0a71efdd3cdfed5 100755 (executable)
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/opt/arvados-py/bin/python3
 # Copyright (C) The Arvados Authors. All rights reserved.
 #
 # SPDX-License-Identifier: AGPL-3.0
index c40e1175edbe9e55326109cb666c630c4eda621e..0a04918012aa65952bd97d221755c37b86c624ed 100755 (executable)
@@ -18,18 +18,15 @@ cd /usr/src/arvados/doc
 run_bundler --without=development
 
 # Generating the Python and R docs is expensive, so for development if the file
-# "no-sdk" exists then skip the Python and R stuff.
-if [[ ! -f /usr/src/arvados/doc/no-sdk ]] ; then
-    cd /usr/src/arvados/sdk/R
-    R --quiet --vanilla --file=install_deps.R
-
-    export PYCMD=python3
-    pip_install pdoc
+# "no-sdk" exists then skip installing R stuff.
+if [[ ! -f no-sdk ]] ; then
+    env -C ../sdk/R R --quiet --vanilla --file=install_deps.R
 fi
 
 if test "$1" = "--only-deps" ; then
     exit
 fi
 
-cd /usr/src/arvados/doc
+# Activate the arvbox virtualenv so we can import pdoc for PySDK doc generation.
+. /opt/arvados-py/bin/activate
 flock $GEMLOCK bundle exec rake generate baseurl=http://$localip:${services[doc]} arvados_api_host=$localip:${services[controller-ssl]} arvados_workbench_host=http://$localip
index 5bff5610529d43688340d0181ae5f8342437709f..216066530ee15ac0fc2a899ec4cfcea7c2769f4c 100755 (executable)
@@ -8,32 +8,10 @@ set -eux -o pipefail
 
 . /usr/local/lib/arvbox/common.sh
 
-mkdir -p ~/.pip /var/lib/pip
-cat > ~/.pip/pip.conf <<EOF
-[global]
-download_cache = /var/lib/pip
-EOF
-
 cd /usr/src/arvados/sdk/ruby
-run_bundler
-bundler_binstubs
+run_bundler --binstubs=binstubs
 
 cd /usr/src/arvados/sdk/cli
-run_bundler
-bundler_binstubs
-
-export PYCMD=python3
-
-pip_install wheel
-
-cd /usr/src/arvados/sdk/python
-$PYCMD setup.py sdist
-pip_install $(ls dist/arvados-python-client-*.tar.gz | tail -n1)
-
-cd /usr/src/arvados/services/fuse
-$PYCMD setup.py sdist
-pip_install $(ls dist/arvados_fuse-*.tar.gz | tail -n1)
+run_bundler --binstubs=binstubs
 
-cd /usr/src/arvados/sdk/cwl
-$PYCMD setup.py sdist
-pip_install $(ls dist/arvados-cwl-runner-*.tar.gz | tail -n1)
+pip_install_sdist sdk/python services/fuse sdk/cwl
index 5e952b8d5c7e167c31ee3f5fce2cd65eb8bbd810..2079bb1d0beb7ea44f3a02856f389e2f3fa2fa20 100755 (executable)
@@ -16,8 +16,7 @@ if test "$1" != "--only-deps" ; then
 fi
 
 cd /usr/src/arvados/services/login-sync
-run_bundler
-bundler_binstubs
+run_bundler --binstubs=binstubs
 
 if test "$1" = "--only-deps" ; then
     exit
index 7f35ac1d686984fbbc51101f8aa1a508e8ae28e0..5f9ee68e4fc7c2480e1ed4f4bb0ca61b28c0edfc 100755 (executable)
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/opt/arvados-py/bin/python3
 # Copyright (C) The Arvados Authors. All rights reserved.
 #
 # SPDX-License-Identifier: AGPL-3.0
@@ -10,12 +10,12 @@ fn = sys.argv[1]
 
 try:
     with open(fn+".override") as f:
-        b = yaml.load(f)
+        b = yaml.safe_load(f)
 except IOError:
     exit()
 
 with open(fn) as f:
-    a = yaml.load(f)
+    a = yaml.safe_load(f)
 
 def recursiveMerge(a, b):
     if isinstance(a, dict) and isinstance(b, dict):
@@ -27,4 +27,4 @@ def recursiveMerge(a, b):
         return b
 
 with open(fn, "w") as f:
-    yaml.dump(recursiveMerge(a, b), f)
+    yaml.safe_dump(recursiveMerge(a, b), f)
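The yml_override.py change above swaps yaml.load/yaml.dump for their safe_ variants, which only construct plain YAML types. A quick illustration of the difference (not part of this commit; the object tag in the comment is a standard PyYAML example):

    import yaml

    yaml.safe_load("ram: 2500")   # -> {'ram': 2500}
    # Arbitrary-object tags are rejected instead of being constructed:
    # yaml.safe_load("!!python/object/apply:os.system ['true']")
    #   raises yaml.constructor.ConstructorError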
index a9323214ce1096a0a1cd0cccbf93bf5dad1bb221..370c3f3a3a2794b4889adc545db556a56958d3e6 100644 (file)
@@ -80,17 +80,17 @@ wait_for_apt_locks && $SUDO DEBIAN_FRONTEND=noninteractive apt-get -qq --yes ins
 dockerversion=5:20.10.13~3-0
 if [[ "$DIST" =~ ^debian ]]; then
   family="debian"
-  if [ "$DIST" == "debian10" ]; then
-    distro="buster"
-  elif [ "$DIST" == "debian11" ]; then
+  if [ "$DIST" == "debian11" ]; then
     distro="bullseye"
+  elif [ "$DIST" == "debian12" ]; then
+    distro="bookworm"
   fi
 elif [[ "$DIST" =~ ^ubuntu ]]; then
   family="ubuntu"
-  if [ "$DIST" == "ubuntu1804" ]; then
-    distro="bionic"
-  elif [ "$DIST" == "ubuntu2004" ]; then
+  if [ "$DIST" == "ubuntu2004" ]; then
     distro="focal"
+  elif [ "$DIST" == "ubuntu2204" ]; then
+    distro="jammy"
   fi
 else
   echo "Unsupported distribution $DIST"
@@ -158,7 +158,7 @@ else
   unzip -q /tmp/awscliv2.zip -d /tmp && $SUDO /tmp/aws/install
   # Pinned to v2.4.5 because we apply a patch below
   #export EBS_AUTOSCALE_VERSION=$(curl --silent "https://api.github.com/repos/awslabs/amazon-ebs-autoscale/releases/latest" | jq -r .tag_name)
-  export EBS_AUTOSCALE_VERSION="5ca6e24e05787b8ae1184c2a10db80053ddd3038"
+  export EBS_AUTOSCALE_VERSION="ee323f0751c2b6f733692e805b51b9bf3c251bac"
   cd /opt && $SUDO git clone https://github.com/arvados/amazon-ebs-autoscale.git
   cd /opt/amazon-ebs-autoscale && $SUDO git checkout $EBS_AUTOSCALE_VERSION
 
@@ -186,8 +186,7 @@ if [ "$NVIDIA_GPU_SUPPORT" == "1" ]; then
   $SUDO apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/$DIST/x86_64/3bf863cc.pub
   $SUDO apt-get -y install software-properties-common
   $SUDO add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/$DIST/x86_64/ /"
-  # Ubuntu 18.04's add-apt-repository does not understand 'contrib'
-  $SUDO add-apt-repository contrib || true
+  $SUDO add-apt-repository contrib
   $SUDO apt-get update
   $SUDO apt-get -y install cuda
 
index abc63a2e9246526612f3a00c7bb2f86bcfb91a18..d9790fb45ce6e2d334d64b64d72f843e30378f33 100644 (file)
@@ -22,14 +22,21 @@ ensure_umount() {
 # First make sure docker is not using /tmp, then unmount everything under it.
 if [ -d /etc/sv/docker.io ]
 then
+  # TODO: Actually detect Docker state with runit
+  DOCKER_ACTIVE=true
   sv stop docker.io || service stop docker.io || true
 else
-  systemctl disable --now docker.service docker.socket || true
+  if systemctl --quiet is-active docker.service docker.socket; then
+    systemctl stop docker.service docker.socket || true
+    DOCKER_ACTIVE=true
+  else
+    DOCKER_ACTIVE=false
+  fi
 fi
 
 ensure_umount "$MOUNTPATH/docker/aufs"
 
-/bin/bash /opt/amazon-ebs-autoscale/install.sh -f lvm.ext4 -m $MOUNTPATH 2>&1 > /var/log/ebs-autoscale-install.log
+/bin/bash /opt/amazon-ebs-autoscale/install.sh --imdsv2 -f lvm.ext4 -m $MOUNTPATH 2>&1 > /var/log/ebs-autoscale-install.log
 
 # Make sure docker uses the big partition
 cat <<EOF > /etc/docker/daemon.json
@@ -38,13 +45,18 @@ cat <<EOF > /etc/docker/daemon.json
 }
 EOF
 
+if ! $DOCKER_ACTIVE; then
+  # Nothing else to do
+  exit 0
+fi
+
 # restart docker
 if [ -d /etc/sv/docker.io ]
 then
   ## runit
   sv up docker.io
 else
-  systemctl enable --now docker.service docker.socket
+  systemctl start docker.service docker.socket || true
 fi
 
 end=$((SECONDS+60))
index a76dc121096527101ee5c35e2434625d205252fb..726ff0cdcd4d20ff308e32d69edc7a054bd2af1b 100644 (file)
@@ -119,9 +119,16 @@ mkfs.xfs -f "$CRYPTPATH"
 # First make sure docker is not using /tmp, then unmount everything under it.
 if [ -d /etc/sv/docker.io ]
 then
+  # TODO: Actually detect Docker state with runit
+  DOCKER_ACTIVE=true
   sv stop docker.io || service stop docker.io || true
 else
-  systemctl disable --now docker.service docker.socket || true
+  if systemctl --quiet is-active docker.service docker.socket; then
+    systemctl stop docker.service docker.socket || true
+    DOCKER_ACTIVE=true
+  else
+    DOCKER_ACTIVE=false
+  fi
 fi
 
 ensure_umount "$MOUNTPATH/docker/aufs"
@@ -137,13 +144,18 @@ cat <<EOF > /etc/docker/daemon.json
 }
 EOF
 
+if ! $DOCKER_ACTIVE; then
+  # Nothing else to do
+  exit 0
+fi
+
 # restart docker
 if [ -d /etc/sv/docker.io ]
 then
   ## runit
   sv up docker.io
 else
-  systemctl enable --now docker.service docker.socket || true
+  systemctl start docker.service docker.socket || true
 fi
 
 end=$((SECONDS+60))
index ec7acb8083928f6f35f2af7835ee92f8a4a895dc..c5a1068eff9b9e54b607ff814f13734ee367e8d5 100644 (file)
@@ -7,8 +7,10 @@ import gzip
 from io import open
 import logging
 import sys
+import arvados
 
-from crunchstat_summary import logger, summarizer
+from crunchstat_summary import logger, summarizer, reader
+from crunchstat_summary._version import __version__
 
 
 class ArgumentParser(argparse.ArgumentParser):
@@ -28,9 +30,6 @@ class ArgumentParser(argparse.ArgumentParser):
             help='[Deprecated] Look up the specified container, find its container request, '
             'and read its log data from Keep (or from the Arvados event log, '
             'if the job is still running)')
-        src.add_argument(
-            '--pipeline-instance', type=str, metavar='UUID',
-            help='[Deprecated] Summarize each component of the given pipeline instance (historical pre-1.4)')
         src.add_argument(
             '--log-file', type=str,
             help='Read log data from a regular file')
@@ -46,6 +45,9 @@ class ArgumentParser(argparse.ArgumentParser):
         self.add_argument(
             '--verbose', '-v', action='count', default=0,
             help='Log more information (once for progress, twice for debug)')
+        self.add_argument('--version', action='version',
+                         version="%s %s" % (sys.argv[0], __version__),
+                         help='Print version and exit.')
 
 
 class UTF8Decode(object):
@@ -82,10 +84,9 @@ class Command(object):
         kwargs = {
             'skip_child_jobs': self.args.skip_child_jobs,
             'threads': self.args.threads,
+            'arv': arvados.api('v1')
         }
-        if self.args.pipeline_instance:
-            self.summer = summarizer.NewSummarizer(self.args.pipeline_instance, **kwargs)
-        elif self.args.job:
+        if self.args.job:
             self.summer = summarizer.NewSummarizer(self.args.job, **kwargs)
         elif self.args.container:
             self.summer = summarizer.NewSummarizer(self.args.container, **kwargs)
@@ -94,9 +95,9 @@ class Command(object):
                 fh = UTF8Decode(gzip.open(self.args.log_file))
             else:
                 fh = open(self.args.log_file, mode = 'r', encoding = 'utf-8')
-            self.summer = summarizer.Summarizer(fh, **kwargs)
+            self.summer = summarizer.Summarizer(reader.StubReader(fh), **kwargs)
         else:
-            self.summer = summarizer.Summarizer(sys.stdin, **kwargs)
+            self.summer = summarizer.Summarizer(reader.StubReader(sys.stdin), **kwargs)
         return self.summer.run()
 
     def report(self):
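With the changes above, crunchstat-summary no longer hands a bare file object to Summarizer: file and stdin input are wrapped in reader.StubReader so every log source exposes node_info(), and an API client is always passed through. A minimal sketch of a driver mirroring what the --log-file path in command.py now does (the file name and literal kwargs are illustrative, not new API):

    import sys
    import arvados
    from crunchstat_summary import reader, summarizer

    # Mirror of the --log-file code path: wrap the handle so Summarizer
    # can call node_info() on it (StubReader simply returns {}).
    with open("crunchstat.txt", mode="r", encoding="utf-8") as fh:
        summer = summarizer.Summarizer(
            reader.StubReader(fh),
            skip_child_jobs=False,
            threads=1,
            arv=arvados.api('v1'),
        )
        summer.run()
        sys.stdout.write(summer.text_report())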
index 52e5534ef179f1124c90084e68596b8a43bf08e4..76c92107042bf9663324489c6e5ed9a0d4576981 100644 (file)
@@ -40,9 +40,7 @@ window.onload = function() {
         },
     }
     chartdata.forEach(function(section, section_idx) {
-        var h1 = document.createElement('h1');
-        h1.appendChild(document.createTextNode(section.label));
-        document.body.appendChild(h1);
+        var chartDiv = document.getElementById("chart");
         section.charts.forEach(function(chart, chart_idx) {
             // Skip chart if every series has zero data points
             if (0 == chart.data.reduce(function(len, series) {
@@ -54,7 +52,7 @@ window.onload = function() {
             var div = document.createElement('div');
             div.setAttribute('id', id);
             div.setAttribute('style', 'width: 100%; height: 150px');
-            document.body.appendChild(div);
+            chartDiv.appendChild(div);
             chart.options.valueFormatter = function(y) {
             }
             chart.options.axes = {
@@ -68,6 +66,17 @@ window.onload = function() {
                     valueFormatter: fmt.iso,
                 },
             }
+            var div2 = document.createElement('div');
+            div2.setAttribute('style', 'width: 150px; height: 150px');
+            chart.options.labelsDiv = div2;
+            chart.options.labelsSeparateLines = true;
+
+            var div3 = document.createElement('div');
+            div3.setAttribute('style', 'display: flex; padding-bottom: 16px');
+            div3.appendChild(div);
+            div3.appendChild(div2);
+            chartDiv.appendChild(div3);
+
             charts[id] = new Dygraph(div, chart.data, chart.options);
         });
     });
index 8ccdbc2fcf04e45ca3ab3ec6e2270933d050ea1c..0198d765c3533df4cdeb42096fedc0cd57d20051 100644 (file)
@@ -4,6 +4,7 @@
 
 import arvados
 import itertools
+import json
 import queue
 import threading
 
@@ -11,24 +12,26 @@ from crunchstat_summary import logger
 
 
 class CollectionReader(object):
-    def __init__(self, collection_id):
+    def __init__(self, collection_id, api_client=None, collection_object=None):
         self._collection_id = collection_id
         self._label = collection_id
         self._readers = []
+        self._api_client = api_client
+        self._collection = collection_object or arvados.collection.CollectionReader(self._collection_id, api_client=self._api_client)
 
     def __str__(self):
         return self._label
 
     def __iter__(self):
         logger.debug('load collection %s', self._collection_id)
-        collection = arvados.collection.CollectionReader(self._collection_id)
-        filenames = [filename for filename in collection]
+
+        filenames = [filename for filename in self._collection]
         # Crunch2 has multiple stats files
         if len(filenames) > 1:
             filenames = ['crunchstat.txt', 'arv-mount.txt']
         for filename in filenames:
             try:
-                self._readers.append(collection.open(filename))
+                self._readers.append(self._collection.open(filename, "rt"))
             except IOError:
                 logger.warn('Unable to open %s', filename)
         self._label = "{}/{}".format(self._collection_id, filenames[0])
@@ -43,6 +46,14 @@ class CollectionReader(object):
                 reader.close()
             self._readers = []
 
+    def node_info(self):
+        try:
+            with self._collection.open("node.json", "rt") as f:
+                return json.load(f)
+        except IOError:
+            logger.warn('Unable to open node.json')
+        return {}
+
 
 class LiveLogReader(object):
     EOF = None
@@ -63,7 +74,7 @@ class LiveLogReader(object):
             ['event_type', 'in', self.event_types]]
         try:
             while True:
-                page = arvados.api().logs().index(
+                page = arvados.api().logs().list(
                     limit=1000,
                     order=['id asc'],
                     filters=filters + [['id','>',str(last_id)]],
@@ -105,3 +116,25 @@ class LiveLogReader(object):
 
     def __exit__(self, exc_type, exc_val, exc_tb):
         pass
+
+    def node_info(self):
+        return {}
+
+class StubReader(object):
+    def __init__(self, fh):
+        self.fh = fh
+
+    def __str__(self):
+        return ""
+
+    def __iter__(self):
+        return iter(self.fh)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        pass
+
+    def node_info(self):
+        return {}
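CollectionReader.node_info() above reads a node.json file from the log collection; the summarizer uses it to report the assigned instance type and to bound RAM recommendations. A hypothetical record, with key names taken from how summarizer.py consumes node_info() and values invented for illustration:

    # Hypothetical node.json contents from a container log collection:
    node_info = {
        "ProviderType": "m5.large",  # shown as "Assigned instance type"
        "Price": 0.096,              # shown as "Instance hourly price"
        "VCPUs": 2,                  # shown as "Instance VCPUs"
        "RAM": 8589934592,           # bytes
    }

    # "Maximum RAM request for this instance type" is derived from it as
    # (RAM - Containers.ReserveExtraRAM) * 0.95, reported in MiB:
    reserve_extra_ram = 268435456    # assumed cluster config value, in bytes
    max_ram_mib = (node_info["RAM"] - reserve_extra_ram) * 0.95 / 2**20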
index 2bd329719bec70300d268b06bb6abe8e3abf245c..bc41fdae33272d3df98ad8c998bf5a05db308120 100644 (file)
@@ -12,15 +12,17 @@ import itertools
 import math
 import re
 import sys
-import threading
 import _strptime
+import arvados.util
+
+from concurrent.futures import ThreadPoolExecutor
 
 from crunchstat_summary import logger
 
 # Recommend memory constraints that are this multiple of an integral
 # number of GiB. (Actual nodes tend to be sold in sizes like 8 GiB
 # that have amounts like 7.5 GiB according to the kernel.)
-AVAILABLE_RAM_RATIO = 0.95
+AVAILABLE_RAM_RATIO = 0.90
 MB=2**20
 
 # Workaround datetime.datetime.strptime() thread-safety bug by calling
@@ -63,8 +65,11 @@ class Summarizer(object):
         # are already suitable.  If applicable, the subclass
         # constructor will overwrite this with something useful.
         self.existing_constraints = {}
+        self.node_info = {}
+        self.cost = 0
+        self.arv_config = {}
 
-        logger.debug("%s: logdata %s", self.label, logdata)
+        logger.info("%s: logdata %s", self.label, logdata)
 
     def run(self):
         logger.debug("%s: parsing logdata %s", self.label, self._logdata)
@@ -72,78 +77,23 @@ class Summarizer(object):
             self._run(logdata)
 
     def _run(self, logdata):
-        self.detected_crunch1 = False
-        for line in logdata:
-            if not self.detected_crunch1 and '-8i9sb-' in line:
-                self.detected_crunch1 = True
-
-            if self.detected_crunch1:
-                m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) job_task (?P<task_uuid>\S+)$', line)
-                if m:
-                    seq = int(m.group('seq'))
-                    uuid = m.group('task_uuid')
-                    self.seq_to_uuid[seq] = uuid
-                    logger.debug('%s: seq %d is task %s', self.label, seq, uuid)
-                    continue
-
-                m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) (success in|failure \(#., permanent\) after) (?P<elapsed>\d+) seconds', line)
-                if m:
-                    task_id = self.seq_to_uuid[int(m.group('seq'))]
-                    elapsed = int(m.group('elapsed'))
-                    self.task_stats[task_id]['time'] = {'elapsed': elapsed}
-                    if elapsed > self.stats_max['time']['elapsed']:
-                        self.stats_max['time']['elapsed'] = elapsed
-                    continue
+        if not self.node_info:
+            self.node_info = logdata.node_info()
 
-                m = re.search(r'^\S+ \S+ \d+ (?P<seq>\d+) stderr Queued job (?P<uuid>\S+)$', line)
-                if m:
-                    uuid = m.group('uuid')
-                    if self._skip_child_jobs:
-                        logger.warning('%s: omitting stats from child job %s'
-                                       ' because --skip-child-jobs flag is on',
-                                       self.label, uuid)
-                        continue
-                    logger.debug('%s: follow %s', self.label, uuid)
-                    child_summarizer = NewSummarizer(uuid)
-                    child_summarizer.stats_max = self.stats_max
-                    child_summarizer.task_stats = self.task_stats
-                    child_summarizer.tasks = self.tasks
-                    child_summarizer.starttime = self.starttime
-                    child_summarizer.run()
-                    logger.debug('%s: done %s', self.label, uuid)
-                    continue
-
-                # 2017-12-02_17:15:08 e51c5-8i9sb-mfp68stkxnqdd6m 63676 0 stderr crunchstat: keepcalls 0 put 2576 get -- interval 10.0000 seconds 0 put 2576 get
-                m = re.search(r'^(?P<timestamp>[^\s.]+)(\.\d+)? (?P<job_uuid>\S+) \d+ (?P<seq>\d+) stderr (?P<crunchstat>crunchstat: )(?P<category>\S+) (?P<current>.*?)( -- interval (?P<interval>.*))?\n$', line)
-                if not m:
-                    continue
-            else:
-                # crunch2
-                # 2017-12-01T16:56:24.723509200Z crunchstat: keepcalls 0 put 3 get -- interval 10.0000 seconds 0 put 3 get
-                m = re.search(r'^(?P<timestamp>\S+) (?P<crunchstat>crunchstat: )?(?P<category>\S+) (?P<current>.*?)( -- interval (?P<interval>.*))?\n$', line)
-                if not m:
-                    continue
+        for line in logdata:
+            # crunch2
+            # 2017-12-01T16:56:24.723509200Z crunchstat: keepcalls 0 put 3 get -- interval 10.0000 seconds 0 put 3 get
+            m = re.search(r'^(?P<timestamp>\S+) (?P<crunchstat>crunchstat: )?(?P<category>\S+) (?P<current>.*?)( -- interval (?P<interval>.*))?\n$', line)
+            if not m:
+                continue
 
             if self.label is None:
                 try:
                     self.label = m.group('job_uuid')
                 except IndexError:
                     self.label = 'label #1'
-            category = m.group('category')
-            if category.endswith(':'):
-                # "stderr crunchstat: notice: ..."
-                continue
-            elif category in ('error', 'caught'):
-                continue
-            elif category in ('read', 'open', 'cgroup', 'CID', 'Running'):
-                # "stderr crunchstat: read /proc/1234/net/dev: ..."
-                # (old logs are less careful with unprefixed error messages)
-                continue
 
-            if self.detected_crunch1:
-                task_id = self.seq_to_uuid[int(m.group('seq'))]
-            else:
-                task_id = 'container'
+            task_id = 'container'
             task = self.tasks[task_id]
 
             # Use the first and last crunchstat timestamps as
@@ -172,12 +122,23 @@ class Summarizer(object):
             if self.finishtime is None or timestamp > self.finishtime:
                 self.finishtime = timestamp
 
-            if (not self.detected_crunch1) and task.starttime is not None and task.finishtime is not None:
+            if task.starttime is not None and task.finishtime is not None:
                 elapsed = (task.finishtime - task.starttime).seconds
                 self.task_stats[task_id]['time'] = {'elapsed': elapsed}
                 if elapsed > self.stats_max['time']['elapsed']:
                     self.stats_max['time']['elapsed'] = elapsed
 
+            category = m.group('category')
+            if category.endswith(':'):
+                # "stderr crunchstat: notice: ..."
+                continue
+            elif category in ('error', 'caught'):
+                continue
+            elif category in ('read', 'open', 'cgroup', 'CID', 'Running'):
+                # "stderr crunchstat: read /proc/1234/net/dev: ..."
+                # (old logs are less careful with unprefixed error messages)
+                continue
+
             this_interval_s = None
             for group in ['current', 'interval']:
                 if not m.group(group):
@@ -244,55 +205,73 @@ class Summarizer(object):
                     self.job_tot[category][stat] += val
         logger.debug('%s: done totals', self.label)
 
-        missing_category = {
-            'cpu': 'CPU',
-            'mem': 'memory',
-            'net:': 'network I/O',
-            'statfs': 'storage space',
-        }
-        for task_stat in self.task_stats.values():
-            for category in task_stat.keys():
-                for checkcat in missing_category:
-                    if checkcat.endswith(':'):
-                        if category.startswith(checkcat):
-                            missing_category.pop(checkcat)
-                            break
-                    else:
-                        if category == checkcat:
-                            missing_category.pop(checkcat)
-                            break
-        for catlabel in missing_category.values():
-            logger.warning('%s: %s stats are missing -- possible cluster configuration issue',
-                        self.label, catlabel)
+        if self.stats_max['time'].get('elapsed', 0) > 20:
+            # needs to have executed for at least 20 seconds or we may
+            # not have collected any metrics and these warnings are duds.
+            missing_category = {
+                'cpu': 'CPU',
+                'mem': 'memory',
+                'net:': 'network I/O',
+                'statfs': 'storage space',
+            }
+            for task_stat in self.task_stats.values():
+                for category in task_stat.keys():
+                    for checkcat in missing_category:
+                        if checkcat.endswith(':'):
+                            if category.startswith(checkcat):
+                                missing_category.pop(checkcat)
+                                break
+                        else:
+                            if category == checkcat:
+                                missing_category.pop(checkcat)
+                                break
+            for catlabel in missing_category.values():
+                logger.warning('%s: %s stats are missing -- possible cluster configuration issue',
+                            self.label, catlabel)
 
     def long_label(self):
         label = self.label
         if hasattr(self, 'process') and self.process['uuid'] not in label:
             label = '{} ({})'.format(label, self.process['uuid'])
-        if self.finishtime:
-            label += ' -- elapsed time '
-            s = (self.finishtime - self.starttime).total_seconds()
-            if s > 86400:
-                label += '{}d'.format(int(s/86400))
-            if s > 3600:
-                label += '{}h'.format(int(s/3600) % 24)
-            if s > 60:
-                label += '{}m'.format(int(s/60) % 60)
-            label += '{}s'.format(int(s) % 60)
+        return label
+
+    def elapsed_time(self):
+        if not self.finishtime:
+            return ""
+        label = ""
+        s = (self.finishtime - self.starttime).total_seconds()
+        if s > 86400:
+            label += '{}d '.format(int(s/86400))
+        if s > 3600:
+            label += '{}h '.format(int(s/3600) % 24)
+        if s > 60:
+            label += '{}m '.format(int(s/60) % 60)
+        label += '{}s'.format(int(s) % 60)
         return label
 
     def text_report(self):
         if not self.tasks:
             return "(no report generated)\n"
         return "\n".join(itertools.chain(
-            self._text_report_gen(),
-            self._recommend_gen())) + "\n"
+            self._text_report_table_gen(lambda x: "\t".join(x),
+                                  lambda x: "\t".join(x)),
+            self._text_report_agg_gen(lambda x: "# {}: {}{}".format(x[0], x[1], x[2])),
+            self._recommend_gen(lambda x: "#!! "+x))) + "\n"
 
     def html_report(self):
-        return WEBCHART_CLASS(self.label, [self]).html()
+        tophtml = """{}\n<table class='aggtable'><tbody>{}</tbody></table>\n""".format(
+            "\n".join(self._recommend_gen(lambda x: "<p>{}</p>".format(x))),
+            "\n".join(self._text_report_agg_gen(lambda x: "<tr><th>{}</th><td>{}{}</td></tr>".format(*x))))
+
+        bottomhtml = """<table class='metricstable'><tbody>{}</tbody></table>\n""".format(
+            "\n".join(self._text_report_table_gen(lambda x: "<tr><th>{}</th><th>{}</th><th>{}</th><th>{}</th><th>{}</th></tr>".format(*x),
+                                                        lambda x: "<tr><td>{}</td><td>{}</td><td>{}</td><td>{}</td><td>{}</td></tr>".format(*x))))
+        label = self.long_label()
 
-    def _text_report_gen(self):
-        yield "\t".join(['category', 'metric', 'task_max', 'task_max_rate', 'job_total'])
+        return WEBCHART_CLASS(label, [self]).html(tophtml, bottomhtml)
+
+    def _text_report_table_gen(self, headerformat, rowformat):
+        yield headerformat(['category', 'metric', 'task_max', 'task_max_rate', 'job_total'])
         for category, stat_max in sorted(self.stats_max.items()):
             for stat, val in sorted(stat_max.items()):
                 if stat.endswith('__rate'):
@@ -300,66 +279,135 @@ class Summarizer(object):
                 max_rate = self._format(stat_max.get(stat+'__rate', '-'))
                 val = self._format(val)
                 tot = self._format(self.job_tot[category].get(stat, '-'))
-                yield "\t".join([category, stat, str(val), max_rate, tot])
-        for args in (
-                ('Number of tasks: {}',
+                yield rowformat([category, stat, str(val), max_rate, tot])
+
+    def _text_report_agg_gen(self, aggformat):
+        by_single_task = ""
+        if len(self.tasks) > 1:
+            by_single_task = " by a single task"
+
+        metrics = [
+            ('Elapsed time',
+             self.elapsed_time(),
+             None,
+             ''),
+
+            ('Estimated cost',
+             '${:.3f}'.format(self.cost),
+             None,
+             '') if self.cost > 0 else None,
+
+            ('Assigned instance type',
+             self.node_info.get('ProviderType'),
+             None,
+             '') if self.node_info.get('ProviderType') else None,
+
+            ('Instance hourly price',
+             '${:.3f}'.format(self.node_info.get('Price')),
+             None,
+             '') if self.node_info.get('Price') else None,
+
+            ('Max CPU usage in a single interval',
+             self.stats_max['cpu']['user+sys__rate'],
+             lambda x: x * 100,
+             '%'),
+
+            ('Overall CPU usage',
+             float(self.job_tot['cpu']['user+sys']) /
+             self.job_tot['time']['elapsed']
+             if self.job_tot['time']['elapsed'] > 0 else 0,
+             lambda x: x * 100,
+             '%'),
+
+            ('Requested CPU cores',
+             self.existing_constraints.get(self._map_runtime_constraint('vcpus')),
+             None,
+             '') if self.existing_constraints.get(self._map_runtime_constraint('vcpus')) else None,
+
+            ('Instance VCPUs',
+             self.node_info.get('VCPUs'),
+             None,
+             '') if self.node_info.get('VCPUs') else None,
+
+            ('Max memory used{}'.format(by_single_task),
+             self.stats_max['mem']['rss'],
+             lambda x: x / 2**20,
+             'MB'),
+
+            ('Requested RAM',
+             self.existing_constraints.get(self._map_runtime_constraint('ram')),
+             lambda x: x / 2**20,
+             'MB') if self.existing_constraints.get(self._map_runtime_constraint('ram')) else None,
+
+            ('Maximum RAM request for this instance type',
+             (self.node_info.get('RAM') - self.arv_config.get('Containers', {}).get('ReserveExtraRAM', 0))*.95,
+             lambda x: x / 2**20,
+             'MB') if self.node_info.get('RAM') else None,
+
+            ('Max network traffic{}'.format(by_single_task),
+             self.stats_max['net:eth0']['tx+rx'] +
+             self.stats_max['net:keep0']['tx+rx'],
+             lambda x: x / 1e9,
+             'GB'),
+
+            ('Max network speed in a single interval',
+             self.stats_max['net:eth0']['tx+rx__rate'] +
+             self.stats_max['net:keep0']['tx+rx__rate'],
+             lambda x: x / 1e6,
+             'MB/s'),
+
+            ('Keep cache miss rate',
+             (float(self.job_tot['keepcache']['miss']) /
+              float(self.job_tot['keepcalls']['get']))
+             if self.job_tot['keepcalls']['get'] > 0 else 0,
+             lambda x: x * 100.0,
+             '%'),
+
+            ('Keep cache utilization',
+             (float(self.job_tot['blkio:0:0']['read']) /
+              float(self.job_tot['net:keep0']['rx']))
+             if self.job_tot['net:keep0']['rx'] > 0 else 0,
+             lambda x: x * 100.0,
+             '%'),
+
+            ('Temp disk utilization',
+             (float(self.job_tot['statfs']['used']) /
+              float(self.job_tot['statfs']['total']))
+             if self.job_tot['statfs']['total'] > 0 else 0,
+             lambda x: x * 100.0,
+             '%'),
+        ]
+
+        if len(self.tasks) > 1:
+            metrics.insert(0, ('Number of tasks',
                  len(self.tasks),
-                 None),
-                ('Max CPU time spent by a single task: {}s',
-                 self.stats_max['cpu']['user+sys'],
-                 None),
-                ('Max CPU usage in a single interval: {}%',
-                 self.stats_max['cpu']['user+sys__rate'],
-                 lambda x: x * 100),
-                ('Overall CPU usage: {}%',
-                 float(self.job_tot['cpu']['user+sys']) /
-                 self.job_tot['time']['elapsed']
-                 if self.job_tot['time']['elapsed'] > 0 else 0,
-                 lambda x: x * 100),
-                ('Max memory used by a single task: {}GB',
-                 self.stats_max['mem']['rss'],
-                 lambda x: x / 1e9),
-                ('Max network traffic in a single task: {}GB',
-                 self.stats_max['net:eth0']['tx+rx'] +
-                 self.stats_max['net:keep0']['tx+rx'],
-                 lambda x: x / 1e9),
-                ('Max network speed in a single interval: {}MB/s',
-                 self.stats_max['net:eth0']['tx+rx__rate'] +
-                 self.stats_max['net:keep0']['tx+rx__rate'],
-                 lambda x: x / 1e6),
-                ('Keep cache miss rate {}%',
-                 (float(self.job_tot['keepcache']['miss']) /
-                 float(self.job_tot['keepcalls']['get']))
-                 if self.job_tot['keepcalls']['get'] > 0 else 0,
-                 lambda x: x * 100.0),
-                ('Keep cache utilization {}%',
-                 (float(self.job_tot['blkio:0:0']['read']) /
-                 float(self.job_tot['net:keep0']['rx']))
-                 if self.job_tot['net:keep0']['rx'] > 0 else 0,
-                 lambda x: x * 100.0),
-               ('Temp disk utilization {}%',
-                 (float(self.job_tot['statfs']['used']) /
-                 float(self.job_tot['statfs']['total']))
-                 if self.job_tot['statfs']['total'] > 0 else 0,
-                 lambda x: x * 100.0),
-                ):
-            format_string, val, transform = args
+                 None,
+                 ''))
+        for args in metrics:
+            if args is None:
+                continue
+            format_string, val, transform, suffix = args
             if val == float('-Inf'):
                 continue
             if transform:
                 val = transform(val)
-            yield "# "+format_string.format(self._format(val))
+            yield aggformat((format_string, self._format(val), suffix))
 
-    def _recommend_gen(self):
+    def _recommend_gen(self, recommendformat):
         # TODO recommend fixing job granularity if elapsed time is too short
+
+        if self.stats_max['time'].get('elapsed', 0) <= 20:
+            # Not enough data
+            return []
+
         return itertools.chain(
-            self._recommend_cpu(),
-            self._recommend_ram(),
-            self._recommend_keep_cache(),
-            self._recommend_temp_disk(),
+            self._recommend_cpu(recommendformat),
+            self._recommend_ram(recommendformat),
+            self._recommend_keep_cache(recommendformat),
+            self._recommend_temp_disk(recommendformat),
             )
 
-    def _recommend_cpu(self):
+    def _recommend_cpu(self, recommendformat):
         """Recommend asking for 4 cores if max CPU usage was 333%"""
 
         constraint_key = self._map_runtime_constraint('vcpus')
@@ -373,19 +421,17 @@ class Summarizer(object):
         asked_cores = self.existing_constraints.get(constraint_key)
         if asked_cores is None:
             asked_cores = 1
-        # TODO: This should be more nuanced in cases where max >> avg
-        if used_cores < asked_cores:
-            yield (
-                '#!! {} max CPU usage was {}% -- '
-                'try reducing runtime_constraints to "{}":{}'
+
+        if used_cores < (asked_cores*.5):
+            yield recommendformat(
+                '{} peak CPU usage was only {}% out of possible {}% ({} cores requested)'
             ).format(
                 self.label,
                 math.ceil(cpu_max_rate*100),
-                constraint_key,
-                int(used_cores))
+                asked_cores*100, asked_cores)
 
     # FIXME: This needs to be updated to account for current a-d-c algorithms
-    def _recommend_ram(self):
+    def _recommend_ram(self, recommendformat):
         """Recommend an economical RAM constraint for this job.
 
         Nodes that are advertised as "8 gibibytes" actually have what
@@ -424,55 +470,63 @@ class Summarizer(object):
         if used_bytes == float('-Inf'):
             logger.warning('%s: no memory usage data', self.label)
             return
+        if not self.existing_constraints.get(constraint_key):
+            return
         used_mib = math.ceil(float(used_bytes) / MB)
-        asked_mib = self.existing_constraints.get(constraint_key)
+        asked_mib = self.existing_constraints.get(constraint_key) / MB
 
         nearlygibs = lambda mebibytes: mebibytes/AVAILABLE_RAM_RATIO/1024
-        if used_mib > 0 and (asked_mib is None or (
-                math.ceil(nearlygibs(used_mib)) < nearlygibs(asked_mib))):
-            yield (
-                '#!! {} max RSS was {} MiB -- '
-                'try reducing runtime_constraints to "{}":{}'
+        ratio = 0.5
+        recommend_mib = int(math.ceil(nearlygibs(used_mib/ratio))*AVAILABLE_RAM_RATIO*1024)
+        if used_mib > 0 and (used_mib / asked_mib) < ratio and asked_mib > recommend_mib:
+            yield recommendformat(
+                '{} peak RAM usage was only {}% ({} MiB used / {} MiB requested)'
             ).format(
                 self.label,
+                int(math.ceil(100*(used_mib / asked_mib))),
                 int(used_mib),
-                constraint_key,
-                int(math.ceil(nearlygibs(used_mib))*AVAILABLE_RAM_RATIO*1024*(MB)/self._runtime_constraint_mem_unit()))
+                int(asked_mib))
+
+    def _recommend_keep_cache(self, recommendformat):
+        """Recommend increasing keep cache if utilization < 50%.
+
+        This means the amount of data returned to the program is less
+        than 50% of the amount of data actually downloaded by
+        arv-mount.
+        """
 
-    def _recommend_keep_cache(self):
-        """Recommend increasing keep cache if utilization < 80%"""
-        constraint_key = self._map_runtime_constraint('keep_cache_ram')
         if self.job_tot['net:keep0']['rx'] == 0:
             return
+
+        miss_rate = (float(self.job_tot['keepcache']['miss']) /
+                     float(self.job_tot['keepcalls']['get']))
+
         utilization = (float(self.job_tot['blkio:0:0']['read']) /
                        float(self.job_tot['net:keep0']['rx']))
         # FIXME: the default on this get won't work correctly
-        asked_cache = self.existing_constraints.get(constraint_key, 256) * self._runtime_constraint_mem_unit()
+        asked_cache = self.existing_constraints.get('keep_cache_ram') or self.existing_constraints.get('keep_cache_disk')
 
-        if utilization < 0.8:
-            yield (
-                '#!! {} Keep cache utilization was {:.2f}% -- '
-                'try doubling runtime_constraints to "{}":{} (or more)'
+        if utilization < 0.5 and miss_rate > .05:
+            yield recommendformat(
+                '{} Keep cache utilization was only {:.2f}% and miss rate was {:.2f}% -- '
+                'recommend increasing keep_cache'
             ).format(
                 self.label,
                 utilization * 100.0,
-                constraint_key,
-                math.ceil(asked_cache * 2 / self._runtime_constraint_mem_unit()))
+                miss_rate * 100.0)
 
 
-    def _recommend_temp_disk(self):
-        """Recommend decreasing temp disk if utilization < 50%"""
-        total = float(self.job_tot['statfs']['total'])
-        utilization = (float(self.job_tot['statfs']['used']) / total) if total > 0 else 0.0
+    def _recommend_temp_disk(self, recommendformat):
+        """This recommendation is disabled for the time being.  It was
+        using the total disk on the node and not the amount of disk
+        requested, so it would trigger a false positive basically
+        every time.  To get the amount of disk requested we need to
+        fish it out of the mounts, which is extra work I don't want to do
+        right now.  You can find the old code at commit 616d135e77
 
-        if utilization < 50.8 and total > 0:
-            yield (
-                '#!! {} max temp disk utilization was {:.0f}% of {:.0f} MiB -- '
-                'consider reducing "tmpdirMin" and/or "outdirMin"'
-            ).format(
-                self.label,
-                utilization * 100.0,
-                total / MB)
+        """
+
+        return []
 
 
     def _format(self, val):
@@ -487,18 +541,11 @@ class Summarizer(object):
     def _runtime_constraint_mem_unit(self):
         if hasattr(self, 'runtime_constraint_mem_unit'):
             return self.runtime_constraint_mem_unit
-        elif self.detected_crunch1:
-            return JobSummarizer.runtime_constraint_mem_unit
         else:
             return ContainerRequestSummarizer.runtime_constraint_mem_unit
 
     def _map_runtime_constraint(self, key):
-        if hasattr(self, 'map_runtime_constraint'):
-            return self.map_runtime_constraint[key]
-        elif self.detected_crunch1:
-            return JobSummarizer.map_runtime_constraint[key]
-        else:
-            return key
+        return key
 
 
 class CollectionSummarizer(Summarizer):
@@ -517,7 +564,7 @@ def NewSummarizer(process_or_uuid, **kwargs):
     else:
         uuid = process_or_uuid
         process = None
-        arv = arvados.api('v1')
+        arv = kwargs.get("arv") or arvados.api('v1')
 
     if '-dz642-' in uuid:
         if process is None:
@@ -530,14 +577,6 @@ def NewSummarizer(process_or_uuid, **kwargs):
         if process is None:
             process = arv.container_requests().get(uuid=uuid).execute()
         klass = ContainerRequestTreeSummarizer
-    elif '-8i9sb-' in uuid:
-        if process is None:
-            process = arv.jobs().get(uuid=uuid).execute()
-        klass = JobTreeSummarizer
-    elif '-d1hrv-' in uuid:
-        if process is None:
-            process = arv.pipeline_instances().get(uuid=uuid).execute()
-        klass = PipelineSummarizer
     elif '-4zz18-' in uuid:
         return CollectionSummarizer(collection_id=uuid)
     else:
@@ -551,6 +590,7 @@ class ProcessSummarizer(Summarizer):
     def __init__(self, process, label=None, **kwargs):
         rdr = None
         self.process = process
+        arv = kwargs.get("arv") or arvados.api('v1')
         if label is None:
             label = self.process.get('name', self.process['uuid'])
         # Pre-Arvados v1.4 everything is in 'log'
@@ -558,7 +598,10 @@ class ProcessSummarizer(Summarizer):
         log_collection = self.process.get('log', self.process.get('log_uuid'))
         if log_collection and self.process.get('state') != 'Uncommitted': # arvados.util.CR_UNCOMMITTED:
             try:
-                rdr = crunchstat_summary.reader.CollectionReader(log_collection)
+                rdr = crunchstat_summary.reader.CollectionReader(
+                    log_collection,
+                    api_client=arv,
+                    collection_object=kwargs.get("collection_object"))
             except arvados.errors.NotFoundError as e:
                 logger.warning("Trying event logs after failing to read "
                                "log collection %s: %s", self.process['log'], e)
@@ -566,17 +609,11 @@ class ProcessSummarizer(Summarizer):
             uuid = self.process.get('container_uuid', self.process.get('uuid'))
             rdr = crunchstat_summary.reader.LiveLogReader(uuid)
             label = label + ' (partial)'
+
         super(ProcessSummarizer, self).__init__(rdr, label=label, **kwargs)
         self.existing_constraints = self.process.get('runtime_constraints', {})
-
-
-class JobSummarizer(ProcessSummarizer):
-    runtime_constraint_mem_unit = MB
-    map_runtime_constraint = {
-        'keep_cache_ram': 'keep_cache_mb_per_task',
-        'ram': 'min_ram_mb_per_node',
-        'vcpus': 'min_cores_per_node',
-    }
+        self.arv_config = arv.config()
+        self.cost = self.process.get('cost', 0)
 
 
 class ContainerRequestSummarizer(ProcessSummarizer):
@@ -585,26 +622,26 @@ class ContainerRequestSummarizer(ProcessSummarizer):
 
 class MultiSummarizer(object):
     def __init__(self, children={}, label=None, threads=1, **kwargs):
-        self.throttle = threading.Semaphore(threads)
         self.children = children
         self.label = label
-
-    def run_and_release(self, target, *args, **kwargs):
-        try:
-            return target(*args, **kwargs)
-        finally:
-            self.throttle.release()
+        self.threadcount = threads
 
     def run(self):
-        threads = []
-        for child in self.children.values():
-            self.throttle.acquire()
-            t = threading.Thread(target=self.run_and_release, args=(child.run, ))
-            t.daemon = True
-            t.start()
-            threads.append(t)
-        for t in threads:
-            t.join()
+        if self.threadcount > 1 and len(self.children) > 1:
+            completed = 0
+            def run_and_progress(child):
+                # "completed" is assigned below; declare it nonlocal so the
+                # counter in the enclosing run() is updated instead of
+                # raising UnboundLocalError.
+                nonlocal completed
+                try:
+                    child.run()
+                except Exception:
+                    logger.exception("parse error")
+                completed += 1
+                logger.info("%s/%s summarized %s", completed, len(self.children), child.label)
+            with ThreadPoolExecutor(max_workers=self.threadcount) as tpe:
+                for child in self.children.values():
+                    tpe.submit(run_and_progress, child)
+        else:
+            for child in self.children.values():
+                child.run()
 
     def text_report(self):
         txt = ''
@@ -632,57 +669,26 @@ class MultiSummarizer(object):
         return d
 
     def html_report(self):
-        return WEBCHART_CLASS(self.label, iter(self._descendants().values())).html()
+        tophtml = ""
+        bottomhtml = ""
+        label = self.label
+        if len(self._descendants()) == 1:
+            summarizer = next(iter(self._descendants().values()))
+            tophtml = """{}\n<table class='aggtable'><tbody>{}</tbody></table>\n""".format(
+                "\n".join(summarizer._recommend_gen(lambda x: "<p>{}</p>".format(x))),
+                "\n".join(summarizer._text_report_agg_gen(lambda x: "<tr><th>{}</th><td>{}{}</td></tr>".format(*x))))
 
+            bottomhtml = """<table class='metricstable'><tbody>{}</tbody></table>\n""".format(
+                "\n".join(summarizer._text_report_table_gen(lambda x: "<tr><th>{}</th><th>{}</th><th>{}</th><th>{}</th><th>{}</th></tr>".format(*x),
+                                                            lambda x: "<tr><td>{}</td><td>{}</td><td>{}</td><td>{}</td><td>{}</td></tr>".format(*x))))
+            label = summarizer.long_label()
 
-class JobTreeSummarizer(MultiSummarizer):
-    """Summarizes a job and all children listed in its components field."""
-    def __init__(self, job, label=None, **kwargs):
-        arv = arvados.api('v1')
-        label = label or job.get('name', job['uuid'])
-        children = collections.OrderedDict()
-        children[job['uuid']] = JobSummarizer(job, label=label, **kwargs)
-        if job.get('components', None):
-            preloaded = {}
-            for j in arv.jobs().index(
-                    limit=len(job['components']),
-                    filters=[['uuid','in',list(job['components'].values())]]).execute()['items']:
-                preloaded[j['uuid']] = j
-            for cname in sorted(job['components'].keys()):
-                child_uuid = job['components'][cname]
-                j = (preloaded.get(child_uuid) or
-                     arv.jobs().get(uuid=child_uuid).execute())
-                children[child_uuid] = JobTreeSummarizer(job=j, label=cname, **kwargs)
-
-        super(JobTreeSummarizer, self).__init__(
-            children=children,
-            label=label,
-            **kwargs)
-
-
-class PipelineSummarizer(MultiSummarizer):
-    def __init__(self, instance, **kwargs):
-        children = collections.OrderedDict()
-        for cname, component in instance['components'].items():
-            if 'job' not in component:
-                logger.warning(
-                    "%s: skipping component with no job assigned", cname)
-            else:
-                logger.info(
-                    "%s: job %s", cname, component['job']['uuid'])
-                summarizer = JobTreeSummarizer(component['job'], label=cname, **kwargs)
-                summarizer.label = '{} {}'.format(
-                    cname, component['job']['uuid'])
-                children[cname] = summarizer
-        super(PipelineSummarizer, self).__init__(
-            children=children,
-            label=instance['uuid'],
-            **kwargs)
+        return WEBCHART_CLASS(label, iter(self._descendants().values())).html(tophtml, bottomhtml)
 
 
 class ContainerRequestTreeSummarizer(MultiSummarizer):
     def __init__(self, root, skip_child_jobs=False, **kwargs):
-        arv = arvados.api('v1')
+        arv = kwargs.get("arv") or arvados.api('v1')
 
         label = kwargs.pop('label', None) or root.get('name') or root['uuid']
         root['name'] = label
@@ -698,22 +704,15 @@ class ContainerRequestTreeSummarizer(MultiSummarizer):
             summer.sort_key = sort_key
             children[current['uuid']] = summer
 
-            page_filters = []
-            while True:
-                child_crs = arv.container_requests().index(
-                    order=['uuid asc'],
-                    filters=page_filters+[
-                        ['requesting_container_uuid', '=', current['container_uuid']]],
-                ).execute()
-                if not child_crs['items']:
-                    break
-                elif skip_child_jobs:
-                    logger.warning('%s: omitting stats from %d child containers'
-                                   ' because --skip-child-jobs flag is on',
-                                   label, child_crs['items_available'])
-                    break
-                page_filters = [['uuid', '>', child_crs['items'][-1]['uuid']]]
-                for cr in child_crs['items']:
+            if skip_child_jobs:
+                child_crs = arv.container_requests().list(filters=[['requesting_container_uuid', '=', current['container_uuid']]],
+                                                          limit=0).execute()
+                logger.warning('%s: omitting stats from %d child containers'
+                               ' because --skip-child-jobs flag is on',
+                               label, child_crs['items_available'])
+            else:
+                for cr in arvados.util.keyset_list_all(arv.container_requests().list,
+                                                       filters=[['requesting_container_uuid', '=', current['container_uuid']]]):
                     if cr['container_uuid']:
                         logger.debug('%s: container req %s', current['uuid'], cr['uuid'])
                         cr['name'] = cr.get('name') or cr['uuid']
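The _recommend_keep_cache() heuristic added above compares how much data the container actually read (blkio) against how much arv-mount fetched from Keep, and only warns when utilization is below 50% and the cache miss rate exceeds 5%. A worked example with invented totals:

    # Invented job totals, matching the fields _recommend_keep_cache() uses:
    blkio_read = 5 * 10**9    # job_tot['blkio:0:0']['read']
    keep_rx    = 20 * 10**9   # job_tot['net:keep0']['rx']
    cache_miss = 400          # job_tot['keepcache']['miss']
    keep_gets  = 5000         # job_tot['keepcalls']['get']

    utilization = blkio_read / keep_rx   # 0.25 -> 25%
    miss_rate = cache_miss / keep_gets   # 0.08 -> 8%
    # utilization < 0.5 and miss_rate > 0.05, so the report recommends
    # increasing keep_cache.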
index 31afcf64e906166788bf06b9caa4ed191ead13c9..f959661246f0dffc55cef06bbece384978f3b86a 100644 (file)
@@ -20,19 +20,91 @@ class WebChart(object):
     JSLIB = None
     JSASSET = None
 
+    STYLE = '''
+        body {
+          background: #fafafa;
+          font-family: "Roboto", "Helvetica", "Arial", sans-serif;
+          font-size: 0.875rem;
+          color: rgba(0, 0, 0, 0.87);
+          font-weight: 400;
+        }
+        .card {
+          background: #ffffff;
+          box-shadow: 0px 1px 5px 0px rgba(0,0,0,0.2),0px 2px 2px 0px rgba(0,0,0,0.14),0px 3px 1px -2px rgba(0,0,0,0.12);
+          border-radius: 4px;
+          margin: 20px;
+        }
+        .content {
+          padding: 2px 16px 8px 16px;
+        }
+        table {
+          border-spacing: 0px;
+        }
+        tr {
+          height: 36px;
+          text-align: left;
+        }
+        th {
+          padding-right: 4em;
+          border-top: 1px solid rgba(224, 224, 224, 1);
+        }
+        td {
+          padding-right: 2em;
+          border-top: 1px solid rgba(224, 224, 224, 1);
+        }
+        #chart {
+          margin-left: -20px;
+        }
+    '''
+
     def __init__(self, label, summarizers):
         self.label = label
         self.summarizers = summarizers
 
-    def html(self):
+    def html(self, beforechart='', afterchart=''):
         return '''<!doctype html><html><head>
         <title>{} stats</title>
         <script type="text/javascript" src="{}"></script>
         <script type="text/javascript">{}</script>
+        <style>
+        {}
+        </style>
         {}
-        </head><body></body></html>
+        </head>
+        <body>
+        <div class="card">
+          <div class="content">
+            <h1>{}</h1>
+          </div>
+        </div>
+        <div class="card">
+          <div class="content" id="tophtml">
+          <h2>Summary</h2>
+          {}
+          </div>
+        </div>
+        <div class="card">
+          <div class="content">
+            <h2>Graph</h2>
+            <div id="chart"></div>
+          </div>
+        </div>
+        <div class="card">
+          <div class="content" id="bottomhtml">
+          <h2>Metrics</h2>
+          {}
+          </div>
+        </div>
+        </body>
+        </html>
         '''.format(escape(self.label),
-                   self.JSLIB, self.js(), self.headHTML())
+                   self.JSLIB,
+                   self.js(),
+                   self.STYLE,
+                   self.headHTML(),
+                   escape(self.label),
+                   beforechart,
+                   afterchart)
 
     def js(self):
         return 'var chartdata = {};\n{}'.format(
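WebChart.html() now takes two HTML fragments that land in the "Summary" and "Metrics" cards around the chart div. A rough usage sketch, assuming the DygraphsChart subclass shipped with crunchstat_summary (class name assumed here) and pre-rendered table fragments:

    from crunchstat_summary import dygraphs

    summary_table = '<table><tr><th>Elapsed time</th><td>20s</td></tr></table>'
    metrics_table = '<table><tr><th>cpu user+sys</th><td>2.45</td></tr></table>'

    # Real callers pass an iterator of Summarizer objects; an empty iterator
    # is enough to render the page skeleton for this sketch.
    chart = dygraphs.DygraphsChart('my-container-request', iter([]))
    page = chart.html(beforechart=summary_table, afterchart=metrics_table)
    with open('report.html', 'w') as f:
        f.write(page)
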
index a881390e47e893fb1e22eb46926716f58f2c7c9e..98be9f27025b4c3c7828626dd7676fddbe845dfd 100755 (executable)
@@ -45,6 +45,7 @@ setup(name='crunchstat_summary',
       install_requires=[
           'arvados-python-client{}'.format(pysdk_dep),
       ],
+      python_requires="~=3.8",
       test_suite='tests',
       tests_require=['pbr<1.7.0', 'mock>=1.0'],
       zip_safe=False,
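python_requires="~=3.8" is a "compatible release" specifier: pip will refuse to install the package on interpreters older than 3.8 or from a different major series (it is equivalent to >= 3.8, < 4.0). A minimal illustration with a hypothetical package name:

    from setuptools import setup

    setup(
        name='example-tool',        # hypothetical name, for illustration only
        version='0.0.1',
        python_requires='~=3.8',    # equivalent to: >= 3.8, < 4.0
    )
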
index 868f07b684eedad0544723ab50cf5b90a86329bd..e00faafb00f272738605b2b09201dfb61efc09ca 100644 (file)
@@ -25,15 +25,14 @@ statfs      available       397744787456    -       397744787456
 statfs total   402611240960    -       402611240960
 statfs used    4870303744      52426.18        4866453504
 time   elapsed 20      -       20
-# Number of tasks: 1
-# Max CPU time spent by a single task: 2.45s
+# Elapsed time: 20s
 # Max CPU usage in a single interval: 23.70%
 # Overall CPU usage: 12.25%
-# Max memory used by a single task: 0.07GB
-# Max network traffic in a single task: 0.00GB
+# Requested CPU cores: 1
+# Max memory used: 66.30MB
+# Requested RAM: 2500.00MB
+# Max network traffic: 0.00GB
 # Max network speed in a single interval: 0.00MB/s
-# Keep cache miss rate 0.00%
-# Keep cache utilization 0.00%
-# Temp disk utilization 1.21%
-#!! container max RSS was 67 MiB -- try reducing runtime_constraints to "ram":1020054732
-#!! container max temp disk utilization was 1% of 383960 MiB -- consider reducing "tmpdirMin" and/or "outdirMin"
+# Keep cache miss rate: 0.00%
+# Keep cache utilization: 0.00%
+# Temp disk utilization: 1.21%
index f77059b82496f5825d9d634847a2b0537efaed72..6afdf9aa69d756c6edc2352b2dc37b8d997ac3c4 100644 (file)
@@ -11,13 +11,12 @@ net:keep0   rx      0       0       0
 net:keep0      tx      0       0       0
 net:keep0      tx+rx   0       0       0
 time   elapsed 10      -       10
-# Number of tasks: 1
-# Max CPU time spent by a single task: 0s
+# Elapsed time: 10s
 # Max CPU usage in a single interval: 0%
 # Overall CPU usage: 0.00%
-# Max memory used by a single task: 0.00GB
-# Max network traffic in a single task: 0.00GB
+# Max memory used: 0.00MB
+# Max network traffic: 0.00GB
 # Max network speed in a single interval: 0.00MB/s
-# Keep cache miss rate 0.00%
-# Keep cache utilization 0.00%
-# Temp disk utilization 0.00%
+# Keep cache miss rate: 0.00%
+# Keep cache utilization: 0.00%
+# Temp disk utilization: 0.00%
index 87db98bb37cc468c645b3ce9af03f78e5d024b3e..fa1ad04e7b5171adf3e96d6248a7c29d506eea7b 100644 (file)
@@ -14,15 +14,12 @@ statfs      available       397744787456    -       397744787456
 statfs total   402611240960    -       402611240960
 statfs used    4870303744      52426.18        4866453504
 time   elapsed 20      -       20
-# Number of tasks: 1
-# Max CPU time spent by a single task: 2.45s
+# Elapsed time: 20s
 # Max CPU usage in a single interval: 23.70%
 # Overall CPU usage: 12.25%
-# Max memory used by a single task: 0.07GB
-# Max network traffic in a single task: 0.00GB
+# Max memory used: 66.30MB
+# Max network traffic: 0.00GB
 # Max network speed in a single interval: 0.00MB/s
-# Keep cache miss rate 0.00%
-# Keep cache utilization 0.00%
-# Temp disk utilization 1.21%
-#!! label #1 max RSS was 67 MiB -- try reducing runtime_constraints to "ram":1020054732
-#!! label #1 max temp disk utilization was 1% of 383960 MiB -- consider reducing "tmpdirMin" and/or "outdirMin"
+# Keep cache miss rate: 0.00%
+# Keep cache utilization: 0.00%
+# Temp disk utilization: 1.21%
index 868f07b684eedad0544723ab50cf5b90a86329bd..e00faafb00f272738605b2b09201dfb61efc09ca 100644 (file)
@@ -25,15 +25,14 @@ statfs      available       397744787456    -       397744787456
 statfs total   402611240960    -       402611240960
 statfs used    4870303744      52426.18        4866453504
 time   elapsed 20      -       20
-# Number of tasks: 1
-# Max CPU time spent by a single task: 2.45s
+# Elapsed time: 20s
 # Max CPU usage in a single interval: 23.70%
 # Overall CPU usage: 12.25%
-# Max memory used by a single task: 0.07GB
-# Max network traffic in a single task: 0.00GB
+# Requested CPU cores: 1
+# Max memory used: 66.30MB
+# Requested RAM: 2500.00MB
+# Max network traffic: 0.00GB
 # Max network speed in a single interval: 0.00MB/s
-# Keep cache miss rate 0.00%
-# Keep cache utilization 0.00%
-# Temp disk utilization 1.21%
-#!! container max RSS was 67 MiB -- try reducing runtime_constraints to "ram":1020054732
-#!! container max temp disk utilization was 1% of 383960 MiB -- consider reducing "tmpdirMin" and/or "outdirMin"
+# Keep cache miss rate: 0.00%
+# Keep cache utilization: 0.00%
+# Temp disk utilization: 1.21%
index bf6dd5ceaff9a0e689e9caa7afb6009c724261a5..2b93639281c8a659358f074a4c84f281841bfe12 100644 (file)
@@ -1,9 +1,9 @@
-2016-01-07_00:15:33 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr 
+2016-01-07_00:15:33 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr
 2016-01-07_00:15:33 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr old error message:
 2016-01-07_00:15:33 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr crunchstat: read /proc/3305/net/dev: open /proc/3305/net/dev: no such file or directory
-2016-01-07_00:15:34 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr 
+2016-01-07_00:15:34 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr
 2016-01-07_00:15:34 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr new error message:
 2016-01-07_00:15:34 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr crunchstat: error reading /proc/3305/net/dev: open /proc/3305/net/dev: no such file or directory
 2016-01-07_00:15:34 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr
 2016-01-07_00:15:34 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr cancelled job:
-2016-01-07_00:15:34 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr crunchstat: caught signal: interrupt
+2016-01-07_00:15:59 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr crunchstat: caught signal: interrupt
diff --git a/tools/crunchstat-summary/tests/logfile_20151204190335.txt.gz b/tools/crunchstat-summary/tests/logfile_20151204190335.txt.gz
deleted file mode 100644 (file)
index bfdcdff..0000000
Binary files a/tools/crunchstat-summary/tests/logfile_20151204190335.txt.gz and /dev/null differ
diff --git a/tools/crunchstat-summary/tests/logfile_20151204190335.txt.gz.report b/tools/crunchstat-summary/tests/logfile_20151204190335.txt.gz.report
deleted file mode 100644 (file)
index 173e93f..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-category       metric  task_max        task_max_rate   job_total
-blkio:0:0      read    0       0       0
-blkio:0:0      write   0       0       0
-cpu    cpus    8.00    -       -
-cpu    sys     1.92    0.04    1.92
-cpu    user    3.83    0.09    3.83
-cpu    user+sys        5.75    0.13    5.75
-fuseops        read    0       0       0
-fuseops        write   0       0       0
-keepcache      hit     0       0       0
-keepcache      miss    0       0       0
-keepcalls      get     0       0       0
-keepcalls      put     0       0       0
-mem    cache   1678139392      -       -
-mem    pgmajfault      0       -       0
-mem    rss     349814784       -       -
-mem    swap    0       -       -
-net:eth0       rx      1754364530      41658344.87     1754364530
-net:eth0       tx      38837956        920817.97       38837956
-net:eth0       tx+rx   1793202486      42579162.83     1793202486
-net:keep0      rx      0       0       0
-net:keep0      tx      0       0       0
-net:keep0      tx+rx   0       0       0
-time   elapsed 80      -       80
-# Number of tasks: 1
-# Max CPU time spent by a single task: 5.75s
-# Max CPU usage in a single interval: 13.00%
-# Overall CPU usage: 7.19%
-# Max memory used by a single task: 0.35GB
-# Max network traffic in a single task: 1.79GB
-# Max network speed in a single interval: 42.58MB/s
-# Keep cache miss rate 0.00%
-# Keep cache utilization 0.00%
-# Temp disk utilization 0.00%
-#!! 4xphq-8i9sb-jq0ekny1xou3zoh max RSS was 334 MiB -- try reducing runtime_constraints to "min_ram_mb_per_node":972
diff --git a/tools/crunchstat-summary/tests/logfile_20151210063411.txt.gz b/tools/crunchstat-summary/tests/logfile_20151210063411.txt.gz
deleted file mode 100644 (file)
index 17af535..0000000
Binary files a/tools/crunchstat-summary/tests/logfile_20151210063411.txt.gz and /dev/null differ
diff --git a/tools/crunchstat-summary/tests/logfile_20151210063411.txt.gz.report b/tools/crunchstat-summary/tests/logfile_20151210063411.txt.gz.report
deleted file mode 100644 (file)
index b31a055..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-category       metric  task_max        task_max_rate   job_total
-cpu    cpus    8.00    -       -
-cpu    sys     0       -       0.00
-cpu    user    0       -       0.00
-cpu    user+sys        0       -       0.00
-mem    cache   12288   -       -
-mem    pgmajfault      0       -       0
-mem    rss     856064  -       -
-mem    swap    0       -       -
-net:eth0       rx      90      -       90
-net:eth0       tx      90      -       90
-net:eth0       tx+rx   180     -       180
-time   elapsed 2       -       4
-# Number of tasks: 2
-# Max CPU time spent by a single task: 0s
-# Max CPU usage in a single interval: 0%
-# Overall CPU usage: 0.00%
-# Max memory used by a single task: 0.00GB
-# Max network traffic in a single task: 0.00GB
-# Max network speed in a single interval: 0.00MB/s
-# Keep cache miss rate 0.00%
-# Keep cache utilization 0.00%
-# Temp disk utilization 0.00%
-#!! 4xphq-8i9sb-zvb2ocfycpomrup max RSS was 1 MiB -- try reducing runtime_constraints to "min_ram_mb_per_node":972
diff --git a/tools/crunchstat-summary/tests/logfile_20151210063439.txt.gz b/tools/crunchstat-summary/tests/logfile_20151210063439.txt.gz
deleted file mode 100644 (file)
index 8826f70..0000000
Binary files a/tools/crunchstat-summary/tests/logfile_20151210063439.txt.gz and /dev/null differ
diff --git a/tools/crunchstat-summary/tests/logfile_20151210063439.txt.gz.report b/tools/crunchstat-summary/tests/logfile_20151210063439.txt.gz.report
deleted file mode 100644 (file)
index 9ddf5ac..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-category       metric  task_max        task_max_rate   job_total
-cpu    cpus    8.00    -       -
-cpu    sys     0       -       0.00
-cpu    user    0       -       0.00
-cpu    user+sys        0       -       0.00
-mem    cache   8192    -       -
-mem    pgmajfault      0       -       0
-mem    rss     450560  -       -
-mem    swap    0       -       -
-net:eth0       rx      90      -       90
-net:eth0       tx      90      -       90
-net:eth0       tx+rx   180     -       180
-time   elapsed 2       -       3
-# Number of tasks: 2
-# Max CPU time spent by a single task: 0s
-# Max CPU usage in a single interval: 0%
-# Overall CPU usage: 0.00%
-# Max memory used by a single task: 0.00GB
-# Max network traffic in a single task: 0.00GB
-# Max network speed in a single interval: 0.00MB/s
-# Keep cache miss rate 0.00%
-# Keep cache utilization 0.00%
-# Temp disk utilization 0.00%
-#!! 4xphq-8i9sb-v831jm2uq0g2g9x max RSS was 1 MiB -- try reducing runtime_constraints to "min_ram_mb_per_node":972
index 444cfe4ef83258543f5dd8905afbd6a0b9cf4829..5a20d3283f813341cc47e51b5e46231dc92b6829 100644 (file)
@@ -16,7 +16,7 @@ import sys
 import unittest
 
 from crunchstat_summary.command import UTF8Decode
-from crunchstat_summary import logger
+from crunchstat_summary import logger, reader
 
 TESTS_DIR = os.path.dirname(os.path.abspath(__file__))
 
@@ -71,14 +71,13 @@ class HTMLFromFile(TestCase):
 class SummarizeEdgeCases(TestCase):
     def test_error_messages(self):
         logfile = io.open(os.path.join(TESTS_DIR, 'crunchstat_error_messages.txt'), encoding='utf-8')
-        s = crunchstat_summary.summarizer.Summarizer(logfile)
+        s = crunchstat_summary.summarizer.Summarizer(reader.StubReader(logfile))
         s.run()
         self.assertRegex(self.logbuf.getvalue(), r'CPU stats are missing -- possible cluster configuration issue')
         self.assertRegex(self.logbuf.getvalue(), r'memory stats are missing -- possible cluster configuration issue')
         self.assertRegex(self.logbuf.getvalue(), r'network I/O stats are missing -- possible cluster configuration issue')
         self.assertRegex(self.logbuf.getvalue(), r'storage space stats are missing -- possible cluster configuration issue')
 
-
 class SummarizeContainerCommon(TestCase):
     fake_container = {
         'uuid': '9tee4-dz642-lymtndkpy39eibk',
@@ -106,20 +105,19 @@ class SummarizeContainerCommon(TestCase):
     @mock.patch('arvados.api')
     def check_common(self, mock_api, mock_cr):
         items = [ {'items':[self.fake_request]}] + [{'items':[]}] * 100
-        # Index and list mean the same thing, but are used in different places in the
-        # code. It's fragile, but exploit that fact to distinguish the two uses.
-        mock_api().container_requests().index().execute.return_value = {'items': [] }  # child_crs
         mock_api().container_requests().list().execute.side_effect = items # parent request
         mock_api().container_requests().get().execute.return_value = self.fake_request
         mock_api().containers().get().execute.return_value = self.fake_container
         mock_cr().__iter__.return_value = [
             'crunch-run.txt', 'stderr.txt', 'node-info.txt',
             'container.json', 'crunchstat.txt', 'arv-mount.txt']
-        def _open(n):
+        def _open(n, mode):
             if n == "crunchstat.txt":
                 return UTF8Decode(gzip.open(self.logfile))
             elif n == "arv-mount.txt":
                 return UTF8Decode(gzip.open(self.arvmountlog))
+            elif n == "node.json":
+                return io.StringIO("{}")
         mock_cr().open.side_effect = _open
         args = crunchstat_summary.command.ArgumentParser().parse_args(
             self.arg_strings)
@@ -147,184 +145,3 @@ class SummarizeContainerRequest(SummarizeContainerCommon):
         self.check_common()
         self.assertNotRegex(self.logbuf.getvalue(), r'stats are missing')
         self.assertNotRegex(self.logbuf.getvalue(), r'possible cluster configuration issue')
-
-
-class SummarizeJob(TestCase):
-    fake_job_uuid = '4xphq-8i9sb-jq0ekny1xou3zoh'
-    fake_log_id = 'fake-log-collection-id'
-    fake_job = {
-        'uuid': fake_job_uuid,
-        'log': fake_log_id,
-    }
-    logfile = os.path.join(TESTS_DIR, 'logfile_20151204190335.txt.gz')
-
-    @mock.patch('arvados.collection.CollectionReader')
-    @mock.patch('arvados.api')
-    def test_job_report(self, mock_api, mock_cr):
-        mock_api().jobs().get().execute.return_value = self.fake_job
-        mock_cr().__iter__.return_value = ['fake-logfile.txt']
-        mock_cr().open.return_value = UTF8Decode(gzip.open(self.logfile))
-        args = crunchstat_summary.command.ArgumentParser().parse_args(
-            ['--job', self.fake_job_uuid])
-        cmd = crunchstat_summary.command.Command(args)
-        cmd.run()
-        self.diff_known_report(self.logfile, cmd)
-        mock_api().jobs().get.assert_called_with(uuid=self.fake_job_uuid)
-        mock_cr.assert_called_with(self.fake_log_id)
-        mock_cr().open.assert_called_with('fake-logfile.txt')
-
-
-class SummarizePipeline(TestCase):
-    fake_instance = {
-        'uuid': 'zzzzz-d1hrv-i3e77t9z5y8j9cc',
-        'owner_uuid': 'zzzzz-tpzed-xurymjxw79nv3jz',
-        'components': collections.OrderedDict([
-            ['foo', {
-                'job': {
-                    'uuid': 'zzzzz-8i9sb-000000000000000',
-                    'log': 'fake-log-pdh-0',
-                    'runtime_constraints': {
-                        'min_ram_mb_per_node': 900,
-                        'min_cores_per_node': 1,
-                    },
-                },
-            }],
-            ['bar', {
-                'job': {
-                    'uuid': 'zzzzz-8i9sb-000000000000001',
-                    'log': 'fake-log-pdh-1',
-                    'runtime_constraints': {
-                        'min_ram_mb_per_node': 900,
-                        'min_cores_per_node': 1,
-                    },
-                },
-            }],
-            ['no-job-assigned', {}],
-            ['unfinished-job', {
-                'job': {
-                    'uuid': 'zzzzz-8i9sb-xxxxxxxxxxxxxxx',
-                },
-            }],
-            ['baz', {
-                'job': {
-                    'uuid': 'zzzzz-8i9sb-000000000000002',
-                    'log': 'fake-log-pdh-2',
-                    'runtime_constraints': {
-                        'min_ram_mb_per_node': 900,
-                        'min_cores_per_node': 1,
-                    },
-                },
-            }]]),
-    }
-
-    @mock.patch('arvados.collection.CollectionReader')
-    @mock.patch('arvados.api')
-    def test_pipeline(self, mock_api, mock_cr):
-        logfile = os.path.join(TESTS_DIR, 'logfile_20151204190335.txt.gz')
-        mock_api().pipeline_instances().get().execute. \
-            return_value = self.fake_instance
-        mock_cr().__iter__.return_value = ['fake-logfile.txt']
-        mock_cr().open.side_effect = [UTF8Decode(gzip.open(logfile)) for _ in range(3)]
-        args = crunchstat_summary.command.ArgumentParser().parse_args(
-            ['--pipeline-instance', self.fake_instance['uuid']])
-        cmd = crunchstat_summary.command.Command(args)
-        cmd.run()
-
-        with io.open(logfile+'.report', encoding='utf-8') as f:
-            job_report = [line for line in f if not line.startswith('#!! ')]
-        expect = (
-            ['### Summary for foo (zzzzz-8i9sb-000000000000000)\n'] +
-            job_report + ['\n'] +
-            ['### Summary for bar (zzzzz-8i9sb-000000000000001)\n'] +
-            job_report + ['\n'] +
-            ['### Summary for unfinished-job (partial) (zzzzz-8i9sb-xxxxxxxxxxxxxxx)\n',
-             '(no report generated)\n',
-             '\n'] +
-            ['### Summary for baz (zzzzz-8i9sb-000000000000002)\n'] +
-            job_report)
-        self.diff_report(cmd, expect)
-        mock_cr.assert_has_calls(
-            [
-                mock.call('fake-log-pdh-0'),
-                mock.call('fake-log-pdh-1'),
-                mock.call('fake-log-pdh-2'),
-            ], any_order=True)
-        mock_cr().open.assert_called_with('fake-logfile.txt')
-
-
-class SummarizeACRJob(TestCase):
-    fake_job = {
-        'uuid': 'zzzzz-8i9sb-i3e77t9z5y8j9cc',
-        'owner_uuid': 'zzzzz-tpzed-xurymjxw79nv3jz',
-        'components': {
-            'foo': 'zzzzz-8i9sb-000000000000000',
-            'bar': 'zzzzz-8i9sb-000000000000001',
-            'unfinished-job': 'zzzzz-8i9sb-xxxxxxxxxxxxxxx',
-            'baz': 'zzzzz-8i9sb-000000000000002',
-        }
-    }
-    fake_jobs_index = { 'items': [
-        {
-            'uuid': 'zzzzz-8i9sb-000000000000000',
-            'log': 'fake-log-pdh-0',
-            'runtime_constraints': {
-                'min_ram_mb_per_node': 900,
-                'min_cores_per_node': 1,
-            },
-        },
-        {
-            'uuid': 'zzzzz-8i9sb-000000000000001',
-            'log': 'fake-log-pdh-1',
-            'runtime_constraints': {
-                'min_ram_mb_per_node': 900,
-                'min_cores_per_node': 1,
-            },
-        },
-        {
-            'uuid': 'zzzzz-8i9sb-xxxxxxxxxxxxxxx',
-        },
-        {
-            'uuid': 'zzzzz-8i9sb-000000000000002',
-            'log': 'fake-log-pdh-2',
-            'runtime_constraints': {
-                'min_ram_mb_per_node': 900,
-                'min_cores_per_node': 1,
-            },
-        },
-    ]}
-    @mock.patch('arvados.collection.CollectionReader')
-    @mock.patch('arvados.api')
-    def test_acr_job(self, mock_api, mock_cr):
-        logfile = os.path.join(TESTS_DIR, 'logfile_20151204190335.txt.gz')
-        mock_api().jobs().index().execute.return_value = self.fake_jobs_index
-        mock_api().jobs().get().execute.return_value = self.fake_job
-        mock_cr().__iter__.return_value = ['fake-logfile.txt']
-        mock_cr().open.side_effect = [UTF8Decode(gzip.open(logfile)) for _ in range(3)]
-        args = crunchstat_summary.command.ArgumentParser().parse_args(
-            ['--job', self.fake_job['uuid']])
-        cmd = crunchstat_summary.command.Command(args)
-        cmd.run()
-
-        with io.open(logfile+'.report', encoding='utf-8') as f:
-            job_report = [line for line in f if not line.startswith('#!! ')]
-        expect = (
-            ['### Summary for zzzzz-8i9sb-i3e77t9z5y8j9cc (partial) (zzzzz-8i9sb-i3e77t9z5y8j9cc)\n',
-             '(no report generated)\n',
-             '\n'] +
-            ['### Summary for bar (zzzzz-8i9sb-000000000000001)\n'] +
-            job_report + ['\n'] +
-            ['### Summary for baz (zzzzz-8i9sb-000000000000002)\n'] +
-            job_report + ['\n'] +
-            ['### Summary for foo (zzzzz-8i9sb-000000000000000)\n'] +
-            job_report + ['\n'] +
-            ['### Summary for unfinished-job (partial) (zzzzz-8i9sb-xxxxxxxxxxxxxxx)\n',
-             '(no report generated)\n']
-        )
-        self.diff_report(cmd, expect)
-        mock_cr.assert_has_calls(
-            [
-                mock.call('fake-log-pdh-0'),
-                mock.call('fake-log-pdh-1'),
-                mock.call('fake-log-pdh-2'),
-            ], any_order=True)
-        mock_cr().open.assert_called_with('fake-logfile.txt')
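The updated test drives CollectionReader.open through a side_effect function keyed on filename, so each stream in the fake log collection can be served from a different fixture. A hedged sketch of the same mock pattern (fixture paths are placeholders):

    import gzip
    import io
    from unittest import mock

    def fake_open(name, mode):
        # Serve gzipped fixtures for the streams the summarizer reads;
        # anything else (e.g. node.json) gets a harmless empty JSON document.
        if name == 'crunchstat.txt':
            return io.TextIOWrapper(gzip.open('crunchstat_fixture.txt.gz'), encoding='utf-8')
        elif name == 'arv-mount.txt':
            return io.TextIOWrapper(gzip.open('arv-mount_fixture.txt.gz'), encoding='utf-8')
        return io.StringIO('{}')

    collection_reader = mock.MagicMock()
    collection_reader.open.side_effect = fake_open
    # collection_reader.open('crunchstat.txt', 'r') now returns the first fixture.
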
index 4dcb47a8da02e3eea9edddf5e612dff660076147..5bd7136eaa8d060d4d78a83a492917258b887e4e 100644 (file)
@@ -48,6 +48,7 @@ func (s *ServerRequiredSuite) TearDownSuite(c *C) {
 }
 
 func (s *ServerRequiredSuite) SetUpTest(c *C) {
+       logBuffer.Reset()
        logOutput := io.MultiWriter(&logBuffer)
        log.SetOutput(logOutput)
 }
@@ -55,7 +56,7 @@ func (s *ServerRequiredSuite) SetUpTest(c *C) {
 func (s *ServerRequiredSuite) TearDownTest(c *C) {
        arvadostest.StopKeep(2)
        log.SetOutput(os.Stdout)
-       log.Printf("%v", logBuffer.String())
+       c.Log(logBuffer.String())
 }
 
 func (s *DoMainTestSuite) SetUpSuite(c *C) {
@@ -226,7 +227,9 @@ func (s *ServerRequiredSuite) TestBlockCheck_BadSignature(c *C) {
        setupTestData(c)
        err := performKeepBlockCheck(kc, blobSignatureTTL, "badblobsigningkey", []string{TestHash, TestHash2}, false)
        c.Assert(err.Error(), Equals, "Block verification failed for 2 out of 2 blocks with matching prefix")
-       checkErrorLog(c, []string{TestHash, TestHash2}, "Error verifying block", "HTTP 403")
+       // older versions of keepstore return 403 Forbidden for
+       // invalid signatures, newer versions return 400 Bad Request.
+       checkErrorLog(c, []string{TestHash, TestHash2}, "Error verifying block", "HTTP 40[03]")
        // verbose logging not requested
        c.Assert(strings.Contains(logBuffer.String(), "Verifying block 1 of 2"), Equals, false)
 }
index dc5b957125c731d13ae51969b7a47ec765ffcf33..1d2d6b5c1917115e39775bb3066273aa9de62106 100644 (file)
@@ -161,7 +161,7 @@ func testNoCrosstalk(c *C, testData string, kc1, kc2 *keepclient.KeepClient) {
        locator, _, err := kc1.PutB([]byte(testData))
        c.Assert(err, Equals, nil)
 
-       locator = strings.Split(locator, "+")[0]
+       locator = strings.Join(strings.Split(locator, "+")[:2], "+")
        _, _, _, err = kc2.Get(keepclient.SignLocator(locator, kc2.Arvados.ApiToken, time.Now().AddDate(0, 0, 1), blobSignatureTTL, []byte(blobSigningKey)))
        c.Assert(err, NotNil)
        c.Check(err.Error(), Equals, "Block not found")
@@ -330,7 +330,7 @@ func (s *ServerRequiredSuite) TestErrorDuringRsync_ErrorGettingBlockFromSrc(c *C
 
        err := performKeepRsync(kcSrc, kcDst, blobSignatureTTL, blobSigningKey, "")
        c.Assert(err, NotNil)
-       c.Check(err.Error(), Matches, ".*HTTP 403 \"Forbidden\".*")
+       c.Check(err.Error(), Matches, ".*HTTP 400 \"invalid signature\".*")
 }
 
 // Test rsync with error during Put to src.
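Keep locators are "+"-separated: an MD5 hash, a size, and optional hints such as a permission signature. The test now keeps the first two fields (hash+size) before re-signing, instead of stripping everything after the hash. A small Python sketch of that split, using a made-up locator:

    # Hypothetical locator, shown only to illustrate the "+"-separated fields.
    locator = 'acbd18db4cc2f85cedef654fccc4a4d8+3+A0123456789abcdef@65f00000'

    fields = locator.split('+')
    bare_hash = fields[0]                 # hash only
    hash_and_size = '+'.join(fields[:2])  # hash plus size hint
    print(bare_hash, hash_and_size)
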
index 275c2c78ab7ac81bfa106a3da0d8949838c366d6..271ab502908578c70a7373787cb0d29213a576f5 100644 (file)
@@ -77,13 +77,6 @@ arvados:
       user: {{ database_user }}
       extra_conn_params:
         client_encoding: UTF8
-      # Centos7 does not enable SSL by default, so we disable
-      # it here just for testing of the formula purposes only.
-      # You should not do this in production, and should
-      # configure Postgres certificates correctly
-      {%- if grains.os_family in ('RedHat',) %}
-        sslmode: disable
-      {%- endif %}
 
     tls:
       # certificate: ''
index edb961ebaaeccca0899d0c2633ca7c0957369805..ade544764a9e8aa35269bc33669fa4a0833ebb13 100644 (file)
@@ -5,25 +5,9 @@
 
 ### POSTGRESQL
 postgres:
-  # Centos-7's postgres package is too old, so we need to force using upstream's
-  # This is not required in Debian's family as they already ship with PG +11
-  {%- if salt['grains.get']('os_family') == 'RedHat' %}
-  use_upstream_repo: true
-  version: '12'
-
-  pkgs_deps:
-    - libicu
-    - libxslt
-    - systemd-sysv
-
-  pkgs_extra:
-    - postgresql12-contrib
-
-  {%- else %}
   use_upstream_repo: false
   pkgs_extra:
     - postgresql-contrib
-  {%- endif %}
   postgresconf: |-
     listen_addresses = '*'  # listen on all interfaces
     #ssl = on
index 6518646a74bbd40a19199f15df9156c9d9ce4e28..a8b487e29ad239080d855778b5a4dc1ab6a211a3 100644 (file)
@@ -46,24 +46,11 @@ extra_snakeoil_certs_dependencies_pkg_installed:
       - openssl
       - ca-certificates
 
-# Remove the RANDFILE parameter in openssl.cnf as it makes openssl fail in Ubuntu 18.04
-# Saving and restoring the rng state is not necessary anymore in the openssl 1.1.1
-# random generator, cf
-#   https://github.com/openssl/openssl/issues/7754
-#
-extra_snakeoil_certs_file_comment_etc_openssl_conf:
-  file.comment:
-    - name: /etc/ssl/openssl.cnf
-    - regex: ^RANDFILE.*
-    - onlyif: grep -q ^RANDFILE /etc/ssl/openssl.cnf
-    - require_in:
-      - cmd: extra_snakeoil_certs_arvados_snakeoil_ca_cmd_run
-
 extra_snakeoil_certs_arvados_snakeoil_ca_cmd_run:
   # Taken from https://github.com/arvados/arvados/blob/master/tools/arvbox/lib/arvbox/docker/service/certificate/run
   cmd.run:
     - name: |
-        # These dirs are not to CentOS-ish, but this is a helper script
+        # These dirs are not too CentOS-ish, but this is a helper script
         # and they should be enough
         /bin/bash -c "mkdir -p /etc/ssl/certs/ /etc/ssl/private/ && \
         openssl req \
index f83984b01a93c0d6851470f6b8740370f23f7a63..9e3a293110afaa76c0ad3d9ca27300174747a287 100644 (file)
@@ -77,13 +77,6 @@ arvados:
       user: {{ database_user }}
       extra_conn_params:
         client_encoding: UTF8
-      # Centos7 does not enable SSL by default, so we disable
-      # it here just for testing of the formula purposes only.
-      # You should not do this in production, and should
-      # configure Postgres certificates correctly
-      {%- if grains.os_family in ('RedHat',) %}
-        sslmode: disable
-      {%- endif %}
 
     tls:
       # certificate: ''
index 14452a990541bf47fee379a33345895f6652cbd8..82a4f7120a68d41f8a1a188cbec46c5743573f8c 100644 (file)
@@ -5,25 +5,9 @@
 
 ### POSTGRESQL
 postgres:
-  # Centos-7's postgres package is too old, so we need to force using upstream's
-  # This is not required in Debian's family as they already ship with PG +11
-  {%- if salt['grains.get']('os_family') == 'RedHat' %}
-  use_upstream_repo: true
-  version: '12'
-
-  pkgs_deps:
-    - libicu
-    - libxslt
-    - systemd-sysv
-
-  pkgs_extra:
-    - postgresql12-contrib
-
-  {%- else %}
   use_upstream_repo: false
   pkgs_extra:
     - postgresql-contrib
-  {%- endif %}
   postgresconf: |-
     listen_addresses = '*'  # listen on all interfaces
     # If you want to enable communications' encryption to the DB server,
index 2cee5c9b49bd73750ebf07646d4113ff0a67a37a..df8dcc7f3096ddcb4205f98d4ce8bf46018276b4 100644 (file)
@@ -43,19 +43,6 @@ extra_snakeoil_certs_dependencies_pkg_installed:
       - openssl
       - ca-certificates
 
-# Remove the RANDFILE parameter in openssl.cnf as it makes openssl fail in Ubuntu 18.04
-# Saving and restoring the rng state is not necessary anymore in the openssl 1.1.1
-# random generator, cf
-#   https://github.com/openssl/openssl/issues/7754
-#
-extra_snakeoil_certs_file_comment_etc_openssl_conf:
-  file.comment:
-    - name: /etc/ssl/openssl.cnf
-    - regex: ^RANDFILE.*
-    - onlyif: grep -q ^RANDFILE /etc/ssl/openssl.cnf
-    - require_in:
-      - cmd: extra_snakeoil_certs_arvados_snakeoil_ca_cmd_run
-
 extra_snakeoil_certs_arvados_snakeoil_ca_cmd_run:
   # Taken from https://github.com/arvados/arvados/blob/master/tools/arvbox/lib/arvbox/docker/service/certificate/run
   cmd.run:
index eb09dddf2c9fd69101f3e73f39945208b6a21e69..bb95b2702aca0771cb2faaed9d44f5792c80f99d 100755 (executable)
@@ -364,24 +364,25 @@ if [ "${DUMP_CONFIG}" = "yes" ]; then
 else
   # Install a few dependency packages
   # First, let's figure out the OS we're working on
-  OS_ID=$(grep ^ID= /etc/os-release |cut -f 2 -d=  |cut -f 2 -d \")
-  echo "Detected distro: ${OS_ID}"
-
-  case ${OS_ID} in
-    "centos")
-      echo "WARNING! Disabling SELinux, see https://dev.arvados.org/issues/18019"
-      sed -i 's/SELINUX=enforcing/SELINUX=permissive/g' /etc/sysconfig/selinux
-      setenforce permissive
-      yum install -y  curl git jq
-      ;;
-    "debian"|"ubuntu")
-      # Wait 2 minutes for any apt locks to clear
-      # This option is supported from apt 1.9.1 and ignored in older apt versions.
-      # Cf. https://blog.sinjakli.co.uk/2021/10/25/waiting-for-apt-locks-without-the-hacky-bash-scripts/
-      DEBIAN_FRONTEND=noninteractive apt -o DPkg::Lock::Timeout=120 update
-      DEBIAN_FRONTEND=noninteractive apt install -y curl git jq
-      ;;
-  esac
+  OS_IDS="$(. /etc/os-release && echo "${ID:-} ${ID_LIKE:-}")"
+  echo "Detected distro families: $OS_IDS"
+
+  for OS_ID in $OS_IDS; do
+    case "$OS_ID" in
+      rhel)
+        echo "WARNING! Disabling SELinux, see https://dev.arvados.org/issues/18019"
+        sed -i 's/SELINUX=enforcing/SELINUX=permissive/g' /etc/sysconfig/selinux
+        setenforce permissive
+        yum install -y  curl git jq
+        break
+        ;;
+      debian)
+        DEBIAN_FRONTEND=noninteractive apt -o DPkg::Lock::Timeout=120 update
+        DEBIAN_FRONTEND=noninteractive apt install -y curl git jq
+        break
+        ;;
+    esac
+  done
 
   if which salt-call; then
     echo "Salt already installed"
index 41f8f66a079a0cd7025165d6381c9945a6f0c3bd..4b7ec16b934881540e45081e77f9c67ba01519c5 100755 (executable)
@@ -33,5 +33,6 @@ setup(name='arvados-user-activity',
       install_requires=[
           'arvados-python-client >= 2.2.0.dev20201118185221',
       ],
+      python_requires="~=3.8",
       zip_safe=True,
 )